v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   4 *		operating system.  INET is implemented using the  BSD Socket
   5 *		interface as the means of communication with the user level.
   6 *
   7 *		The Internet Protocol (IP) output module.
   8 *
   9 * Authors:	Ross Biro
  10 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  11 *		Donald Becker, <becker@super.org>
  12 *		Alan Cox, <Alan.Cox@linux.org>
  13 *		Richard Underwood
  14 *		Stefan Becker, <stefanb@yello.ping.de>
  15 *		Jorge Cwik, <jorge@laser.satlink.net>
  16 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
  17 *		Hirokazu Takahashi, <taka@valinux.co.jp>
  18 *
  19 *	See ip_input.c for original log
  20 *
  21 *	Fixes:
  22 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
  23 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
  24 *		Bradford Johnson:	Fix faulty handling of some frames when
  25 *					no route is found.
  26 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
  27 *					(in case if packet not accepted by
  28 *					output firewall rules)
  29 *		Mike McLagan	:	Routing by source
  30 *		Alexey Kuznetsov:	use new route cache
  31 *		Andi Kleen:		Fix broken PMTU recovery and remove
  32 *					some redundant tests.
  33 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
  34 *		Andi Kleen	: 	Replace ip_reply with ip_send_reply.
  35 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
  36 *					for decreased register pressure on x86
  37 *					and more readability.
  38 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
  39 *					silently drop skb instead of failing with -EPERM.
  40 *		Detlev Wengorz	:	Copy protocol for fragments.
  41 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
  42 *					datagrams.
  43 *		Hirokazu Takahashi:	sendfile() on UDP works now.
  44 */
  45
  46#include <linux/uaccess.h>
  47#include <linux/module.h>
  48#include <linux/types.h>
  49#include <linux/kernel.h>
  50#include <linux/mm.h>
  51#include <linux/string.h>
  52#include <linux/errno.h>
  53#include <linux/highmem.h>
  54#include <linux/slab.h>
  55
  56#include <linux/socket.h>
  57#include <linux/sockios.h>
  58#include <linux/in.h>
  59#include <linux/inet.h>
  60#include <linux/netdevice.h>
  61#include <linux/etherdevice.h>
  62#include <linux/proc_fs.h>
  63#include <linux/stat.h>
  64#include <linux/init.h>
  65
  66#include <net/snmp.h>
  67#include <net/ip.h>
  68#include <net/protocol.h>
  69#include <net/route.h>
  70#include <net/xfrm.h>
  71#include <linux/skbuff.h>
  72#include <net/sock.h>
  73#include <net/arp.h>
  74#include <net/icmp.h>
  75#include <net/checksum.h>
  76#include <net/gso.h>
  77#include <net/inetpeer.h>
  78#include <net/inet_ecn.h>
  79#include <net/lwtunnel.h>
  80#include <net/inet_dscp.h>
  81#include <linux/bpf-cgroup.h>
  82#include <linux/igmp.h>
  83#include <linux/netfilter_ipv4.h>
  84#include <linux/netfilter_bridge.h>
  85#include <linux/netlink.h>
  86#include <linux/tcp.h>
  87
  88static int
  89ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
  90	    unsigned int mtu,
  91	    int (*output)(struct net *, struct sock *, struct sk_buff *));
  92
  93/* Generate a checksum for an outgoing IP datagram. */
  94void ip_send_check(struct iphdr *iph)
  95{
  96	iph->check = 0;
  97	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
  98}
  99EXPORT_SYMBOL(ip_send_check);
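/* Background note: the IPv4 header checksum (RFC 1071) covers the header
 * only and must be computed with the check field zeroed, which is why
 * ip_send_check() clears it first.  The same primitive validates headers
 * on receive: ip_fast_csum() over a header whose checksum is already
 * correct folds to 0, roughly as ip_rcv() does (sketch):
 *
 *	if (ip_fast_csum((unsigned char *)iph, iph->ihl) != 0)
 *		goto csum_error;
 */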
 100
 101int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 102{
 103	struct iphdr *iph = ip_hdr(skb);
 104
 105	IP_INC_STATS(net, IPSTATS_MIB_OUTREQUESTS);
 106
 107	iph_set_totlen(iph, skb->len);
 108	ip_send_check(iph);
 109
  110	/* If the egress device is enslaved to an L3 master device, pass the
  111	 * skb to its handler for processing.
  112	 */
 113	skb = l3mdev_ip_out(sk, skb);
 114	if (unlikely(!skb))
 115		return 0;
 116
 117	skb->protocol = htons(ETH_P_IP);
 118
 119	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
 120		       net, sk, skb, NULL, skb_dst(skb)->dev,
 121		       dst_output);
 122}
 123
 124int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 125{
 126	int err;
 127
 128	err = __ip_local_out(net, sk, skb);
 129	if (likely(err == 1))
 130		err = dst_output(net, sk, skb);
 131
 132	return err;
 133}
 134EXPORT_SYMBOL_GPL(ip_local_out);
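/* Return-value convention used above: nf_hook() returns 1 when the
 * NF_INET_LOCAL_OUT hooks accepted the packet and the caller must invoke
 * the okfn itself, 0 when a hook consumed the packet (NF_QUEUE/NF_STOLEN),
 * and a negative errno on drop.  Hence ip_local_out() calls dst_output()
 * only when __ip_local_out() returned exactly 1.
 */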
 135
 136static inline int ip_select_ttl(const struct inet_sock *inet,
 137				const struct dst_entry *dst)
 138{
 139	int ttl = READ_ONCE(inet->uc_ttl);
 140
 141	if (ttl < 0)
 142		ttl = ip4_dst_hoplimit(dst);
 143	return ttl;
 144}
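/* uc_ttl stays at -1 unless the application chose an explicit unicast TTL,
 * e.g. from userspace (sketch):
 *
 *	int ttl = 5;
 *	setsockopt(fd, IPPROTO_IP, IP_TTL, &ttl, sizeof(ttl));
 *
 * Otherwise ip4_dst_hoplimit() falls back to the route's hoplimit metric,
 * or to the net.ipv4.ip_default_ttl sysctl (64 by default).
 */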
 145
 146/*
 147 *		Add an ip header to a skbuff and send it out.
 148 *
 149 */
 150int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
 151			  __be32 saddr, __be32 daddr, struct ip_options_rcu *opt,
 152			  u8 tos)
 153{
 154	const struct inet_sock *inet = inet_sk(sk);
 155	struct rtable *rt = skb_rtable(skb);
 156	struct net *net = sock_net(sk);
 157	struct iphdr *iph;
 158
 159	/* Build the IP header. */
 160	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
 161	skb_reset_network_header(skb);
 162	iph = ip_hdr(skb);
 163	iph->version  = 4;
 164	iph->ihl      = 5;
 165	iph->tos      = tos;
 166	iph->ttl      = ip_select_ttl(inet, &rt->dst);
 167	iph->daddr    = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
 168	iph->saddr    = saddr;
 169	iph->protocol = sk->sk_protocol;
  170	/* Do not bother generating IPID for small packets (e.g. SYNACK) */
 171	if (skb->len <= IPV4_MIN_MTU || ip_dont_fragment(sk, &rt->dst)) {
 172		iph->frag_off = htons(IP_DF);
 173		iph->id = 0;
 174	} else {
 175		iph->frag_off = 0;
 176		/* TCP packets here are SYNACK with fat IPv4/TCP options.
 177		 * Avoid using the hashed IP ident generator.
 178		 */
 179		if (sk->sk_protocol == IPPROTO_TCP)
 180			iph->id = (__force __be16)get_random_u16();
 181		else
 182			__ip_select_ident(net, iph, 1);
 183	}
 184
 185	if (opt && opt->opt.optlen) {
 186		iph->ihl += opt->opt.optlen>>2;
 187		ip_options_build(skb, &opt->opt, daddr, rt);
 188	}
 189
 190	skb->priority = READ_ONCE(sk->sk_priority);
 191	if (!skb->mark)
 192		skb->mark = READ_ONCE(sk->sk_mark);
 193
 194	/* Send it out. */
 195	return ip_local_out(net, skb->sk, skb);
 196}
 197EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
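/* A note on the IP ID choice above: with DF set the datagram is "atomic"
 * and will never be reassembled, so RFC 6864 permits any ID value and 0 is
 * the cheapest.  For the remaining TCP case (large SYNACKs), a plain
 * random ID avoids touching the hashed per-destination ident generator.
 */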
 198
 199static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
 200{
 201	struct dst_entry *dst = skb_dst(skb);
 202	struct rtable *rt = dst_rtable(dst);
 203	struct net_device *dev = dst->dev;
 204	unsigned int hh_len = LL_RESERVED_SPACE(dev);
 205	struct neighbour *neigh;
 206	bool is_v6gw = false;
 207
 208	if (rt->rt_type == RTN_MULTICAST) {
 209		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len);
 210	} else if (rt->rt_type == RTN_BROADCAST)
 211		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len);
 212
 213	/* OUTOCTETS should be counted after fragment */
 214	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);
 215
 216	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
 217		skb = skb_expand_head(skb, hh_len);
 218		if (!skb)
 219			return -ENOMEM;
 220	}
 221
 222	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
 223		int res = lwtunnel_xmit(skb);
 224
 225		if (res != LWTUNNEL_XMIT_CONTINUE)
 226			return res;
 227	}
 228
 229	rcu_read_lock();
 230	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
 231	if (!IS_ERR(neigh)) {
 232		int res;
 233
 234		sock_confirm_neigh(skb, neigh);
 235		/* if crossing protocols, can not use the cached header */
 236		res = neigh_output(neigh, skb, is_v6gw);
 237		rcu_read_unlock();
 238		return res;
 239	}
 240	rcu_read_unlock();
 241
 242	net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
 243			    __func__);
 244	kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_CREATEFAIL);
 245	return PTR_ERR(neigh);
 246}
 247
 248static int ip_finish_output_gso(struct net *net, struct sock *sk,
 249				struct sk_buff *skb, unsigned int mtu)
 250{
 251	struct sk_buff *segs, *nskb;
 252	netdev_features_t features;
 253	int ret = 0;
 254
 255	/* common case: seglen is <= mtu
 256	 */
 257	if (skb_gso_validate_network_len(skb, mtu))
 258		return ip_finish_output2(net, sk, skb);
 259
 260	/* Slowpath -  GSO segment length exceeds the egress MTU.
 261	 *
 262	 * This can happen in several cases:
 263	 *  - Forwarding of a TCP GRO skb, when DF flag is not set.
 264	 *  - Forwarding of an skb that arrived on a virtualization interface
 265	 *    (virtio-net/vhost/tap) with TSO/GSO size set by other network
 266	 *    stack.
 267	 *  - Local GSO skb transmitted on an NETIF_F_TSO tunnel stacked over an
 268	 *    interface with a smaller MTU.
 269	 *  - Arriving GRO skb (or GSO skb in a virtualized environment) that is
 270	 *    bridged to a NETIF_F_TSO tunnel stacked over an interface with an
 271	 *    insufficient MTU.
 272	 */
 273	features = netif_skb_features(skb);
 274	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_GSO_CB_OFFSET);
 275	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
 276	if (IS_ERR_OR_NULL(segs)) {
 277		kfree_skb(skb);
 278		return -ENOMEM;
 279	}
 280
 281	consume_skb(skb);
 282
 283	skb_list_walk_safe(segs, segs, nskb) {
 284		int err;
 285
 286		skb_mark_not_on_list(segs);
 287		err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);
 288
 289		if (err && ret == 0)
 290			ret = err;
 291	}
 292
 293	return ret;
 294}
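/* Each segment produced above is an independent packet no larger than
 * gso_size plus headers, so ip_fragment() either sends it as-is or, if it
 * still exceeds the MTU, applies the usual DF/fragmentation handling.
 * Note that the walk continues after an error: ret keeps the first
 * failure while the remaining segments are still sent or freed.
 */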
 295
 296static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 297{
 298	unsigned int mtu;
 299
 300#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
 301	/* Policy lookup after SNAT yielded a new policy */
 302	if (skb_dst(skb)->xfrm) {
 303		IPCB(skb)->flags |= IPSKB_REROUTED;
 304		return dst_output(net, sk, skb);
 305	}
 306#endif
 307	mtu = ip_skb_dst_mtu(sk, skb);
 308	if (skb_is_gso(skb))
 309		return ip_finish_output_gso(net, sk, skb, mtu);
 310
 311	if (skb->len > mtu || IPCB(skb)->frag_max_size)
 312		return ip_fragment(net, sk, skb, mtu, ip_finish_output2);
 313
 314	return ip_finish_output2(net, sk, skb);
 315}
 316
 317static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 318{
 319	int ret;
 320
 321	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
 322	switch (ret) {
 323	case NET_XMIT_SUCCESS:
 324		return __ip_finish_output(net, sk, skb);
 325	case NET_XMIT_CN:
 326		return __ip_finish_output(net, sk, skb) ? : ret;
 327	default:
 328		kfree_skb_reason(skb, SKB_DROP_REASON_BPF_CGROUP_EGRESS);
 329		return ret;
 330	}
 331}
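/* BPF_CGROUP_RUN_PROG_INET_EGRESS() yields NET_XMIT_SUCCESS to allow the
 * packet, NET_XMIT_CN to allow it while signalling congestion, and any
 * other verdict is treated as a drop.  The GNU "a ?: b" in the CN case
 * propagates a real transmit error when there is one and otherwise
 * reports NET_XMIT_CN back to the caller.
 */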
 332
 333static int ip_mc_finish_output(struct net *net, struct sock *sk,
 334			       struct sk_buff *skb)
 335{
 336	struct rtable *new_rt;
 337	bool do_cn = false;
 338	int ret, err;
 339
 340	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
 341	switch (ret) {
 342	case NET_XMIT_CN:
 343		do_cn = true;
 344		fallthrough;
 345	case NET_XMIT_SUCCESS:
 346		break;
 347	default:
 348		kfree_skb_reason(skb, SKB_DROP_REASON_BPF_CGROUP_EGRESS);
 349		return ret;
 350	}
 351
 352	/* Reset rt_iif so that inet_iif() will return skb->skb_iif. Setting
 353	 * this to non-zero causes ipi_ifindex in in_pktinfo to be overwritten,
 354	 * see ipv4_pktinfo_prepare().
 355	 */
 356	new_rt = rt_dst_clone(net->loopback_dev, skb_rtable(skb));
 357	if (new_rt) {
 358		new_rt->rt_iif = 0;
 359		skb_dst_drop(skb);
 360		skb_dst_set(skb, &new_rt->dst);
 361	}
 362
 363	err = dev_loopback_xmit(net, sk, skb);
 364	return (do_cn && err) ? ret : err;
 365}
 366
 367int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 368{
 369	struct rtable *rt = skb_rtable(skb);
 370	struct net_device *dev = rt->dst.dev;
 371
 372	/*
 373	 *	If the indicated interface is up and running, send the packet.
 374	 */
 375	skb->dev = dev;
 376	skb->protocol = htons(ETH_P_IP);
 377
 378	/*
 379	 *	Multicasts are looped back for other local users
 380	 */
 381
 382	if (rt->rt_flags&RTCF_MULTICAST) {
 383		if (sk_mc_loop(sk)
 384#ifdef CONFIG_IP_MROUTE
  385		/* Small optimization: do not loop back non-local frames
  386		   that were returned after forwarding; they will be dropped
  387		   by ip_mr_input in any case.
  388		   Note that local frames are looped back to be delivered
  389		   to local recipients.
  390
  391		   This check is duplicated in ip_mr_input at the moment.
  392		 */
 393		    &&
 394		    ((rt->rt_flags & RTCF_LOCAL) ||
 395		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
 396#endif
 397		   ) {
 398			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
 399			if (newskb)
 400				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
 401					net, sk, newskb, NULL, newskb->dev,
 402					ip_mc_finish_output);
 403		}
 404
 405		/* Multicasts with ttl 0 must not go beyond the host */
 406
 407		if (ip_hdr(skb)->ttl == 0) {
 408			kfree_skb(skb);
 409			return 0;
 410		}
 411	}
 412
 413	if (rt->rt_flags&RTCF_BROADCAST) {
 414		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
 415		if (newskb)
 416			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
 417				net, sk, newskb, NULL, newskb->dev,
 418				ip_mc_finish_output);
 419	}
 420
 421	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
 422			    net, sk, skb, NULL, skb->dev,
 423			    ip_finish_output,
 424			    !(IPCB(skb)->flags & IPSKB_REROUTED));
 425}
 426
 427int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 428{
 429	struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;
 430
 431	skb->dev = dev;
 432	skb->protocol = htons(ETH_P_IP);
 433
 434	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
 435			    net, sk, skb, indev, dev,
 436			    ip_finish_output,
 437			    !(IPCB(skb)->flags & IPSKB_REROUTED));
 438}
 439EXPORT_SYMBOL(ip_output);
 440
 441/*
 442 * copy saddr and daddr, possibly using 64bit load/stores
 443 * Equivalent to :
 444 *   iph->saddr = fl4->saddr;
 445 *   iph->daddr = fl4->daddr;
 446 */
 447static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
 448{
 449	BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
 450		     offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
 451
 452	iph->saddr = fl4->saddr;
 453	iph->daddr = fl4->daddr;
 454}
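/* The BUILD_BUG_ON() pins the layout assumption: saddr and daddr must be
 * adjacent in struct flowi4, so the compiler is free to merge the two
 * 32-bit stores into a single 64-bit store on capable architectures.
 */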
 455
 456/* Note: skb->sk can be different from sk, in case of tunnels */
 457int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
 458		    __u8 tos)
 459{
 460	struct inet_sock *inet = inet_sk(sk);
 461	struct net *net = sock_net(sk);
 462	struct ip_options_rcu *inet_opt;
 463	struct flowi4 *fl4;
 464	struct rtable *rt;
 465	struct iphdr *iph;
 466	int res;
 467
  468	/* Skip all of this if the packet is already routed,
  469	 * e.g. by something like SCTP.
  470	 */
 471	rcu_read_lock();
 472	inet_opt = rcu_dereference(inet->inet_opt);
 473	fl4 = &fl->u.ip4;
 474	rt = skb_rtable(skb);
 475	if (rt)
 476		goto packet_routed;
 477
 478	/* Make sure we can route this packet. */
 479	rt = dst_rtable(__sk_dst_check(sk, 0));
 480	if (!rt) {
 481		__be32 daddr;
 482
 483		/* Use correct destination address if we have options. */
 484		daddr = inet->inet_daddr;
 485		if (inet_opt && inet_opt->opt.srr)
 486			daddr = inet_opt->opt.faddr;
 487
  488		/* If this fails, the transport layer's retransmit mechanism
  489		 * will keep trying until the route appears or the connection
  490		 * times out.
  491		 */
 492		rt = ip_route_output_ports(net, fl4, sk,
 493					   daddr, inet->inet_saddr,
 494					   inet->inet_dport,
 495					   inet->inet_sport,
 496					   sk->sk_protocol,
 497					   tos & INET_DSCP_MASK,
 498					   sk->sk_bound_dev_if);
 499		if (IS_ERR(rt))
 500			goto no_route;
 501		sk_setup_caps(sk, &rt->dst);
 502	}
 503	skb_dst_set_noref(skb, &rt->dst);
 504
 505packet_routed:
 506	if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
 507		goto no_route;
 508
 509	/* OK, we know where to send it, allocate and build IP header. */
 510	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
 511	skb_reset_network_header(skb);
 512	iph = ip_hdr(skb);
 513	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (tos & 0xff));
 514	if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
 515		iph->frag_off = htons(IP_DF);
 516	else
 517		iph->frag_off = 0;
 518	iph->ttl      = ip_select_ttl(inet, &rt->dst);
 519	iph->protocol = sk->sk_protocol;
 520	ip_copy_addrs(iph, fl4);
 521
  522	/* The transport layer has already set skb->h.foo itself. */
 523
 524	if (inet_opt && inet_opt->opt.optlen) {
 525		iph->ihl += inet_opt->opt.optlen >> 2;
 526		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt);
 527	}
 528
 529	ip_select_ident_segs(net, skb, sk,
 530			     skb_shinfo(skb)->gso_segs ?: 1);
 531
 532	/* TODO : should we use skb->sk here instead of sk ? */
 533	skb->priority = READ_ONCE(sk->sk_priority);
 534	skb->mark = READ_ONCE(sk->sk_mark);
 535
 536	res = ip_local_out(net, sk, skb);
 537	rcu_read_unlock();
 538	return res;
 539
 540no_route:
 541	rcu_read_unlock();
 542	IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
 543	kfree_skb_reason(skb, SKB_DROP_REASON_IP_OUTNOROUTES);
 544	return -EHOSTUNREACH;
 545}
 546EXPORT_SYMBOL(__ip_queue_xmit);
 547
 548int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
 549{
 550	return __ip_queue_xmit(sk, skb, fl, READ_ONCE(inet_sk(sk)->tos));
 551}
 552EXPORT_SYMBOL(ip_queue_xmit);
 553
 554static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
 555{
 556	to->pkt_type = from->pkt_type;
 557	to->priority = from->priority;
 558	to->protocol = from->protocol;
 559	to->skb_iif = from->skb_iif;
 560	skb_dst_drop(to);
 561	skb_dst_copy(to, from);
 562	to->dev = from->dev;
 563	to->mark = from->mark;
 564
 565	skb_copy_hash(to, from);
 566
 567#ifdef CONFIG_NET_SCHED
 568	to->tc_index = from->tc_index;
 569#endif
 570	nf_copy(to, from);
 571	skb_ext_copy(to, from);
 572#if IS_ENABLED(CONFIG_IP_VS)
 573	to->ipvs_property = from->ipvs_property;
 574#endif
 575	skb_copy_secmark(to, from);
 576}
 577
 578static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 579		       unsigned int mtu,
 580		       int (*output)(struct net *, struct sock *, struct sk_buff *))
 581{
 582	struct iphdr *iph = ip_hdr(skb);
 583
 584	if ((iph->frag_off & htons(IP_DF)) == 0)
 585		return ip_do_fragment(net, sk, skb, output);
 586
 587	if (unlikely(!skb->ignore_df ||
 588		     (IPCB(skb)->frag_max_size &&
 589		      IPCB(skb)->frag_max_size > mtu))) {
 590		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
 591		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
 592			  htonl(mtu));
 593		kfree_skb(skb);
 594		return -EMSGSIZE;
 595	}
 596
 597	return ip_do_fragment(net, sk, skb, output);
 598}
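/* This is the sending half of path MTU discovery: a too-big packet with
 * DF set is not fragmented but answered with an ICMP "fragmentation
 * needed" error, and htonl(mtu) places the next-hop MTU in the ICMP
 * header so the sender can shrink its path MTU estimate (RFC 1191).
 */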
 599
 600void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
 601		      unsigned int hlen, struct ip_fraglist_iter *iter)
 602{
 603	unsigned int first_len = skb_pagelen(skb);
 604
 605	iter->frag = skb_shinfo(skb)->frag_list;
 606	skb_frag_list_init(skb);
 607
 608	iter->offset = 0;
 609	iter->iph = iph;
 610	iter->hlen = hlen;
 611
 612	skb->data_len = first_len - skb_headlen(skb);
 613	skb->len = first_len;
 614	iph->tot_len = htons(first_len);
 615	iph->frag_off = htons(IP_MF);
 616	ip_send_check(iph);
 617}
 618EXPORT_SYMBOL(ip_fraglist_init);
 619
 620void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
 621{
 622	unsigned int hlen = iter->hlen;
 623	struct iphdr *iph = iter->iph;
 624	struct sk_buff *frag;
 625
 626	frag = iter->frag;
 627	frag->ip_summed = CHECKSUM_NONE;
 628	skb_reset_transport_header(frag);
 629	__skb_push(frag, hlen);
 630	skb_reset_network_header(frag);
 631	memcpy(skb_network_header(frag), iph, hlen);
 632	iter->iph = ip_hdr(frag);
 633	iph = iter->iph;
 634	iph->tot_len = htons(frag->len);
 635	ip_copy_metadata(frag, skb);
 636	iter->offset += skb->len - hlen;
 637	iph->frag_off = htons(iter->offset >> 3);
 638	if (frag->next)
 639		iph->frag_off |= htons(IP_MF);
 640	/* Ready, complete checksum */
 641	ip_send_check(iph);
 642}
 643EXPORT_SYMBOL(ip_fraglist_prepare);
 644
 645void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
 646		  unsigned int ll_rs, unsigned int mtu, bool DF,
 647		  struct ip_frag_state *state)
 648{
 649	struct iphdr *iph = ip_hdr(skb);
 650
 651	state->DF = DF;
 652	state->hlen = hlen;
 653	state->ll_rs = ll_rs;
 654	state->mtu = mtu;
 655
 656	state->left = skb->len - hlen;	/* Space per frame */
 657	state->ptr = hlen;		/* Where to start from */
 658
 659	state->offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
 660	state->not_last_frag = iph->frag_off & htons(IP_MF);
 661}
 662EXPORT_SYMBOL(ip_frag_init);
 663
 664static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
 665			 bool first_frag)
 666{
 667	/* Copy the flags to each fragment. */
 668	IPCB(to)->flags = IPCB(from)->flags;
 669
 670	/* ANK: dirty, but effective trick. Upgrade options only if
 671	 * the segment to be fragmented was THE FIRST (otherwise,
 672	 * options are already fixed) and make it ONCE
 673	 * on the initial skb, so that all the following fragments
 674	 * will inherit fixed options.
 675	 */
 676	if (first_frag)
 677		ip_options_fragment(from);
 678}
 679
 680struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state)
 681{
 682	unsigned int len = state->left;
 683	struct sk_buff *skb2;
 684	struct iphdr *iph;
 685
 686	/* IF: it doesn't fit, use 'mtu' - the data space left */
 687	if (len > state->mtu)
 688		len = state->mtu;
  689	/* IF: we are not sending up to and including the end of the packet,
  690	   then align the start of the next fragment on an eight-byte boundary */
 691	if (len < state->left)	{
 692		len &= ~7;
 693	}
 694
 695	/* Allocate buffer */
 696	skb2 = alloc_skb(len + state->hlen + state->ll_rs, GFP_ATOMIC);
 697	if (!skb2)
 698		return ERR_PTR(-ENOMEM);
 699
 700	/*
 701	 *	Set up data on packet
 702	 */
 703
 704	ip_copy_metadata(skb2, skb);
 705	skb_reserve(skb2, state->ll_rs);
 706	skb_put(skb2, len + state->hlen);
 707	skb_reset_network_header(skb2);
 708	skb2->transport_header = skb2->network_header + state->hlen;
 709
 710	/*
 711	 *	Charge the memory for the fragment to any owner
 712	 *	it might possess
 713	 */
 714
 715	if (skb->sk)
 716		skb_set_owner_w(skb2, skb->sk);
 717
 718	/*
 719	 *	Copy the packet header into the new buffer.
 720	 */
 721
 722	skb_copy_from_linear_data(skb, skb_network_header(skb2), state->hlen);
 723
 724	/*
 725	 *	Copy a block of the IP datagram.
 726	 */
 727	if (skb_copy_bits(skb, state->ptr, skb_transport_header(skb2), len))
 728		BUG();
 729	state->left -= len;
 730
 731	/*
 732	 *	Fill in the new header fields.
 733	 */
 734	iph = ip_hdr(skb2);
 735	iph->frag_off = htons((state->offset >> 3));
 736	if (state->DF)
 737		iph->frag_off |= htons(IP_DF);
 738
  739	/*
  740	 *	Added AC : If we are fragmenting a fragment that's not the
  741	 *		   last fragment then keep the MF bit set on each fragment
  742	 */
 743	if (state->left > 0 || state->not_last_frag)
 744		iph->frag_off |= htons(IP_MF);
 745	state->ptr += len;
 746	state->offset += len;
 747
 748	iph->tot_len = htons(len + state->hlen);
 749
 750	ip_send_check(iph);
 751
 752	return skb2;
 753}
 754EXPORT_SYMBOL(ip_frag_next);
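/* Worked example, assuming a 1500-byte device MTU and a 20-byte header:
 * state->mtu is 1480 bytes of data space, already a multiple of 8 as
 * required for fragment offsets (1480 / 8 = 185).  A 4000-byte payload
 * then leaves as fragments of 1480, 1480 and 1040 bytes with frag_off
 * offsets 0, 185 and 370, and MF set on all but the last.
 */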
 755
  756/*
  757 *	This IP datagram is too large to be sent in one piece.  Break it up
  758 *	into smaller pieces (each consisting of an IP header plus a block of
  759 *	the data of the original IP payload), each small enough to fit in a
  760 *	single device frame, and queue such frames for sending.
  761 */
 762
 763int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 764		   int (*output)(struct net *, struct sock *, struct sk_buff *))
 765{
 766	struct iphdr *iph;
 767	struct sk_buff *skb2;
 768	u8 tstamp_type = skb->tstamp_type;
 769	struct rtable *rt = skb_rtable(skb);
 770	unsigned int mtu, hlen, ll_rs;
 771	struct ip_fraglist_iter iter;
 772	ktime_t tstamp = skb->tstamp;
 773	struct ip_frag_state state;
 774	int err = 0;
 775
 776	/* for offloaded checksums cleanup checksum before fragmentation */
 777	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 778	    (err = skb_checksum_help(skb)))
 779		goto fail;
 780
 781	/*
 782	 *	Point into the IP datagram header.
 783	 */
 784
 785	iph = ip_hdr(skb);
 786
 787	mtu = ip_skb_dst_mtu(sk, skb);
 788	if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
 789		mtu = IPCB(skb)->frag_max_size;
 790
 791	/*
 792	 *	Setup starting values.
 793	 */
 794
 795	hlen = iph->ihl * 4;
 796	mtu = mtu - hlen;	/* Size of data space */
 797	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
 798	ll_rs = LL_RESERVED_SPACE(rt->dst.dev);
 799
  800	/* When a frag_list is given, use it. First, check its validity:
  801	 * some transformers could create a wrong frag_list or break an
  802	 * existing one; that is not prohibited. In this case fall back to copying.
  803	 *
  804	 * LATER: this step can be merged into the real generation of fragments;
  805	 * we can switch to copying when we see the first bad fragment.
  806	 */
 807	if (skb_has_frag_list(skb)) {
 808		struct sk_buff *frag, *frag2;
 809		unsigned int first_len = skb_pagelen(skb);
 810
 811		if (first_len - hlen > mtu ||
 812		    ((first_len - hlen) & 7) ||
 813		    ip_is_fragment(iph) ||
 814		    skb_cloned(skb) ||
 815		    skb_headroom(skb) < ll_rs)
 816			goto slow_path;
 817
 818		skb_walk_frags(skb, frag) {
 819			/* Correct geometry. */
 820			if (frag->len > mtu ||
 821			    ((frag->len & 7) && frag->next) ||
 822			    skb_headroom(frag) < hlen + ll_rs)
 823				goto slow_path_clean;
 824
 825			/* Partially cloned skb? */
 826			if (skb_shared(frag))
 827				goto slow_path_clean;
 828
 829			BUG_ON(frag->sk);
 830			if (skb->sk) {
 831				frag->sk = skb->sk;
 832				frag->destructor = sock_wfree;
 833			}
 834			skb->truesize -= frag->truesize;
 835		}
 836
 837		/* Everything is OK. Generate! */
 838		ip_fraglist_init(skb, iph, hlen, &iter);
 839
 840		for (;;) {
 841			/* Prepare header of the next frame,
 842			 * before previous one went down. */
 843			if (iter.frag) {
 844				bool first_frag = (iter.offset == 0);
 845
 846				IPCB(iter.frag)->flags = IPCB(skb)->flags;
 847				ip_fraglist_prepare(skb, &iter);
 848				if (first_frag && IPCB(skb)->opt.optlen) {
 849					/* ipcb->opt is not populated for frags
 850					 * coming from __ip_make_skb(),
 851					 * ip_options_fragment() needs optlen
 852					 */
 853					IPCB(iter.frag)->opt.optlen =
 854						IPCB(skb)->opt.optlen;
 855					ip_options_fragment(iter.frag);
 856					ip_send_check(iter.iph);
 857				}
 858			}
 859
 860			skb_set_delivery_time(skb, tstamp, tstamp_type);
 861			err = output(net, sk, skb);
 862
 863			if (!err)
 864				IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
 865			if (err || !iter.frag)
 866				break;
 867
 868			skb = ip_fraglist_next(&iter);
 869		}
 870
 871		if (err == 0) {
 872			IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
 873			return 0;
 874		}
 875
 876		kfree_skb_list(iter.frag);
 877
 878		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
 879		return err;
 880
 881slow_path_clean:
 882		skb_walk_frags(skb, frag2) {
 883			if (frag2 == frag)
 884				break;
 885			frag2->sk = NULL;
 886			frag2->destructor = NULL;
 887			skb->truesize += frag2->truesize;
 888		}
 889	}
 890
 891slow_path:
 892	/*
 893	 *	Fragment the datagram.
 894	 */
 895
 896	ip_frag_init(skb, hlen, ll_rs, mtu, IPCB(skb)->flags & IPSKB_FRAG_PMTU,
 897		     &state);
 898
 899	/*
 900	 *	Keep copying data until we run out.
 901	 */
 902
 903	while (state.left > 0) {
 904		bool first_frag = (state.offset == 0);
 905
 906		skb2 = ip_frag_next(skb, &state);
 907		if (IS_ERR(skb2)) {
 908			err = PTR_ERR(skb2);
 909			goto fail;
 910		}
 911		ip_frag_ipcb(skb, skb2, first_frag);
 912
 913		/*
 914		 *	Put this fragment into the sending queue.
 915		 */
 916		skb_set_delivery_time(skb2, tstamp, tstamp_type);
 917		err = output(net, sk, skb2);
 918		if (err)
 919			goto fail;
 920
 921		IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
 922	}
 923	consume_skb(skb);
 924	IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
 925	return err;
 926
 927fail:
 928	kfree_skb(skb);
 929	IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
 930	return err;
 931}
 932EXPORT_SYMBOL(ip_do_fragment);
 933
 934int
 935ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
 936{
 937	struct msghdr *msg = from;
 938
 939	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 940		if (!copy_from_iter_full(to, len, &msg->msg_iter))
 941			return -EFAULT;
 942	} else {
 943		__wsum csum = 0;
 944		if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
 945			return -EFAULT;
 946		skb->csum = csum_block_add(skb->csum, csum, odd);
 947	}
 948	return 0;
 949}
 950EXPORT_SYMBOL(ip_generic_getfrag);
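/* The getfrag() contract, as used throughout this file: copy 'len' bytes
 * of caller data starting at 'offset' into 'to', and when the device
 * cannot offload the checksum, fold the partial sum into skb->csum, with
 * 'odd' giving the byte offset at which the block lands so that
 * csum_block_add() can rotate it correctly.
 */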
 951
 952static int __ip_append_data(struct sock *sk,
 953			    struct flowi4 *fl4,
 954			    struct sk_buff_head *queue,
 955			    struct inet_cork *cork,
 956			    struct page_frag *pfrag,
 957			    int getfrag(void *from, char *to, int offset,
 958					int len, int odd, struct sk_buff *skb),
 959			    void *from, int length, int transhdrlen,
 960			    unsigned int flags)
 961{
 962	struct inet_sock *inet = inet_sk(sk);
 963	struct ubuf_info *uarg = NULL;
 964	struct sk_buff *skb;
 965	struct ip_options *opt = cork->opt;
 966	int hh_len;
 967	int exthdrlen;
 968	int mtu;
 969	int copy;
 970	int err;
 971	int offset = 0;
 972	bool zc = false;
 973	unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
 974	int csummode = CHECKSUM_NONE;
 975	struct rtable *rt = dst_rtable(cork->dst);
 976	bool paged, hold_tskey = false, extra_uref = false;
 977	unsigned int wmem_alloc_delta = 0;
 978	u32 tskey = 0;
 979
 980	skb = skb_peek_tail(queue);
 981
 982	exthdrlen = !skb ? rt->dst.header_len : 0;
 983	mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;
 984	paged = !!cork->gso_size;
 985
 986	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
 987
 988	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
 989	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
 990	maxnonfragsize = ip_sk_ignore_df(sk) ? IP_MAX_MTU : mtu;
 991
 992	if (cork->length + length > maxnonfragsize - fragheaderlen) {
 993		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
 994			       mtu - (opt ? opt->optlen : 0));
 995		return -EMSGSIZE;
 996	}
 997
 998	/*
  999	 * transhdrlen > 0 means that this is the first fragment, and we hope
 1000	 * it will not be fragmented later.
1001	 */
1002	if (transhdrlen &&
1003	    length + fragheaderlen <= mtu &&
1004	    rt->dst.dev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM) &&
1005	    (!(flags & MSG_MORE) || cork->gso_size) &&
1006	    (!exthdrlen || (rt->dst.dev->features & NETIF_F_HW_ESP_TX_CSUM)))
1007		csummode = CHECKSUM_PARTIAL;
1008
1009	if ((flags & MSG_ZEROCOPY) && length) {
1010		struct msghdr *msg = from;
1011
1012		if (getfrag == ip_generic_getfrag && msg->msg_ubuf) {
1013			if (skb_zcopy(skb) && msg->msg_ubuf != skb_zcopy(skb))
1014				return -EINVAL;
1015
1016			/* Leave uarg NULL if can't zerocopy, callers should
1017			 * be able to handle it.
1018			 */
1019			if ((rt->dst.dev->features & NETIF_F_SG) &&
1020			    csummode == CHECKSUM_PARTIAL) {
1021				paged = true;
1022				zc = true;
1023				uarg = msg->msg_ubuf;
1024			}
1025		} else if (sock_flag(sk, SOCK_ZEROCOPY)) {
1026			uarg = msg_zerocopy_realloc(sk, length, skb_zcopy(skb));
1027			if (!uarg)
1028				return -ENOBUFS;
1029			extra_uref = !skb_zcopy(skb);	/* only ref on new uarg */
1030			if (rt->dst.dev->features & NETIF_F_SG &&
1031			    csummode == CHECKSUM_PARTIAL) {
1032				paged = true;
1033				zc = true;
1034			} else {
1035				uarg_to_msgzc(uarg)->zerocopy = 0;
1036				skb_zcopy_set(skb, uarg, &extra_uref);
1037			}
1038		}
1039	} else if ((flags & MSG_SPLICE_PAGES) && length) {
1040		if (inet_test_bit(HDRINCL, sk))
1041			return -EPERM;
1042		if (rt->dst.dev->features & NETIF_F_SG &&
1043		    getfrag == ip_generic_getfrag)
1044			/* We need an empty buffer to attach stuff to */
1045			paged = true;
1046		else
1047			flags &= ~MSG_SPLICE_PAGES;
1048	}
1049
1050	cork->length += length;
1051
1052	if (cork->tx_flags & SKBTX_ANY_TSTAMP &&
1053	    READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) {
1054		if (cork->flags & IPCORK_TS_OPT_ID) {
1055			tskey = cork->ts_opt_id;
1056		} else {
1057			tskey = atomic_inc_return(&sk->sk_tskey) - 1;
1058			hold_tskey = true;
1059		}
1060	}
1061
1062	/* So, what's going on in the loop below?
1063	 *
 1064	 * We use the calculated fragment length to generate a chain of skbs;
 1065	 * each segment is an IP fragment, ready to be sent to the network once
 1066	 * the appropriate IP header has been added.
1067	 */
1068
1069	if (!skb)
1070		goto alloc_new_skb;
1071
1072	while (length > 0) {
1073		/* Check if the remaining data fits into current packet. */
1074		copy = mtu - skb->len;
1075		if (copy < length)
1076			copy = maxfraglen - skb->len;
1077		if (copy <= 0) {
1078			char *data;
1079			unsigned int datalen;
1080			unsigned int fraglen;
1081			unsigned int fraggap;
1082			unsigned int alloclen, alloc_extra;
1083			unsigned int pagedlen;
1084			struct sk_buff *skb_prev;
1085alloc_new_skb:
1086			skb_prev = skb;
1087			if (skb_prev)
1088				fraggap = skb_prev->len - maxfraglen;
1089			else
1090				fraggap = 0;
1091
1092			/*
1093			 * If remaining data exceeds the mtu,
1094			 * we know we need more fragment(s).
1095			 */
1096			datalen = length + fraggap;
1097			if (datalen > mtu - fragheaderlen)
1098				datalen = maxfraglen - fragheaderlen;
1099			fraglen = datalen + fragheaderlen;
1100			pagedlen = 0;
1101
1102			alloc_extra = hh_len + 15;
1103			alloc_extra += exthdrlen;
1104
1105			/* The last fragment gets additional space at tail.
1106			 * Note, with MSG_MORE we overallocate on fragments,
1107			 * because we have no idea what fragment will be
1108			 * the last.
1109			 */
1110			if (datalen == length + fraggap)
1111				alloc_extra += rt->dst.trailer_len;
1112
1113			if ((flags & MSG_MORE) &&
1114			    !(rt->dst.dev->features&NETIF_F_SG))
1115				alloclen = mtu;
1116			else if (!paged &&
1117				 (fraglen + alloc_extra < SKB_MAX_ALLOC ||
1118				  !(rt->dst.dev->features & NETIF_F_SG)))
1119				alloclen = fraglen;
1120			else {
1121				alloclen = fragheaderlen + transhdrlen;
1122				pagedlen = datalen - transhdrlen;
1123			}
1124
1125			alloclen += alloc_extra;
1126
1127			if (transhdrlen) {
1128				skb = sock_alloc_send_skb(sk, alloclen,
1129						(flags & MSG_DONTWAIT), &err);
1130			} else {
1131				skb = NULL;
1132				if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
1133				    2 * sk->sk_sndbuf)
1134					skb = alloc_skb(alloclen,
1135							sk->sk_allocation);
1136				if (unlikely(!skb))
1137					err = -ENOBUFS;
1138			}
1139			if (!skb)
1140				goto error;
1141
1142			/*
1143			 *	Fill in the control structures
1144			 */
1145			skb->ip_summed = csummode;
1146			skb->csum = 0;
1147			skb_reserve(skb, hh_len);
1148
1149			/*
1150			 *	Find where to start putting bytes.
1151			 */
1152			data = skb_put(skb, fraglen + exthdrlen - pagedlen);
1153			skb_set_network_header(skb, exthdrlen);
1154			skb->transport_header = (skb->network_header +
1155						 fragheaderlen);
1156			data += fragheaderlen + exthdrlen;
1157
1158			if (fraggap) {
1159				skb->csum = skb_copy_and_csum_bits(
1160					skb_prev, maxfraglen,
1161					data + transhdrlen, fraggap);
1162				skb_prev->csum = csum_sub(skb_prev->csum,
1163							  skb->csum);
1164				data += fraggap;
1165				pskb_trim_unique(skb_prev, maxfraglen);
1166			}
1167
1168			copy = datalen - transhdrlen - fraggap - pagedlen;
1169			/* [!] NOTE: copy will be negative if pagedlen>0
1170			 * because then the equation reduces to -fraggap.
1171			 */
1172			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
1173				err = -EFAULT;
1174				kfree_skb(skb);
1175				goto error;
1176			} else if (flags & MSG_SPLICE_PAGES) {
1177				copy = 0;
1178			}
1179
1180			offset += copy;
1181			length -= copy + transhdrlen;
1182			transhdrlen = 0;
1183			exthdrlen = 0;
1184			csummode = CHECKSUM_NONE;
1185
1186			/* only the initial fragment is time stamped */
1187			skb_shinfo(skb)->tx_flags = cork->tx_flags;
1188			cork->tx_flags = 0;
1189			skb_shinfo(skb)->tskey = tskey;
1190			tskey = 0;
1191			skb_zcopy_set(skb, uarg, &extra_uref);
1192
1193			if ((flags & MSG_CONFIRM) && !skb_prev)
1194				skb_set_dst_pending_confirm(skb, 1);
1195
1196			/*
1197			 * Put the packet on the pending queue.
1198			 */
1199			if (!skb->destructor) {
1200				skb->destructor = sock_wfree;
1201				skb->sk = sk;
1202				wmem_alloc_delta += skb->truesize;
1203			}
1204			__skb_queue_tail(queue, skb);
1205			continue;
1206		}
1207
1208		if (copy > length)
1209			copy = length;
1210
1211		if (!(rt->dst.dev->features&NETIF_F_SG) &&
1212		    skb_tailroom(skb) >= copy) {
1213			unsigned int off;
1214
1215			off = skb->len;
1216			if (getfrag(from, skb_put(skb, copy),
1217					offset, copy, off, skb) < 0) {
1218				__skb_trim(skb, off);
1219				err = -EFAULT;
1220				goto error;
1221			}
1222		} else if (flags & MSG_SPLICE_PAGES) {
1223			struct msghdr *msg = from;
1224
1225			err = -EIO;
1226			if (WARN_ON_ONCE(copy > msg->msg_iter.count))
1227				goto error;
1228
1229			err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
1230						   sk->sk_allocation);
1231			if (err < 0)
1232				goto error;
1233			copy = err;
1234			wmem_alloc_delta += copy;
1235		} else if (!zc) {
1236			int i = skb_shinfo(skb)->nr_frags;
1237
1238			err = -ENOMEM;
1239			if (!sk_page_frag_refill(sk, pfrag))
1240				goto error;
1241
1242			skb_zcopy_downgrade_managed(skb);
1243			if (!skb_can_coalesce(skb, i, pfrag->page,
1244					      pfrag->offset)) {
1245				err = -EMSGSIZE;
1246				if (i == MAX_SKB_FRAGS)
1247					goto error;
1248
1249				__skb_fill_page_desc(skb, i, pfrag->page,
1250						     pfrag->offset, 0);
1251				skb_shinfo(skb)->nr_frags = ++i;
1252				get_page(pfrag->page);
1253			}
1254			copy = min_t(int, copy, pfrag->size - pfrag->offset);
1255			if (getfrag(from,
1256				    page_address(pfrag->page) + pfrag->offset,
1257				    offset, copy, skb->len, skb) < 0)
1258				goto error_efault;
1259
1260			pfrag->offset += copy;
1261			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1262			skb_len_add(skb, copy);
1263			wmem_alloc_delta += copy;
1264		} else {
1265			err = skb_zerocopy_iter_dgram(skb, from, copy);
1266			if (err < 0)
1267				goto error;
1268		}
1269		offset += copy;
1270		length -= copy;
1271	}
1272
1273	if (wmem_alloc_delta)
1274		refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
1275	return 0;
1276
1277error_efault:
1278	err = -EFAULT;
1279error:
1280	net_zcopy_put_abort(uarg, extra_uref);
1281	cork->length -= length;
1282	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
1283	refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
1284	if (hold_tskey)
1285		atomic_dec(&sk->sk_tskey);
1286	return err;
1287}
1288
1289static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
1290			 struct ipcm_cookie *ipc, struct rtable **rtp)
1291{
1292	struct ip_options_rcu *opt;
1293	struct rtable *rt;
1294
1295	rt = *rtp;
1296	if (unlikely(!rt))
1297		return -EFAULT;
1298
1299	cork->fragsize = ip_sk_use_pmtu(sk) ?
1300			 dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);
1301
1302	if (!inetdev_valid_mtu(cork->fragsize))
1303		return -ENETUNREACH;
1304
1305	/*
1306	 * setup for corking.
1307	 */
1308	opt = ipc->opt;
1309	if (opt) {
1310		if (!cork->opt) {
1311			cork->opt = kmalloc(sizeof(struct ip_options) + 40,
1312					    sk->sk_allocation);
1313			if (unlikely(!cork->opt))
1314				return -ENOBUFS;
1315		}
1316		memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
1317		cork->flags |= IPCORK_OPT;
1318		cork->addr = ipc->addr;
1319	}
1320
1321	cork->gso_size = ipc->gso_size;
1322
1323	cork->dst = &rt->dst;
1324	/* We stole this route, caller should not release it. */
1325	*rtp = NULL;
1326
1327	cork->length = 0;
1328	cork->ttl = ipc->ttl;
1329	cork->tos = ipc->tos;
1330	cork->mark = ipc->sockc.mark;
1331	cork->priority = ipc->priority;
1332	cork->transmit_time = ipc->sockc.transmit_time;
1333	cork->tx_flags = 0;
1334	sock_tx_timestamp(sk, &ipc->sockc, &cork->tx_flags);
1335	if (ipc->sockc.tsflags & SOCKCM_FLAG_TS_OPT_ID) {
1336		cork->flags |= IPCORK_TS_OPT_ID;
1337		cork->ts_opt_id = ipc->sockc.ts_opt_id;
1338	}
1339
1340	return 0;
1341}
1342
1343/*
1344 *	ip_append_data() can make one large IP datagram from many pieces of
1345 *	data.  Each piece will be held on the socket until
1346 *	ip_push_pending_frames() is called. Each piece can be a page or
1347 *	non-page data.
1348 *
1349 *	Not only UDP, other transport protocols - e.g. raw sockets - can use
1350 *	this interface potentially.
1351 *
1352 *	LATER: length must be adjusted by pad at tail, when it is required.
1353 */
1354int ip_append_data(struct sock *sk, struct flowi4 *fl4,
1355		   int getfrag(void *from, char *to, int offset, int len,
1356			       int odd, struct sk_buff *skb),
1357		   void *from, int length, int transhdrlen,
1358		   struct ipcm_cookie *ipc, struct rtable **rtp,
1359		   unsigned int flags)
1360{
1361	struct inet_sock *inet = inet_sk(sk);
1362	int err;
1363
1364	if (flags&MSG_PROBE)
1365		return 0;
1366
1367	if (skb_queue_empty(&sk->sk_write_queue)) {
1368		err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
1369		if (err)
1370			return err;
1371	} else {
1372		transhdrlen = 0;
1373	}
1374
1375	return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
1376				sk_page_frag(sk), getfrag,
1377				from, length, transhdrlen, flags);
1378}
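/* Sketch of how a datagram sender typically drives this interface
 * (roughly what udp_sendmsg() does; locking and error paths trimmed,
 * and 'corking' stands in for the socket's UDP_CORK state):
 *
 *	lock_sock(sk);
 *	err = ip_append_data(sk, &fl4, ip_generic_getfrag, msg, len,
 *			     sizeof(struct udphdr), &ipc, &rt,
 *			     msg->msg_flags);
 *	if (!err && !corking)
 *		err = ip_push_pending_frames(sk, &fl4);
 *	release_sock(sk);
 *
 * UDP itself pushes via udp_push_pending_frames() so it can fill in the
 * UDP header before the datagram goes out.
 */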
1379
1380static void ip_cork_release(struct inet_cork *cork)
1381{
1382	cork->flags &= ~IPCORK_OPT;
1383	kfree(cork->opt);
1384	cork->opt = NULL;
1385	dst_release(cork->dst);
1386	cork->dst = NULL;
1387}
1388
1389/*
 1390 *	Combine all pending IP fragments on the socket into one IP datagram
 1391 *	and push them out.
1392 */
1393struct sk_buff *__ip_make_skb(struct sock *sk,
1394			      struct flowi4 *fl4,
1395			      struct sk_buff_head *queue,
1396			      struct inet_cork *cork)
1397{
1398	struct sk_buff *skb, *tmp_skb;
1399	struct sk_buff **tail_skb;
1400	struct inet_sock *inet = inet_sk(sk);
1401	struct net *net = sock_net(sk);
1402	struct ip_options *opt = NULL;
1403	struct rtable *rt = dst_rtable(cork->dst);
1404	struct iphdr *iph;
1405	u8 pmtudisc, ttl;
1406	__be16 df = 0;
1407
1408	skb = __skb_dequeue(queue);
1409	if (!skb)
1410		goto out;
1411	tail_skb = &(skb_shinfo(skb)->frag_list);
1412
1413	/* move skb->data to ip header from ext header */
1414	if (skb->data < skb_network_header(skb))
1415		__skb_pull(skb, skb_network_offset(skb));
1416	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
1417		__skb_pull(tmp_skb, skb_network_header_len(skb));
1418		*tail_skb = tmp_skb;
1419		tail_skb = &(tmp_skb->next);
1420		skb->len += tmp_skb->len;
1421		skb->data_len += tmp_skb->len;
1422		skb->truesize += tmp_skb->truesize;
1423		tmp_skb->destructor = NULL;
1424		tmp_skb->sk = NULL;
1425	}
1426
 1427	/* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we
 1428	 * allow the frame generated here to be fragmented. No matter how
 1429	 * transforms change the size of the packet, it will go out.
 1430	 */
1431	skb->ignore_df = ip_sk_ignore_df(sk);
1432
 1433	/* DF bit is set when we want to see DF on outgoing frames.
 1434	 * If ignore_df is set too, we still allow this frame to be
 1435	 * fragmented locally. */
1436	pmtudisc = READ_ONCE(inet->pmtudisc);
1437	if (pmtudisc == IP_PMTUDISC_DO ||
1438	    pmtudisc == IP_PMTUDISC_PROBE ||
1439	    (skb->len <= dst_mtu(&rt->dst) &&
1440	     ip_dont_fragment(sk, &rt->dst)))
1441		df = htons(IP_DF);
1442
1443	if (cork->flags & IPCORK_OPT)
1444		opt = cork->opt;
1445
1446	if (cork->ttl != 0)
1447		ttl = cork->ttl;
1448	else if (rt->rt_type == RTN_MULTICAST)
1449		ttl = READ_ONCE(inet->mc_ttl);
1450	else
1451		ttl = ip_select_ttl(inet, &rt->dst);
1452
1453	iph = ip_hdr(skb);
1454	iph->version = 4;
1455	iph->ihl = 5;
1456	iph->tos = (cork->tos != -1) ? cork->tos : READ_ONCE(inet->tos);
1457	iph->frag_off = df;
1458	iph->ttl = ttl;
1459	iph->protocol = sk->sk_protocol;
1460	ip_copy_addrs(iph, fl4);
1461	ip_select_ident(net, skb, sk);
1462
1463	if (opt) {
1464		iph->ihl += opt->optlen >> 2;
1465		ip_options_build(skb, opt, cork->addr, rt);
1466	}
1467
1468	skb->priority = (cork->tos != -1) ? cork->priority: READ_ONCE(sk->sk_priority);
1469	skb->mark = cork->mark;
1470	if (sk_is_tcp(sk))
1471		skb_set_delivery_time(skb, cork->transmit_time, SKB_CLOCK_MONOTONIC);
1472	else
1473		skb_set_delivery_type_by_clockid(skb, cork->transmit_time, sk->sk_clockid);
1474	/*
1475	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
1476	 * on dst refcount
1477	 */
1478	cork->dst = NULL;
1479	skb_dst_set(skb, &rt->dst);
1480
1481	if (iph->protocol == IPPROTO_ICMP) {
1482		u8 icmp_type;
1483
 1484		/* For such sockets, transhdrlen is zero when ip_append_data() is
 1485		 * called, so the icmphdr is not in the skb linear region and the
 1486		 * icmp_type cannot be read via icmp_hdr(skb)->type.
 1487		 */
1488		if (sk->sk_type == SOCK_RAW &&
1489		    !(fl4->flowi4_flags & FLOWI_FLAG_KNOWN_NH))
1490			icmp_type = fl4->fl4_icmp_type;
1491		else
1492			icmp_type = icmp_hdr(skb)->type;
1493		icmp_out_count(net, icmp_type);
1494	}
1495
1496	ip_cork_release(cork);
1497out:
1498	return skb;
1499}
1500
1501int ip_send_skb(struct net *net, struct sk_buff *skb)
1502{
1503	int err;
1504
1505	err = ip_local_out(net, skb->sk, skb);
1506	if (err) {
1507		if (err > 0)
1508			err = net_xmit_errno(err);
1509		if (err)
1510			IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
1511	}
1512
1513	return err;
1514}
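/* dst_output() may also return positive NET_XMIT_* codes from the
 * queueing layer; net_xmit_errno() maps them to errnos (NET_XMIT_CN
 * counts as success, anything else becomes -ENOBUFS), so callers of
 * ip_send_skb() see a conventional zero-or-negative result.
 */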
1515
1516int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
1517{
1518	struct sk_buff *skb;
1519
1520	skb = ip_finish_skb(sk, fl4);
1521	if (!skb)
1522		return 0;
1523
 1524	/* Netfilter gets the whole, not yet fragmented skb. */
1525	return ip_send_skb(sock_net(sk), skb);
1526}
1527
1528/*
1529 *	Throw away all pending data on the socket.
1530 */
1531static void __ip_flush_pending_frames(struct sock *sk,
1532				      struct sk_buff_head *queue,
1533				      struct inet_cork *cork)
1534{
1535	struct sk_buff *skb;
1536
1537	while ((skb = __skb_dequeue_tail(queue)) != NULL)
1538		kfree_skb(skb);
1539
1540	ip_cork_release(cork);
1541}
1542
1543void ip_flush_pending_frames(struct sock *sk)
1544{
1545	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
1546}
1547
1548struct sk_buff *ip_make_skb(struct sock *sk,
1549			    struct flowi4 *fl4,
1550			    int getfrag(void *from, char *to, int offset,
1551					int len, int odd, struct sk_buff *skb),
1552			    void *from, int length, int transhdrlen,
1553			    struct ipcm_cookie *ipc, struct rtable **rtp,
1554			    struct inet_cork *cork, unsigned int flags)
1555{
1556	struct sk_buff_head queue;
1557	int err;
1558
1559	if (flags & MSG_PROBE)
1560		return NULL;
1561
1562	__skb_queue_head_init(&queue);
1563
1564	cork->flags = 0;
1565	cork->addr = 0;
1566	cork->opt = NULL;
1567	err = ip_setup_cork(sk, cork, ipc, rtp);
1568	if (err)
1569		return ERR_PTR(err);
1570
1571	err = __ip_append_data(sk, fl4, &queue, cork,
1572			       &current->task_frag, getfrag,
1573			       from, length, transhdrlen, flags);
1574	if (err) {
1575		__ip_flush_pending_frames(sk, &queue, cork);
1576		return ERR_PTR(err);
1577	}
1578
1579	return __ip_make_skb(sk, fl4, &queue, cork);
1580}
1581
1582/*
1583 *	Fetch data from kernel space and fill in checksum if needed.
1584 */
1585static int ip_reply_glue_bits(void *dptr, char *to, int offset,
1586			      int len, int odd, struct sk_buff *skb)
1587{
1588	__wsum csum;
1589
1590	csum = csum_partial_copy_nocheck(dptr+offset, to, len);
1591	skb->csum = csum_block_add(skb->csum, csum, odd);
1592	return 0;
1593}
1594
1595/*
1596 *	Generic function to send a packet as reply to another packet.
 1597 *	Used so far to send some TCP resets and ACKs.
1598 */
1599void ip_send_unicast_reply(struct sock *sk, const struct sock *orig_sk,
1600			   struct sk_buff *skb,
1601			   const struct ip_options *sopt,
1602			   __be32 daddr, __be32 saddr,
1603			   const struct ip_reply_arg *arg,
1604			   unsigned int len, u64 transmit_time, u32 txhash)
1605{
1606	struct ip_options_data replyopts;
1607	struct ipcm_cookie ipc;
1608	struct flowi4 fl4;
1609	struct rtable *rt = skb_rtable(skb);
1610	struct net *net = sock_net(sk);
1611	struct sk_buff *nskb;
1612	int err;
1613	int oif;
1614
1615	if (__ip_options_echo(net, &replyopts.opt.opt, skb, sopt))
1616		return;
1617
1618	ipcm_init(&ipc);
1619	ipc.addr = daddr;
1620	ipc.sockc.transmit_time = transmit_time;
1621
1622	if (replyopts.opt.opt.optlen) {
1623		ipc.opt = &replyopts.opt;
1624
1625		if (replyopts.opt.opt.srr)
1626			daddr = replyopts.opt.opt.faddr;
1627	}
1628
1629	oif = arg->bound_dev_if;
1630	if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
1631		oif = skb->skb_iif;
1632
1633	flowi4_init_output(&fl4, oif,
1634			   IP4_REPLY_MARK(net, skb->mark) ?: sk->sk_mark,
1635			   arg->tos & INET_DSCP_MASK,
1636			   RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
1637			   ip_reply_arg_flowi_flags(arg),
1638			   daddr, saddr,
1639			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
1640			   arg->uid);
1641	security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4));
1642	rt = ip_route_output_flow(net, &fl4, sk);
1643	if (IS_ERR(rt))
1644		return;
1645
1646	inet_sk(sk)->tos = arg->tos & ~INET_ECN_MASK;
1647
1648	sk->sk_protocol = ip_hdr(skb)->protocol;
1649	sk->sk_bound_dev_if = arg->bound_dev_if;
1650	sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default);
1651	ipc.sockc.mark = fl4.flowi4_mark;
1652	err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
1653			     len, 0, &ipc, &rt, MSG_DONTWAIT);
1654	if (unlikely(err)) {
1655		ip_flush_pending_frames(sk);
1656		goto out;
1657	}
1658
1659	nskb = skb_peek(&sk->sk_write_queue);
1660	if (nskb) {
1661		if (arg->csumoffset >= 0)
1662			*((__sum16 *)skb_transport_header(nskb) +
1663			  arg->csumoffset) = csum_fold(csum_add(nskb->csum,
1664								arg->csum));
1665		nskb->ip_summed = CHECKSUM_NONE;
1666		if (orig_sk)
1667			skb_set_owner_edemux(nskb, (struct sock *)orig_sk);
1668		if (transmit_time)
1669			nskb->tstamp_type = SKB_CLOCK_MONOTONIC;
1670		if (txhash)
1671			skb_set_hash(nskb, txhash, PKT_HASH_TYPE_L4);
1672		ip_push_pending_frames(sk, &fl4);
1673	}
1674out:
1675	ip_rt_put(rt);
1676}
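/* TCP is the main user of this helper, emitting RSTs and ACKs that are
 * not tied to a full socket (e.g. resets for unknown connections).  It is
 * called on per-cpu kernel sockets that may be scribbled on, which is why
 * fields such as sk_protocol and sk_bound_dev_if are simply overwritten
 * above.
 */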
1677
1678void __init ip_init(void)
1679{
1680	ip_rt_init();
1681	inet_initpeers();
1682
1683#if defined(CONFIG_IP_MULTICAST)
1684	igmp_mc_init();
1685#endif
1686}
v3.1
 
   1/*
   2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   3 *		operating system.  INET is implemented using the  BSD Socket
   4 *		interface as the means of communication with the user level.
   5 *
   6 *		The Internet Protocol (IP) output module.
   7 *
   8 * Authors:	Ross Biro
   9 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  10 *		Donald Becker, <becker@super.org>
  11 *		Alan Cox, <Alan.Cox@linux.org>
  12 *		Richard Underwood
  13 *		Stefan Becker, <stefanb@yello.ping.de>
  14 *		Jorge Cwik, <jorge@laser.satlink.net>
  15 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
  16 *		Hirokazu Takahashi, <taka@valinux.co.jp>
  17 *
  18 *	See ip_input.c for original log
  19 *
  20 *	Fixes:
  21 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
  22 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
  23 *		Bradford Johnson:	Fix faulty handling of some frames when
  24 *					no route is found.
  25 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
  26 *					(in case if packet not accepted by
  27 *					output firewall rules)
  28 *		Mike McLagan	:	Routing by source
  29 *		Alexey Kuznetsov:	use new route cache
  30 *		Andi Kleen:		Fix broken PMTU recovery and remove
  31 *					some redundant tests.
  32 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
  33 *		Andi Kleen	: 	Replace ip_reply with ip_send_reply.
  34 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
  35 *					for decreased register pressure on x86
  36 *					and more readibility.
  37 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
  38 *					silently drop skb instead of failing with -EPERM.
  39 *		Detlev Wengorz	:	Copy protocol for fragments.
  40 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
  41 *					datagrams.
  42 *		Hirokazu Takahashi:	sendfile() on UDP works now.
  43 */
  44
  45#include <asm/uaccess.h>
  46#include <asm/system.h>
  47#include <linux/module.h>
  48#include <linux/types.h>
  49#include <linux/kernel.h>
  50#include <linux/mm.h>
  51#include <linux/string.h>
  52#include <linux/errno.h>
  53#include <linux/highmem.h>
  54#include <linux/slab.h>
  55
  56#include <linux/socket.h>
  57#include <linux/sockios.h>
  58#include <linux/in.h>
  59#include <linux/inet.h>
  60#include <linux/netdevice.h>
  61#include <linux/etherdevice.h>
  62#include <linux/proc_fs.h>
  63#include <linux/stat.h>
  64#include <linux/init.h>
  65
  66#include <net/snmp.h>
  67#include <net/ip.h>
  68#include <net/protocol.h>
  69#include <net/route.h>
  70#include <net/xfrm.h>
  71#include <linux/skbuff.h>
  72#include <net/sock.h>
  73#include <net/arp.h>
  74#include <net/icmp.h>
  75#include <net/checksum.h>
 
  76#include <net/inetpeer.h>
 
 
 
 
  77#include <linux/igmp.h>
  78#include <linux/netfilter_ipv4.h>
  79#include <linux/netfilter_bridge.h>
  80#include <linux/mroute.h>
  81#include <linux/netlink.h>
  82#include <linux/tcp.h>
  83
  84int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
  85EXPORT_SYMBOL(sysctl_ip_default_ttl);
 
 
  86
  87/* Generate a checksum for an outgoing IP datagram. */
  88__inline__ void ip_send_check(struct iphdr *iph)
  89{
  90	iph->check = 0;
  91	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
  92}
  93EXPORT_SYMBOL(ip_send_check);
  94
  95int __ip_local_out(struct sk_buff *skb)
  96{
  97	struct iphdr *iph = ip_hdr(skb);
  98
  99	iph->tot_len = htons(skb->len);
 
 
 100	ip_send_check(iph);
 101	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
 102		       skb_dst(skb)->dev, dst_output);
 
 
 
 
 
 
 
 
 
 
 
 103}
 104
 105int ip_local_out(struct sk_buff *skb)
 106{
 107	int err;
 108
 109	err = __ip_local_out(skb);
 110	if (likely(err == 1))
 111		err = dst_output(skb);
 112
 113	return err;
 114}
 115EXPORT_SYMBOL_GPL(ip_local_out);
 116
 117/* dev_loopback_xmit for use with netfilter. */
 118static int ip_dev_loopback_xmit(struct sk_buff *newskb)
 119{
 120	skb_reset_mac_header(newskb);
 121	__skb_pull(newskb, skb_network_offset(newskb));
 122	newskb->pkt_type = PACKET_LOOPBACK;
 123	newskb->ip_summed = CHECKSUM_UNNECESSARY;
 124	WARN_ON(!skb_dst(newskb));
 125	skb_dst_force(newskb);
 126	netif_rx_ni(newskb);
 127	return 0;
 128}
 129
 130static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
 131{
 132	int ttl = inet->uc_ttl;
 133
 134	if (ttl < 0)
 135		ttl = ip4_dst_hoplimit(dst);
 136	return ttl;
 137}
 138
 139/*
 140 *		Add an ip header to a skbuff and send it out.
 141 *
 142 */
 143int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
 144			  __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
 
 145{
 146	struct inet_sock *inet = inet_sk(sk);
 147	struct rtable *rt = skb_rtable(skb);
 
 148	struct iphdr *iph;
 149
 150	/* Build the IP header. */
 151	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
 152	skb_reset_network_header(skb);
 153	iph = ip_hdr(skb);
 154	iph->version  = 4;
 155	iph->ihl      = 5;
 156	iph->tos      = inet->tos;
 157	if (ip_dont_fragment(sk, &rt->dst))
 158		iph->frag_off = htons(IP_DF);
 159	else
 160		iph->frag_off = 0;
 161	iph->ttl      = ip_select_ttl(inet, &rt->dst);
 162	iph->daddr    = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
 163	iph->saddr    = saddr;
 164	iph->protocol = sk->sk_protocol;
 165	ip_select_ident(iph, &rt->dst, sk);
 
 
 
 
 
 
 
 
 
 
 
 
 
 166
 167	if (opt && opt->opt.optlen) {
 168		iph->ihl += opt->opt.optlen>>2;
 169		ip_options_build(skb, &opt->opt, daddr, rt, 0);
 170	}
 171
 172	skb->priority = sk->sk_priority;
 173	skb->mark = sk->sk_mark;
 174
 175	/* Send it out. */
 176	return ip_local_out(skb);
 177}
 178EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
 179
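     /*
      * Final transmission step: account multicast/broadcast output,
      * make sure the skb has enough headroom for the link-layer
      * header (reallocating it if not), and hand the packet to the
      * neighbour layer, which resolves and prepends the hardware
      * header and queues the frame on the device.
      */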
 180static inline int ip_finish_output2(struct sk_buff *skb)
 181{
 182	struct dst_entry *dst = skb_dst(skb);
 183	struct rtable *rt = (struct rtable *)dst;
 184	struct net_device *dev = dst->dev;
 185	unsigned int hh_len = LL_RESERVED_SPACE(dev);
 186	struct neighbour *neigh;
 187
  188	if (rt->rt_type == RTN_MULTICAST)
  189		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
  190	else if (rt->rt_type == RTN_BROADCAST)
  191		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);
 192
 193	/* Be paranoid, rather than too clever. */
 194	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
 195		struct sk_buff *skb2;
 196
 197		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
 198		if (skb2 == NULL) {
 199			kfree_skb(skb);
 200			return -ENOMEM;
 201		}
 202		if (skb->sk)
 203			skb_set_owner_w(skb2, skb->sk);
 204		kfree_skb(skb);
 205		skb = skb2;
 206	}
 207
 208	rcu_read_lock();
 209	neigh = dst_get_neighbour(dst);
 210	if (neigh) {
 211		int res = neigh_output(neigh, skb);
 212
 213		rcu_read_unlock();
 214		return res;
 215	}
 216	rcu_read_unlock();
 217
 218	if (net_ratelimit())
 219		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
 220	kfree_skb(skb);
 221	return -EINVAL;
 222}
 223
 224static inline int ip_skb_dst_mtu(struct sk_buff *skb)
 225{
 226	struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
 227
 228	return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
 229	       skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
 230}
 231
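     /*
      * Runs once POST_ROUTING has accepted the packet.  If a policy
      * lookup after NAT attached an xfrm transform to the dst, set
      * IPSKB_REROUTED and send the packet back through dst_output().
      * Otherwise fragment oversized non-GSO packets and pass
      * everything else straight to ip_finish_output2().
      */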
 232static int ip_finish_output(struct sk_buff *skb)
 233{
 234#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
 235	/* Policy lookup after SNAT yielded a new policy */
 236	if (skb_dst(skb)->xfrm != NULL) {
 237		IPCB(skb)->flags |= IPSKB_REROUTED;
 238		return dst_output(skb);
 239	}
 240#endif
 241	if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
 242		return ip_fragment(skb, ip_finish_output2);
 243	else
 244		return ip_finish_output2(skb);
 245}
 246
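     /*
      * Output path for multicast and broadcast packets.  Besides
      * sending the packet out on the device, clones are fed back
      * through ip_dev_loopback_xmit() so that local group members
      * and broadcast listeners receive their copy.
      */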
 247int ip_mc_output(struct sk_buff *skb)
 248{
 249	struct sock *sk = skb->sk;
 250	struct rtable *rt = skb_rtable(skb);
 251	struct net_device *dev = rt->dst.dev;
 252
 253	/*
 254	 *	If the indicated interface is up and running, send the packet.
 255	 */
 256	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
 257
 258	skb->dev = dev;
 259	skb->protocol = htons(ETH_P_IP);
 260
 261	/*
 262	 *	Multicasts are looped back for other local users
 263	 */
 264
 265	if (rt->rt_flags&RTCF_MULTICAST) {
 266		if (sk_mc_loop(sk)
 267#ifdef CONFIG_IP_MROUTE
  268		/* Small optimization: do not loop back non-local frames,
  269		   i.e. frames that returned after forwarding; ip_mr_input
  270		   would drop them in any case.
  271		   Note that local frames are looped back so that they are
  272		   delivered to local recipients.
  273
  274		   This check is currently duplicated in ip_mr_input.
  275		 */
 276		    &&
 277		    ((rt->rt_flags & RTCF_LOCAL) ||
 278		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
 279#endif
 280		   ) {
 281			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
 282			if (newskb)
 283				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
 284					newskb, NULL, newskb->dev,
 285					ip_dev_loopback_xmit);
 286		}
 287
 288		/* Multicasts with ttl 0 must not go beyond the host */
 289
 290		if (ip_hdr(skb)->ttl == 0) {
 291			kfree_skb(skb);
 292			return 0;
 293		}
 294	}
 295
 296	if (rt->rt_flags&RTCF_BROADCAST) {
 297		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
 298		if (newskb)
 299			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
 300				NULL, newskb->dev, ip_dev_loopback_xmit);
 301	}
 302
 303	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
 304			    skb->dev, ip_finish_output,
 305			    !(IPCB(skb)->flags & IPSKB_REROUTED));
 306}
 307
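     /*
      * Standard unicast output path: account the packet, then run
      * the POST_ROUTING hook unless the packet was already rerouted
      * there (IPSKB_REROUTED), in which case the hook is skipped and
      * ip_finish_output() is invoked directly.
      */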
 308int ip_output(struct sk_buff *skb)
 309{
 310	struct net_device *dev = skb_dst(skb)->dev;
 311
 312	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
 313
 314	skb->dev = dev;
 315	skb->protocol = htons(ETH_P_IP);
 316
 317	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev,
 318			    ip_finish_output,
 319			    !(IPCB(skb)->flags & IPSKB_REROUTED));
 320}
 321
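     /*
      * Main transmit routine for connected sockets (used by TCP, for
      * example).  Fast path: reuse the route already attached to the
      * skb or cached on the socket; do a full routing lookup only
      * when neither is available.  The IP header is then built in
      * front of the transport header and the packet is sent via
      * ip_local_out().
      */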
 322int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl)
 323{
 324	struct sock *sk = skb->sk;
 325	struct inet_sock *inet = inet_sk(sk);
 326	struct ip_options_rcu *inet_opt;
 327	struct flowi4 *fl4;
 328	struct rtable *rt;
 329	struct iphdr *iph;
 330	int res;
 331
  332	/* Skip all of this if the packet is already routed,
  333	 * e.g. by something like SCTP.
  334	 */
 335	rcu_read_lock();
 336	inet_opt = rcu_dereference(inet->inet_opt);
 337	fl4 = &fl->u.ip4;
 338	rt = skb_rtable(skb);
 339	if (rt != NULL)
 340		goto packet_routed;
 341
 342	/* Make sure we can route this packet. */
 343	rt = (struct rtable *)__sk_dst_check(sk, 0);
 344	if (rt == NULL) {
 345		__be32 daddr;
 346
 347		/* Use correct destination address if we have options. */
 348		daddr = inet->inet_daddr;
 349		if (inet_opt && inet_opt->opt.srr)
 350			daddr = inet_opt->opt.faddr;
 351
  352		/* If this fails, the transport layer's retransmit
  353		 * mechanism will keep trying until a route appears or
  354		 * the connection times out.
  355		 */
 356		rt = ip_route_output_ports(sock_net(sk), fl4, sk,
 357					   daddr, inet->inet_saddr,
 358					   inet->inet_dport,
 359					   inet->inet_sport,
 360					   sk->sk_protocol,
 361					   RT_CONN_FLAGS(sk),
 362					   sk->sk_bound_dev_if);
 363		if (IS_ERR(rt))
 364			goto no_route;
 365		sk_setup_caps(sk, &rt->dst);
 366	}
 367	skb_dst_set_noref(skb, &rt->dst);
 368
 369packet_routed:
 370	if (inet_opt && inet_opt->opt.is_strictroute && fl4->daddr != rt->rt_gateway)
 371		goto no_route;
 372
 373	/* OK, we know where to send it, allocate and build IP header. */
 374	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
 375	skb_reset_network_header(skb);
 376	iph = ip_hdr(skb);
 377	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
 378	if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df)
 379		iph->frag_off = htons(IP_DF);
 380	else
 381		iph->frag_off = 0;
 382	iph->ttl      = ip_select_ttl(inet, &rt->dst);
 383	iph->protocol = sk->sk_protocol;
 384	iph->saddr    = fl4->saddr;
 385	iph->daddr    = fl4->daddr;
  386	/* The transport layer has set skb->h.foo itself. */
 387
 388	if (inet_opt && inet_opt->opt.optlen) {
 389		iph->ihl += inet_opt->opt.optlen >> 2;
 390		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
 391	}
 392
 393	ip_select_ident_more(iph, &rt->dst, sk,
 394			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);
 395
 396	skb->priority = sk->sk_priority;
 397	skb->mark = sk->sk_mark;
 398
 399	res = ip_local_out(skb);
 400	rcu_read_unlock();
 401	return res;
 402
 403no_route:
 404	rcu_read_unlock();
 405	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
 406	kfree_skb(skb);
 407	return -EHOSTUNREACH;
 408}
 409EXPORT_SYMBOL(ip_queue_xmit);
 410
 411
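     /*
      * Propagate per-packet metadata (priority, dst, netfilter state
      * and so on) from the original skb to a freshly allocated
      * fragment, so that every fragment is handled exactly like the
      * packet it was carved from.
      */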
 412static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
 413{
 414	to->pkt_type = from->pkt_type;
 415	to->priority = from->priority;
 416	to->protocol = from->protocol;
 417	skb_dst_drop(to);
 418	skb_dst_copy(to, from);
 419	to->dev = from->dev;
 420	to->mark = from->mark;
 421
 422	/* Copy the flags to each fragment. */
 423	IPCB(to)->flags = IPCB(from)->flags;
 424
 425#ifdef CONFIG_NET_SCHED
 426	to->tc_index = from->tc_index;
 427#endif
 428	nf_copy(to, from);
 429#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
 430    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
 431	to->nf_trace = from->nf_trace;
 432#endif
 433#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
 434	to->ipvs_property = from->ipvs_property;
 435#endif
 436	skb_copy_secmark(to, from);
 437}
 438
  439/*
  440 *	This IP datagram is too large to be sent in one piece.  Break it up
  441 *	into smaller pieces (each consisting of an IP header plus a block of
  442 *	the original datagram's data) so that each piece fits in a single
  443 *	device frame, and queue each such frame for sending.
  444 */
 445
 446int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 447{
 448	struct iphdr *iph;
 449	int ptr;
 450	struct net_device *dev;
 451	struct sk_buff *skb2;
 452	unsigned int mtu, hlen, left, len, ll_rs;
 453	int offset;
 454	__be16 not_last_frag;
 455	struct rtable *rt = skb_rtable(skb);
 456	int err = 0;
 457
 458	dev = rt->dst.dev;
 459
 460	/*
 461	 *	Point into the IP datagram header.
 462	 */
 463
 464	iph = ip_hdr(skb);
 465
 466	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
 467		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
 468		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
 469			  htonl(ip_skb_dst_mtu(skb)));
 470		kfree_skb(skb);
 471		return -EMSGSIZE;
 472	}
 473
 474	/*
 475	 *	Setup starting values.
 476	 */
 477
 478	hlen = iph->ihl * 4;
 479	mtu = dst_mtu(&rt->dst) - hlen;	/* Size of data space */
 480#ifdef CONFIG_BRIDGE_NETFILTER
 481	if (skb->nf_bridge)
 482		mtu -= nf_bridge_mtu_reduction(skb);
 483#endif
 484	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
 485
  486	/* When a frag_list is given, use it.  First, check its validity:
  487	 * some transformers may create a bad frag_list or break an existing
  488	 * one; that is not prohibited.  In such a case fall back to copying.
  489	 *
  490	 * LATER: this step could be merged into the real generation of
  491	 * fragments, switching to copy mode on the first bad fragment.
  492	 */
 493	if (skb_has_frag_list(skb)) {
 494		struct sk_buff *frag, *frag2;
 495		int first_len = skb_pagelen(skb);
 496
 497		if (first_len - hlen > mtu ||
 498		    ((first_len - hlen) & 7) ||
 499		    ip_is_fragment(iph) ||
 500		    skb_cloned(skb))
 501			goto slow_path;
 502
 503		skb_walk_frags(skb, frag) {
 504			/* Correct geometry. */
 505			if (frag->len > mtu ||
 506			    ((frag->len & 7) && frag->next) ||
 507			    skb_headroom(frag) < hlen)
 508				goto slow_path_clean;
 509
 510			/* Partially cloned skb? */
 511			if (skb_shared(frag))
 512				goto slow_path_clean;
 513
 514			BUG_ON(frag->sk);
 515			if (skb->sk) {
 516				frag->sk = skb->sk;
 517				frag->destructor = sock_wfree;
 518			}
 519			skb->truesize -= frag->truesize;
 520		}
 521
 522		/* Everything is OK. Generate! */
 523
 524		err = 0;
 525		offset = 0;
 526		frag = skb_shinfo(skb)->frag_list;
 527		skb_frag_list_init(skb);
 528		skb->data_len = first_len - skb_headlen(skb);
 529		skb->len = first_len;
 530		iph->tot_len = htons(first_len);
 531		iph->frag_off = htons(IP_MF);
 532		ip_send_check(iph);
 533
 534		for (;;) {
  535			/* Prepare the header of the next frame
  536			 * before the previous one goes down. */
 537			if (frag) {
 538				frag->ip_summed = CHECKSUM_NONE;
 539				skb_reset_transport_header(frag);
 540				__skb_push(frag, hlen);
 541				skb_reset_network_header(frag);
 542				memcpy(skb_network_header(frag), iph, hlen);
 543				iph = ip_hdr(frag);
 544				iph->tot_len = htons(frag->len);
 545				ip_copy_metadata(frag, skb);
 546				if (offset == 0)
 547					ip_options_fragment(frag);
 548				offset += skb->len - hlen;
 549				iph->frag_off = htons(offset>>3);
 550				if (frag->next != NULL)
 551					iph->frag_off |= htons(IP_MF);
 552				/* Ready, complete checksum */
 553				ip_send_check(iph);
 554			}
 555
 556			err = output(skb);
 557
 558			if (!err)
 559				IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
 560			if (err || !frag)
 561				break;
 562
 563			skb = frag;
 564			frag = skb->next;
 565			skb->next = NULL;
 566		}
 567
 568		if (err == 0) {
 569			IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
 570			return 0;
 571		}
 572
 573		while (frag) {
 574			skb = frag->next;
 575			kfree_skb(frag);
 576			frag = skb;
 577		}
 578		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
 579		return err;
 580
 581slow_path_clean:
 582		skb_walk_frags(skb, frag2) {
 583			if (frag2 == frag)
 584				break;
 585			frag2->sk = NULL;
 586			frag2->destructor = NULL;
 587			skb->truesize += frag2->truesize;
 588		}
 589	}
 590
 591slow_path:
 592	left = skb->len - hlen;		/* Space per frame */
 593	ptr = hlen;		/* Where to start from */
 594
  595	/* For bridged IP traffic encapsulated inside e.g. a VLAN header,
  596	 * we need to make room for the encapsulating header.
  597	 */
 598	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, nf_bridge_pad(skb));
 599
 600	/*
 601	 *	Fragment the datagram.
 602	 */
 603
 604	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
 605	not_last_frag = iph->frag_off & htons(IP_MF);
 606
 607	/*
 608	 *	Keep copying data until we run out.
 609	 */
 610
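     	/*
     	 *	For example: with a 1500-byte route MTU and a 20-byte
     	 *	header, mtu (the data space) is 1480, so a datagram
     	 *	carrying 4000 payload bytes is cut into fragments of
     	 *	1480, 1480 and 1040 bytes at offsets 0, 1480 and 2960
     	 *	(frag_off values 0, 185 and 370, in 8-byte units),
     	 *	with MF set on all but the last.
     	 */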
 611	while (left > 0) {
 612		len = left;
 613		/* IF: it doesn't fit, use 'mtu' - the data space left */
 614		if (len > mtu)
 615			len = mtu;
  616		/* IF: we are not sending up to and including the packet end,
  617		   then align the next start on an eight-byte boundary */
 618		if (len < left)	{
 619			len &= ~7;
 620		}
 621		/*
 622		 *	Allocate buffer.
 623		 */
 624
 625		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
 626			NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
 627			err = -ENOMEM;
 628			goto fail;
 629		}
 630
 631		/*
 632		 *	Set up data on packet
 633		 */
 634
 635		ip_copy_metadata(skb2, skb);
 636		skb_reserve(skb2, ll_rs);
 637		skb_put(skb2, len + hlen);
 638		skb_reset_network_header(skb2);
 639		skb2->transport_header = skb2->network_header + hlen;
 640
 641		/*
 642		 *	Charge the memory for the fragment to any owner
 643		 *	it might possess
 644		 */
 645
 646		if (skb->sk)
 647			skb_set_owner_w(skb2, skb->sk);
 648
 649		/*
 650		 *	Copy the packet header into the new buffer.
 651		 */
 652
 653		skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);
 654
 655		/*
 656		 *	Copy a block of the IP datagram.
 657		 */
 658		if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
 659			BUG();
 660		left -= len;
 661
 662		/*
 663		 *	Fill in the new header fields.
 664		 */
 665		iph = ip_hdr(skb2);
 666		iph->frag_off = htons((offset >> 3));
 667
  668		/* ANK: a dirty but effective trick.  Fix up the options only
  669		 * if the segment being fragmented was THE FIRST (otherwise
  670		 * the options are already fixed), and do it ONCE on the
  671		 * initial skb, so that all the following fragments inherit
  672		 * the fixed options.
  673		 */
 674		if (offset == 0)
 675			ip_options_fragment(skb);
 676
  677		/*
  678		 *	Added AC : If we are fragmenting a fragment that's not the
  679		 *		   last fragment, keep the MF bit set on each piece.
  680		 */
 681		if (left > 0 || not_last_frag)
 682			iph->frag_off |= htons(IP_MF);
 683		ptr += len;
 684		offset += len;
 685
 686		/*
 687		 *	Put this fragment into the sending queue.
 688		 */
 689		iph->tot_len = htons(len + hlen);
 690
 691		ip_send_check(iph);
 692
 693		err = output(skb2);
 694		if (err)
 695			goto fail;
 696
 697		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
 698	}
 699	kfree_skb(skb);
 700	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
 701	return err;
 702
 703fail:
 704	kfree_skb(skb);
 705	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
 706	return err;
 707}
 708EXPORT_SYMBOL(ip_fragment);
 709
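     /*
      * Standard getfrag() callback for data described by an iovec:
      * copy 'len' bytes at 'offset' into the skb, accumulating a
      * partial checksum in skb->csum unless the hardware will
      * checksum the packet (CHECKSUM_PARTIAL).
      */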
 710int
 711ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
 712{
 713	struct iovec *iov = from;
 714
 715	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 716		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
 717			return -EFAULT;
 718	} else {
 719		__wsum csum = 0;
 720		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
 721			return -EFAULT;
 722		skb->csum = csum_block_add(skb->csum, csum, odd);
 723	}
 724	return 0;
 725}
 726EXPORT_SYMBOL(ip_generic_getfrag);
 727
 728static inline __wsum
 729csum_page(struct page *page, int offset, int copy)
 730{
 731	char *kaddr;
 732	__wsum csum;
 733	kaddr = kmap(page);
 734	csum = csum_partial(kaddr + offset, copy, 0);
 735	kunmap(page);
 736	return csum;
 737}
 738
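     /*
      * UFO path: rather than building MTU-sized fragments in
      * software, append all data to one large skb and record the
      * fragment size in gso_size; the device (or the software GSO
      * layer on its behalf) splits the datagram at transmit time.
      */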
 739static inline int ip_ufo_append_data(struct sock *sk,
 740			struct sk_buff_head *queue,
 741			int getfrag(void *from, char *to, int offset, int len,
 742			       int odd, struct sk_buff *skb),
 743			void *from, int length, int hh_len, int fragheaderlen,
 744			int transhdrlen, int maxfraglen, unsigned int flags)
 745{
 746	struct sk_buff *skb;
 747	int err;
 748
  749	/* The network device supports UDP fragmentation offload, so
  750	 * create one single skb packet containing the complete UDP
  751	 * datagram.
  752	 */
 753	if ((skb = skb_peek_tail(queue)) == NULL) {
 754		skb = sock_alloc_send_skb(sk,
 755			hh_len + fragheaderlen + transhdrlen + 20,
 756			(flags & MSG_DONTWAIT), &err);
 757
 758		if (skb == NULL)
 759			return err;
 760
 761		/* reserve space for Hardware header */
 762		skb_reserve(skb, hh_len);
 763
 764		/* create space for UDP/IP header */
 765		skb_put(skb, fragheaderlen + transhdrlen);
 766
 767		/* initialize network header pointer */
 768		skb_reset_network_header(skb);
 769
 770		/* initialize protocol header pointer */
 771		skb->transport_header = skb->network_header + fragheaderlen;
 772
 773		skb->ip_summed = CHECKSUM_PARTIAL;
 774		skb->csum = 0;
 775
 776		/* specify the length of each IP datagram fragment */
 777		skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
 778		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
 779		__skb_queue_tail(queue, skb);
 780	}
 781
 782	return skb_append_datato_frags(sk, skb, getfrag, from,
 783				       (length - transhdrlen));
 784}
 785
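     /*
      * Core of the append interface: chop the caller's data into
      * mtu-sized skbs on 'queue', each laid out as a ready-made IP
      * fragment (header space reserved, payload a multiple of eight
      * bytes except for the last one).  __ip_make_skb() later glues
      * the queue together into a single datagram.
      */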
 786static int __ip_append_data(struct sock *sk,
 787			    struct flowi4 *fl4,
 788			    struct sk_buff_head *queue,
 789			    struct inet_cork *cork,
 790			    int getfrag(void *from, char *to, int offset,
 791					int len, int odd, struct sk_buff *skb),
 792			    void *from, int length, int transhdrlen,
 793			    unsigned int flags)
 794{
 795	struct inet_sock *inet = inet_sk(sk);
 796	struct sk_buff *skb;
 797
 798	struct ip_options *opt = cork->opt;
 799	int hh_len;
 800	int exthdrlen;
 801	int mtu;
 802	int copy;
 803	int err;
 804	int offset = 0;
 805	unsigned int maxfraglen, fragheaderlen;
 806	int csummode = CHECKSUM_NONE;
 807	struct rtable *rt = (struct rtable *)cork->dst;
 808
 809	skb = skb_peek_tail(queue);
 810
 811	exthdrlen = !skb ? rt->dst.header_len : 0;
 812	mtu = cork->fragsize;
 813
 814	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
 815
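     	/* E.g. with an mtu of 1500 and no options, fragheaderlen is 20
     	 * and maxfraglen is (1480 & ~7) + 20 = 1500; with 4 bytes of
     	 * options, fragheaderlen is 24 and maxfraglen is 1496.  Either
     	 * way each fragment's payload stays a multiple of 8 bytes.
     	 */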
 816	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
 817	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
 818
 819	if (cork->length + length > 0xFFFF - fragheaderlen) {
 820		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
 821			       mtu-exthdrlen);
 822		return -EMSGSIZE;
 823	}
 824
  825	/*
  826	 * transhdrlen > 0 means that this is the first fragment, and we
  827	 * wish it not to be fragmented later.
  828	 */
 829	if (transhdrlen &&
 830	    length + fragheaderlen <= mtu &&
 831	    rt->dst.dev->features & NETIF_F_V4_CSUM &&
 832	    !exthdrlen)
 833		csummode = CHECKSUM_PARTIAL;
 834
 835	cork->length += length;
 836	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
 837	    (sk->sk_protocol == IPPROTO_UDP) &&
 838	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
 839		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
 840					 hh_len, fragheaderlen, transhdrlen,
 841					 maxfraglen, flags);
 842		if (err)
 843			goto error;
 844		return 0;
 845	}
 846
  847	/* So, what's going on in the loop below?
  848	 *
  849	 * We use the calculated fragment length to build a chain of skbs;
  850	 * each segment is an IP fragment, ready to be sent to the network
  851	 * once an appropriate IP header has been added.
  852	 */
 853
 854	if (!skb)
 855		goto alloc_new_skb;
 856
 857	while (length > 0) {
 858		/* Check if the remaining data fits into current packet. */
 859		copy = mtu - skb->len;
 860		if (copy < length)
 861			copy = maxfraglen - skb->len;
 862		if (copy <= 0) {
 863			char *data;
 864			unsigned int datalen;
 865			unsigned int fraglen;
 866			unsigned int fraggap;
 867			unsigned int alloclen;
 868			struct sk_buff *skb_prev;
 869alloc_new_skb:
 870			skb_prev = skb;
 871			if (skb_prev)
 872				fraggap = skb_prev->len - maxfraglen;
 873			else
 874				fraggap = 0;
 875
 876			/*
 877			 * If remaining data exceeds the mtu,
 878			 * we know we need more fragment(s).
 879			 */
 880			datalen = length + fraggap;
 881			if (datalen > mtu - fragheaderlen)
 882				datalen = maxfraglen - fragheaderlen;
 883			fraglen = datalen + fragheaderlen;
 884
 885			if ((flags & MSG_MORE) &&
 886			    !(rt->dst.dev->features&NETIF_F_SG))
 887				alloclen = mtu;
 888			else
 889				alloclen = fraglen;
 890
 891			alloclen += exthdrlen;
 892
  893			/* The last fragment gets additional space at tail.
  894			 * Note that with MSG_MORE we overallocate on fragments,
  895			 * because we have no idea which fragment will be
  896			 * the last.
  897			 */
 898			if (datalen == length + fraggap)
 899				alloclen += rt->dst.trailer_len;
 900
 901			if (transhdrlen) {
 902				skb = sock_alloc_send_skb(sk,
 903						alloclen + hh_len + 15,
 904						(flags & MSG_DONTWAIT), &err);
 905			} else {
 906				skb = NULL;
 907				if (atomic_read(&sk->sk_wmem_alloc) <=
 908				    2 * sk->sk_sndbuf)
 909					skb = sock_wmalloc(sk,
 910							   alloclen + hh_len + 15, 1,
 911							   sk->sk_allocation);
 912				if (unlikely(skb == NULL))
 913					err = -ENOBUFS;
 914				else
 915					/* only the initial fragment is
 916					   time stamped */
 917					cork->tx_flags = 0;
 918			}
 919			if (skb == NULL)
 920				goto error;
 921
 922			/*
 923			 *	Fill in the control structures
 924			 */
 925			skb->ip_summed = csummode;
 926			skb->csum = 0;
 927			skb_reserve(skb, hh_len);
 928			skb_shinfo(skb)->tx_flags = cork->tx_flags;
 929
 930			/*
 931			 *	Find where to start putting bytes.
 932			 */
 933			data = skb_put(skb, fraglen + exthdrlen);
 934			skb_set_network_header(skb, exthdrlen);
 935			skb->transport_header = (skb->network_header +
 936						 fragheaderlen);
 937			data += fragheaderlen + exthdrlen;
 938
 939			if (fraggap) {
 940				skb->csum = skb_copy_and_csum_bits(
 941					skb_prev, maxfraglen,
 942					data + transhdrlen, fraggap, 0);
 943				skb_prev->csum = csum_sub(skb_prev->csum,
 944							  skb->csum);
 945				data += fraggap;
 946				pskb_trim_unique(skb_prev, maxfraglen);
 947			}
 948
 949			copy = datalen - transhdrlen - fraggap;
 950			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
 951				err = -EFAULT;
 952				kfree_skb(skb);
 953				goto error;
 954			}
 955
 956			offset += copy;
 957			length -= datalen - fraggap;
 958			transhdrlen = 0;
 959			exthdrlen = 0;
 960			csummode = CHECKSUM_NONE;
 961
 962			/*
 963			 * Put the packet on the pending queue.
 964			 */
 965			__skb_queue_tail(queue, skb);
 966			continue;
 967		}
 968
 969		if (copy > length)
 970			copy = length;
 971
 972		if (!(rt->dst.dev->features&NETIF_F_SG)) {
 973			unsigned int off;
 974
 975			off = skb->len;
 976			if (getfrag(from, skb_put(skb, copy),
 977					offset, copy, off, skb) < 0) {
 978				__skb_trim(skb, off);
 979				err = -EFAULT;
 980				goto error;
 981			}
 982		} else {
 983			int i = skb_shinfo(skb)->nr_frags;
 984			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
 985			struct page *page = cork->page;
 986			int off = cork->off;
 987			unsigned int left;
 988
 989			if (page && (left = PAGE_SIZE - off) > 0) {
 990				if (copy >= left)
 991					copy = left;
 992				if (page != frag->page) {
 993					if (i == MAX_SKB_FRAGS) {
 994						err = -EMSGSIZE;
 995						goto error;
 996					}
 997					get_page(page);
 998					skb_fill_page_desc(skb, i, page, off, 0);
 999					frag = &skb_shinfo(skb)->frags[i];
1000				}
1001			} else if (i < MAX_SKB_FRAGS) {
1002				if (copy > PAGE_SIZE)
1003					copy = PAGE_SIZE;
1004				page = alloc_pages(sk->sk_allocation, 0);
1005				if (page == NULL)  {
1006					err = -ENOMEM;
1007					goto error;
1008				}
1009				cork->page = page;
1010				cork->off = 0;
1011
1012				skb_fill_page_desc(skb, i, page, 0, 0);
1013				frag = &skb_shinfo(skb)->frags[i];
1014			} else {
1015				err = -EMSGSIZE;
1016				goto error;
1017			}
1018			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
1019				err = -EFAULT;
1020				goto error;
1021			}
1022			cork->off += copy;
1023			frag->size += copy;
1024			skb->len += copy;
1025			skb->data_len += copy;
1026			skb->truesize += copy;
1027			atomic_add(copy, &sk->sk_wmem_alloc);
1028		}
1029		offset += copy;
1030		length -= copy;
1031	}
1032
1033	return 0;
1034
1035error:
1036	cork->length -= length;
1037	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
1038	return err;
1039}
1040
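     /*
      * Initialise the cork for a new datagram: stash a private copy
      * of the IP options, steal the caller's route reference and
      * latch the fragment size (the path MTU, or the raw device MTU
      * when probing with IP_PMTUDISC_PROBE).
      */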
1041static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
1042			 struct ipcm_cookie *ipc, struct rtable **rtp)
1043{
1044	struct inet_sock *inet = inet_sk(sk);
1045	struct ip_options_rcu *opt;
1046	struct rtable *rt;
1047
1048	/*
1049	 * setup for corking.
1050	 */
1051	opt = ipc->opt;
1052	if (opt) {
1053		if (cork->opt == NULL) {
1054			cork->opt = kmalloc(sizeof(struct ip_options) + 40,
1055					    sk->sk_allocation);
1056			if (unlikely(cork->opt == NULL))
1057				return -ENOBUFS;
1058		}
1059		memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
1060		cork->flags |= IPCORK_OPT;
1061		cork->addr = ipc->addr;
1062	}
1063	rt = *rtp;
1064	if (unlikely(!rt))
1065		return -EFAULT;
 1066	/*
 1067	 * We steal the reference to this route; the caller must not release it.
 1068	 */
1069	*rtp = NULL;
1070	cork->fragsize = inet->pmtudisc == IP_PMTUDISC_PROBE ?
1071			 rt->dst.dev->mtu : dst_mtu(&rt->dst);
1072	cork->dst = &rt->dst;
1073	cork->length = 0;
1074	cork->tx_flags = ipc->tx_flags;
1075	cork->page = NULL;
1076	cork->off = 0;
1077
1078	return 0;
1079}
1080
 1081/*
 1082 *	ip_append_data() and ip_append_page() can make one large IP datagram
 1083 *	from many pieces of data.  Each piece is held on the socket
 1084 *	until ip_push_pending_frames() is called.  Each piece can be a page
 1085 *	or non-page data.
 1086 *
 1087 *	Not only UDP: other transport protocols - e.g. raw sockets - can
 1088 *	potentially use this interface as well.
 1089 *
 1090 *	LATER: the length must be adjusted for tail padding when required.
 1091 */
1092int ip_append_data(struct sock *sk, struct flowi4 *fl4,
1093		   int getfrag(void *from, char *to, int offset, int len,
1094			       int odd, struct sk_buff *skb),
1095		   void *from, int length, int transhdrlen,
1096		   struct ipcm_cookie *ipc, struct rtable **rtp,
1097		   unsigned int flags)
1098{
1099	struct inet_sock *inet = inet_sk(sk);
1100	int err;
1101
1102	if (flags&MSG_PROBE)
1103		return 0;
1104
1105	if (skb_queue_empty(&sk->sk_write_queue)) {
1106		err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
1107		if (err)
1108			return err;
1109	} else {
1110		transhdrlen = 0;
1111	}
1112
1113	return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base, getfrag,
1114				from, length, transhdrlen, flags);
1115}
1116
1117ssize_t	ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
1118		       int offset, size_t size, int flags)
1119{
1120	struct inet_sock *inet = inet_sk(sk);
1121	struct sk_buff *skb;
1122	struct rtable *rt;
1123	struct ip_options *opt = NULL;
1124	struct inet_cork *cork;
1125	int hh_len;
1126	int mtu;
1127	int len;
1128	int err;
1129	unsigned int maxfraglen, fragheaderlen, fraggap;
1130
1131	if (inet->hdrincl)
1132		return -EPERM;
1133
1134	if (flags&MSG_PROBE)
1135		return 0;
1136
1137	if (skb_queue_empty(&sk->sk_write_queue))
1138		return -EINVAL;
1139
1140	cork = &inet->cork.base;
1141	rt = (struct rtable *)cork->dst;
1142	if (cork->flags & IPCORK_OPT)
1143		opt = cork->opt;
1144
1145	if (!(rt->dst.dev->features&NETIF_F_SG))
1146		return -EOPNOTSUPP;
1147
1148	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
1149	mtu = cork->fragsize;
1150
1151	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
1152	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
1153
1154	if (cork->length + size > 0xFFFF - fragheaderlen) {
1155		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, mtu);
1156		return -EMSGSIZE;
1157	}
1158
1159	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
1160		return -EINVAL;
1161
1162	cork->length += size;
1163	if ((size + skb->len > mtu) &&
1164	    (sk->sk_protocol == IPPROTO_UDP) &&
1165	    (rt->dst.dev->features & NETIF_F_UFO)) {
1166		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
1167		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1168	}
1169
1170
1171	while (size > 0) {
1172		int i;
1173
1174		if (skb_is_gso(skb))
1175			len = size;
1176		else {
1177
1178			/* Check if the remaining data fits into current packet. */
1179			len = mtu - skb->len;
1180			if (len < size)
1181				len = maxfraglen - skb->len;
1182		}
1183		if (len <= 0) {
1184			struct sk_buff *skb_prev;
1185			int alloclen;
1186
1187			skb_prev = skb;
1188			fraggap = skb_prev->len - maxfraglen;
1189
1190			alloclen = fragheaderlen + hh_len + fraggap + 15;
1191			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
1192			if (unlikely(!skb)) {
1193				err = -ENOBUFS;
1194				goto error;
1195			}
1196
1197			/*
1198			 *	Fill in the control structures
1199			 */
1200			skb->ip_summed = CHECKSUM_NONE;
1201			skb->csum = 0;
1202			skb_reserve(skb, hh_len);
1203
1204			/*
1205			 *	Find where to start putting bytes.
1206			 */
1207			skb_put(skb, fragheaderlen + fraggap);
1208			skb_reset_network_header(skb);
1209			skb->transport_header = (skb->network_header +
1210						 fragheaderlen);
1211			if (fraggap) {
1212				skb->csum = skb_copy_and_csum_bits(skb_prev,
1213								   maxfraglen,
1214						    skb_transport_header(skb),
1215								   fraggap, 0);
1216				skb_prev->csum = csum_sub(skb_prev->csum,
1217							  skb->csum);
1218				pskb_trim_unique(skb_prev, maxfraglen);
1219			}
1220
1221			/*
1222			 * Put the packet on the pending queue.
1223			 */
1224			__skb_queue_tail(&sk->sk_write_queue, skb);
1225			continue;
1226		}
1227
1228		i = skb_shinfo(skb)->nr_frags;
1229		if (len > size)
1230			len = size;
1231		if (skb_can_coalesce(skb, i, page, offset)) {
1232			skb_shinfo(skb)->frags[i-1].size += len;
1233		} else if (i < MAX_SKB_FRAGS) {
1234			get_page(page);
1235			skb_fill_page_desc(skb, i, page, offset, len);
1236		} else {
1237			err = -EMSGSIZE;
1238			goto error;
1239		}
1240
1241		if (skb->ip_summed == CHECKSUM_NONE) {
1242			__wsum csum;
1243			csum = csum_page(page, offset, len);
1244			skb->csum = csum_block_add(skb->csum, csum, skb->len);
1245		}
1246
1247		skb->len += len;
1248		skb->data_len += len;
1249		skb->truesize += len;
1250		atomic_add(len, &sk->sk_wmem_alloc);
1251		offset += len;
1252		size -= len;
1253	}
1254	return 0;
1255
1256error:
1257	cork->length -= size;
1258	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
1259	return err;
1260}
1261
1262static void ip_cork_release(struct inet_cork *cork)
1263{
1264	cork->flags &= ~IPCORK_OPT;
1265	kfree(cork->opt);
1266	cork->opt = NULL;
1267	dst_release(cork->dst);
1268	cork->dst = NULL;
1269}
1270
 1271/*
 1272 *	Combine all pending IP fragments on the socket into one IP datagram
 1273 *	and push it out.
 1274 */
1275struct sk_buff *__ip_make_skb(struct sock *sk,
1276			      struct flowi4 *fl4,
1277			      struct sk_buff_head *queue,
1278			      struct inet_cork *cork)
1279{
1280	struct sk_buff *skb, *tmp_skb;
1281	struct sk_buff **tail_skb;
1282	struct inet_sock *inet = inet_sk(sk);
1283	struct net *net = sock_net(sk);
1284	struct ip_options *opt = NULL;
1285	struct rtable *rt = (struct rtable *)cork->dst;
1286	struct iphdr *iph;
1287	__be16 df = 0;
1288	__u8 ttl;
1289
1290	if ((skb = __skb_dequeue(queue)) == NULL)
1291		goto out;
1292	tail_skb = &(skb_shinfo(skb)->frag_list);
1293
 1294	/* move skb->data from the ext header to the ip header */
1295	if (skb->data < skb_network_header(skb))
1296		__skb_pull(skb, skb_network_offset(skb));
1297	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
1298		__skb_pull(tmp_skb, skb_network_header_len(skb));
1299		*tail_skb = tmp_skb;
1300		tail_skb = &(tmp_skb->next);
1301		skb->len += tmp_skb->len;
1302		skb->data_len += tmp_skb->len;
1303		skb->truesize += tmp_skb->truesize;
1304		tmp_skb->destructor = NULL;
1305		tmp_skb->sk = NULL;
1306	}
1307
 1308	/* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO),
 1309	 * we allow the frame generated here to be fragmented.  No matter
 1310	 * how transforms change the size of the packet, it will go out.
 1311	 */
1312	if (inet->pmtudisc < IP_PMTUDISC_DO)
1313		skb->local_df = 1;
1314
 1315	/* The DF bit is set when we want to see DF on outgoing frames.
 1316	 * If local_df is also set, we still allow this frame to be
 1317	 * fragmented locally. */
1318	if (inet->pmtudisc >= IP_PMTUDISC_DO ||
1319	    (skb->len <= dst_mtu(&rt->dst) &&
1320	     ip_dont_fragment(sk, &rt->dst)))
1321		df = htons(IP_DF);
1322
1323	if (cork->flags & IPCORK_OPT)
1324		opt = cork->opt;
1325
1326	if (rt->rt_type == RTN_MULTICAST)
1327		ttl = inet->mc_ttl;
1328	else
1329		ttl = ip_select_ttl(inet, &rt->dst);
1330
1331	iph = (struct iphdr *)skb->data;
1332	iph->version = 4;
1333	iph->ihl = 5;
1334	iph->tos = inet->tos;
1335	iph->frag_off = df;
1336	ip_select_ident(iph, &rt->dst, sk);
1337	iph->ttl = ttl;
1338	iph->protocol = sk->sk_protocol;
1339	iph->saddr = fl4->saddr;
1340	iph->daddr = fl4->daddr;
1341
1342	if (opt) {
1343		iph->ihl += opt->optlen>>2;
1344		ip_options_build(skb, opt, cork->addr, rt, 0);
1345	}
1346
1347	skb->priority = sk->sk_priority;
1348	skb->mark = sk->sk_mark;
1349	/*
1350	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
1351	 * on dst refcount
1352	 */
1353	cork->dst = NULL;
1354	skb_dst_set(skb, &rt->dst);
1355
1356	if (iph->protocol == IPPROTO_ICMP)
1357		icmp_out_count(net, ((struct icmphdr *)
1358			skb_transport_header(skb))->type);
1359
1360	ip_cork_release(cork);
1361out:
1362	return skb;
1363}
1364
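     /*
      * Hand a finished skb to ip_local_out().  A positive return
      * value from the lower layers is a qdisc verdict such as
      * NET_XMIT_CN; net_xmit_errno() maps it to zero or an errno,
      * and real failures are counted as output discards.
      */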
1365int ip_send_skb(struct sk_buff *skb)
1366{
1367	struct net *net = sock_net(skb->sk);
1368	int err;
1369
1370	err = ip_local_out(skb);
1371	if (err) {
1372		if (err > 0)
1373			err = net_xmit_errno(err);
1374		if (err)
1375			IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
1376	}
1377
1378	return err;
1379}
1380
1381int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
1382{
1383	struct sk_buff *skb;
1384
1385	skb = ip_finish_skb(sk, fl4);
1386	if (!skb)
1387		return 0;
1388
 1389	/* Netfilter gets the whole, unfragmented skb. */
1390	return ip_send_skb(skb);
1391}
1392
1393/*
1394 *	Throw away all pending data on the socket.
1395 */
1396static void __ip_flush_pending_frames(struct sock *sk,
1397				      struct sk_buff_head *queue,
1398				      struct inet_cork *cork)
1399{
1400	struct sk_buff *skb;
1401
1402	while ((skb = __skb_dequeue_tail(queue)) != NULL)
1403		kfree_skb(skb);
1404
1405	ip_cork_release(cork);
1406}
1407
1408void ip_flush_pending_frames(struct sock *sk)
1409{
1410	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
1411}
1412
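     /*
      * Single-shot variant of the append interface: build the whole
      * datagram on a private queue with a local cork, leaving the
      * socket's write queue untouched, and return the finished skb
      * (or an ERR_PTR on failure).
      */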
1413struct sk_buff *ip_make_skb(struct sock *sk,
1414			    struct flowi4 *fl4,
1415			    int getfrag(void *from, char *to, int offset,
1416					int len, int odd, struct sk_buff *skb),
1417			    void *from, int length, int transhdrlen,
1418			    struct ipcm_cookie *ipc, struct rtable **rtp,
1419			    unsigned int flags)
1420{
1421	struct inet_cork cork;
1422	struct sk_buff_head queue;
1423	int err;
1424
1425	if (flags & MSG_PROBE)
1426		return NULL;
1427
1428	__skb_queue_head_init(&queue);
1429
1430	cork.flags = 0;
1431	cork.addr = 0;
1432	cork.opt = NULL;
1433	err = ip_setup_cork(sk, &cork, ipc, rtp);
1434	if (err)
1435		return ERR_PTR(err);
1436
1437	err = __ip_append_data(sk, fl4, &queue, &cork, getfrag,
1438			       from, length, transhdrlen, flags);
1439	if (err) {
1440		__ip_flush_pending_frames(sk, &queue, &cork);
1441		return ERR_PTR(err);
1442	}
1443
1444	return __ip_make_skb(sk, fl4, &queue, &cork);
1445}
1446
1447/*
1448 *	Fetch data from kernel space and fill in checksum if needed.
1449 */
1450static int ip_reply_glue_bits(void *dptr, char *to, int offset,
1451			      int len, int odd, struct sk_buff *skb)
1452{
1453	__wsum csum;
1454
1455	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
1456	skb->csum = csum_block_add(skb->csum, csum, odd);
1457	return 0;
1458}
1459
 1460/*
 1461 *	Generic function to send a packet as a reply to another packet.
 1462 *	Used to send TCP resets so far; ICMP should use this function too.
 1463 *
 1464 *	Should run single-threaded per socket because it uses the sock
 1465 *	structure to pass arguments.
 1466 */
1467void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
1468		   struct ip_reply_arg *arg, unsigned int len)
1469{
1470	struct inet_sock *inet = inet_sk(sk);
1471	struct ip_options_data replyopts;
1472	struct ipcm_cookie ipc;
1473	struct flowi4 fl4;
1474	struct rtable *rt = skb_rtable(skb);
1475
1476	if (ip_options_echo(&replyopts.opt.opt, skb))
1477		return;
1478
1479	ipc.addr = daddr;
1480	ipc.opt = NULL;
1481	ipc.tx_flags = 0;
1482
1483	if (replyopts.opt.opt.optlen) {
1484		ipc.opt = &replyopts.opt;
1485
1486		if (replyopts.opt.opt.srr)
1487			daddr = replyopts.opt.opt.faddr;
1488	}
1489
1490	flowi4_init_output(&fl4, arg->bound_dev_if, 0,
1491			   RT_TOS(ip_hdr(skb)->tos),
1492			   RT_SCOPE_UNIVERSE, sk->sk_protocol,
1493			   ip_reply_arg_flowi_flags(arg),
1494			   daddr, rt->rt_spec_dst,
1495			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
1496	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
1497	rt = ip_route_output_key(sock_net(sk), &fl4);
1498	if (IS_ERR(rt))
1499		return;
1500
 1501	/* And let IP do all the hard work.
 1502
 1503	   This chunk is not reentrant, hence the spinlock.
 1504	   Note that it relies on the fact that this function is called
 1505	   with BHs disabled locally and that sk cannot already be locked.
 1506	 */
1507	bh_lock_sock(sk);
1508	inet->tos = ip_hdr(skb)->tos;
1509	sk->sk_priority = skb->priority;
1510	sk->sk_protocol = ip_hdr(skb)->protocol;
1511	sk->sk_bound_dev_if = arg->bound_dev_if;
1512	ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
1513		       &ipc, &rt, MSG_DONTWAIT);
1514	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
1515		if (arg->csumoffset >= 0)
1516			*((__sum16 *)skb_transport_header(skb) +
1517			  arg->csumoffset) = csum_fold(csum_add(skb->csum,
1518								arg->csum));
1519		skb->ip_summed = CHECKSUM_NONE;
1520		ip_push_pending_frames(sk, &fl4);
1521	}
1522
1523	bh_unlock_sock(sk);
1524
1525	ip_rt_put(rt);
1526}
1527
1528void __init ip_init(void)
1529{
1530	ip_rt_init();
1531	inet_initpeers();
1532
1533#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
1534	igmp_mc_proc_init();
1535#endif
1536}