net/ipv4/ip_output.c (Linux v6.9.4)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   4 *		operating system.  INET is implemented using the  BSD Socket
   5 *		interface as the means of communication with the user level.
   6 *
   7 *		The Internet Protocol (IP) output module.
   8 *
   9 * Authors:	Ross Biro
  10 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  11 *		Donald Becker, <becker@super.org>
  12 *		Alan Cox, <Alan.Cox@linux.org>
  13 *		Richard Underwood
  14 *		Stefan Becker, <stefanb@yello.ping.de>
  15 *		Jorge Cwik, <jorge@laser.satlink.net>
  16 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
  17 *		Hirokazu Takahashi, <taka@valinux.co.jp>
  18 *
  19 *	See ip_input.c for original log
  20 *
  21 *	Fixes:
  22 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
  23 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
  24 *		Bradford Johnson:	Fix faulty handling of some frames when
  25 *					no route is found.
  26 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
  27 *					(in case if packet not accepted by
  28 *					output firewall rules)
  29 *		Mike McLagan	:	Routing by source
  30 *		Alexey Kuznetsov:	use new route cache
  31 *		Andi Kleen:		Fix broken PMTU recovery and remove
  32 *					some redundant tests.
  33 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
  34 *		Andi Kleen	: 	Replace ip_reply with ip_send_reply.
  35 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
  36 *					for decreased register pressure on x86
  37 *					and more readability.
  38 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
  39 *					silently drop skb instead of failing with -EPERM.
  40 *		Detlev Wengorz	:	Copy protocol for fragments.
  41 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
  42 *					datagrams.
  43 *		Hirokazu Takahashi:	sendfile() on UDP works now.
  44 */
  45
  46#include <linux/uaccess.h>
  47#include <linux/module.h>
  48#include <linux/types.h>
  49#include <linux/kernel.h>
  50#include <linux/mm.h>
  51#include <linux/string.h>
  52#include <linux/errno.h>
  53#include <linux/highmem.h>
  54#include <linux/slab.h>
  55
  56#include <linux/socket.h>
  57#include <linux/sockios.h>
  58#include <linux/in.h>
  59#include <linux/inet.h>
  60#include <linux/netdevice.h>
  61#include <linux/etherdevice.h>
  62#include <linux/proc_fs.h>
  63#include <linux/stat.h>
  64#include <linux/init.h>
  65
  66#include <net/snmp.h>
  67#include <net/ip.h>
  68#include <net/protocol.h>
  69#include <net/route.h>
  70#include <net/xfrm.h>
  71#include <linux/skbuff.h>
  72#include <net/sock.h>
  73#include <net/arp.h>
  74#include <net/icmp.h>
  75#include <net/checksum.h>
  76#include <net/gso.h>
  77#include <net/inetpeer.h>
  78#include <net/inet_ecn.h>
  79#include <net/lwtunnel.h>
  80#include <linux/bpf-cgroup.h>
  81#include <linux/igmp.h>
  82#include <linux/netfilter_ipv4.h>
  83#include <linux/netfilter_bridge.h>
  84#include <linux/netlink.h>
  85#include <linux/tcp.h>
  86
  87static int
  88ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
  89	    unsigned int mtu,
  90	    int (*output)(struct net *, struct sock *, struct sk_buff *));
  91
  92/* Generate a checksum for an outgoing IP datagram. */
  93void ip_send_check(struct iphdr *iph)
  94{
  95	iph->check = 0;
  96	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
  97}
  98EXPORT_SYMBOL(ip_send_check);
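/*
 * Illustrative sketch (not from this file): any path that rewrites IP
 * header fields must recompute the checksum afterwards, e.g. a
 * forwarding-style TTL decrement over a linear header:
 *
 *	struct iphdr *iph = ip_hdr(skb);
 *
 *	iph->ttl--;
 *	ip_send_check(iph);
 */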
  99
 100int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 101{
 102	struct iphdr *iph = ip_hdr(skb);
 103
 104	IP_INC_STATS(net, IPSTATS_MIB_OUTREQUESTS);
 105
 106	iph_set_totlen(iph, skb->len);
 107	ip_send_check(iph);
 108
  109	/* If the egress device is enslaved to an L3 master device, pass
  110	 * the skb to its handler for processing.
  111	 */
 112	skb = l3mdev_ip_out(sk, skb);
 113	if (unlikely(!skb))
 114		return 0;
 115
 116	skb->protocol = htons(ETH_P_IP);
 117
 118	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
 119		       net, sk, skb, NULL, skb_dst(skb)->dev,
 120		       dst_output);
 121}
 122
 123int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 124{
 125	int err;
 126
 127	err = __ip_local_out(net, sk, skb);
 128	if (likely(err == 1))
 129		err = dst_output(net, sk, skb);
 130
 131	return err;
 132}
 133EXPORT_SYMBOL_GPL(ip_local_out);
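/*
 * Note on the `err == 1` check above: nf_hook() returns 1 when the
 * NF_INET_LOCAL_OUT hooks accept the packet without consuming it, so
 * ip_local_out() still has to hand the skb to dst_output() itself.
 * Any other value (0 for queued/stolen, or a negative errno for a
 * drop) means the skb must not be touched again.
 */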
 134
 135static inline int ip_select_ttl(const struct inet_sock *inet,
 136				const struct dst_entry *dst)
 137{
 138	int ttl = READ_ONCE(inet->uc_ttl);
 139
 140	if (ttl < 0)
 141		ttl = ip4_dst_hoplimit(dst);
 142	return ttl;
 143}
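/*
 * uc_ttl is -1 unless the user pinned a TTL with setsockopt(IP_TTL);
 * otherwise the per-route hop limit applies, which itself falls back
 * to the ip_default_ttl sysctl.
 */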
 144
 145/*
 146 *		Add an ip header to a skbuff and send it out.
 147 *
 148 */
 149int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
 150			  __be32 saddr, __be32 daddr, struct ip_options_rcu *opt,
 151			  u8 tos)
 152{
 153	const struct inet_sock *inet = inet_sk(sk);
 154	struct rtable *rt = skb_rtable(skb);
 155	struct net *net = sock_net(sk);
 156	struct iphdr *iph;
 157
 158	/* Build the IP header. */
 159	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
 160	skb_reset_network_header(skb);
 161	iph = ip_hdr(skb);
 162	iph->version  = 4;
 163	iph->ihl      = 5;
 164	iph->tos      = tos;
 165	iph->ttl      = ip_select_ttl(inet, &rt->dst);
 166	iph->daddr    = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
 167	iph->saddr    = saddr;
 168	iph->protocol = sk->sk_protocol;
  169	/* Do not bother generating IPID for small packets (e.g. SYNACK) */
 170	if (skb->len <= IPV4_MIN_MTU || ip_dont_fragment(sk, &rt->dst)) {
 171		iph->frag_off = htons(IP_DF);
 172		iph->id = 0;
 173	} else {
 174		iph->frag_off = 0;
 175		/* TCP packets here are SYNACK with fat IPv4/TCP options.
 176		 * Avoid using the hashed IP ident generator.
 177		 */
 178		if (sk->sk_protocol == IPPROTO_TCP)
 179			iph->id = (__force __be16)get_random_u16();
 180		else
 181			__ip_select_ident(net, iph, 1);
 182	}
 183
 184	if (opt && opt->opt.optlen) {
 185		iph->ihl += opt->opt.optlen>>2;
 186		ip_options_build(skb, &opt->opt, daddr, rt);
 187	}
 188
 189	skb->priority = READ_ONCE(sk->sk_priority);
 190	if (!skb->mark)
 191		skb->mark = READ_ONCE(sk->sk_mark);
 192
 193	/* Send it out. */
 194	return ip_local_out(net, skb->sk, skb);
 195}
 196EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
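/*
 * Illustrative sketch (not from this file): a typical caller, such as
 * the TCP SYNACK path, attaches a route to the skb first and lets this
 * helper prepend and fill the IP header:
 *
 *	skb_dst_set(skb, dst_clone(dst));
 *	err = ip_build_and_send_pkt(skb, sk, saddr, daddr, opt, tos);
 */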
 197
 198static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
 199{
 200	struct dst_entry *dst = skb_dst(skb);
 201	struct rtable *rt = dst_rtable(dst);
 202	struct net_device *dev = dst->dev;
 203	unsigned int hh_len = LL_RESERVED_SPACE(dev);
 204	struct neighbour *neigh;
 205	bool is_v6gw = false;
 206
 207	if (rt->rt_type == RTN_MULTICAST) {
 208		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len);
 209	} else if (rt->rt_type == RTN_BROADCAST)
 210		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len);
 211
  212	/* OUTOCTETS should be counted after fragmentation */
 213	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);
 214
 215	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
 216		skb = skb_expand_head(skb, hh_len);
 217		if (!skb)
 218			return -ENOMEM;
 219	}
 220
 221	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
 222		int res = lwtunnel_xmit(skb);
 223
 224		if (res != LWTUNNEL_XMIT_CONTINUE)
 225			return res;
 226	}
 227
 228	rcu_read_lock();
 229	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
 230	if (!IS_ERR(neigh)) {
 231		int res;
 232
 233		sock_confirm_neigh(skb, neigh);
  234		/* if crossing protocols, we cannot use the cached header */
 235		res = neigh_output(neigh, skb, is_v6gw);
 236		rcu_read_unlock();
 237		return res;
 238	}
 239	rcu_read_unlock();
 240
 241	net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
 242			    __func__);
 243	kfree_skb_reason(skb, SKB_DROP_REASON_NEIGH_CREATEFAIL);
 244	return PTR_ERR(neigh);
 245}
 246
 247static int ip_finish_output_gso(struct net *net, struct sock *sk,
 248				struct sk_buff *skb, unsigned int mtu)
 249{
 250	struct sk_buff *segs, *nskb;
 251	netdev_features_t features;
 252	int ret = 0;
 253
 254	/* common case: seglen is <= mtu
 255	 */
 256	if (skb_gso_validate_network_len(skb, mtu))
 257		return ip_finish_output2(net, sk, skb);
 258
 259	/* Slowpath -  GSO segment length exceeds the egress MTU.
 260	 *
 261	 * This can happen in several cases:
 262	 *  - Forwarding of a TCP GRO skb, when DF flag is not set.
 263	 *  - Forwarding of an skb that arrived on a virtualization interface
 264	 *    (virtio-net/vhost/tap) with TSO/GSO size set by other network
 265	 *    stack.
 266	 *  - Local GSO skb transmitted on an NETIF_F_TSO tunnel stacked over an
 267	 *    interface with a smaller MTU.
 268	 *  - Arriving GRO skb (or GSO skb in a virtualized environment) that is
 269	 *    bridged to a NETIF_F_TSO tunnel stacked over an interface with an
 270	 *    insufficient MTU.
 271	 */
 272	features = netif_skb_features(skb);
 273	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_GSO_CB_OFFSET);
 274	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
 275	if (IS_ERR_OR_NULL(segs)) {
 276		kfree_skb(skb);
 277		return -ENOMEM;
 278	}
 279
 280	consume_skb(skb);
 281
 282	skb_list_walk_safe(segs, segs, nskb) {
 283		int err;
 284
 285		skb_mark_not_on_list(segs);
 286		err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);
 287
 288		if (err && ret == 0)
 289			ret = err;
 290	}
 291
 292	return ret;
 293}
 294
 295static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 296{
 297	unsigned int mtu;
 298
 299#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
 300	/* Policy lookup after SNAT yielded a new policy */
 301	if (skb_dst(skb)->xfrm) {
 302		IPCB(skb)->flags |= IPSKB_REROUTED;
 303		return dst_output(net, sk, skb);
 304	}
 305#endif
 306	mtu = ip_skb_dst_mtu(sk, skb);
 307	if (skb_is_gso(skb))
 308		return ip_finish_output_gso(net, sk, skb, mtu);
 309
 310	if (skb->len > mtu || IPCB(skb)->frag_max_size)
 311		return ip_fragment(net, sk, skb, mtu, ip_finish_output2);
 312
 313	return ip_finish_output2(net, sk, skb);
 314}
 315
 316static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 317{
 318	int ret;
 319
 320	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
 321	switch (ret) {
 322	case NET_XMIT_SUCCESS:
 323		return __ip_finish_output(net, sk, skb);
 324	case NET_XMIT_CN:
 325		return __ip_finish_output(net, sk, skb) ? : ret;
 326	default:
 327		kfree_skb_reason(skb, SKB_DROP_REASON_BPF_CGROUP_EGRESS);
 328		return ret;
 329	}
 330}
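/*
 * Note on the NET_XMIT_CN case above: `__ip_finish_output(...) ? : ret`
 * returns the transmit result when it is an error, and otherwise
 * preserves the congestion-notification verdict from the cgroup BPF
 * program so callers still see it.
 */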
 331
 332static int ip_mc_finish_output(struct net *net, struct sock *sk,
 333			       struct sk_buff *skb)
 334{
 335	struct rtable *new_rt;
 336	bool do_cn = false;
 337	int ret, err;
 338
 339	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
 340	switch (ret) {
 341	case NET_XMIT_CN:
 342		do_cn = true;
 343		fallthrough;
 344	case NET_XMIT_SUCCESS:
 345		break;
 346	default:
 347		kfree_skb_reason(skb, SKB_DROP_REASON_BPF_CGROUP_EGRESS);
 348		return ret;
 349	}
 350
 351	/* Reset rt_iif so that inet_iif() will return skb->skb_iif. Setting
 352	 * this to non-zero causes ipi_ifindex in in_pktinfo to be overwritten,
 353	 * see ipv4_pktinfo_prepare().
 354	 */
 355	new_rt = rt_dst_clone(net->loopback_dev, skb_rtable(skb));
 356	if (new_rt) {
 357		new_rt->rt_iif = 0;
 358		skb_dst_drop(skb);
 359		skb_dst_set(skb, &new_rt->dst);
 360	}
 361
 362	err = dev_loopback_xmit(net, sk, skb);
 363	return (do_cn && err) ? ret : err;
 364}
 365
 366int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 367{
 368	struct rtable *rt = skb_rtable(skb);
 369	struct net_device *dev = rt->dst.dev;
 370
 371	/*
 372	 *	If the indicated interface is up and running, send the packet.
 373	 */
 374	skb->dev = dev;
 375	skb->protocol = htons(ETH_P_IP);
 376
 377	/*
 378	 *	Multicasts are looped back for other local users
 379	 */
 380
 381	if (rt->rt_flags&RTCF_MULTICAST) {
 382		if (sk_mc_loop(sk)
 383#ifdef CONFIG_IP_MROUTE
  384		/* Small optimization: do not loop back non-local frames
  385		   that returned after forwarding; they will be dropped
  386		   by ip_mr_input in any case.
  387		   Note that local frames are looped back to be delivered
  388		   to local recipients.
  389
  390		   This check is duplicated in ip_mr_input at the moment.
  391		 */
 392		    &&
 393		    ((rt->rt_flags & RTCF_LOCAL) ||
 394		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
 395#endif
 396		   ) {
 397			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
 398			if (newskb)
 399				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
 400					net, sk, newskb, NULL, newskb->dev,
 401					ip_mc_finish_output);
 402		}
 403
 404		/* Multicasts with ttl 0 must not go beyond the host */
 405
 406		if (ip_hdr(skb)->ttl == 0) {
 407			kfree_skb(skb);
 408			return 0;
 409		}
 410	}
 411
 412	if (rt->rt_flags&RTCF_BROADCAST) {
 413		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
 414		if (newskb)
 415			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
 416				net, sk, newskb, NULL, newskb->dev,
 417				ip_mc_finish_output);
 418	}
 419
 420	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
 421			    net, sk, skb, NULL, skb->dev,
 422			    ip_finish_output,
 423			    !(IPCB(skb)->flags & IPSKB_REROUTED));
 424}
 425
 426int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 427{
 428	struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;
 429
 430	skb->dev = dev;
 431	skb->protocol = htons(ETH_P_IP);
 432
 433	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
 434			    net, sk, skb, indev, dev,
 435			    ip_finish_output,
 436			    !(IPCB(skb)->flags & IPSKB_REROUTED));
 437}
 438EXPORT_SYMBOL(ip_output);
 439
 440/*
  441 * Copy saddr and daddr, possibly using 64-bit load/stores.
  442 * Equivalent to:
 443 *   iph->saddr = fl4->saddr;
 444 *   iph->daddr = fl4->daddr;
 445 */
 446static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
 447{
 448	BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
 449		     offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
 450
 451	iph->saddr = fl4->saddr;
 452	iph->daddr = fl4->daddr;
 453}
 454
 455/* Note: skb->sk can be different from sk, in case of tunnels */
 456int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
 457		    __u8 tos)
 458{
 459	struct inet_sock *inet = inet_sk(sk);
 460	struct net *net = sock_net(sk);
 461	struct ip_options_rcu *inet_opt;
 462	struct flowi4 *fl4;
 463	struct rtable *rt;
 464	struct iphdr *iph;
 465	int res;
 466
 467	/* Skip all of this if the packet is already routed,
  468	 * e.g. by something like SCTP.
 469	 */
 470	rcu_read_lock();
 471	inet_opt = rcu_dereference(inet->inet_opt);
 472	fl4 = &fl->u.ip4;
 473	rt = skb_rtable(skb);
 474	if (rt)
 475		goto packet_routed;
 476
 477	/* Make sure we can route this packet. */
 478	rt = dst_rtable(__sk_dst_check(sk, 0));
 479	if (!rt) {
 480		__be32 daddr;
 481
 482		/* Use correct destination address if we have options. */
 483		daddr = inet->inet_daddr;
 484		if (inet_opt && inet_opt->opt.srr)
 485			daddr = inet_opt->opt.faddr;
 486
  487		/* If this fails, the transport layer's retransmit mechanism
  488		 * will keep trying until the route appears or the connection
  489		 * times out.
 490		 */
 491		rt = ip_route_output_ports(net, fl4, sk,
 492					   daddr, inet->inet_saddr,
 493					   inet->inet_dport,
 494					   inet->inet_sport,
 495					   sk->sk_protocol,
 496					   RT_TOS(tos),
 497					   sk->sk_bound_dev_if);
 498		if (IS_ERR(rt))
 499			goto no_route;
 500		sk_setup_caps(sk, &rt->dst);
 501	}
 502	skb_dst_set_noref(skb, &rt->dst);
 503
 504packet_routed:
 505	if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
 506		goto no_route;
 507
 508	/* OK, we know where to send it, allocate and build IP header. */
 509	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
 510	skb_reset_network_header(skb);
 511	iph = ip_hdr(skb);
 512	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (tos & 0xff));
 513	if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
 514		iph->frag_off = htons(IP_DF);
 515	else
 516		iph->frag_off = 0;
 517	iph->ttl      = ip_select_ttl(inet, &rt->dst);
 518	iph->protocol = sk->sk_protocol;
 519	ip_copy_addrs(iph, fl4);
 520
  521	/* The transport layer sets skb->h.foo itself. */
 522
 523	if (inet_opt && inet_opt->opt.optlen) {
 524		iph->ihl += inet_opt->opt.optlen >> 2;
 525		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt);
 526	}
 527
 528	ip_select_ident_segs(net, skb, sk,
 529			     skb_shinfo(skb)->gso_segs ?: 1);
 530
  531	/* TODO: should we use skb->sk here instead of sk? */
 532	skb->priority = READ_ONCE(sk->sk_priority);
 533	skb->mark = READ_ONCE(sk->sk_mark);
 534
 535	res = ip_local_out(net, sk, skb);
 536	rcu_read_unlock();
 537	return res;
 538
 539no_route:
 540	rcu_read_unlock();
 541	IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
 542	kfree_skb_reason(skb, SKB_DROP_REASON_IP_OUTNOROUTES);
 543	return -EHOSTUNREACH;
 544}
 545EXPORT_SYMBOL(__ip_queue_xmit);
 546
 547int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
 548{
 549	return __ip_queue_xmit(sk, skb, fl, READ_ONCE(inet_sk(sk)->tos));
 550}
 551EXPORT_SYMBOL(ip_queue_xmit);
 552
 553static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
 554{
 555	to->pkt_type = from->pkt_type;
 556	to->priority = from->priority;
 557	to->protocol = from->protocol;
 558	to->skb_iif = from->skb_iif;
 559	skb_dst_drop(to);
 560	skb_dst_copy(to, from);
 561	to->dev = from->dev;
 562	to->mark = from->mark;
 563
 564	skb_copy_hash(to, from);
 565
 566#ifdef CONFIG_NET_SCHED
 567	to->tc_index = from->tc_index;
 568#endif
 569	nf_copy(to, from);
 570	skb_ext_copy(to, from);
 571#if IS_ENABLED(CONFIG_IP_VS)
 572	to->ipvs_property = from->ipvs_property;
 573#endif
 574	skb_copy_secmark(to, from);
 575}
 576
 577static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 578		       unsigned int mtu,
 579		       int (*output)(struct net *, struct sock *, struct sk_buff *))
 580{
 581	struct iphdr *iph = ip_hdr(skb);
 582
 583	if ((iph->frag_off & htons(IP_DF)) == 0)
 584		return ip_do_fragment(net, sk, skb, output);
 585
 586	if (unlikely(!skb->ignore_df ||
 587		     (IPCB(skb)->frag_max_size &&
 588		      IPCB(skb)->frag_max_size > mtu))) {
 589		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
 590		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
 591			  htonl(mtu));
 592		kfree_skb(skb);
 593		return -EMSGSIZE;
 594	}
 595
 596	return ip_do_fragment(net, sk, skb, output);
 597}
 598
 599void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
 600		      unsigned int hlen, struct ip_fraglist_iter *iter)
 601{
 602	unsigned int first_len = skb_pagelen(skb);
 603
 604	iter->frag = skb_shinfo(skb)->frag_list;
 605	skb_frag_list_init(skb);
 606
 607	iter->offset = 0;
 608	iter->iph = iph;
 609	iter->hlen = hlen;
 610
 611	skb->data_len = first_len - skb_headlen(skb);
 612	skb->len = first_len;
 613	iph->tot_len = htons(first_len);
 614	iph->frag_off = htons(IP_MF);
 615	ip_send_check(iph);
 616}
 617EXPORT_SYMBOL(ip_fraglist_init);
 618
 619void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
 620{
 621	unsigned int hlen = iter->hlen;
 622	struct iphdr *iph = iter->iph;
 623	struct sk_buff *frag;
 624
 625	frag = iter->frag;
 626	frag->ip_summed = CHECKSUM_NONE;
 627	skb_reset_transport_header(frag);
 628	__skb_push(frag, hlen);
 629	skb_reset_network_header(frag);
 630	memcpy(skb_network_header(frag), iph, hlen);
 631	iter->iph = ip_hdr(frag);
 632	iph = iter->iph;
 633	iph->tot_len = htons(frag->len);
 634	ip_copy_metadata(frag, skb);
 635	iter->offset += skb->len - hlen;
 636	iph->frag_off = htons(iter->offset >> 3);
 637	if (frag->next)
 638		iph->frag_off |= htons(IP_MF);
 639	/* Ready, complete checksum */
 640	ip_send_check(iph);
 641}
 642EXPORT_SYMBOL(ip_fraglist_prepare);
 643
 644void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
 645		  unsigned int ll_rs, unsigned int mtu, bool DF,
 646		  struct ip_frag_state *state)
 647{
 648	struct iphdr *iph = ip_hdr(skb);
 649
 650	state->DF = DF;
 651	state->hlen = hlen;
 652	state->ll_rs = ll_rs;
 653	state->mtu = mtu;
 654
 655	state->left = skb->len - hlen;	/* Space per frame */
 656	state->ptr = hlen;		/* Where to start from */
 657
 658	state->offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
 659	state->not_last_frag = iph->frag_off & htons(IP_MF);
 660}
 661EXPORT_SYMBOL(ip_frag_init);
 662
 663static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
 664			 bool first_frag)
 665{
 666	/* Copy the flags to each fragment. */
 667	IPCB(to)->flags = IPCB(from)->flags;
 668
 669	/* ANK: dirty, but effective trick. Upgrade options only if
 670	 * the segment to be fragmented was THE FIRST (otherwise,
  671	 * options are already fixed) and do it ONCE
  672	 * on the initial skb, so that all the following fragments
  673	 * will inherit the fixed options.
 674	 */
 675	if (first_frag)
 676		ip_options_fragment(from);
 677}
 678
 679struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state)
 680{
 681	unsigned int len = state->left;
 682	struct sk_buff *skb2;
 683	struct iphdr *iph;
 684
 685	/* IF: it doesn't fit, use 'mtu' - the data space left */
 686	if (len > state->mtu)
 687		len = state->mtu;
  688	/* IF: we are not sending up to and including the packet end,
  689	   then align the next start on an eight-byte boundary */
 690	if (len < state->left)	{
 691		len &= ~7;
 692	}
 693
 694	/* Allocate buffer */
 695	skb2 = alloc_skb(len + state->hlen + state->ll_rs, GFP_ATOMIC);
 696	if (!skb2)
 697		return ERR_PTR(-ENOMEM);
 698
 699	/*
 700	 *	Set up data on packet
 701	 */
 702
 703	ip_copy_metadata(skb2, skb);
 704	skb_reserve(skb2, state->ll_rs);
 705	skb_put(skb2, len + state->hlen);
 706	skb_reset_network_header(skb2);
 707	skb2->transport_header = skb2->network_header + state->hlen;
 708
 709	/*
 710	 *	Charge the memory for the fragment to any owner
 711	 *	it might possess
 712	 */
 713
 714	if (skb->sk)
 715		skb_set_owner_w(skb2, skb->sk);
 716
 717	/*
 718	 *	Copy the packet header into the new buffer.
 719	 */
 720
 721	skb_copy_from_linear_data(skb, skb_network_header(skb2), state->hlen);
 722
 723	/*
 724	 *	Copy a block of the IP datagram.
 725	 */
 726	if (skb_copy_bits(skb, state->ptr, skb_transport_header(skb2), len))
 727		BUG();
 728	state->left -= len;
 729
 730	/*
 731	 *	Fill in the new header fields.
 732	 */
 733	iph = ip_hdr(skb2);
 734	iph->frag_off = htons((state->offset >> 3));
 735	if (state->DF)
 736		iph->frag_off |= htons(IP_DF);
 737
 738	/*
 739	 *	Added AC : If we are fragmenting a fragment that's not the
  740	 *		   last fragment, then keep the MF bit set on each fragment.
 741	 */
 742	if (state->left > 0 || state->not_last_frag)
 743		iph->frag_off |= htons(IP_MF);
 744	state->ptr += len;
 745	state->offset += len;
 746
 747	iph->tot_len = htons(len + state->hlen);
 748
 749	ip_send_check(iph);
 750
 751	return skb2;
 752}
 753EXPORT_SYMBOL(ip_frag_next);
 754
 755/*
  756 *	This IP datagram is too large to be sent in one piece.  Break it up
  757 *	into smaller pieces (each the size of the IP header plus a block of
  758 *	the original IP data) that still fit in a single device frame, and
  759 *	queue each such frame for sending.
 760 */
 761
 762int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 763		   int (*output)(struct net *, struct sock *, struct sk_buff *))
 764{
 765	struct iphdr *iph;
 766	struct sk_buff *skb2;
 767	bool mono_delivery_time = skb->mono_delivery_time;
 768	struct rtable *rt = skb_rtable(skb);
 769	unsigned int mtu, hlen, ll_rs;
 770	struct ip_fraglist_iter iter;
 771	ktime_t tstamp = skb->tstamp;
 772	struct ip_frag_state state;
 773	int err = 0;
 774
  775	/* For offloaded checksums, clean up the checksum before fragmentation */
 776	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 777	    (err = skb_checksum_help(skb)))
 778		goto fail;
 779
 780	/*
 781	 *	Point into the IP datagram header.
 782	 */
 783
 784	iph = ip_hdr(skb);
 785
 786	mtu = ip_skb_dst_mtu(sk, skb);
 787	if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
 788		mtu = IPCB(skb)->frag_max_size;
 789
 790	/*
 791	 *	Setup starting values.
 792	 */
 793
 794	hlen = iph->ihl * 4;
 795	mtu = mtu - hlen;	/* Size of data space */
 796	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
 797	ll_rs = LL_RESERVED_SPACE(rt->dst.dev);
 798
 799	/* When frag_list is given, use it. First, check its validity:
  800	 * some transformers could create a wrong frag_list or break an
  801	 * existing one; that is not prohibited. In that case fall back to copying.
  802	 *
  803	 * LATER: this step can be merged into the real generation of fragments;
  804	 * we can switch to copying when we see the first bad fragment.
 805	 */
 806	if (skb_has_frag_list(skb)) {
 807		struct sk_buff *frag, *frag2;
 808		unsigned int first_len = skb_pagelen(skb);
 809
 810		if (first_len - hlen > mtu ||
 811		    ((first_len - hlen) & 7) ||
 812		    ip_is_fragment(iph) ||
 813		    skb_cloned(skb) ||
 814		    skb_headroom(skb) < ll_rs)
 815			goto slow_path;
 816
 817		skb_walk_frags(skb, frag) {
 818			/* Correct geometry. */
 819			if (frag->len > mtu ||
 820			    ((frag->len & 7) && frag->next) ||
 821			    skb_headroom(frag) < hlen + ll_rs)
 822				goto slow_path_clean;
 823
 824			/* Partially cloned skb? */
 825			if (skb_shared(frag))
 826				goto slow_path_clean;
 827
 828			BUG_ON(frag->sk);
 829			if (skb->sk) {
 830				frag->sk = skb->sk;
 831				frag->destructor = sock_wfree;
 832			}
 833			skb->truesize -= frag->truesize;
 834		}
 835
 836		/* Everything is OK. Generate! */
 837		ip_fraglist_init(skb, iph, hlen, &iter);
 838
 839		for (;;) {
  840			/* Prepare the header of the next frame
  841			 * before the previous one goes down. */
 842			if (iter.frag) {
 843				bool first_frag = (iter.offset == 0);
 844
 845				IPCB(iter.frag)->flags = IPCB(skb)->flags;
 846				ip_fraglist_prepare(skb, &iter);
 847				if (first_frag && IPCB(skb)->opt.optlen) {
 848					/* ipcb->opt is not populated for frags
 849					 * coming from __ip_make_skb(),
 850					 * ip_options_fragment() needs optlen
 851					 */
 852					IPCB(iter.frag)->opt.optlen =
 853						IPCB(skb)->opt.optlen;
 854					ip_options_fragment(iter.frag);
 855					ip_send_check(iter.iph);
 856				}
 857			}
 858
 859			skb_set_delivery_time(skb, tstamp, mono_delivery_time);
 860			err = output(net, sk, skb);
 861
 862			if (!err)
 863				IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
 864			if (err || !iter.frag)
 865				break;
 866
 867			skb = ip_fraglist_next(&iter);
 868		}
 869
 870		if (err == 0) {
 871			IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
 872			return 0;
 873		}
 874
 875		kfree_skb_list(iter.frag);
 876
 877		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
 878		return err;
 879
 880slow_path_clean:
 881		skb_walk_frags(skb, frag2) {
 882			if (frag2 == frag)
 883				break;
 884			frag2->sk = NULL;
 885			frag2->destructor = NULL;
 886			skb->truesize += frag2->truesize;
 887		}
 888	}
 889
 890slow_path:
 891	/*
 892	 *	Fragment the datagram.
 893	 */
 894
 895	ip_frag_init(skb, hlen, ll_rs, mtu, IPCB(skb)->flags & IPSKB_FRAG_PMTU,
 896		     &state);
 897
 898	/*
 899	 *	Keep copying data until we run out.
 900	 */
 901
 902	while (state.left > 0) {
 903		bool first_frag = (state.offset == 0);
 904
 905		skb2 = ip_frag_next(skb, &state);
 906		if (IS_ERR(skb2)) {
 907			err = PTR_ERR(skb2);
 908			goto fail;
 909		}
 910		ip_frag_ipcb(skb, skb2, first_frag);
 911
 912		/*
 913		 *	Put this fragment into the sending queue.
 914		 */
 915		skb_set_delivery_time(skb2, tstamp, mono_delivery_time);
 916		err = output(net, sk, skb2);
 917		if (err)
 918			goto fail;
 919
 920		IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
 921	}
 922	consume_skb(skb);
 923	IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
 924	return err;
 925
 926fail:
 927	kfree_skb(skb);
 928	IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
 929	return err;
 930}
 931EXPORT_SYMBOL(ip_do_fragment);
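/*
 * Worked example (illustrative): fragmenting a 4020-byte datagram
 * (20-byte header plus 4000 bytes of data) over a 1500-byte MTU leaves
 * mtu = 1480 bytes of data space per fragment, and 1480 is already a
 * multiple of 8:
 *
 *	frag 1: offset    0, 1480 data bytes, MF set
 *	frag 2: offset 1480, 1480 data bytes, MF set
 *	frag 3: offset 2960, 1040 data bytes, MF clear
 *
 * iph->frag_off carries offset >> 3, so every non-final fragment must
 * stay 8-byte aligned; that is what `len &= ~7` in ip_frag_next()
 * enforces on the slow path.
 */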
 932
 933int
 934ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
 935{
 936	struct msghdr *msg = from;
 937
 938	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 939		if (!copy_from_iter_full(to, len, &msg->msg_iter))
 940			return -EFAULT;
 941	} else {
 942		__wsum csum = 0;
 943		if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
 944			return -EFAULT;
 945		skb->csum = csum_block_add(skb->csum, csum, odd);
 946	}
 947	return 0;
 948}
 949EXPORT_SYMBOL(ip_generic_getfrag);
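/*
 * Every getfrag() callback handed to __ip_append_data() follows the
 * same contract: copy `len` payload bytes starting at `offset` in the
 * source into `to`, fold a checksum into skb->csum at block offset
 * `odd` when the device cannot checksum, and return 0 or -EFAULT.
 * A minimal kernel-buffer variant (an illustrative sketch; compare
 * ip_reply_glue_bits() later in this file) could look like:
 *
 *	static int kbuf_getfrag(void *from, char *to, int offset,
 *				int len, int odd, struct sk_buff *skb)
 *	{
 *		__wsum csum = csum_partial_copy_nocheck(from + offset,
 *							to, len);
 *
 *		skb->csum = csum_block_add(skb->csum, csum, odd);
 *		return 0;
 *	}
 */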
 950
 951static int __ip_append_data(struct sock *sk,
 952			    struct flowi4 *fl4,
 953			    struct sk_buff_head *queue,
 954			    struct inet_cork *cork,
 955			    struct page_frag *pfrag,
 956			    int getfrag(void *from, char *to, int offset,
 957					int len, int odd, struct sk_buff *skb),
 958			    void *from, int length, int transhdrlen,
 959			    unsigned int flags)
 960{
 961	struct inet_sock *inet = inet_sk(sk);
 962	struct ubuf_info *uarg = NULL;
 963	struct sk_buff *skb;
 964	struct ip_options *opt = cork->opt;
 965	int hh_len;
 966	int exthdrlen;
 967	int mtu;
 968	int copy;
 969	int err;
 970	int offset = 0;
 971	bool zc = false;
 972	unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
 973	int csummode = CHECKSUM_NONE;
 974	struct rtable *rt = dst_rtable(cork->dst);
 975	bool paged, hold_tskey, extra_uref = false;
 976	unsigned int wmem_alloc_delta = 0;
 977	u32 tskey = 0;
 978
 979	skb = skb_peek_tail(queue);
 980
 981	exthdrlen = !skb ? rt->dst.header_len : 0;
 982	mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;
 983	paged = !!cork->gso_size;
 984
 985	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
 986
 987	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
 988	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
 989	maxnonfragsize = ip_sk_ignore_df(sk) ? IP_MAX_MTU : mtu;
 990
 991	if (cork->length + length > maxnonfragsize - fragheaderlen) {
 992		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
 993			       mtu - (opt ? opt->optlen : 0));
 994		return -EMSGSIZE;
 995	}
 996
 997	/*
  998	 * transhdrlen > 0 means that this is the first fragment and we
  999	 * would prefer that it not be fragmented later.
1000	 */
1001	if (transhdrlen &&
1002	    length + fragheaderlen <= mtu &&
1003	    rt->dst.dev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM) &&
1004	    (!(flags & MSG_MORE) || cork->gso_size) &&
1005	    (!exthdrlen || (rt->dst.dev->features & NETIF_F_HW_ESP_TX_CSUM)))
1006		csummode = CHECKSUM_PARTIAL;
1007
1008	if ((flags & MSG_ZEROCOPY) && length) {
1009		struct msghdr *msg = from;
1010
1011		if (getfrag == ip_generic_getfrag && msg->msg_ubuf) {
1012			if (skb_zcopy(skb) && msg->msg_ubuf != skb_zcopy(skb))
1013				return -EINVAL;
1014
1015			/* Leave uarg NULL if can't zerocopy, callers should
1016			 * be able to handle it.
1017			 */
1018			if ((rt->dst.dev->features & NETIF_F_SG) &&
1019			    csummode == CHECKSUM_PARTIAL) {
1020				paged = true;
1021				zc = true;
1022				uarg = msg->msg_ubuf;
1023			}
1024		} else if (sock_flag(sk, SOCK_ZEROCOPY)) {
1025			uarg = msg_zerocopy_realloc(sk, length, skb_zcopy(skb));
1026			if (!uarg)
1027				return -ENOBUFS;
1028			extra_uref = !skb_zcopy(skb);	/* only ref on new uarg */
1029			if (rt->dst.dev->features & NETIF_F_SG &&
1030			    csummode == CHECKSUM_PARTIAL) {
1031				paged = true;
1032				zc = true;
1033			} else {
1034				uarg_to_msgzc(uarg)->zerocopy = 0;
1035				skb_zcopy_set(skb, uarg, &extra_uref);
1036			}
1037		}
1038	} else if ((flags & MSG_SPLICE_PAGES) && length) {
1039		if (inet_test_bit(HDRINCL, sk))
1040			return -EPERM;
1041		if (rt->dst.dev->features & NETIF_F_SG &&
1042		    getfrag == ip_generic_getfrag)
1043			/* We need an empty buffer to attach stuff to */
1044			paged = true;
1045		else
1046			flags &= ~MSG_SPLICE_PAGES;
1047	}
1048
1049	cork->length += length;
1050
1051	hold_tskey = cork->tx_flags & SKBTX_ANY_TSTAMP &&
1052		     READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID;
1053	if (hold_tskey)
1054		tskey = atomic_inc_return(&sk->sk_tskey) - 1;
1055
1056	/* So, what's going on in the loop below?
1057	 *
 1058	 * We use the calculated fragment length to generate a chain of skbs;
 1059	 * each segment is an IP fragment, ready to be sent to the network
 1060	 * once the appropriate IP header is added.
1061	 */
1062
1063	if (!skb)
1064		goto alloc_new_skb;
1065
1066	while (length > 0) {
1067		/* Check if the remaining data fits into current packet. */
1068		copy = mtu - skb->len;
1069		if (copy < length)
1070			copy = maxfraglen - skb->len;
1071		if (copy <= 0) {
1072			char *data;
1073			unsigned int datalen;
1074			unsigned int fraglen;
1075			unsigned int fraggap;
1076			unsigned int alloclen, alloc_extra;
1077			unsigned int pagedlen;
1078			struct sk_buff *skb_prev;
1079alloc_new_skb:
1080			skb_prev = skb;
1081			if (skb_prev)
1082				fraggap = skb_prev->len - maxfraglen;
1083			else
1084				fraggap = 0;
1085
1086			/*
1087			 * If remaining data exceeds the mtu,
1088			 * we know we need more fragment(s).
1089			 */
1090			datalen = length + fraggap;
1091			if (datalen > mtu - fragheaderlen)
1092				datalen = maxfraglen - fragheaderlen;
1093			fraglen = datalen + fragheaderlen;
1094			pagedlen = 0;
1095
1096			alloc_extra = hh_len + 15;
1097			alloc_extra += exthdrlen;
1098
1099			/* The last fragment gets additional space at tail.
 1100			 * Note: with MSG_MORE we overallocate on fragments,
 1101			 * because we have no idea which fragment will be
1102			 * the last.
1103			 */
1104			if (datalen == length + fraggap)
1105				alloc_extra += rt->dst.trailer_len;
1106
1107			if ((flags & MSG_MORE) &&
1108			    !(rt->dst.dev->features&NETIF_F_SG))
1109				alloclen = mtu;
1110			else if (!paged &&
1111				 (fraglen + alloc_extra < SKB_MAX_ALLOC ||
1112				  !(rt->dst.dev->features & NETIF_F_SG)))
1113				alloclen = fraglen;
1114			else {
1115				alloclen = fragheaderlen + transhdrlen;
1116				pagedlen = datalen - transhdrlen;
1117			}
1118
1119			alloclen += alloc_extra;
1120
1121			if (transhdrlen) {
1122				skb = sock_alloc_send_skb(sk, alloclen,
1123						(flags & MSG_DONTWAIT), &err);
1124			} else {
1125				skb = NULL;
1126				if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
1127				    2 * sk->sk_sndbuf)
1128					skb = alloc_skb(alloclen,
1129							sk->sk_allocation);
1130				if (unlikely(!skb))
1131					err = -ENOBUFS;
1132			}
1133			if (!skb)
1134				goto error;
1135
1136			/*
1137			 *	Fill in the control structures
1138			 */
1139			skb->ip_summed = csummode;
1140			skb->csum = 0;
1141			skb_reserve(skb, hh_len);
1142
1143			/*
1144			 *	Find where to start putting bytes.
1145			 */
1146			data = skb_put(skb, fraglen + exthdrlen - pagedlen);
1147			skb_set_network_header(skb, exthdrlen);
1148			skb->transport_header = (skb->network_header +
1149						 fragheaderlen);
1150			data += fragheaderlen + exthdrlen;
1151
1152			if (fraggap) {
1153				skb->csum = skb_copy_and_csum_bits(
1154					skb_prev, maxfraglen,
1155					data + transhdrlen, fraggap);
1156				skb_prev->csum = csum_sub(skb_prev->csum,
1157							  skb->csum);
1158				data += fraggap;
1159				pskb_trim_unique(skb_prev, maxfraglen);
1160			}
1161
1162			copy = datalen - transhdrlen - fraggap - pagedlen;
1163			/* [!] NOTE: copy will be negative if pagedlen>0
1164			 * because then the equation reduces to -fraggap.
1165			 */
1166			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
1167				err = -EFAULT;
1168				kfree_skb(skb);
1169				goto error;
1170			} else if (flags & MSG_SPLICE_PAGES) {
1171				copy = 0;
1172			}
1173
1174			offset += copy;
1175			length -= copy + transhdrlen;
1176			transhdrlen = 0;
1177			exthdrlen = 0;
1178			csummode = CHECKSUM_NONE;
1179
1180			/* only the initial fragment is time stamped */
1181			skb_shinfo(skb)->tx_flags = cork->tx_flags;
1182			cork->tx_flags = 0;
1183			skb_shinfo(skb)->tskey = tskey;
1184			tskey = 0;
1185			skb_zcopy_set(skb, uarg, &extra_uref);
1186
1187			if ((flags & MSG_CONFIRM) && !skb_prev)
1188				skb_set_dst_pending_confirm(skb, 1);
1189
1190			/*
1191			 * Put the packet on the pending queue.
1192			 */
1193			if (!skb->destructor) {
1194				skb->destructor = sock_wfree;
1195				skb->sk = sk;
1196				wmem_alloc_delta += skb->truesize;
1197			}
1198			__skb_queue_tail(queue, skb);
1199			continue;
1200		}
1201
1202		if (copy > length)
1203			copy = length;
1204
1205		if (!(rt->dst.dev->features&NETIF_F_SG) &&
1206		    skb_tailroom(skb) >= copy) {
1207			unsigned int off;
1208
1209			off = skb->len;
1210			if (getfrag(from, skb_put(skb, copy),
1211					offset, copy, off, skb) < 0) {
1212				__skb_trim(skb, off);
1213				err = -EFAULT;
1214				goto error;
1215			}
1216		} else if (flags & MSG_SPLICE_PAGES) {
1217			struct msghdr *msg = from;
1218
1219			err = -EIO;
1220			if (WARN_ON_ONCE(copy > msg->msg_iter.count))
1221				goto error;
1222
1223			err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
1224						   sk->sk_allocation);
1225			if (err < 0)
1226				goto error;
1227			copy = err;
1228			wmem_alloc_delta += copy;
1229		} else if (!zc) {
1230			int i = skb_shinfo(skb)->nr_frags;
1231
1232			err = -ENOMEM;
1233			if (!sk_page_frag_refill(sk, pfrag))
1234				goto error;
1235
1236			skb_zcopy_downgrade_managed(skb);
1237			if (!skb_can_coalesce(skb, i, pfrag->page,
1238					      pfrag->offset)) {
1239				err = -EMSGSIZE;
1240				if (i == MAX_SKB_FRAGS)
1241					goto error;
1242
1243				__skb_fill_page_desc(skb, i, pfrag->page,
1244						     pfrag->offset, 0);
1245				skb_shinfo(skb)->nr_frags = ++i;
1246				get_page(pfrag->page);
1247			}
1248			copy = min_t(int, copy, pfrag->size - pfrag->offset);
1249			if (getfrag(from,
1250				    page_address(pfrag->page) + pfrag->offset,
1251				    offset, copy, skb->len, skb) < 0)
1252				goto error_efault;
1253
1254			pfrag->offset += copy;
1255			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1256			skb_len_add(skb, copy);
1257			wmem_alloc_delta += copy;
1258		} else {
1259			err = skb_zerocopy_iter_dgram(skb, from, copy);
1260			if (err < 0)
1261				goto error;
1262		}
1263		offset += copy;
1264		length -= copy;
1265	}
1266
1267	if (wmem_alloc_delta)
1268		refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
1269	return 0;
1270
1271error_efault:
1272	err = -EFAULT;
1273error:
1274	net_zcopy_put_abort(uarg, extra_uref);
1275	cork->length -= length;
1276	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
1277	refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
1278	if (hold_tskey)
1279		atomic_dec(&sk->sk_tskey);
1280	return err;
1281}
1282
1283static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
1284			 struct ipcm_cookie *ipc, struct rtable **rtp)
1285{
1286	struct ip_options_rcu *opt;
1287	struct rtable *rt;
1288
1289	rt = *rtp;
1290	if (unlikely(!rt))
1291		return -EFAULT;
1292
1293	cork->fragsize = ip_sk_use_pmtu(sk) ?
1294			 dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);
1295
1296	if (!inetdev_valid_mtu(cork->fragsize))
1297		return -ENETUNREACH;
1298
1299	/*
1300	 * setup for corking.
1301	 */
1302	opt = ipc->opt;
1303	if (opt) {
1304		if (!cork->opt) {
1305			cork->opt = kmalloc(sizeof(struct ip_options) + 40,
1306					    sk->sk_allocation);
1307			if (unlikely(!cork->opt))
1308				return -ENOBUFS;
1309		}
1310		memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
1311		cork->flags |= IPCORK_OPT;
1312		cork->addr = ipc->addr;
1313	}
1314
1315	cork->gso_size = ipc->gso_size;
1316
1317	cork->dst = &rt->dst;
1318	/* We stole this route, caller should not release it. */
1319	*rtp = NULL;
1320
1321	cork->length = 0;
1322	cork->ttl = ipc->ttl;
1323	cork->tos = ipc->tos;
1324	cork->mark = ipc->sockc.mark;
1325	cork->priority = ipc->priority;
1326	cork->transmit_time = ipc->sockc.transmit_time;
1327	cork->tx_flags = 0;
1328	sock_tx_timestamp(sk, ipc->sockc.tsflags, &cork->tx_flags);
1329
1330	return 0;
1331}
1332
1333/*
1334 *	ip_append_data() can make one large IP datagram from many pieces of
1335 *	data.  Each piece will be held on the socket until
1336 *	ip_push_pending_frames() is called. Each piece can be a page or
1337 *	non-page data.
1338 *
 1339 *	Not only UDP: other transport protocols - e.g. raw sockets - can
 1340 *	potentially use this interface as well.
 1341 *
 1342 *	LATER: length must be adjusted by padding at the tail, when required.
1343 */
1344int ip_append_data(struct sock *sk, struct flowi4 *fl4,
1345		   int getfrag(void *from, char *to, int offset, int len,
1346			       int odd, struct sk_buff *skb),
1347		   void *from, int length, int transhdrlen,
1348		   struct ipcm_cookie *ipc, struct rtable **rtp,
1349		   unsigned int flags)
1350{
1351	struct inet_sock *inet = inet_sk(sk);
1352	int err;
1353
1354	if (flags&MSG_PROBE)
1355		return 0;
1356
1357	if (skb_queue_empty(&sk->sk_write_queue)) {
1358		err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
1359		if (err)
1360			return err;
1361	} else {
1362		transhdrlen = 0;
1363	}
1364
1365	return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
1366				sk_page_frag(sk), getfrag,
1367				from, length, transhdrlen, flags);
1368}
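/*
 * Illustrative usage sketch (not from this file): datagram senders pair
 * ip_append_data() with ip_push_pending_frames(), flushing on error,
 * roughly as the UDP sendmsg path does:
 *
 *	err = ip_append_data(sk, fl4, ip_generic_getfrag, msg, len,
 *			     sizeof(struct udphdr), &ipc, &rt,
 *			     msg->msg_flags);
 *	if (err)
 *		ip_flush_pending_frames(sk);
 *	else if (!(msg->msg_flags & MSG_MORE))
 *		err = ip_push_pending_frames(sk, fl4);
 */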
1369
1370static void ip_cork_release(struct inet_cork *cork)
1371{
1372	cork->flags &= ~IPCORK_OPT;
1373	kfree(cork->opt);
1374	cork->opt = NULL;
1375	dst_release(cork->dst);
1376	cork->dst = NULL;
1377}
1378
1379/*
 1380 *	Combine all pending IP fragments on the socket into one IP datagram
 1381 *	and push them out.
1382 */
1383struct sk_buff *__ip_make_skb(struct sock *sk,
1384			      struct flowi4 *fl4,
1385			      struct sk_buff_head *queue,
1386			      struct inet_cork *cork)
1387{
1388	struct sk_buff *skb, *tmp_skb;
1389	struct sk_buff **tail_skb;
1390	struct inet_sock *inet = inet_sk(sk);
1391	struct net *net = sock_net(sk);
1392	struct ip_options *opt = NULL;
1393	struct rtable *rt = dst_rtable(cork->dst);
1394	struct iphdr *iph;
1395	u8 pmtudisc, ttl;
1396	__be16 df = 0;
1397
1398	skb = __skb_dequeue(queue);
1399	if (!skb)
1400		goto out;
1401	tail_skb = &(skb_shinfo(skb)->frag_list);
1402
1403	/* move skb->data to ip header from ext header */
1404	if (skb->data < skb_network_header(skb))
1405		__skb_pull(skb, skb_network_offset(skb));
1406	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
1407		__skb_pull(tmp_skb, skb_network_header_len(skb));
1408		*tail_skb = tmp_skb;
1409		tail_skb = &(tmp_skb->next);
1410		skb->len += tmp_skb->len;
1411		skb->data_len += tmp_skb->len;
1412		skb->truesize += tmp_skb->truesize;
1413		tmp_skb->destructor = NULL;
1414		tmp_skb->sk = NULL;
1415	}
1416
 1417	/* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we
 1418	 * allow the frame generated here to be fragmented. No matter how
 1419	 * transforms change the size of the packet, it will come out.
1420	 */
1421	skb->ignore_df = ip_sk_ignore_df(sk);
1422
 1423	/* The DF bit is set when we want to see DF on outgoing frames.
 1424	 * If ignore_df is set too, we still allow this frame to be
 1425	 * fragmented locally. */
1426	pmtudisc = READ_ONCE(inet->pmtudisc);
1427	if (pmtudisc == IP_PMTUDISC_DO ||
1428	    pmtudisc == IP_PMTUDISC_PROBE ||
1429	    (skb->len <= dst_mtu(&rt->dst) &&
1430	     ip_dont_fragment(sk, &rt->dst)))
1431		df = htons(IP_DF);
1432
1433	if (cork->flags & IPCORK_OPT)
1434		opt = cork->opt;
1435
1436	if (cork->ttl != 0)
1437		ttl = cork->ttl;
1438	else if (rt->rt_type == RTN_MULTICAST)
1439		ttl = READ_ONCE(inet->mc_ttl);
1440	else
1441		ttl = ip_select_ttl(inet, &rt->dst);
1442
1443	iph = ip_hdr(skb);
1444	iph->version = 4;
1445	iph->ihl = 5;
1446	iph->tos = (cork->tos != -1) ? cork->tos : READ_ONCE(inet->tos);
1447	iph->frag_off = df;
1448	iph->ttl = ttl;
1449	iph->protocol = sk->sk_protocol;
1450	ip_copy_addrs(iph, fl4);
1451	ip_select_ident(net, skb, sk);
1452
1453	if (opt) {
1454		iph->ihl += opt->optlen >> 2;
1455		ip_options_build(skb, opt, cork->addr, rt);
1456	}
1457
1458	skb->priority = (cork->tos != -1) ? cork->priority: READ_ONCE(sk->sk_priority);
1459	skb->mark = cork->mark;
1460	skb->tstamp = cork->transmit_time;
1461	/*
1462	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
1463	 * on dst refcount
1464	 */
1465	cork->dst = NULL;
1466	skb_dst_set(skb, &rt->dst);
1467
1468	if (iph->protocol == IPPROTO_ICMP) {
1469		u8 icmp_type;
1470
 1471		/* For such sockets, transhdrlen is zero when ip_append_data() runs,
 1472		 * so the icmphdr is not in the skb linear region and icmp_type
 1473		 * cannot be read via icmp_hdr(skb)->type.
1474		 */
1475		if (sk->sk_type == SOCK_RAW &&
1476		    !(fl4->flowi4_flags & FLOWI_FLAG_KNOWN_NH))
1477			icmp_type = fl4->fl4_icmp_type;
1478		else
1479			icmp_type = icmp_hdr(skb)->type;
1480		icmp_out_count(net, icmp_type);
1481	}
1482
1483	ip_cork_release(cork);
1484out:
1485	return skb;
1486}
1487
1488int ip_send_skb(struct net *net, struct sk_buff *skb)
1489{
1490	int err;
1491
1492	err = ip_local_out(net, skb->sk, skb);
1493	if (err) {
1494		if (err > 0)
1495			err = net_xmit_errno(err);
1496		if (err)
1497			IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
1498	}
1499
1500	return err;
1501}
1502
1503int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
1504{
1505	struct sk_buff *skb;
1506
1507	skb = ip_finish_skb(sk, fl4);
1508	if (!skb)
1509		return 0;
1510
 1511	/* Netfilter gets the whole, not yet fragmented skb. */
1512	return ip_send_skb(sock_net(sk), skb);
1513}
1514
1515/*
1516 *	Throw away all pending data on the socket.
1517 */
1518static void __ip_flush_pending_frames(struct sock *sk,
1519				      struct sk_buff_head *queue,
1520				      struct inet_cork *cork)
1521{
1522	struct sk_buff *skb;
1523
1524	while ((skb = __skb_dequeue_tail(queue)) != NULL)
1525		kfree_skb(skb);
1526
1527	ip_cork_release(cork);
1528}
1529
1530void ip_flush_pending_frames(struct sock *sk)
1531{
1532	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
1533}
1534
1535struct sk_buff *ip_make_skb(struct sock *sk,
1536			    struct flowi4 *fl4,
1537			    int getfrag(void *from, char *to, int offset,
1538					int len, int odd, struct sk_buff *skb),
1539			    void *from, int length, int transhdrlen,
1540			    struct ipcm_cookie *ipc, struct rtable **rtp,
1541			    struct inet_cork *cork, unsigned int flags)
1542{
1543	struct sk_buff_head queue;
1544	int err;
1545
1546	if (flags & MSG_PROBE)
1547		return NULL;
1548
1549	__skb_queue_head_init(&queue);
1550
1551	cork->flags = 0;
1552	cork->addr = 0;
1553	cork->opt = NULL;
1554	err = ip_setup_cork(sk, cork, ipc, rtp);
1555	if (err)
1556		return ERR_PTR(err);
1557
1558	err = __ip_append_data(sk, fl4, &queue, cork,
1559			       &current->task_frag, getfrag,
1560			       from, length, transhdrlen, flags);
1561	if (err) {
1562		__ip_flush_pending_frames(sk, &queue, cork);
1563		return ERR_PTR(err);
1564	}
1565
1566	return __ip_make_skb(sk, fl4, &queue, cork);
1567}
1568
1569/*
1570 *	Fetch data from kernel space and fill in checksum if needed.
1571 */
1572static int ip_reply_glue_bits(void *dptr, char *to, int offset,
1573			      int len, int odd, struct sk_buff *skb)
1574{
1575	__wsum csum;
1576
1577	csum = csum_partial_copy_nocheck(dptr+offset, to, len);
1578	skb->csum = csum_block_add(skb->csum, csum, odd);
1579	return 0;
1580}
1581
1582/*
 1583 *	Generic function to send a packet as a reply to another packet.
 1584 *	So far it is used to send some TCP resets/acks.
1585 */
1586void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
1587			   const struct ip_options *sopt,
1588			   __be32 daddr, __be32 saddr,
1589			   const struct ip_reply_arg *arg,
1590			   unsigned int len, u64 transmit_time, u32 txhash)
1591{
1592	struct ip_options_data replyopts;
1593	struct ipcm_cookie ipc;
1594	struct flowi4 fl4;
1595	struct rtable *rt = skb_rtable(skb);
1596	struct net *net = sock_net(sk);
1597	struct sk_buff *nskb;
1598	int err;
1599	int oif;
1600
1601	if (__ip_options_echo(net, &replyopts.opt.opt, skb, sopt))
1602		return;
1603
1604	ipcm_init(&ipc);
1605	ipc.addr = daddr;
1606	ipc.sockc.transmit_time = transmit_time;
1607
1608	if (replyopts.opt.opt.optlen) {
1609		ipc.opt = &replyopts.opt;
1610
1611		if (replyopts.opt.opt.srr)
1612			daddr = replyopts.opt.opt.faddr;
1613	}
1614
1615	oif = arg->bound_dev_if;
1616	if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
1617		oif = skb->skb_iif;
1618
1619	flowi4_init_output(&fl4, oif,
1620			   IP4_REPLY_MARK(net, skb->mark) ?: sk->sk_mark,
1621			   RT_TOS(arg->tos),
1622			   RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
1623			   ip_reply_arg_flowi_flags(arg),
1624			   daddr, saddr,
1625			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
1626			   arg->uid);
1627	security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4));
1628	rt = ip_route_output_flow(net, &fl4, sk);
1629	if (IS_ERR(rt))
1630		return;
1631
1632	inet_sk(sk)->tos = arg->tos & ~INET_ECN_MASK;
1633
1634	sk->sk_protocol = ip_hdr(skb)->protocol;
1635	sk->sk_bound_dev_if = arg->bound_dev_if;
1636	sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default);
1637	ipc.sockc.mark = fl4.flowi4_mark;
1638	err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
1639			     len, 0, &ipc, &rt, MSG_DONTWAIT);
1640	if (unlikely(err)) {
1641		ip_flush_pending_frames(sk);
1642		goto out;
1643	}
1644
1645	nskb = skb_peek(&sk->sk_write_queue);
1646	if (nskb) {
1647		if (arg->csumoffset >= 0)
1648			*((__sum16 *)skb_transport_header(nskb) +
1649			  arg->csumoffset) = csum_fold(csum_add(nskb->csum,
1650								arg->csum));
1651		nskb->ip_summed = CHECKSUM_NONE;
1652		nskb->mono_delivery_time = !!transmit_time;
1653		if (txhash)
1654			skb_set_hash(nskb, txhash, PKT_HASH_TYPE_L4);
1655		ip_push_pending_frames(sk, &fl4);
1656	}
1657out:
1658	ip_rt_put(rt);
1659}
1660
1661void __init ip_init(void)
1662{
1663	ip_rt_init();
1664	inet_initpeers();
1665
1666#if defined(CONFIG_IP_MULTICAST)
1667	igmp_mc_init();
1668#endif
1669}
v3.1
 
   1/*
   2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   3 *		operating system.  INET is implemented using the  BSD Socket
   4 *		interface as the means of communication with the user level.
   5 *
   6 *		The Internet Protocol (IP) output module.
   7 *
   8 * Authors:	Ross Biro
   9 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  10 *		Donald Becker, <becker@super.org>
  11 *		Alan Cox, <Alan.Cox@linux.org>
  12 *		Richard Underwood
  13 *		Stefan Becker, <stefanb@yello.ping.de>
  14 *		Jorge Cwik, <jorge@laser.satlink.net>
  15 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
  16 *		Hirokazu Takahashi, <taka@valinux.co.jp>
  17 *
  18 *	See ip_input.c for original log
  19 *
  20 *	Fixes:
  21 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
  22 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
  23 *		Bradford Johnson:	Fix faulty handling of some frames when
  24 *					no route is found.
  25 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
  26 *					(in case if packet not accepted by
  27 *					output firewall rules)
  28 *		Mike McLagan	:	Routing by source
  29 *		Alexey Kuznetsov:	use new route cache
  30 *		Andi Kleen:		Fix broken PMTU recovery and remove
  31 *					some redundant tests.
  32 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
  33 *		Andi Kleen	: 	Replace ip_reply with ip_send_reply.
  34 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
  35 *					for decreased register pressure on x86
  36 *					and more readibility.
  37 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
  38 *					silently drop skb instead of failing with -EPERM.
  39 *		Detlev Wengorz	:	Copy protocol for fragments.
  40 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
  41 *					datagrams.
  42 *		Hirokazu Takahashi:	sendfile() on UDP works now.
  43 */
  44
  45#include <asm/uaccess.h>
  46#include <asm/system.h>
  47#include <linux/module.h>
  48#include <linux/types.h>
  49#include <linux/kernel.h>
  50#include <linux/mm.h>
  51#include <linux/string.h>
  52#include <linux/errno.h>
  53#include <linux/highmem.h>
  54#include <linux/slab.h>
  55
  56#include <linux/socket.h>
  57#include <linux/sockios.h>
  58#include <linux/in.h>
  59#include <linux/inet.h>
  60#include <linux/netdevice.h>
  61#include <linux/etherdevice.h>
  62#include <linux/proc_fs.h>
  63#include <linux/stat.h>
  64#include <linux/init.h>
  65
  66#include <net/snmp.h>
  67#include <net/ip.h>
  68#include <net/protocol.h>
  69#include <net/route.h>
  70#include <net/xfrm.h>
  71#include <linux/skbuff.h>
  72#include <net/sock.h>
  73#include <net/arp.h>
  74#include <net/icmp.h>
  75#include <net/checksum.h>
 
  76#include <net/inetpeer.h>
 
 
 
  77#include <linux/igmp.h>
  78#include <linux/netfilter_ipv4.h>
  79#include <linux/netfilter_bridge.h>
  80#include <linux/mroute.h>
  81#include <linux/netlink.h>
  82#include <linux/tcp.h>
  83
  84int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
  85EXPORT_SYMBOL(sysctl_ip_default_ttl);
 
 
  86
  87/* Generate a checksum for an outgoing IP datagram. */
  88__inline__ void ip_send_check(struct iphdr *iph)
  89{
  90	iph->check = 0;
  91	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
  92}
  93EXPORT_SYMBOL(ip_send_check);
  94
  95int __ip_local_out(struct sk_buff *skb)
  96{
  97	struct iphdr *iph = ip_hdr(skb);
  98
  99	iph->tot_len = htons(skb->len);
 
 
 100	ip_send_check(iph);
 101	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
 102		       skb_dst(skb)->dev, dst_output);
 
 
 
 
 
 
 
 
 
 
 
 103}
 104
 105int ip_local_out(struct sk_buff *skb)
 106{
 107	int err;
 108
 109	err = __ip_local_out(skb);
 110	if (likely(err == 1))
 111		err = dst_output(skb);
 112
 113	return err;
 114}
 115EXPORT_SYMBOL_GPL(ip_local_out);
 116
 117/* dev_loopback_xmit for use with netfilter. */
 118static int ip_dev_loopback_xmit(struct sk_buff *newskb)
 119{
 120	skb_reset_mac_header(newskb);
 121	__skb_pull(newskb, skb_network_offset(newskb));
 122	newskb->pkt_type = PACKET_LOOPBACK;
 123	newskb->ip_summed = CHECKSUM_UNNECESSARY;
 124	WARN_ON(!skb_dst(newskb));
 125	skb_dst_force(newskb);
 126	netif_rx_ni(newskb);
 127	return 0;
 128}
 129
 130static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
 131{
 132	int ttl = inet->uc_ttl;
 133
 134	if (ttl < 0)
 135		ttl = ip4_dst_hoplimit(dst);
 136	return ttl;
 137}
 138
 139/*
 140 *		Add an ip header to a skbuff and send it out.
 141 *
 142 */
 143int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
 144			  __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
 
 145{
 146	struct inet_sock *inet = inet_sk(sk);
 147	struct rtable *rt = skb_rtable(skb);
 
 148	struct iphdr *iph;
 149
 150	/* Build the IP header. */
 151	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
 152	skb_reset_network_header(skb);
 153	iph = ip_hdr(skb);
 154	iph->version  = 4;
 155	iph->ihl      = 5;
 156	iph->tos      = inet->tos;
 157	if (ip_dont_fragment(sk, &rt->dst))
 158		iph->frag_off = htons(IP_DF);
 159	else
 160		iph->frag_off = 0;
 161	iph->ttl      = ip_select_ttl(inet, &rt->dst);
 162	iph->daddr    = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
 163	iph->saddr    = saddr;
 164	iph->protocol = sk->sk_protocol;
 165	ip_select_ident(iph, &rt->dst, sk);
 
 
 
 
 
 
 
 
 
 
 
 
 
 166
 167	if (opt && opt->opt.optlen) {
 168		iph->ihl += opt->opt.optlen>>2;
 169		ip_options_build(skb, &opt->opt, daddr, rt, 0);
 170	}
 171
 172	skb->priority = sk->sk_priority;
 173	skb->mark = sk->sk_mark;
 
 174
 175	/* Send it out. */
 176	return ip_local_out(skb);
 177}
 178EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
 179
 180static inline int ip_finish_output2(struct sk_buff *skb)
 181{
 182	struct dst_entry *dst = skb_dst(skb);
 183	struct rtable *rt = (struct rtable *)dst;
 184	struct net_device *dev = dst->dev;
 185	unsigned int hh_len = LL_RESERVED_SPACE(dev);
 186	struct neighbour *neigh;
 187
 188	if (rt->rt_type == RTN_MULTICAST) {
 189		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
 190	} else if (rt->rt_type == RTN_BROADCAST)
 191		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);
 192
 193	/* Be paranoid, rather than too clever. */
 194	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
 195		struct sk_buff *skb2;
 196
 197		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
 198		if (skb2 == NULL) {
 199			kfree_skb(skb);
 200			return -ENOMEM;
 201		}
 202		if (skb->sk)
 203			skb_set_owner_w(skb2, skb->sk);
 204		kfree_skb(skb);
 205		skb = skb2;
 206	}
 207
 208	rcu_read_lock();
 209	neigh = dst_get_neighbour(dst);
 210	if (neigh) {
 211		int res = neigh_output(neigh, skb);
 212
 213		rcu_read_unlock();
 214		return res;
 215	}
 216	rcu_read_unlock();
 217
 218	if (net_ratelimit())
 219		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
 220	kfree_skb(skb);
 221	return -EINVAL;
 222}
 223
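     /*
      * MTU used for fragmentation decisions: IP_PMTUDISC_PROBE sockets
      * use the raw device MTU (path-MTU probing ignores the cached
      * route MTU); everyone else uses the destination entry's MTU.
      */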
 224static inline int ip_skb_dst_mtu(struct sk_buff *skb)
 225{
 226	struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
 227
 228	return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
 229	       skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
 230}
 231
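     /*
      * Last step before the device: packets that exceed the path MTU
      * are fragmented here, except GSO packets, which are segmented
      * further down the stack instead.
      */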
 232static int ip_finish_output(struct sk_buff *skb)
 233{
 234#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
 235	/* Policy lookup after SNAT yielded a new policy */
 236	if (skb_dst(skb)->xfrm != NULL) {
 237		IPCB(skb)->flags |= IPSKB_REROUTED;
 238		return dst_output(skb);
 239	}
 240#endif
 241	if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
 242		return ip_fragment(skb, ip_finish_output2);
 243	else
 244		return ip_finish_output2(skb);
 245}
 246
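     /*
      * Output path for multicast and broadcast routes: clones are
      * looped back through ip_dev_loopback_xmit() for local listeners
      * before the original skb goes to NF_INET_POST_ROUTING.
      */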
 247int ip_mc_output(struct sk_buff *skb)
 248{
 249	struct sock *sk = skb->sk;
 250	struct rtable *rt = skb_rtable(skb);
 251	struct net_device *dev = rt->dst.dev;
 252
 253	/*
 254	 *	If the indicated interface is up and running, send the packet.
 255	 */
 256	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
 257
 258	skb->dev = dev;
 259	skb->protocol = htons(ETH_P_IP);
 260
 261	/*
 262	 *	Multicasts are looped back for other local users
 263	 */
 264
 265	if (rt->rt_flags&RTCF_MULTICAST) {
 266		if (sk_mc_loop(sk)
 267#ifdef CONFIG_IP_MROUTE
  268		/* Small optimization: do not loop back non-local frames
  269		   that returned after forwarding; ip_mr_input will drop
  270		   them in any case.
  271		   Note that local frames are looped back so that they are
  272		   delivered to local recipients.
  273
  274		   This check is duplicated in ip_mr_input at the moment.
  275		 */
 276		    &&
 277		    ((rt->rt_flags & RTCF_LOCAL) ||
 278		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
 279#endif
 280		   ) {
 281			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
 282			if (newskb)
 283				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
 284					newskb, NULL, newskb->dev,
 285					ip_dev_loopback_xmit);
 286		}
 287
 288		/* Multicasts with ttl 0 must not go beyond the host */
 289
 290		if (ip_hdr(skb)->ttl == 0) {
 291			kfree_skb(skb);
 292			return 0;
 293		}
 294	}
 295
 296	if (rt->rt_flags&RTCF_BROADCAST) {
 297		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
 298		if (newskb)
 299			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
 300				NULL, newskb->dev, ip_dev_loopback_xmit);
 301	}
 302
 303	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
 304			    skb->dev, ip_finish_output,
 305			    !(IPCB(skb)->flags & IPSKB_REROUTED));
 306}
 307
 308int ip_output(struct sk_buff *skb)
 309{
 310	struct net_device *dev = skb_dst(skb)->dev;
 311
 312	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
 313
 314	skb->dev = dev;
 315	skb->protocol = htons(ETH_P_IP);
 316
 317	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev,
 318			    ip_finish_output,
 319			    !(IPCB(skb)->flags & IPSKB_REROUTED));
 320}
 321
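     /*
      * Main transmit entry point for connected transport sockets; TCP
      * uses it for every outgoing segment.  It (re)routes the packet if
      * needed, builds the IP header, and calls ip_local_out().  A
      * transport protocol invokes it roughly as:
      *
      *	ip_queue_xmit(skb, &inet->cork.fl);
      *
      * with the flow describing the connection cached in the socket.
      */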
 322int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl)
 323{
 324	struct sock *sk = skb->sk;
 325	struct inet_sock *inet = inet_sk(sk);
 326	struct ip_options_rcu *inet_opt;
 327	struct flowi4 *fl4;
 328	struct rtable *rt;
 329	struct iphdr *iph;
 330	int res;
 331
  332	/* Skip all of this if the packet is already routed,
  333	 * e.g. by something like SCTP.
  334	 */
 335	rcu_read_lock();
 336	inet_opt = rcu_dereference(inet->inet_opt);
 337	fl4 = &fl->u.ip4;
 338	rt = skb_rtable(skb);
 339	if (rt != NULL)
 340		goto packet_routed;
 341
 342	/* Make sure we can route this packet. */
 343	rt = (struct rtable *)__sk_dst_check(sk, 0);
 344	if (rt == NULL) {
 345		__be32 daddr;
 346
 347		/* Use correct destination address if we have options. */
 348		daddr = inet->inet_daddr;
 349		if (inet_opt && inet_opt->opt.srr)
 350			daddr = inet_opt->opt.faddr;
 351
  352		/* If this fails, the retransmit mechanism of the transport
  353		 * layer will keep trying until the route appears or the
  354		 * connection times out.
  355		 */
 356		rt = ip_route_output_ports(sock_net(sk), fl4, sk,
 357					   daddr, inet->inet_saddr,
 358					   inet->inet_dport,
 359					   inet->inet_sport,
 360					   sk->sk_protocol,
 361					   RT_CONN_FLAGS(sk),
 362					   sk->sk_bound_dev_if);
 363		if (IS_ERR(rt))
 364			goto no_route;
 365		sk_setup_caps(sk, &rt->dst);
 366	}
 367	skb_dst_set_noref(skb, &rt->dst);
 368
 369packet_routed:
 370	if (inet_opt && inet_opt->opt.is_strictroute && fl4->daddr != rt->rt_gateway)
 371		goto no_route;
 372
 373	/* OK, we know where to send it, allocate and build IP header. */
 374	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
 375	skb_reset_network_header(skb);
 376	iph = ip_hdr(skb);
 377	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
 378	if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df)
 379		iph->frag_off = htons(IP_DF);
 380	else
 381		iph->frag_off = 0;
 382	iph->ttl      = ip_select_ttl(inet, &rt->dst);
 383	iph->protocol = sk->sk_protocol;
 384	iph->saddr    = fl4->saddr;
 385	iph->daddr    = fl4->daddr;
  386	/* The transport layer has already set its own header pointers. */
 387
 388	if (inet_opt && inet_opt->opt.optlen) {
 389		iph->ihl += inet_opt->opt.optlen >> 2;
 390		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
 391	}
 392
 393	ip_select_ident_more(iph, &rt->dst, sk,
 394			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);
 395
 396	skb->priority = sk->sk_priority;
 397	skb->mark = sk->sk_mark;
 398
 399	res = ip_local_out(skb);
 400	rcu_read_unlock();
 401	return res;
 402
 403no_route:
 404	rcu_read_unlock();
 405	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
 406	kfree_skb(skb);
 407	return -EHOSTUNREACH;
 408}
 409EXPORT_SYMBOL(ip_queue_xmit);
 410
 411
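     /*
      * Propagate the per-packet metadata that every fragment must share
      * with the original skb: priority, protocol, dst, device, mark,
      * and the netfilter/traffic-control state.
      */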
 412static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
 413{
 414	to->pkt_type = from->pkt_type;
 415	to->priority = from->priority;
 416	to->protocol = from->protocol;
 417	skb_dst_drop(to);
 418	skb_dst_copy(to, from);
 419	to->dev = from->dev;
 420	to->mark = from->mark;
 421
 422	/* Copy the flags to each fragment. */
 423	IPCB(to)->flags = IPCB(from)->flags;
 424
 425#ifdef CONFIG_NET_SCHED
 426	to->tc_index = from->tc_index;
 427#endif
 428	nf_copy(to, from);
 429#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
 430    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
 431	to->nf_trace = from->nf_trace;
 432#endif
 433#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
 434	to->ipvs_property = from->ipvs_property;
 435#endif
 436	skb_copy_secmark(to, from);
 437}
 438
  439/*
  440 *	This IP datagram is too large to be sent in one piece.  Break it up
  441 *	into smaller pieces (each consisting of an IP header plus a block of
  442 *	the original payload), each small enough to fit in a single device
  443 *	frame, and queue such frames for sending.
  444 */
 445
 446int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 447{
 448	struct iphdr *iph;
 449	int ptr;
 450	struct net_device *dev;
 451	struct sk_buff *skb2;
 452	unsigned int mtu, hlen, left, len, ll_rs;
 453	int offset;
 454	__be16 not_last_frag;
 455	struct rtable *rt = skb_rtable(skb);
 456	int err = 0;
 457
 458	dev = rt->dst.dev;
 459
 460	/*
 461	 *	Point into the IP datagram header.
 462	 */
 463
 464	iph = ip_hdr(skb);
 465
 466	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
 467		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
 468		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
 469			  htonl(ip_skb_dst_mtu(skb)));
 470		kfree_skb(skb);
 471		return -EMSGSIZE;
 472	}
 473
 474	/*
 475	 *	Setup starting values.
 476	 */
 477
 478	hlen = iph->ihl * 4;
 479	mtu = dst_mtu(&rt->dst) - hlen;	/* Size of data space */
 480#ifdef CONFIG_BRIDGE_NETFILTER
 481	if (skb->nf_bridge)
 482		mtu -= nf_bridge_mtu_reduction(skb);
 483#endif
 484	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
 485
  486	/* When a frag_list is given, use it.  First, check its validity:
  487	 * some transformers may create a bad frag_list or corrupt an existing
  488	 * one; that is not prohibited.  In that case fall back to copying.
  489	 *
  490	 * LATER: this step can be merged into the real generation of fragments;
  491	 * we could switch to copying when we see the first bad fragment.
  492	 */
 493	if (skb_has_frag_list(skb)) {
 494		struct sk_buff *frag, *frag2;
 495		int first_len = skb_pagelen(skb);
 496
 497		if (first_len - hlen > mtu ||
 498		    ((first_len - hlen) & 7) ||
 499		    ip_is_fragment(iph) ||
 500		    skb_cloned(skb))
 501			goto slow_path;
 502
 503		skb_walk_frags(skb, frag) {
 504			/* Correct geometry. */
 505			if (frag->len > mtu ||
 506			    ((frag->len & 7) && frag->next) ||
 507			    skb_headroom(frag) < hlen)
 508				goto slow_path_clean;
 509
 510			/* Partially cloned skb? */
 511			if (skb_shared(frag))
 512				goto slow_path_clean;
 513
 514			BUG_ON(frag->sk);
 515			if (skb->sk) {
 516				frag->sk = skb->sk;
 517				frag->destructor = sock_wfree;
 518			}
 519			skb->truesize -= frag->truesize;
 520		}
 521
 522		/* Everything is OK. Generate! */
 523
 524		err = 0;
 525		offset = 0;
 526		frag = skb_shinfo(skb)->frag_list;
 527		skb_frag_list_init(skb);
 528		skb->data_len = first_len - skb_headlen(skb);
 529		skb->len = first_len;
 530		iph->tot_len = htons(first_len);
 531		iph->frag_off = htons(IP_MF);
 532		ip_send_check(iph);
 533
 534		for (;;) {
  535			/* Prepare the header of the next frame
  536			 * before the previous one has gone down. */
 537			if (frag) {
 538				frag->ip_summed = CHECKSUM_NONE;
 539				skb_reset_transport_header(frag);
 540				__skb_push(frag, hlen);
 541				skb_reset_network_header(frag);
 542				memcpy(skb_network_header(frag), iph, hlen);
 543				iph = ip_hdr(frag);
 544				iph->tot_len = htons(frag->len);
 545				ip_copy_metadata(frag, skb);
 546				if (offset == 0)
 547					ip_options_fragment(frag);
 548				offset += skb->len - hlen;
 549				iph->frag_off = htons(offset>>3);
 550				if (frag->next != NULL)
 551					iph->frag_off |= htons(IP_MF);
 552				/* Ready, complete checksum */
 553				ip_send_check(iph);
 554			}
 555
 556			err = output(skb);
 557
 558			if (!err)
 559				IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
 560			if (err || !frag)
 561				break;
 562
 563			skb = frag;
 564			frag = skb->next;
 565			skb->next = NULL;
 566		}
 567
 568		if (err == 0) {
 569			IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
 570			return 0;
 571		}
 572
 573		while (frag) {
 574			skb = frag->next;
 575			kfree_skb(frag);
 576			frag = skb;
 577		}
 578		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
 579		return err;
 580
 581slow_path_clean:
 582		skb_walk_frags(skb, frag2) {
 583			if (frag2 == frag)
 584				break;
 585			frag2->sk = NULL;
 586			frag2->destructor = NULL;
 587			skb->truesize += frag2->truesize;
 588		}
 589	}
 590
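     /* Slow path: allocate a fresh skb for every fragment and copy the
      * IP header plus one block of payload into each of them.
      */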
 591slow_path:
 592	left = skb->len - hlen;		/* Space per frame */
 593	ptr = hlen;		/* Where to start from */
 594
  595	/* For bridged IP traffic encapsulated inside e.g. a VLAN header,
  596	 * we need to make room for the encapsulating header.
  597	 */
 598	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, nf_bridge_pad(skb));
 599
 600	/*
 601	 *	Fragment the datagram.
 602	 */
 603
 604	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
 605	not_last_frag = iph->frag_off & htons(IP_MF);
 606
 607	/*
 608	 *	Keep copying data until we run out.
 609	 */
 610
 611	while (left > 0) {
 612		len = left;
 613		/* IF: it doesn't fit, use 'mtu' - the data space left */
 614		if (len > mtu)
 615			len = mtu;
 616		/* IF: we are not sending up to and including the packet end
 617		   then align the next start on an eight byte boundary */
 618		if (len < left)	{
 619			len &= ~7;
 620		}
 621		/*
 622		 *	Allocate buffer.
 623		 */
 624
 625		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
 626			NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
 627			err = -ENOMEM;
 628			goto fail;
 629		}
 630
 631		/*
 632		 *	Set up data on packet
 633		 */
 634
 635		ip_copy_metadata(skb2, skb);
 636		skb_reserve(skb2, ll_rs);
 637		skb_put(skb2, len + hlen);
 638		skb_reset_network_header(skb2);
 639		skb2->transport_header = skb2->network_header + hlen;
 640
 641		/*
 642		 *	Charge the memory for the fragment to any owner
 643		 *	it might possess
 644		 */
 645
 646		if (skb->sk)
 647			skb_set_owner_w(skb2, skb->sk);
 648
 649		/*
 650		 *	Copy the packet header into the new buffer.
 651		 */
 652
 653		skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);
 654
 655		/*
 656		 *	Copy a block of the IP datagram.
 657		 */
 658		if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
 659			BUG();
 660		left -= len;
 661
 662		/*
 663		 *	Fill in the new header fields.
 664		 */
 665		iph = ip_hdr(skb2);
 666		iph->frag_off = htons((offset >> 3));
 667
  668		/* ANK: dirty, but effective trick. Update the options only if
  669		 * the segment to be fragmented was THE FIRST (otherwise the
  670		 * options are already fixed) and do it ONCE,
  671		 * on the initial skb, so that all the following fragments
  672		 * inherit the fixed options.
  673		 */
 674		if (offset == 0)
 675			ip_options_fragment(skb);
 676
  677		/*
  678		 *	Added AC : If we are fragmenting a fragment that's not the
  679		 *		   last fragment then keep the MF bit set on each piece
  680		 */
 681		if (left > 0 || not_last_frag)
 682			iph->frag_off |= htons(IP_MF);
 683		ptr += len;
 684		offset += len;
 685
 686		/*
 687		 *	Put this fragment into the sending queue.
 688		 */
 689		iph->tot_len = htons(len + hlen);
 690
 691		ip_send_check(iph);
 692
 693		err = output(skb2);
 694		if (err)
 695			goto fail;
 696
 697		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
 698	}
 699	kfree_skb(skb);
 700	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
 701	return err;
 702
 703fail:
 704	kfree_skb(skb);
 705	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
 706	return err;
 707}
 708EXPORT_SYMBOL(ip_fragment);
 709
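     /*
      * Generic getfrag callback for ip_append_data(): copies data from
      * a user iovec into the skb, folding the copied block into
      * skb->csum unless the hardware checksums it (CHECKSUM_PARTIAL).
      */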
 710int
 711ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
 712{
 713	struct iovec *iov = from;
 714
 715	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 716		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
 717			return -EFAULT;
 718	} else {
 719		__wsum csum = 0;
 720		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
 721			return -EFAULT;
 722		skb->csum = csum_block_add(skb->csum, csum, odd);
 723	}
 724	return 0;
 725}
 726EXPORT_SYMBOL(ip_generic_getfrag);
 727
 728static inline __wsum
 729csum_page(struct page *page, int offset, int copy)
 730{
 731	char *kaddr;
 732	__wsum csum;
 733	kaddr = kmap(page);
 734	csum = csum_partial(kaddr + offset, copy, 0);
 735	kunmap(page);
 736	return csum;
 737}
 738
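     /*
      * UFO path: instead of one skb per fragment, queue a single large
      * skb and record the fragment size in gso_size, leaving the actual
      * UDP fragmentation to the device or the GSO layer.
      */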
 739static inline int ip_ufo_append_data(struct sock *sk,
 740			struct sk_buff_head *queue,
 741			int getfrag(void *from, char *to, int offset, int len,
 742			       int odd, struct sk_buff *skb),
 743			void *from, int length, int hh_len, int fragheaderlen,
 744			int transhdrlen, int maxfraglen, unsigned int flags)
 745{
 746	struct sk_buff *skb;
 747	int err;
 748
  749	/* The network device supports UDP fragmentation offload, so
  750	 * create a single skb packet containing the complete UDP
  751	 * datagram.
  752	 */
 753	if ((skb = skb_peek_tail(queue)) == NULL) {
 754		skb = sock_alloc_send_skb(sk,
 755			hh_len + fragheaderlen + transhdrlen + 20,
 756			(flags & MSG_DONTWAIT), &err);
 757
 758		if (skb == NULL)
 759			return err;
 760
 761		/* reserve space for Hardware header */
 762		skb_reserve(skb, hh_len);
 763
 764		/* create space for UDP/IP header */
 765		skb_put(skb, fragheaderlen + transhdrlen);
 766
 767		/* initialize network header pointer */
 768		skb_reset_network_header(skb);
 769
 770		/* initialize protocol header pointer */
 771		skb->transport_header = skb->network_header + fragheaderlen;
 772
 773		skb->ip_summed = CHECKSUM_PARTIAL;
 774		skb->csum = 0;
 775
 776		/* specify the length of each IP datagram fragment */
 777		skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
 778		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
 779		__skb_queue_tail(queue, skb);
 780	}
 781
 782	return skb_append_datato_frags(sk, skb, getfrag, from,
 783				       (length - transhdrlen));
 784}
 785
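     /*
      * Core of ip_append_data(): fetches 'length' bytes via getfrag()
      * and appends them to 'queue', splitting the data into skbs sized
      * so that each one can later become a single IP fragment.
      */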
 786static int __ip_append_data(struct sock *sk,
 787			    struct flowi4 *fl4,
 788			    struct sk_buff_head *queue,
 789			    struct inet_cork *cork,
 790			    int getfrag(void *from, char *to, int offset,
 791					int len, int odd, struct sk_buff *skb),
 792			    void *from, int length, int transhdrlen,
 793			    unsigned int flags)
 794{
 795	struct inet_sock *inet = inet_sk(sk);
 796	struct sk_buff *skb;
 797
 798	struct ip_options *opt = cork->opt;
 799	int hh_len;
 800	int exthdrlen;
 801	int mtu;
 802	int copy;
 803	int err;
 804	int offset = 0;
 805	unsigned int maxfraglen, fragheaderlen;
 806	int csummode = CHECKSUM_NONE;
 807	struct rtable *rt = (struct rtable *)cork->dst;
 808
 809	skb = skb_peek_tail(queue);
 810
 811	exthdrlen = !skb ? rt->dst.header_len : 0;
 812	mtu = cork->fragsize;
 813
 814	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
 815
 816	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
 817	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
 818
 819	if (cork->length + length > 0xFFFF - fragheaderlen) {
 820		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
 821			       mtu-exthdrlen);
 822		return -EMSGSIZE;
 823	}
 824
  825	/*
  826	 * transhdrlen > 0 means that this is the first fragment and we want
  827	 * it not to be fragmented later on.
  828	 */
 829	if (transhdrlen &&
 830	    length + fragheaderlen <= mtu &&
 831	    rt->dst.dev->features & NETIF_F_V4_CSUM &&
 832	    !exthdrlen)
 833		csummode = CHECKSUM_PARTIAL;
 834
 835	cork->length += length;
 836	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
 837	    (sk->sk_protocol == IPPROTO_UDP) &&
 838	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
 839		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
 840					 hh_len, fragheaderlen, transhdrlen,
 841					 maxfraglen, flags);
 842		if (err)
 843			goto error;
 844		return 0;
 845	}
 846
  847	/* So, what's going on in the loop below?
  848	 *
  849	 * We use the calculated fragment length to build a chain of skbs;
  850	 * each segment is an IP fragment that is ready to be sent to the
  851	 * network once an appropriate IP header has been added.
  852	 */
 853
 854	if (!skb)
 855		goto alloc_new_skb;
 856
 857	while (length > 0) {
 858		/* Check if the remaining data fits into current packet. */
 859		copy = mtu - skb->len;
 860		if (copy < length)
 861			copy = maxfraglen - skb->len;
 862		if (copy <= 0) {
 863			char *data;
 864			unsigned int datalen;
 865			unsigned int fraglen;
 866			unsigned int fraggap;
 867			unsigned int alloclen;
 868			struct sk_buff *skb_prev;
 869alloc_new_skb:
 870			skb_prev = skb;
 871			if (skb_prev)
 872				fraggap = skb_prev->len - maxfraglen;
 873			else
 874				fraggap = 0;
 875
 876			/*
 877			 * If remaining data exceeds the mtu,
 878			 * we know we need more fragment(s).
 879			 */
 880			datalen = length + fraggap;
 881			if (datalen > mtu - fragheaderlen)
 882				datalen = maxfraglen - fragheaderlen;
 883			fraglen = datalen + fragheaderlen;
 884
 885			if ((flags & MSG_MORE) &&
 886			    !(rt->dst.dev->features&NETIF_F_SG))
 887				alloclen = mtu;
 888			else
 889				alloclen = fraglen;
 890
 891			alloclen += exthdrlen;
 892
  893			/* The last fragment gets additional space at the tail.
  894			 * Note: with MSG_MORE we overallocate on fragments,
  895			 * because we have no idea which fragment will be
  896			 * the last.
  897			 */
 898			if (datalen == length + fraggap)
 899				alloclen += rt->dst.trailer_len;
 900
 901			if (transhdrlen) {
 902				skb = sock_alloc_send_skb(sk,
 903						alloclen + hh_len + 15,
 904						(flags & MSG_DONTWAIT), &err);
 905			} else {
 906				skb = NULL;
 907				if (atomic_read(&sk->sk_wmem_alloc) <=
 908				    2 * sk->sk_sndbuf)
 909					skb = sock_wmalloc(sk,
 910							   alloclen + hh_len + 15, 1,
 911							   sk->sk_allocation);
 912				if (unlikely(skb == NULL))
 913					err = -ENOBUFS;
 914				else
 915					/* only the initial fragment is
 916					   time stamped */
 917					cork->tx_flags = 0;
 918			}
 919			if (skb == NULL)
 920				goto error;
 921
 922			/*
 923			 *	Fill in the control structures
 924			 */
 925			skb->ip_summed = csummode;
 926			skb->csum = 0;
 927			skb_reserve(skb, hh_len);
 928			skb_shinfo(skb)->tx_flags = cork->tx_flags;
 929
 930			/*
 931			 *	Find where to start putting bytes.
 932			 */
 933			data = skb_put(skb, fraglen + exthdrlen);
 934			skb_set_network_header(skb, exthdrlen);
 935			skb->transport_header = (skb->network_header +
 936						 fragheaderlen);
 937			data += fragheaderlen + exthdrlen;
 938
 939			if (fraggap) {
 940				skb->csum = skb_copy_and_csum_bits(
 941					skb_prev, maxfraglen,
 942					data + transhdrlen, fraggap, 0);
 943				skb_prev->csum = csum_sub(skb_prev->csum,
 944							  skb->csum);
 945				data += fraggap;
 946				pskb_trim_unique(skb_prev, maxfraglen);
 947			}
 948
 949			copy = datalen - transhdrlen - fraggap;
 950			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
 951				err = -EFAULT;
 952				kfree_skb(skb);
 953				goto error;
 954			}
 955
 956			offset += copy;
 957			length -= datalen - fraggap;
 958			transhdrlen = 0;
 959			exthdrlen = 0;
 960			csummode = CHECKSUM_NONE;
 961
 962			/*
 963			 * Put the packet on the pending queue.
 964			 */
 965			__skb_queue_tail(queue, skb);
 966			continue;
 967		}
 968
 969		if (copy > length)
 970			copy = length;
 971
 972		if (!(rt->dst.dev->features&NETIF_F_SG)) {
 973			unsigned int off;
 974
 975			off = skb->len;
 976			if (getfrag(from, skb_put(skb, copy),
 977					offset, copy, off, skb) < 0) {
 978				__skb_trim(skb, off);
 979				err = -EFAULT;
 980				goto error;
 981			}
 982		} else {
 983			int i = skb_shinfo(skb)->nr_frags;
 984			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
 985			struct page *page = cork->page;
 986			int off = cork->off;
 987			unsigned int left;
 988
 989			if (page && (left = PAGE_SIZE - off) > 0) {
 990				if (copy >= left)
 991					copy = left;
 992				if (page != frag->page) {
 993					if (i == MAX_SKB_FRAGS) {
 994						err = -EMSGSIZE;
 995						goto error;
 996					}
 997					get_page(page);
 998					skb_fill_page_desc(skb, i, page, off, 0);
 999					frag = &skb_shinfo(skb)->frags[i];
1000				}
1001			} else if (i < MAX_SKB_FRAGS) {
1002				if (copy > PAGE_SIZE)
1003					copy = PAGE_SIZE;
1004				page = alloc_pages(sk->sk_allocation, 0);
1005				if (page == NULL)  {
1006					err = -ENOMEM;
1007					goto error;
1008				}
1009				cork->page = page;
1010				cork->off = 0;
1011
1012				skb_fill_page_desc(skb, i, page, 0, 0);
1013				frag = &skb_shinfo(skb)->frags[i];
1014			} else {
1015				err = -EMSGSIZE;
1016				goto error;
1017			}
1018			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
1019				err = -EFAULT;
1020				goto error;
1021			}
1022			cork->off += copy;
1023			frag->size += copy;
1024			skb->len += copy;
1025			skb->data_len += copy;
1026			skb->truesize += copy;
1027			atomic_add(copy, &sk->sk_wmem_alloc);
1028		}
1029		offset += copy;
1030		length -= copy;
1031	}
1032
1033	return 0;
1034
1035error:
1036	cork->length -= length;
1037	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
1038	return err;
1039}
1040
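     /*
      * Record the route, options, and fragment size in the cork; every
      * subsequent ip_append_data() call on this socket uses these until
      * the pending frames are pushed or flushed.
      */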
1041static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
1042			 struct ipcm_cookie *ipc, struct rtable **rtp)
1043{
1044	struct inet_sock *inet = inet_sk(sk);
1045	struct ip_options_rcu *opt;
1046	struct rtable *rt;
1047
1048	/*
1049	 * setup for corking.
1050	 */
1051	opt = ipc->opt;
1052	if (opt) {
1053		if (cork->opt == NULL) {
1054			cork->opt = kmalloc(sizeof(struct ip_options) + 40,
1055					    sk->sk_allocation);
1056			if (unlikely(cork->opt == NULL))
1057				return -ENOBUFS;
1058		}
1059		memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
1060		cork->flags |= IPCORK_OPT;
1061		cork->addr = ipc->addr;
1062	}
1063	rt = *rtp;
1064	if (unlikely(!rt))
1065		return -EFAULT;
1066	/*
1067	 * We steal reference to this route, caller should not release it
1068	 */
1069	*rtp = NULL;
1070	cork->fragsize = inet->pmtudisc == IP_PMTUDISC_PROBE ?
1071			 rt->dst.dev->mtu : dst_mtu(&rt->dst);
1072	cork->dst = &rt->dst;
1073	cork->length = 0;
1074	cork->tx_flags = ipc->tx_flags;
1075	cork->page = NULL;
1076	cork->off = 0;
1077
1078	return 0;
1079}
1080
 1081/*
 1082 *	ip_append_data() and ip_append_page() can make one large IP datagram
 1083 *	from many pieces of data.  Each piece is held on the socket
 1084 *	until ip_push_pending_frames() is called.  Each piece can be a page
 1085 *	or non-page data.
 1086 *
 1087 *	Not only UDP: other transport protocols - e.g. raw sockets - can
 1088 *	potentially use this interface as well.
 1089 *
 1090 *	LATER: the length must be adjusted for tail padding when required.
 1091 */
1092int ip_append_data(struct sock *sk, struct flowi4 *fl4,
1093		   int getfrag(void *from, char *to, int offset, int len,
1094			       int odd, struct sk_buff *skb),
1095		   void *from, int length, int transhdrlen,
1096		   struct ipcm_cookie *ipc, struct rtable **rtp,
1097		   unsigned int flags)
1098{
1099	struct inet_sock *inet = inet_sk(sk);
1100	int err;
1101
1102	if (flags&MSG_PROBE)
1103		return 0;
1104
1105	if (skb_queue_empty(&sk->sk_write_queue)) {
1106		err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
1107		if (err)
1108			return err;
1109	} else {
1110		transhdrlen = 0;
1111	}
1112
1113	return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base, getfrag,
1114				from, length, transhdrlen, flags);
1115}
1116
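     /*
      * Zero-copy variant of ip_append_data(): links the caller's page
      * into the tail skb's fragment array instead of copying, which is
      * why the device must support scatter-gather (NETIF_F_SG).
      */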
1117ssize_t	ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
1118		       int offset, size_t size, int flags)
1119{
1120	struct inet_sock *inet = inet_sk(sk);
1121	struct sk_buff *skb;
1122	struct rtable *rt;
1123	struct ip_options *opt = NULL;
1124	struct inet_cork *cork;
1125	int hh_len;
1126	int mtu;
1127	int len;
1128	int err;
1129	unsigned int maxfraglen, fragheaderlen, fraggap;
1130
1131	if (inet->hdrincl)
1132		return -EPERM;
1133
1134	if (flags&MSG_PROBE)
1135		return 0;
1136
1137	if (skb_queue_empty(&sk->sk_write_queue))
1138		return -EINVAL;
1139
1140	cork = &inet->cork.base;
1141	rt = (struct rtable *)cork->dst;
1142	if (cork->flags & IPCORK_OPT)
1143		opt = cork->opt;
1144
1145	if (!(rt->dst.dev->features&NETIF_F_SG))
1146		return -EOPNOTSUPP;
1147
1148	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
1149	mtu = cork->fragsize;
1150
1151	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
1152	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
1153
1154	if (cork->length + size > 0xFFFF - fragheaderlen) {
1155		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, mtu);
1156		return -EMSGSIZE;
1157	}
1158
1159	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
1160		return -EINVAL;
1161
1162	cork->length += size;
1163	if ((size + skb->len > mtu) &&
1164	    (sk->sk_protocol == IPPROTO_UDP) &&
1165	    (rt->dst.dev->features & NETIF_F_UFO)) {
1166		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
1167		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1168	}
1169
1170
1171	while (size > 0) {
1172		int i;
1173
1174		if (skb_is_gso(skb))
1175			len = size;
1176		else {
1177
1178			/* Check if the remaining data fits into current packet. */
1179			len = mtu - skb->len;
1180			if (len < size)
1181				len = maxfraglen - skb->len;
1182		}
1183		if (len <= 0) {
1184			struct sk_buff *skb_prev;
1185			int alloclen;
1186
1187			skb_prev = skb;
1188			fraggap = skb_prev->len - maxfraglen;
1189
1190			alloclen = fragheaderlen + hh_len + fraggap + 15;
1191			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
1192			if (unlikely(!skb)) {
1193				err = -ENOBUFS;
1194				goto error;
1195			}
1196
1197			/*
1198			 *	Fill in the control structures
1199			 */
1200			skb->ip_summed = CHECKSUM_NONE;
1201			skb->csum = 0;
1202			skb_reserve(skb, hh_len);
1203
1204			/*
1205			 *	Find where to start putting bytes.
1206			 */
1207			skb_put(skb, fragheaderlen + fraggap);
1208			skb_reset_network_header(skb);
1209			skb->transport_header = (skb->network_header +
1210						 fragheaderlen);
1211			if (fraggap) {
1212				skb->csum = skb_copy_and_csum_bits(skb_prev,
1213								   maxfraglen,
1214						    skb_transport_header(skb),
1215								   fraggap, 0);
1216				skb_prev->csum = csum_sub(skb_prev->csum,
1217							  skb->csum);
1218				pskb_trim_unique(skb_prev, maxfraglen);
1219			}
1220
1221			/*
1222			 * Put the packet on the pending queue.
1223			 */
1224			__skb_queue_tail(&sk->sk_write_queue, skb);
1225			continue;
1226		}
1227
1228		i = skb_shinfo(skb)->nr_frags;
1229		if (len > size)
1230			len = size;
1231		if (skb_can_coalesce(skb, i, page, offset)) {
1232			skb_shinfo(skb)->frags[i-1].size += len;
1233		} else if (i < MAX_SKB_FRAGS) {
1234			get_page(page);
1235			skb_fill_page_desc(skb, i, page, offset, len);
1236		} else {
1237			err = -EMSGSIZE;
1238			goto error;
1239		}
1240
1241		if (skb->ip_summed == CHECKSUM_NONE) {
1242			__wsum csum;
1243			csum = csum_page(page, offset, len);
1244			skb->csum = csum_block_add(skb->csum, csum, skb->len);
1245		}
1246
1247		skb->len += len;
1248		skb->data_len += len;
1249		skb->truesize += len;
1250		atomic_add(len, &sk->sk_wmem_alloc);
1251		offset += len;
1252		size -= len;
1253	}
1254	return 0;
1255
1256error:
1257	cork->length -= size;
1258	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
1259	return err;
1260}
1261
1262static void ip_cork_release(struct inet_cork *cork)
1263{
1264	cork->flags &= ~IPCORK_OPT;
1265	kfree(cork->opt);
1266	cork->opt = NULL;
1267	dst_release(cork->dst);
1268	cork->dst = NULL;
1269}
1270
 1271/*
 1272 *	Combine all pending IP fragments on the socket into one IP
 1273 *	datagram, ready to be pushed out.
 1274 */
1275struct sk_buff *__ip_make_skb(struct sock *sk,
1276			      struct flowi4 *fl4,
1277			      struct sk_buff_head *queue,
1278			      struct inet_cork *cork)
1279{
1280	struct sk_buff *skb, *tmp_skb;
1281	struct sk_buff **tail_skb;
1282	struct inet_sock *inet = inet_sk(sk);
1283	struct net *net = sock_net(sk);
1284	struct ip_options *opt = NULL;
1285	struct rtable *rt = (struct rtable *)cork->dst;
1286	struct iphdr *iph;
1287	__be16 df = 0;
1288	__u8 ttl;
1289
1290	if ((skb = __skb_dequeue(queue)) == NULL)
1291		goto out;
1292	tail_skb = &(skb_shinfo(skb)->frag_list);
1293
 1294	/* move skb->data from the ext header to the IP header */
1295	if (skb->data < skb_network_header(skb))
1296		__skb_pull(skb, skb_network_offset(skb));
1297	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
1298		__skb_pull(tmp_skb, skb_network_header_len(skb));
1299		*tail_skb = tmp_skb;
1300		tail_skb = &(tmp_skb->next);
1301		skb->len += tmp_skb->len;
1302		skb->data_len += tmp_skb->len;
1303		skb->truesize += tmp_skb->truesize;
1304		tmp_skb->destructor = NULL;
1305		tmp_skb->sk = NULL;
1306	}
1307
 1308	/* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we
 1309	 * allow the frame generated here to be fragmented.  No matter how
 1310	 * transforms change the size of the packet, it will go out.
 1311	 */
1312	if (inet->pmtudisc < IP_PMTUDISC_DO)
1313		skb->local_df = 1;
1314
 1315	/* The DF bit is set when we want to see DF on outgoing frames.
 1316	 * If local_df is set as well, we still allow this frame to be
 1317	 * fragmented locally. */
1318	if (inet->pmtudisc >= IP_PMTUDISC_DO ||
1319	    (skb->len <= dst_mtu(&rt->dst) &&
1320	     ip_dont_fragment(sk, &rt->dst)))
1321		df = htons(IP_DF);
1322
1323	if (cork->flags & IPCORK_OPT)
1324		opt = cork->opt;
1325
1326	if (rt->rt_type == RTN_MULTICAST)
1327		ttl = inet->mc_ttl;
1328	else
1329		ttl = ip_select_ttl(inet, &rt->dst);
1330
1331	iph = (struct iphdr *)skb->data;
1332	iph->version = 4;
1333	iph->ihl = 5;
1334	iph->tos = inet->tos;
1335	iph->frag_off = df;
1336	ip_select_ident(iph, &rt->dst, sk);
1337	iph->ttl = ttl;
1338	iph->protocol = sk->sk_protocol;
1339	iph->saddr = fl4->saddr;
1340	iph->daddr = fl4->daddr;
1341
1342	if (opt) {
1343		iph->ihl += opt->optlen>>2;
1344		ip_options_build(skb, opt, cork->addr, rt, 0);
1345	}
1346
1347	skb->priority = sk->sk_priority;
1348	skb->mark = sk->sk_mark;
1349	/*
1350	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
1351	 * on dst refcount
1352	 */
1353	cork->dst = NULL;
1354	skb_dst_set(skb, &rt->dst);
1355
1356	if (iph->protocol == IPPROTO_ICMP)
1357		icmp_out_count(net, ((struct icmphdr *)
1358			skb_transport_header(skb))->type);
1359
1360	ip_cork_release(cork);
1361out:
1362	return skb;
1363}
1364
1365int ip_send_skb(struct sk_buff *skb)
1366{
1367	struct net *net = sock_net(skb->sk);
1368	int err;
1369
1370	err = ip_local_out(skb);
1371	if (err) {
1372		if (err > 0)
1373			err = net_xmit_errno(err);
1374		if (err)
1375			IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
1376	}
1377
1378	return err;
1379}
1380
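     /*
      * Turn everything queued on the socket into one datagram and send
      * it.  A UDP-style user of the corking interface does roughly:
      *
      *	ip_append_data(sk, &fl4, getfrag, data, len, 0, &ipc, &rt, 0);
      *	ip_push_pending_frames(sk, &fl4);
      */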
1381int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
1382{
1383	struct sk_buff *skb;
1384
1385	skb = ip_finish_skb(sk, fl4);
1386	if (!skb)
1387		return 0;
1388
 1389	/* Netfilter gets the whole, not yet fragmented skb. */
1390	return ip_send_skb(skb);
1391}
1392
1393/*
1394 *	Throw away all pending data on the socket.
1395 */
1396static void __ip_flush_pending_frames(struct sock *sk,
1397				      struct sk_buff_head *queue,
1398				      struct inet_cork *cork)
1399{
1400	struct sk_buff *skb;
1401
1402	while ((skb = __skb_dequeue_tail(queue)) != NULL)
1403		kfree_skb(skb);
1404
1405	ip_cork_release(cork);
1406}
1407
1408void ip_flush_pending_frames(struct sock *sk)
1409{
1410	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
1411}
1412
1413struct sk_buff *ip_make_skb(struct sock *sk,
1414			    struct flowi4 *fl4,
1415			    int getfrag(void *from, char *to, int offset,
1416					int len, int odd, struct sk_buff *skb),
1417			    void *from, int length, int transhdrlen,
1418			    struct ipcm_cookie *ipc, struct rtable **rtp,
1419			    unsigned int flags)
1420{
1421	struct inet_cork cork;
1422	struct sk_buff_head queue;
1423	int err;
1424
1425	if (flags & MSG_PROBE)
1426		return NULL;
1427
1428	__skb_queue_head_init(&queue);
1429
1430	cork.flags = 0;
1431	cork.addr = 0;
1432	cork.opt = NULL;
1433	err = ip_setup_cork(sk, &cork, ipc, rtp);
1434	if (err)
1435		return ERR_PTR(err);
1436
1437	err = __ip_append_data(sk, fl4, &queue, &cork, getfrag,
1438			       from, length, transhdrlen, flags);
1439	if (err) {
1440		__ip_flush_pending_frames(sk, &queue, &cork);
1441		return ERR_PTR(err);
1442	}
1443
1444	return __ip_make_skb(sk, fl4, &queue, &cork);
1445}
1446
1447/*
1448 *	Fetch data from kernel space and fill in checksum if needed.
1449 */
1450static int ip_reply_glue_bits(void *dptr, char *to, int offset,
1451			      int len, int odd, struct sk_buff *skb)
1452{
1453	__wsum csum;
1454
1455	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
1456	skb->csum = csum_block_add(skb->csum, csum, odd);
1457	return 0;
1458}
1459
 1460/*
 1461 *	Generic function to send a packet as a reply to another packet.
 1462 *	Used to send TCP resets so far; ICMP should use this function too.
 1463 *
 1464 *	Must run single-threaded per socket because it uses the sock
 1465 *	structure to pass arguments.
 1466 */
1467void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
1468		   struct ip_reply_arg *arg, unsigned int len)
1469{
1470	struct inet_sock *inet = inet_sk(sk);
1471	struct ip_options_data replyopts;
1472	struct ipcm_cookie ipc;
1473	struct flowi4 fl4;
1474	struct rtable *rt = skb_rtable(skb);
1475
1476	if (ip_options_echo(&replyopts.opt.opt, skb))
1477		return;
1478
1479	ipc.addr = daddr;
1480	ipc.opt = NULL;
1481	ipc.tx_flags = 0;
1482
1483	if (replyopts.opt.opt.optlen) {
1484		ipc.opt = &replyopts.opt;
1485
1486		if (replyopts.opt.opt.srr)
1487			daddr = replyopts.opt.opt.faddr;
1488	}
1489
1490	flowi4_init_output(&fl4, arg->bound_dev_if, 0,
1491			   RT_TOS(ip_hdr(skb)->tos),
1492			   RT_SCOPE_UNIVERSE, sk->sk_protocol,
1493			   ip_reply_arg_flowi_flags(arg),
1494			   daddr, rt->rt_spec_dst,
1495			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
1496	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
1497	rt = ip_route_output_key(sock_net(sk), &fl4);
1498	if (IS_ERR(rt))
1499		return;
1500
 1501	/* And let IP do all the hard work.
 1502
 1503	   This chunk is not reentrant, hence the spinlock.
 1504	   Note that it relies on the fact that this function is called
 1505	   with BHs disabled locally and that sk cannot already be locked.
 1506	 */
1507	bh_lock_sock(sk);
1508	inet->tos = ip_hdr(skb)->tos;
1509	sk->sk_priority = skb->priority;
1510	sk->sk_protocol = ip_hdr(skb)->protocol;
1511	sk->sk_bound_dev_if = arg->bound_dev_if;
1512	ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
1513		       &ipc, &rt, MSG_DONTWAIT);
1514	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
1515		if (arg->csumoffset >= 0)
1516			*((__sum16 *)skb_transport_header(skb) +
1517			  arg->csumoffset) = csum_fold(csum_add(skb->csum,
1518								arg->csum));
1519		skb->ip_summed = CHECKSUM_NONE;
1520		ip_push_pending_frames(sk, &fl4);
1521	}
1522
1523	bh_unlock_sock(sk);
1524
1525	ip_rt_put(rt);
1526}
1527
1528void __init ip_init(void)
1529{
1530	ip_rt_init();
1531	inet_initpeers();
1532
1533#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
1534	igmp_mc_proc_init();
1535#endif
1536}