v3.1
   1/*
   2 *	IPv6 output functions
   3 *	Linux INET6 implementation
   4 *
   5 *	Authors:
   6 *	Pedro Roque		<roque@di.fc.ul.pt>
   7 *
   8 *	Based on linux/net/ipv4/ip_output.c
   9 *
  10 *	This program is free software; you can redistribute it and/or
  11 *      modify it under the terms of the GNU General Public License
  12 *      as published by the Free Software Foundation; either version
  13 *      2 of the License, or (at your option) any later version.
  14 *
  15 *	Changes:
   16 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
  17 *				extension headers are implemented.
  18 *				route changes now work.
  19 *				ip6_forward does not confuse sniffers.
  20 *				etc.
  21 *
  22 *      H. von Brand    :       Added missing #include <linux/string.h>
  23 *	Imran Patel	: 	frag id should be in NBO
  24 *      Kazunori MIYAZAWA @USAGI
  25 *			:       add ip6_append_data and related functions
  26 *				for datagram xmit
  27 */
  28
  29#include <linux/errno.h>
  30#include <linux/kernel.h>
  31#include <linux/string.h>
  32#include <linux/socket.h>
  33#include <linux/net.h>
  34#include <linux/netdevice.h>
  35#include <linux/if_arp.h>
  36#include <linux/in6.h>
  37#include <linux/tcp.h>
  38#include <linux/route.h>
  39#include <linux/module.h>
  40#include <linux/slab.h>
  41
  42#include <linux/netfilter.h>
  43#include <linux/netfilter_ipv6.h>
  44
  45#include <net/sock.h>
  46#include <net/snmp.h>
  47
  48#include <net/ipv6.h>
  49#include <net/ndisc.h>
  50#include <net/protocol.h>
  51#include <net/ip6_route.h>
  52#include <net/addrconf.h>
  53#include <net/rawv6.h>
  54#include <net/icmp.h>
  55#include <net/xfrm.h>
  56#include <net/checksum.h>
  57#include <linux/mroute6.h>
  58
  59int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
  60
  61int __ip6_local_out(struct sk_buff *skb)
  62{
  63	int len;
  64
  65	len = skb->len - sizeof(struct ipv6hdr);
  66	if (len > IPV6_MAXPLEN)
  67		len = 0;
  68	ipv6_hdr(skb)->payload_len = htons(len);
  69
  70	return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
  71		       skb_dst(skb)->dev, dst_output);
  72}
  73
  74int ip6_local_out(struct sk_buff *skb)
  75{
  76	int err;
  77
  78	err = __ip6_local_out(skb);
  79	if (likely(err == 1))
  80		err = dst_output(skb);
  81
  82	return err;
  83}
  84EXPORT_SYMBOL_GPL(ip6_local_out);
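/* --- Editorial sketch (not part of the kernel source) -------------------
 * __ip6_local_out() above fills in payload_len and then runs the
 * NF_INET_LOCAL_OUT hook; nf_hook() returns 1 when the chain accepts the
 * packet, which is why ip6_local_out() treats err == 1 as "now call
 * dst_output()".  The payload_len rule itself is easy to miss: the field
 * counts everything after the fixed 40-byte header and is set to 0 when
 * the real length exceeds IPV6_MAXPLEN (65535), the RFC 2675 jumbogram
 * convention.  A standalone userspace illustration:
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>		/* htons(), ntohs() */

#define IPV6_HDRLEN	40
#define IPV6_MAXPLEN	65535

static uint16_t ipv6_payload_len(size_t pkt_len)
{
	size_t len = pkt_len - IPV6_HDRLEN;

	if (len > IPV6_MAXPLEN)
		len = 0;	/* jumbogram: length carried in an option */
	return htons((uint16_t)len);
}

int main(void)
{
	printf("%u\n", ntohs(ipv6_payload_len(1500)));	/* prints 1460 */
	return 0;
}
/* ----------------------------------------------------------------------- */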
  85
  86/* dev_loopback_xmit for use with netfilter. */
  87static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
  88{
  89	skb_reset_mac_header(newskb);
  90	__skb_pull(newskb, skb_network_offset(newskb));
  91	newskb->pkt_type = PACKET_LOOPBACK;
  92	newskb->ip_summed = CHECKSUM_UNNECESSARY;
  93	WARN_ON(!skb_dst(newskb));
  94
  95	netif_rx_ni(newskb);
  96	return 0;
  97}
  98
  99static int ip6_finish_output2(struct sk_buff *skb)
 100{
 101	struct dst_entry *dst = skb_dst(skb);
 102	struct net_device *dev = dst->dev;
 103	struct neighbour *neigh;
 104
 105	skb->protocol = htons(ETH_P_IPV6);
 106	skb->dev = dev;
 107
 108	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
 109		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
 110
 111		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
 112		    ((mroute6_socket(dev_net(dev), skb) &&
 113		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
 114		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
 115					 &ipv6_hdr(skb)->saddr))) {
 116			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
 117
 118			/* Do not check for IFF_ALLMULTI; multicast routing
 119			   is not supported in any case.
 120			 */
 121			if (newskb)
 122				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
 123					newskb, NULL, newskb->dev,
 124					ip6_dev_loopback_xmit);
 125
 126			if (ipv6_hdr(skb)->hop_limit == 0) {
 127				IP6_INC_STATS(dev_net(dev), idev,
 128					      IPSTATS_MIB_OUTDISCARDS);
 129				kfree_skb(skb);
 130				return 0;
 131			}
 132		}
 133
 134		IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
 135				skb->len);
 136	}
 137
 138	rcu_read_lock();
 139	neigh = dst_get_neighbour(dst);
 140	if (neigh) {
 141		int res = neigh_output(neigh, skb);
 142
 143		rcu_read_unlock();
 144		return res;
 145	}
 146	rcu_read_unlock();
 147	IP6_INC_STATS_BH(dev_net(dst->dev),
 148			 ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
 149	kfree_skb(skb);
 150	return -EINVAL;
 151}
 152
 153static int ip6_finish_output(struct sk_buff *skb)
 154{
 155	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
 156	    dst_allfrag(skb_dst(skb)))
 157		return ip6_fragment(skb, ip6_finish_output2);
 158	else
 159		return ip6_finish_output2(skb);
 160}
 161
 162int ip6_output(struct sk_buff *skb)
 163{
 164	struct net_device *dev = skb_dst(skb)->dev;
 165	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
 166	if (unlikely(idev->cnf.disable_ipv6)) {
 167		IP6_INC_STATS(dev_net(dev), idev,
 168			      IPSTATS_MIB_OUTDISCARDS);
 169		kfree_skb(skb);
 170		return 0;
 171	}
 172
 173	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
 174			    ip6_finish_output,
 175			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 176}
 177
 178/*
 179 *	xmit an sk_buff (used by TCP, SCTP and DCCP)
 180 */
 181
 182int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 183	     struct ipv6_txoptions *opt)
 184{
 185	struct net *net = sock_net(sk);
 186	struct ipv6_pinfo *np = inet6_sk(sk);
 187	struct in6_addr *first_hop = &fl6->daddr;
 188	struct dst_entry *dst = skb_dst(skb);
 189	struct ipv6hdr *hdr;
 190	u8  proto = fl6->flowi6_proto;
 191	int seg_len = skb->len;
 192	int hlimit = -1;
 193	int tclass = 0;
 194	u32 mtu;
 195
 196	if (opt) {
 197		unsigned int head_room;
 198
  199		/* First: exthdrs may take lots of space (~8K for now);
 200		   MAX_HEADER is not enough.
 201		 */
 202		head_room = opt->opt_nflen + opt->opt_flen;
 203		seg_len += head_room;
 204		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
 205
 206		if (skb_headroom(skb) < head_room) {
 207			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
 208			if (skb2 == NULL) {
 209				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 210					      IPSTATS_MIB_OUTDISCARDS);
 211				kfree_skb(skb);
 212				return -ENOBUFS;
 213			}
 214			kfree_skb(skb);
 215			skb = skb2;
 216			skb_set_owner_w(skb, sk);
 217		}
 218		if (opt->opt_flen)
 219			ipv6_push_frag_opts(skb, opt, &proto);
 220		if (opt->opt_nflen)
 221			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
 222	}
 223
 224	skb_push(skb, sizeof(struct ipv6hdr));
 225	skb_reset_network_header(skb);
 226	hdr = ipv6_hdr(skb);
 227
 228	/*
 229	 *	Fill in the IPv6 header
 230	 */
 231	if (np) {
 232		tclass = np->tclass;
 233		hlimit = np->hop_limit;
 234	}
 235	if (hlimit < 0)
 236		hlimit = ip6_dst_hoplimit(dst);
 237
 238	*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl6->flowlabel;
 239
 240	hdr->payload_len = htons(seg_len);
 241	hdr->nexthdr = proto;
 242	hdr->hop_limit = hlimit;
 243
 244	ipv6_addr_copy(&hdr->saddr, &fl6->saddr);
 245	ipv6_addr_copy(&hdr->daddr, first_hop);
 246
 247	skb->priority = sk->sk_priority;
 248	skb->mark = sk->sk_mark;
 249
 250	mtu = dst_mtu(dst);
 251	if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
 252		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
 253			      IPSTATS_MIB_OUT, skb->len);
 254		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
 255			       dst->dev, dst_output);
 256	}
 257
 258	if (net_ratelimit())
 259		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
 260	skb->dev = dst->dev;
 261	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 262	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
 263	kfree_skb(skb);
 264	return -EMSGSIZE;
 265}
 266
 267EXPORT_SYMBOL(ip6_xmit);
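/* --- Editorial sketch (not part of the kernel source) -------------------
 * The line "*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) |
 * fl6->flowlabel;" in ip6_xmit() builds the first 32-bit word of the IPv6
 * header: version (4 bits, always 6), traffic class (8 bits) and flow
 * label (20 bits).  fl6->flowlabel is kept pre-positioned in network byte
 * order in the kernel, hence the OR after htonl(); the sketch below keeps
 * everything in host order for clarity.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t ip6_first_word(uint8_t tclass, uint32_t flowlabel)
{
	return (6u << 28) | ((uint32_t)tclass << 20) | (flowlabel & 0xFFFFFu);
}

int main(void)
{
	/* tclass 0, flow label 0 -> 0x60000000, the constant used above */
	printf("0x%08x\n", ip6_first_word(0, 0));
	return 0;
}
/* ----------------------------------------------------------------------- */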
 268
 269/*
  270 *	To avoid extra problems ND packets are sent through this
  271 *	routine. It's code duplication, but I really want to avoid
  272 *	extra checks since ipv6_build_header is used by TCP (which
  273 *	is performance critical for us)
 274 */
 275
 276int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
 277	       const struct in6_addr *saddr, const struct in6_addr *daddr,
 278	       int proto, int len)
 279{
 280	struct ipv6_pinfo *np = inet6_sk(sk);
 281	struct ipv6hdr *hdr;
 282
 283	skb->protocol = htons(ETH_P_IPV6);
 284	skb->dev = dev;
 285
 286	skb_reset_network_header(skb);
 287	skb_put(skb, sizeof(struct ipv6hdr));
 288	hdr = ipv6_hdr(skb);
 289
 290	*(__be32*)hdr = htonl(0x60000000);
 291
 292	hdr->payload_len = htons(len);
 293	hdr->nexthdr = proto;
 294	hdr->hop_limit = np->hop_limit;
 295
 296	ipv6_addr_copy(&hdr->saddr, saddr);
 297	ipv6_addr_copy(&hdr->daddr, daddr);
 298
 299	return 0;
 300}
 301
 302static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
 303{
 304	struct ip6_ra_chain *ra;
 305	struct sock *last = NULL;
 306
 307	read_lock(&ip6_ra_lock);
 308	for (ra = ip6_ra_chain; ra; ra = ra->next) {
 309		struct sock *sk = ra->sk;
 310		if (sk && ra->sel == sel &&
 311		    (!sk->sk_bound_dev_if ||
 312		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
 313			if (last) {
 314				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 315				if (skb2)
 316					rawv6_rcv(last, skb2);
 317			}
 318			last = sk;
 319		}
 320	}
 321
 322	if (last) {
 323		rawv6_rcv(last, skb);
 324		read_unlock(&ip6_ra_lock);
 325		return 1;
 326	}
 327	read_unlock(&ip6_ra_lock);
 328	return 0;
 329}
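/* --- Editorial sketch (not part of the kernel source) -------------------
 * ip6_call_ra_chain() uses a classic deliver-to-many pattern: clone the
 * buffer for every matching socket except the last one, then hand the
 * original to the last match, saving one copy.  The same shape in
 * standalone C, with hypothetical 'sink' receivers standing in for raw
 * sockets:
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sink { const char *name; struct sink *next; };

static void deliver(struct sink *s, char *buf)
{
	printf("%s got \"%s\"\n", s->name, buf);
	free(buf);
}

static void deliver_all(struct sink *list, char *buf)
{
	struct sink *last = NULL;

	for (struct sink *s = list; s; s = s->next) {
		if (last)
			deliver(last, strdup(buf));	/* clone for earlier matches */
		last = s;
	}
	if (last)
		deliver(last, buf);			/* original goes to the last */
	else
		free(buf);				/* nobody matched */
}

int main(void)
{
	struct sink b = { "raw-b", NULL }, a = { "raw-a", &b };

	deliver_all(&a, strdup("RA packet"));
	return 0;
}
/* ----------------------------------------------------------------------- */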
 330
 331static int ip6_forward_proxy_check(struct sk_buff *skb)
 332{
 333	struct ipv6hdr *hdr = ipv6_hdr(skb);
 334	u8 nexthdr = hdr->nexthdr;
 335	int offset;
 336
 337	if (ipv6_ext_hdr(nexthdr)) {
 338		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr);
 339		if (offset < 0)
 340			return 0;
 341	} else
 342		offset = sizeof(struct ipv6hdr);
 343
 344	if (nexthdr == IPPROTO_ICMPV6) {
 345		struct icmp6hdr *icmp6;
 346
 347		if (!pskb_may_pull(skb, (skb_network_header(skb) +
 348					 offset + 1 - skb->data)))
 349			return 0;
 350
 351		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);
 352
 353		switch (icmp6->icmp6_type) {
 354		case NDISC_ROUTER_SOLICITATION:
 355		case NDISC_ROUTER_ADVERTISEMENT:
 356		case NDISC_NEIGHBOUR_SOLICITATION:
 357		case NDISC_NEIGHBOUR_ADVERTISEMENT:
 358		case NDISC_REDIRECT:
 359			/* For reaction involving unicast neighbor discovery
 360			 * message destined to the proxied address, pass it to
 361			 * input function.
 362			 */
 363			return 1;
 364		default:
 365			break;
 366		}
 367	}
 368
 369	/*
 370	 * The proxying router can't forward traffic sent to a link-local
 371	 * address, so signal the sender and discard the packet. This
 372	 * behavior is clarified by the MIPv6 specification.
 373	 */
 374	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
 375		dst_link_failure(skb);
 376		return -1;
 377	}
 378
 379	return 0;
 380}
 381
 382static inline int ip6_forward_finish(struct sk_buff *skb)
 383{
 384	return dst_output(skb);
 385}
 386
 387int ip6_forward(struct sk_buff *skb)
 388{
 389	struct dst_entry *dst = skb_dst(skb);
 390	struct ipv6hdr *hdr = ipv6_hdr(skb);
 391	struct inet6_skb_parm *opt = IP6CB(skb);
 392	struct net *net = dev_net(dst->dev);
 393	struct neighbour *n;
 394	u32 mtu;
 395
 396	if (net->ipv6.devconf_all->forwarding == 0)
 397		goto error;
 398
 399	if (skb_warn_if_lro(skb))
 400		goto drop;
 401
 402	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
 403		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
 404		goto drop;
 405	}
 406
 407	if (skb->pkt_type != PACKET_HOST)
 408		goto drop;
 409
 410	skb_forward_csum(skb);
 411
  412	/*
  413	 *	We DO NOT do any processing on
  414	 *	RA packets, pushing them to user level AS IS
  415	 *	without any warranty that the application will be able
  416	 *	to interpret them. The reason is that we
  417	 *	cannot do anything clever here.
  418	 *
  419	 *	We are not an end node, so if the packet contains
  420	 *	AH/ESP we cannot do anything.
  421	 *	Defragmentation would also be a mistake; RA packets
  422	 *	cannot be fragmented, because there is no warranty
  423	 *	that different fragments will go along one path. --ANK
  424	 */
 425	if (opt->ra) {
 426		u8 *ptr = skb_network_header(skb) + opt->ra;
 427		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
 428			return 0;
 429	}
 430
 431	/*
 432	 *	check and decrement ttl
 433	 */
 434	if (hdr->hop_limit <= 1) {
 435		/* Force OUTPUT device used as source address */
 436		skb->dev = dst->dev;
 437		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
 438		IP6_INC_STATS_BH(net,
 439				 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
 440
 441		kfree_skb(skb);
 442		return -ETIMEDOUT;
 443	}
 444
 445	/* XXX: idev->cnf.proxy_ndp? */
 446	if (net->ipv6.devconf_all->proxy_ndp &&
 447	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
 448		int proxied = ip6_forward_proxy_check(skb);
 449		if (proxied > 0)
 450			return ip6_input(skb);
 451		else if (proxied < 0) {
 452			IP6_INC_STATS(net, ip6_dst_idev(dst),
 453				      IPSTATS_MIB_INDISCARDS);
 454			goto drop;
 455		}
 456	}
 457
 458	if (!xfrm6_route_forward(skb)) {
 459		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
 460		goto drop;
 461	}
 462	dst = skb_dst(skb);
 463
 464	/* IPv6 specs say nothing about it, but it is clear that we cannot
 465	   send redirects to source routed frames.
 466	   We don't send redirects to frames decapsulated from IPsec.
 467	 */
 468	n = dst_get_neighbour(dst);
 469	if (skb->dev == dst->dev && n && opt->srcrt == 0 && !skb_sec_path(skb)) {
 470		struct in6_addr *target = NULL;
 471		struct rt6_info *rt;
 472
 473		/*
 474		 *	incoming and outgoing devices are the same
 475		 *	send a redirect.
 476		 */
 477
 478		rt = (struct rt6_info *) dst;
 479		if ((rt->rt6i_flags & RTF_GATEWAY))
 480			target = (struct in6_addr*)&n->primary_key;
 481		else
 482			target = &hdr->daddr;
 483
 484		if (!rt->rt6i_peer)
 485			rt6_bind_peer(rt, 1);
 486
 487		/* Limit redirects both by destination (here)
 488		   and by source (inside ndisc_send_redirect)
 489		 */
 490		if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
 491			ndisc_send_redirect(skb, n, target);
 492	} else {
 493		int addrtype = ipv6_addr_type(&hdr->saddr);
 494
 495		/* This check is security critical. */
 496		if (addrtype == IPV6_ADDR_ANY ||
 497		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
 498			goto error;
 499		if (addrtype & IPV6_ADDR_LINKLOCAL) {
 500			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
 501				    ICMPV6_NOT_NEIGHBOUR, 0);
 502			goto error;
 503		}
 504	}
 505
 506	mtu = dst_mtu(dst);
 507	if (mtu < IPV6_MIN_MTU)
 508		mtu = IPV6_MIN_MTU;
 509
 510	if (skb->len > mtu && !skb_is_gso(skb)) {
 511		/* Again, force OUTPUT device used as source address */
 512		skb->dev = dst->dev;
 513		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 514		IP6_INC_STATS_BH(net,
 515				 ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
 516		IP6_INC_STATS_BH(net,
 517				 ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
 518		kfree_skb(skb);
 519		return -EMSGSIZE;
 520	}
 521
 522	if (skb_cow(skb, dst->dev->hard_header_len)) {
 523		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
 524		goto drop;
 525	}
 526
 527	hdr = ipv6_hdr(skb);
 528
  529	/* Decrementing the hop limit is delayed until after the skb COW */
 530
 531	hdr->hop_limit--;
 532
 533	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
 534	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
 535		       ip6_forward_finish);
 536
 537error:
 538	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
 539drop:
 540	kfree_skb(skb);
 541	return -EINVAL;
 542}
 543
 544static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
 545{
 546	to->pkt_type = from->pkt_type;
 547	to->priority = from->priority;
 548	to->protocol = from->protocol;
 549	skb_dst_drop(to);
 550	skb_dst_set(to, dst_clone(skb_dst(from)));
 551	to->dev = from->dev;
 552	to->mark = from->mark;
 553
 554#ifdef CONFIG_NET_SCHED
 555	to->tc_index = from->tc_index;
 556#endif
 557	nf_copy(to, from);
 558#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
 559    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
 560	to->nf_trace = from->nf_trace;
 561#endif
 562	skb_copy_secmark(to, from);
 563}
 564
 565int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 566{
 567	u16 offset = sizeof(struct ipv6hdr);
 568	struct ipv6_opt_hdr *exthdr =
 569				(struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
 570	unsigned int packet_len = skb->tail - skb->network_header;
 571	int found_rhdr = 0;
 572	*nexthdr = &ipv6_hdr(skb)->nexthdr;
 573
 574	while (offset + 1 <= packet_len) {
 575
 576		switch (**nexthdr) {
 577
 578		case NEXTHDR_HOP:
 579			break;
 580		case NEXTHDR_ROUTING:
 581			found_rhdr = 1;
 582			break;
 583		case NEXTHDR_DEST:
 584#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
 585			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
 586				break;
 587#endif
 588			if (found_rhdr)
 589				return offset;
 590			break;
  591		default:
 592			return offset;
 593		}
 594
 595		offset += ipv6_optlen(exthdr);
 596		*nexthdr = &exthdr->nexthdr;
 597		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
 598						 offset);
 599	}
 600
 601	return offset;
 602}
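/* --- Editorial sketch (not part of the kernel source) -------------------
 * ip6_find_1stfragopt() walks the extension-header chain to find where a
 * Fragment header may be inserted.  Each fragmentable extension header
 * begins with (nexthdr, hdrlen) and spans (hdrlen + 1) * 8 bytes, which is
 * what ipv6_optlen() computes.  A deliberately simplified standalone walk
 * (the kernel additionally keeps Destination Options headers that precede
 * a Routing header in the unfragmentable part, and special-cases the MIPv6
 * home-address option):
 */
#include <stdint.h>
#include <stddef.h>

struct ext_hdr { uint8_t nexthdr; uint8_t hdrlen; };

#define NEXTHDR_HOP	0
#define NEXTHDR_ROUTING	43
#define NEXTHDR_DEST	60

static size_t first_frag_offset(const uint8_t *pkt, size_t len, uint8_t nh)
{
	size_t off = 40;	/* skip the fixed IPv6 header */

	while (off + sizeof(struct ext_hdr) <= len &&
	       (nh == NEXTHDR_HOP || nh == NEXTHDR_ROUTING ||
		nh == NEXTHDR_DEST)) {
		const struct ext_hdr *eh = (const struct ext_hdr *)(pkt + off);

		nh = eh->nexthdr;
		off += ((size_t)eh->hdrlen + 1) * 8;
	}
	return off;
}

int main(void)
{
	uint8_t pkt[48] = { 0 };	/* IPv6 header + one 8-byte Hop-by-Hop */

	pkt[40] = 6;	/* hop-by-hop's nexthdr: TCP */
	pkt[41] = 0;	/* hdrlen 0 -> (0 + 1) * 8 = 8 bytes */
	return first_frag_offset(pkt, sizeof(pkt), NEXTHDR_HOP) == 48 ? 0 : 1;
}
/* ----------------------------------------------------------------------- */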
 603
 604void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
 605{
 606	static atomic_t ipv6_fragmentation_id;
 607	int old, new;
 608
 609	if (rt) {
 610		struct inet_peer *peer;
 611
 612		if (!rt->rt6i_peer)
 613			rt6_bind_peer(rt, 1);
 614		peer = rt->rt6i_peer;
 615		if (peer) {
 616			fhdr->identification = htonl(inet_getid(peer, 0));
 617			return;
 618		}
 619	}
 620	do {
 621		old = atomic_read(&ipv6_fragmentation_id);
 622		new = old + 1;
 623		if (!new)
 624			new = 1;
 625	} while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
 626	fhdr->identification = htonl(new);
 627}
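/* --- Editorial sketch (not part of the kernel source) -------------------
 * When no inet_peer is available, ipv6_select_ident() falls back to a
 * global lock-free counter.  The cmpxchg loop increments it and skips 0,
 * because ip6_fragment()'s slow path uses frag_id == 0 to mean "no ID
 * assigned yet".  The same loop with C11 atomics:
 */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint32_t frag_id;

static uint32_t next_frag_id(void)
{
	uint32_t old, new;

	do {
		old = atomic_load(&frag_id);
		new = old + 1;
		if (!new)
			new = 1;	/* never hand out 0 */
	} while (!atomic_compare_exchange_weak(&frag_id, &old, new));
	return new;
}
/* ----------------------------------------------------------------------- */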
 628
 629int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 630{
 631	struct sk_buff *frag;
 632	struct rt6_info *rt = (struct rt6_info*)skb_dst(skb);
 633	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
 634	struct ipv6hdr *tmp_hdr;
 635	struct frag_hdr *fh;
 636	unsigned int mtu, hlen, left, len;
 637	__be32 frag_id = 0;
 638	int ptr, offset = 0, err=0;
 639	u8 *prevhdr, nexthdr = 0;
 640	struct net *net = dev_net(skb_dst(skb)->dev);
 641
 642	hlen = ip6_find_1stfragopt(skb, &prevhdr);
 643	nexthdr = *prevhdr;
 644
 645	mtu = ip6_skb_dst_mtu(skb);
 646
 647	/* We must not fragment if the socket is set to force MTU discovery
  648	 * or if the skb was not generated by a local socket.
 649	 */
 650	if (!skb->local_df && skb->len > mtu) {
 651		skb->dev = skb_dst(skb)->dev;
 652		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 653		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 654			      IPSTATS_MIB_FRAGFAILS);
 655		kfree_skb(skb);
 656		return -EMSGSIZE;
 657	}
 658
 659	if (np && np->frag_size < mtu) {
 660		if (np->frag_size)
 661			mtu = np->frag_size;
 662	}
 663	mtu -= hlen + sizeof(struct frag_hdr);
 664
 665	if (skb_has_frag_list(skb)) {
 666		int first_len = skb_pagelen(skb);
 667		struct sk_buff *frag2;
 668
 669		if (first_len - hlen > mtu ||
 670		    ((first_len - hlen) & 7) ||
 671		    skb_cloned(skb))
 672			goto slow_path;
 673
 674		skb_walk_frags(skb, frag) {
 675			/* Correct geometry. */
 676			if (frag->len > mtu ||
 677			    ((frag->len & 7) && frag->next) ||
 678			    skb_headroom(frag) < hlen)
 679				goto slow_path_clean;
 680
 681			/* Partially cloned skb? */
 682			if (skb_shared(frag))
 683				goto slow_path_clean;
 684
 685			BUG_ON(frag->sk);
 686			if (skb->sk) {
 687				frag->sk = skb->sk;
 688				frag->destructor = sock_wfree;
 689			}
 690			skb->truesize -= frag->truesize;
 691		}
 692
 693		err = 0;
 694		offset = 0;
 695		frag = skb_shinfo(skb)->frag_list;
 696		skb_frag_list_init(skb);
 697		/* BUILD HEADER */
 698
 699		*prevhdr = NEXTHDR_FRAGMENT;
 700		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
 701		if (!tmp_hdr) {
 702			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 703				      IPSTATS_MIB_FRAGFAILS);
 704			return -ENOMEM;
 705		}
 706
 707		__skb_pull(skb, hlen);
 708		fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr));
 709		__skb_push(skb, hlen);
 710		skb_reset_network_header(skb);
 711		memcpy(skb_network_header(skb), tmp_hdr, hlen);
 712
 713		ipv6_select_ident(fh, rt);
 714		fh->nexthdr = nexthdr;
 715		fh->reserved = 0;
 716		fh->frag_off = htons(IP6_MF);
 717		frag_id = fh->identification;
 718
 719		first_len = skb_pagelen(skb);
 720		skb->data_len = first_len - skb_headlen(skb);
 721		skb->len = first_len;
 722		ipv6_hdr(skb)->payload_len = htons(first_len -
 723						   sizeof(struct ipv6hdr));
 724
 725		dst_hold(&rt->dst);
 726
 727		for (;;) {
  728			/* Prepare the header of the next frame,
  729			 * before the previous one goes down. */
 730			if (frag) {
 731				frag->ip_summed = CHECKSUM_NONE;
 732				skb_reset_transport_header(frag);
 733				fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr));
 734				__skb_push(frag, hlen);
 735				skb_reset_network_header(frag);
 736				memcpy(skb_network_header(frag), tmp_hdr,
 737				       hlen);
 738				offset += skb->len - hlen - sizeof(struct frag_hdr);
 739				fh->nexthdr = nexthdr;
 740				fh->reserved = 0;
 741				fh->frag_off = htons(offset);
 742				if (frag->next != NULL)
 743					fh->frag_off |= htons(IP6_MF);
 744				fh->identification = frag_id;
 745				ipv6_hdr(frag)->payload_len =
 746						htons(frag->len -
 747						      sizeof(struct ipv6hdr));
 748				ip6_copy_metadata(frag, skb);
 749			}
 750
 751			err = output(skb);
  752			if (!err)
 753				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
 754					      IPSTATS_MIB_FRAGCREATES);
 755
 756			if (err || !frag)
 757				break;
 758
 759			skb = frag;
 760			frag = skb->next;
 761			skb->next = NULL;
 762		}
 763
 764		kfree(tmp_hdr);
 765
 766		if (err == 0) {
 767			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
 768				      IPSTATS_MIB_FRAGOKS);
 769			dst_release(&rt->dst);
 770			return 0;
 771		}
 772
 773		while (frag) {
 774			skb = frag->next;
 775			kfree_skb(frag);
 776			frag = skb;
 777		}
 778
 779		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
 780			      IPSTATS_MIB_FRAGFAILS);
 781		dst_release(&rt->dst);
 782		return err;
 783
 784slow_path_clean:
 785		skb_walk_frags(skb, frag2) {
 786			if (frag2 == frag)
 787				break;
 788			frag2->sk = NULL;
 789			frag2->destructor = NULL;
 790			skb->truesize += frag2->truesize;
 791		}
 792	}
 793
 794slow_path:
 795	left = skb->len - hlen;		/* Space per frame */
 796	ptr = hlen;			/* Where to start from */
 797
 798	/*
 799	 *	Fragment the datagram.
 800	 */
 801
 802	*prevhdr = NEXTHDR_FRAGMENT;
 803
 804	/*
 805	 *	Keep copying data until we run out.
 806	 */
  807	while (left > 0) {
 808		len = left;
 809		/* IF: it doesn't fit, use 'mtu' - the data space left */
 810		if (len > mtu)
 811			len = mtu;
 812		/* IF: we are not sending up to and including the packet end
 813		   then align the next start on an eight byte boundary */
 814		if (len < left)	{
 815			len &= ~7;
 816		}
 817		/*
 818		 *	Allocate buffer.
 819		 */
 820
 821		if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->dst.dev), GFP_ATOMIC)) == NULL) {
 822			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
 823			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 824				      IPSTATS_MIB_FRAGFAILS);
 825			err = -ENOMEM;
 826			goto fail;
 827		}
 828
 829		/*
 830		 *	Set up data on packet
 831		 */
 832
 833		ip6_copy_metadata(frag, skb);
 834		skb_reserve(frag, LL_RESERVED_SPACE(rt->dst.dev));
 835		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
 836		skb_reset_network_header(frag);
 837		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
 838		frag->transport_header = (frag->network_header + hlen +
 839					  sizeof(struct frag_hdr));
 840
 841		/*
 842		 *	Charge the memory for the fragment to any owner
 843		 *	it might possess
 844		 */
 845		if (skb->sk)
 846			skb_set_owner_w(frag, skb->sk);
 847
 848		/*
 849		 *	Copy the packet header into the new buffer.
 850		 */
 851		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
 852
 853		/*
 854		 *	Build fragment header.
 855		 */
 856		fh->nexthdr = nexthdr;
 857		fh->reserved = 0;
 858		if (!frag_id) {
 859			ipv6_select_ident(fh, rt);
 860			frag_id = fh->identification;
 861		} else
 862			fh->identification = frag_id;
 863
 864		/*
 865		 *	Copy a block of the IP datagram.
 866		 */
 867		if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
 868			BUG();
 869		left -= len;
 870
 871		fh->frag_off = htons(offset);
 872		if (left > 0)
 873			fh->frag_off |= htons(IP6_MF);
 874		ipv6_hdr(frag)->payload_len = htons(frag->len -
 875						    sizeof(struct ipv6hdr));
 876
 877		ptr += len;
 878		offset += len;
 879
 880		/*
 881		 *	Put this fragment into the sending queue.
 882		 */
 883		err = output(frag);
 884		if (err)
 885			goto fail;
 886
 887		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 888			      IPSTATS_MIB_FRAGCREATES);
 889	}
 890	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 891		      IPSTATS_MIB_FRAGOKS);
 892	kfree_skb(skb);
 893	return err;
 894
 895fail:
 896	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 897		      IPSTATS_MIB_FRAGFAILS);
 898	kfree_skb(skb);
 899	return err;
 900}
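/* --- Editorial sketch (not part of the kernel source) -------------------
 * The slow-path arithmetic in ip6_fragment() above, in miniature.  By the
 * time the loop runs, 'mtu' has already been reduced by the unfragmentable
 * header length plus sizeof(struct frag_hdr), so it is the per-fragment
 * data budget.  Every fragment except the last must carry a multiple of 8
 * bytes: the fragment-offset field's low three bits hold flags (IP6_MF
 * among them), so byte offsets must stay multiples of 8.
 */
#include <stdio.h>

static void plan_fragments(unsigned int payload, unsigned int mtu)
{
	unsigned int left = payload, offset = 0;

	while (left > 0) {
		unsigned int len = left > mtu ? mtu : left;

		if (len < left)
			len &= ~7u;	/* align every fragment but the last */
		printf("frag at %u: %u bytes, MF=%d\n",
		       offset, len, left - len > 0);
		offset += len;
		left -= len;
	}
}

int main(void)
{
	/* 1500-byte link MTU, 40-byte IPv6 header, 8-byte Fragment header */
	plan_fragments(3000, 1500 - 40 - 8);
	return 0;
}
/* ----------------------------------------------------------------------- */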
 901
 902static inline int ip6_rt_check(const struct rt6key *rt_key,
 903			       const struct in6_addr *fl_addr,
 904			       const struct in6_addr *addr_cache)
 905{
 906	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
 907		(addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
 908}
 909
 910static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
 911					  struct dst_entry *dst,
 912					  const struct flowi6 *fl6)
 913{
 914	struct ipv6_pinfo *np = inet6_sk(sk);
 915	struct rt6_info *rt = (struct rt6_info *)dst;
 916
 917	if (!dst)
 918		goto out;
 919
  920	/* Yes, checking route validity in the unconnected
  921	 * case is not very simple. Take into account
  922	 * that we do not support routing by source, TOS,
  923	 * or MSG_DONTROUTE 		--ANK (980726)
 924	 *
 925	 * 1. ip6_rt_check(): If route was host route,
 926	 *    check that cached destination is current.
 927	 *    If it is network route, we still may
 928	 *    check its validity using saved pointer
 929	 *    to the last used address: daddr_cache.
  930	 *    We do not want to save the whole address now,
  931	 *    (because the main consumer of this service
  932	 *    is TCP, which does not have this problem),
  933	 *    so the last trick works only on connected
  934	 *    sockets.
 935	 * 2. oif also should be the same.
 936	 */
 937	if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
 938#ifdef CONFIG_IPV6_SUBTREES
 939	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
 940#endif
 941	    (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
 942		dst_release(dst);
 943		dst = NULL;
 944	}
 945
 946out:
 947	return dst;
 948}
 949
 950static int ip6_dst_lookup_tail(struct sock *sk,
 951			       struct dst_entry **dst, struct flowi6 *fl6)
 952{
 953	struct net *net = sock_net(sk);
 954#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
 955	struct neighbour *n;
 956#endif
 957	int err;
 958
 959	if (*dst == NULL)
 960		*dst = ip6_route_output(net, sk, fl6);
 961
 962	if ((err = (*dst)->error))
 963		goto out_err_release;
 964
 965	if (ipv6_addr_any(&fl6->saddr)) {
 966		struct rt6_info *rt = (struct rt6_info *) *dst;
 967		err = ip6_route_get_saddr(net, rt, &fl6->daddr,
 968					  sk ? inet6_sk(sk)->srcprefs : 0,
 969					  &fl6->saddr);
 970		if (err)
 971			goto out_err_release;
 972	}
 973
 974#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
 975	/*
 976	 * Here if the dst entry we've looked up
 977	 * has a neighbour entry that is in the INCOMPLETE
 978	 * state and the src address from the flow is
 979	 * marked as OPTIMISTIC, we release the found
  980	 * dst entry and replace it with the
 981	 * dst entry of the nexthop router
 982	 */
 983	rcu_read_lock();
 984	n = dst_get_neighbour(*dst);
 985	if (n && !(n->nud_state & NUD_VALID)) {
 986		struct inet6_ifaddr *ifp;
 987		struct flowi6 fl_gw6;
 988		int redirect;
 989
 990		rcu_read_unlock();
 991		ifp = ipv6_get_ifaddr(net, &fl6->saddr,
 992				      (*dst)->dev, 1);
 993
 994		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
 995		if (ifp)
 996			in6_ifa_put(ifp);
 997
 998		if (redirect) {
 999			/*
1000			 * We need to get the dst entry for the
1001			 * default router instead
1002			 */
1003			dst_release(*dst);
1004			memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
1005			memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
1006			*dst = ip6_route_output(net, sk, &fl_gw6);
1007			if ((err = (*dst)->error))
1008				goto out_err_release;
1009		}
1010	} else {
1011		rcu_read_unlock();
1012	}
1013#endif
1014
1015	return 0;
1016
1017out_err_release:
1018	if (err == -ENETUNREACH)
1019		IP6_INC_STATS_BH(net, NULL, IPSTATS_MIB_OUTNOROUTES);
1020	dst_release(*dst);
1021	*dst = NULL;
1022	return err;
1023}
1024
1025/**
1026 *	ip6_dst_lookup - perform route lookup on flow
1027 *	@sk: socket which provides route info
1028 *	@dst: pointer to dst_entry * for result
1029 *	@fl6: flow to lookup
1030 *
1031 *	This function performs a route lookup on the given flow.
1032 *
1033 *	It returns zero on success, or a standard errno code on error.
1034 */
1035int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6)
1036{
1037	*dst = NULL;
1038	return ip6_dst_lookup_tail(sk, dst, fl6);
1039}
1040EXPORT_SYMBOL_GPL(ip6_dst_lookup);
1041
1042/**
1043 *	ip6_dst_lookup_flow - perform route lookup on flow with ipsec
1044 *	@sk: socket which provides route info
1045 *	@fl6: flow to lookup
1046 *	@final_dst: final destination address for ipsec lookup
1047 *	@can_sleep: we are in a sleepable context
1048 *
1049 *	This function performs a route lookup on the given flow.
1050 *
1051 *	It returns a valid dst pointer on success, or a pointer encoded
1052 *	error code.
1053 */
1054struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
1055				      const struct in6_addr *final_dst,
1056				      bool can_sleep)
1057{
1058	struct dst_entry *dst = NULL;
1059	int err;
1060
1061	err = ip6_dst_lookup_tail(sk, &dst, fl6);
1062	if (err)
1063		return ERR_PTR(err);
1064	if (final_dst)
1065		ipv6_addr_copy(&fl6->daddr, final_dst);
1066	if (can_sleep)
1067		fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
1068
1069	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
1070}
1071EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
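/* --- Editorial sketch (not part of the kernel source) -------------------
 * The "pointer encoded error code" mentioned above is the kernel's
 * ERR_PTR() / IS_ERR() / PTR_ERR() convention: a negative errno is stored
 * in the top (never-mapped) 4095 bytes of the pointer range.  Callers
 * therefore do, roughly:
 *
 *	dst = ip6_dst_lookup_flow(sk, &fl6, final_dst, false);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);
 *
 * The trick itself in standalone C:
 */
#define MAX_ERRNO	4095

static inline void *err_ptr(long err)		/* err is negative, e.g. -12 */
{
	return (void *)err;
}

static inline int is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static inline long ptr_err(const void *ptr)
{
	return (long)ptr;
}
/* ----------------------------------------------------------------------- */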
1072
1073/**
1074 *	ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
1075 *	@sk: socket which provides the dst cache and route info
1076 *	@fl6: flow to lookup
1077 *	@final_dst: final destination address for ipsec lookup
1078 *	@can_sleep: we are in a sleepable context
1079 *
1080 *	This function performs a route lookup on the given flow with the
1081 *	possibility of using the cached route in the socket if it is valid.
1082 *	It will take the socket dst lock when operating on the dst cache.
1083 *	As a result, this function can only be used in process context.
1084 *
1085 *	It returns a valid dst pointer on success, or a pointer encoded
1086 *	error code.
1087 */
1088struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
1089					 const struct in6_addr *final_dst,
1090					 bool can_sleep)
1091{
1092	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
1093	int err;
1094
1095	dst = ip6_sk_dst_check(sk, dst, fl6);
1096
1097	err = ip6_dst_lookup_tail(sk, &dst, fl6);
1098	if (err)
1099		return ERR_PTR(err);
1100	if (final_dst)
1101		ipv6_addr_copy(&fl6->daddr, final_dst);
1102	if (can_sleep)
1103		fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
1104
1105	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
1106}
1107EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
1108
1109static inline int ip6_ufo_append_data(struct sock *sk,
1110			int getfrag(void *from, char *to, int offset, int len,
1111			int odd, struct sk_buff *skb),
1112			void *from, int length, int hh_len, int fragheaderlen,
1113			int transhdrlen, int mtu,unsigned int flags,
1114			struct rt6_info *rt)
1115
1116{
1117	struct sk_buff *skb;
1118	int err;
1119
 1120	/* The network device supports UDP large send offload, so
 1121	 * create one single skb containing the complete UDP
 1122	 * datagram.
 1123	 */
1124	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
1125		skb = sock_alloc_send_skb(sk,
1126			hh_len + fragheaderlen + transhdrlen + 20,
1127			(flags & MSG_DONTWAIT), &err);
1128		if (skb == NULL)
1129			return -ENOMEM;
1130
1131		/* reserve space for Hardware header */
1132		skb_reserve(skb, hh_len);
1133
1134		/* create space for UDP/IP header */
1135		skb_put(skb,fragheaderlen + transhdrlen);
1136
1137		/* initialize network header pointer */
1138		skb_reset_network_header(skb);
1139
1140		/* initialize protocol header pointer */
1141		skb->transport_header = skb->network_header + fragheaderlen;
1142
1143		skb->ip_summed = CHECKSUM_PARTIAL;
1144		skb->csum = 0;
1145	}
1146
1147	err = skb_append_datato_frags(sk,skb, getfrag, from,
1148				      (length - transhdrlen));
1149	if (!err) {
1150		struct frag_hdr fhdr;
1151
1152		/* Specify the length of each IPv6 datagram fragment.
1153		 * It has to be a multiple of 8.
1154		 */
1155		skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
1156					     sizeof(struct frag_hdr)) & ~7;
1157		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1158		ipv6_select_ident(&fhdr, rt);
1159		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
1160		__skb_queue_tail(&sk->sk_write_queue, skb);
1161
1162		return 0;
1163	}
 1164	/* There is not enough support to do UDP LSO,
 1165	 * so follow the normal path.
 1166	 */
1167	kfree_skb(skb);
1168
1169	return err;
1170}
1171
1172static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
1173					       gfp_t gfp)
1174{
1175	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1176}
1177
1178static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
1179						gfp_t gfp)
1180{
1181	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1182}
1183
1184int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1185	int offset, int len, int odd, struct sk_buff *skb),
1186	void *from, int length, int transhdrlen,
1187	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
1188	struct rt6_info *rt, unsigned int flags, int dontfrag)
1189{
1190	struct inet_sock *inet = inet_sk(sk);
1191	struct ipv6_pinfo *np = inet6_sk(sk);
1192	struct inet_cork *cork;
1193	struct sk_buff *skb;
1194	unsigned int maxfraglen, fragheaderlen;
1195	int exthdrlen;
1196	int hh_len;
1197	int mtu;
1198	int copy;
1199	int err;
1200	int offset = 0;
1201	int csummode = CHECKSUM_NONE;
1202	__u8 tx_flags = 0;
1203
1204	if (flags&MSG_PROBE)
1205		return 0;
1206	cork = &inet->cork.base;
1207	if (skb_queue_empty(&sk->sk_write_queue)) {
1208		/*
1209		 * setup for corking
1210		 */
1211		if (opt) {
1212			if (WARN_ON(np->cork.opt))
1213				return -EINVAL;
1214
1215			np->cork.opt = kmalloc(opt->tot_len, sk->sk_allocation);
1216			if (unlikely(np->cork.opt == NULL))
1217				return -ENOBUFS;
1218
1219			np->cork.opt->tot_len = opt->tot_len;
1220			np->cork.opt->opt_flen = opt->opt_flen;
1221			np->cork.opt->opt_nflen = opt->opt_nflen;
1222
1223			np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
1224							    sk->sk_allocation);
1225			if (opt->dst0opt && !np->cork.opt->dst0opt)
1226				return -ENOBUFS;
1227
1228			np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
1229							    sk->sk_allocation);
1230			if (opt->dst1opt && !np->cork.opt->dst1opt)
1231				return -ENOBUFS;
1232
1233			np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
1234							   sk->sk_allocation);
1235			if (opt->hopopt && !np->cork.opt->hopopt)
1236				return -ENOBUFS;
1237
1238			np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
1239							    sk->sk_allocation);
1240			if (opt->srcrt && !np->cork.opt->srcrt)
1241				return -ENOBUFS;
1242
 1243			/* need source address above --miyazawa */
1244		}
1245		dst_hold(&rt->dst);
1246		cork->dst = &rt->dst;
1247		inet->cork.fl.u.ip6 = *fl6;
1248		np->cork.hop_limit = hlimit;
1249		np->cork.tclass = tclass;
1250		mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
1251		      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
1252		if (np->frag_size < mtu) {
1253			if (np->frag_size)
1254				mtu = np->frag_size;
1255		}
1256		cork->fragsize = mtu;
1257		if (dst_allfrag(rt->dst.path))
1258			cork->flags |= IPCORK_ALLFRAG;
1259		cork->length = 0;
1260		sk->sk_sndmsg_page = NULL;
1261		sk->sk_sndmsg_off = 0;
1262		exthdrlen = rt->dst.header_len + (opt ? opt->opt_flen : 0) -
1263			    rt->rt6i_nfheader_len;
1264		length += exthdrlen;
1265		transhdrlen += exthdrlen;
1266	} else {
1267		rt = (struct rt6_info *)cork->dst;
1268		fl6 = &inet->cork.fl.u.ip6;
1269		opt = np->cork.opt;
1270		transhdrlen = 0;
1271		exthdrlen = 0;
1272		mtu = cork->fragsize;
1273	}
1274
1275	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
1276
1277	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
1278			(opt ? opt->opt_nflen : 0);
1279	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);
1280
1281	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
1282		if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
1283			ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen);
1284			return -EMSGSIZE;
1285		}
1286	}
1287
1288	/* For UDP, check if TX timestamp is enabled */
1289	if (sk->sk_type == SOCK_DGRAM) {
1290		err = sock_tx_timestamp(sk, &tx_flags);
1291		if (err)
1292			goto error;
1293	}
1294
1295	/*
1296	 * Let's try using as much space as possible.
1297	 * Use MTU if total length of the message fits into the MTU.
1298	 * Otherwise, we need to reserve fragment header and
 1299	 * fragment alignment (= 8-15 octets, in total).
1300	 *
 1301	 * Note that we may need to "move" the data from the tail
 1302	 * of the buffer to the new fragment when we split
1303	 * the message.
1304	 *
1305	 * FIXME: It may be fragmented into multiple chunks
1306	 *        at once if non-fragmentable extension headers
1307	 *        are too large.
1308	 * --yoshfuji
1309	 */
1310
1311	cork->length += length;
1312	if (length > mtu) {
1313		int proto = sk->sk_protocol;
1314		if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){
1315			ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
1316			return -EMSGSIZE;
1317		}
1318
1319		if (proto == IPPROTO_UDP &&
1320		    (rt->dst.dev->features & NETIF_F_UFO)) {
1321
1322			err = ip6_ufo_append_data(sk, getfrag, from, length,
1323						  hh_len, fragheaderlen,
1324						  transhdrlen, mtu, flags, rt);
1325			if (err)
1326				goto error;
1327			return 0;
1328		}
1329	}
1330
1331	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
1332		goto alloc_new_skb;
1333
1334	while (length > 0) {
1335		/* Check if the remaining data fits into current packet. */
1336		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
1337		if (copy < length)
1338			copy = maxfraglen - skb->len;
1339
1340		if (copy <= 0) {
1341			char *data;
1342			unsigned int datalen;
1343			unsigned int fraglen;
1344			unsigned int fraggap;
1345			unsigned int alloclen;
1346			struct sk_buff *skb_prev;
1347alloc_new_skb:
1348			skb_prev = skb;
1349
1350			/* There's no room in the current skb */
1351			if (skb_prev)
1352				fraggap = skb_prev->len - maxfraglen;
1353			else
1354				fraggap = 0;
1355
1356			/*
1357			 * If remaining data exceeds the mtu,
1358			 * we know we need more fragment(s).
1359			 */
1360			datalen = length + fraggap;
1361			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
1362				datalen = maxfraglen - fragheaderlen;
1363
1364			fraglen = datalen + fragheaderlen;
1365			if ((flags & MSG_MORE) &&
1366			    !(rt->dst.dev->features&NETIF_F_SG))
1367				alloclen = mtu;
1368			else
1369				alloclen = datalen + fragheaderlen;
1370
1371			/*
1372			 * The last fragment gets additional space at tail.
 1373			 * Note: we overallocate on fragments with MSG_MORE
1374			 * because we have no idea if we're the last one.
1375			 */
1376			if (datalen == length + fraggap)
1377				alloclen += rt->dst.trailer_len;
1378
1379			/*
1380			 * We just reserve space for fragment header.
1381			 * Note: this may be overallocation if the message
1382			 * (without MSG_MORE) fits into the MTU.
1383			 */
1384			alloclen += sizeof(struct frag_hdr);
1385
1386			if (transhdrlen) {
1387				skb = sock_alloc_send_skb(sk,
1388						alloclen + hh_len,
1389						(flags & MSG_DONTWAIT), &err);
1390			} else {
1391				skb = NULL;
1392				if (atomic_read(&sk->sk_wmem_alloc) <=
1393				    2 * sk->sk_sndbuf)
1394					skb = sock_wmalloc(sk,
1395							   alloclen + hh_len, 1,
1396							   sk->sk_allocation);
1397				if (unlikely(skb == NULL))
1398					err = -ENOBUFS;
1399				else {
1400					/* Only the initial fragment
1401					 * is time stamped.
1402					 */
1403					tx_flags = 0;
1404				}
1405			}
1406			if (skb == NULL)
1407				goto error;
1408			/*
1409			 *	Fill in the control structures
1410			 */
1411			skb->ip_summed = csummode;
1412			skb->csum = 0;
1413			/* reserve for fragmentation */
1414			skb_reserve(skb, hh_len+sizeof(struct frag_hdr));
1415
1416			if (sk->sk_type == SOCK_DGRAM)
1417				skb_shinfo(skb)->tx_flags = tx_flags;
1418
1419			/*
1420			 *	Find where to start putting bytes
1421			 */
1422			data = skb_put(skb, fraglen);
1423			skb_set_network_header(skb, exthdrlen);
1424			data += fragheaderlen;
1425			skb->transport_header = (skb->network_header +
1426						 fragheaderlen);
1427			if (fraggap) {
1428				skb->csum = skb_copy_and_csum_bits(
1429					skb_prev, maxfraglen,
1430					data + transhdrlen, fraggap, 0);
1431				skb_prev->csum = csum_sub(skb_prev->csum,
1432							  skb->csum);
1433				data += fraggap;
1434				pskb_trim_unique(skb_prev, maxfraglen);
1435			}
1436			copy = datalen - transhdrlen - fraggap;
1437			if (copy < 0) {
1438				err = -EINVAL;
1439				kfree_skb(skb);
1440				goto error;
1441			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
1442				err = -EFAULT;
1443				kfree_skb(skb);
1444				goto error;
1445			}
1446
1447			offset += copy;
1448			length -= datalen - fraggap;
1449			transhdrlen = 0;
1450			exthdrlen = 0;
1451			csummode = CHECKSUM_NONE;
1452
1453			/*
1454			 * Put the packet on the pending queue
1455			 */
1456			__skb_queue_tail(&sk->sk_write_queue, skb);
1457			continue;
1458		}
1459
1460		if (copy > length)
1461			copy = length;
1462
1463		if (!(rt->dst.dev->features&NETIF_F_SG)) {
1464			unsigned int off;
1465
1466			off = skb->len;
1467			if (getfrag(from, skb_put(skb, copy),
1468						offset, copy, off, skb) < 0) {
1469				__skb_trim(skb, off);
1470				err = -EFAULT;
1471				goto error;
1472			}
1473		} else {
1474			int i = skb_shinfo(skb)->nr_frags;
1475			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
1476			struct page *page = sk->sk_sndmsg_page;
1477			int off = sk->sk_sndmsg_off;
1478			unsigned int left;
1479
1480			if (page && (left = PAGE_SIZE - off) > 0) {
1481				if (copy >= left)
1482					copy = left;
1483				if (page != frag->page) {
1484					if (i == MAX_SKB_FRAGS) {
1485						err = -EMSGSIZE;
1486						goto error;
1487					}
1488					get_page(page);
1489					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
1490					frag = &skb_shinfo(skb)->frags[i];
1491				}
1492			} else if(i < MAX_SKB_FRAGS) {
1493				if (copy > PAGE_SIZE)
1494					copy = PAGE_SIZE;
1495				page = alloc_pages(sk->sk_allocation, 0);
1496				if (page == NULL) {
1497					err = -ENOMEM;
1498					goto error;
1499				}
1500				sk->sk_sndmsg_page = page;
1501				sk->sk_sndmsg_off = 0;
1502
1503				skb_fill_page_desc(skb, i, page, 0, 0);
1504				frag = &skb_shinfo(skb)->frags[i];
1505			} else {
1506				err = -EMSGSIZE;
1507				goto error;
1508			}
1509			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
1510				err = -EFAULT;
1511				goto error;
1512			}
1513			sk->sk_sndmsg_off += copy;
1514			frag->size += copy;
1515			skb->len += copy;
1516			skb->data_len += copy;
1517			skb->truesize += copy;
1518			atomic_add(copy, &sk->sk_wmem_alloc);
1519		}
1520		offset += copy;
1521		length -= copy;
1522	}
1523	return 0;
1524error:
1525	cork->length -= length;
1526	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1527	return err;
1528}
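/* --- Editorial sketch (not part of the kernel source) -------------------
 * The maxfraglen computation near the top of ip6_append_data(), with
 * concrete numbers.  fragheaderlen is the unfragmentable part (fixed
 * header plus any per-fragment extension headers); non-final fragment
 * payloads must be multiples of 8, and 8 bytes are held back for the
 * Fragment header itself.
 */
#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int fragheaderlen = 40;	/* bare IPv6 header, no exthdrs */
	unsigned int maxfraglen =
		((mtu - fragheaderlen) & ~7u) + fragheaderlen - 8;

	/* ((1500 - 40) & ~7) + 40 - 8 = 1456 + 40 - 8 = 1488 */
	printf("maxfraglen = %u\n", maxfraglen);
	return 0;
}
/* ----------------------------------------------------------------------- */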
1529
1530static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
1531{
1532	if (np->cork.opt) {
1533		kfree(np->cork.opt->dst0opt);
1534		kfree(np->cork.opt->dst1opt);
1535		kfree(np->cork.opt->hopopt);
1536		kfree(np->cork.opt->srcrt);
1537		kfree(np->cork.opt);
1538		np->cork.opt = NULL;
1539	}
1540
1541	if (inet->cork.base.dst) {
1542		dst_release(inet->cork.base.dst);
1543		inet->cork.base.dst = NULL;
1544		inet->cork.base.flags &= ~IPCORK_ALLFRAG;
1545	}
1546	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
1547}
1548
1549int ip6_push_pending_frames(struct sock *sk)
1550{
1551	struct sk_buff *skb, *tmp_skb;
1552	struct sk_buff **tail_skb;
1553	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
1554	struct inet_sock *inet = inet_sk(sk);
1555	struct ipv6_pinfo *np = inet6_sk(sk);
1556	struct net *net = sock_net(sk);
1557	struct ipv6hdr *hdr;
1558	struct ipv6_txoptions *opt = np->cork.opt;
1559	struct rt6_info *rt = (struct rt6_info *)inet->cork.base.dst;
1560	struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
1561	unsigned char proto = fl6->flowi6_proto;
1562	int err = 0;
1563
1564	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
1565		goto out;
1566	tail_skb = &(skb_shinfo(skb)->frag_list);
1567
1568	/* move skb->data to ip header from ext header */
1569	if (skb->data < skb_network_header(skb))
1570		__skb_pull(skb, skb_network_offset(skb));
1571	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
1572		__skb_pull(tmp_skb, skb_network_header_len(skb));
1573		*tail_skb = tmp_skb;
1574		tail_skb = &(tmp_skb->next);
1575		skb->len += tmp_skb->len;
1576		skb->data_len += tmp_skb->len;
1577		skb->truesize += tmp_skb->truesize;
1578		tmp_skb->destructor = NULL;
1579		tmp_skb->sk = NULL;
1580	}
1581
1582	/* Allow local fragmentation. */
1583	if (np->pmtudisc < IPV6_PMTUDISC_DO)
1584		skb->local_df = 1;
1585
1586	ipv6_addr_copy(final_dst, &fl6->daddr);
1587	__skb_pull(skb, skb_network_header_len(skb));
1588	if (opt && opt->opt_flen)
1589		ipv6_push_frag_opts(skb, opt, &proto);
1590	if (opt && opt->opt_nflen)
1591		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);
1592
1593	skb_push(skb, sizeof(struct ipv6hdr));
1594	skb_reset_network_header(skb);
1595	hdr = ipv6_hdr(skb);
1596
1597	*(__be32*)hdr = fl6->flowlabel |
1598		     htonl(0x60000000 | ((int)np->cork.tclass << 20));
1599
1600	hdr->hop_limit = np->cork.hop_limit;
1601	hdr->nexthdr = proto;
1602	ipv6_addr_copy(&hdr->saddr, &fl6->saddr);
1603	ipv6_addr_copy(&hdr->daddr, final_dst);
1604
1605	skb->priority = sk->sk_priority;
1606	skb->mark = sk->sk_mark;
1607
1608	skb_dst_set(skb, dst_clone(&rt->dst));
1609	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
1610	if (proto == IPPROTO_ICMPV6) {
1611		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
1612
1613		ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type);
1614		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
1615	}
1616
1617	err = ip6_local_out(skb);
1618	if (err) {
1619		if (err > 0)
1620			err = net_xmit_errno(err);
1621		if (err)
1622			goto error;
1623	}
1624
1625out:
1626	ip6_cork_release(inet, np);
1627	return err;
1628error:
1629	IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1630	goto out;
1631}
1632
1633void ip6_flush_pending_frames(struct sock *sk)
1634{
1635	struct sk_buff *skb;
1636
1637	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
1638		if (skb_dst(skb))
1639			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
1640				      IPSTATS_MIB_OUTDISCARDS);
1641		kfree_skb(skb);
1642	}
1643
1644	ip6_cork_release(inet_sk(sk), inet6_sk(sk));
1645}
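/* --- Editorial sketch (not part of the kernel source) -------------------
 * How the three calls above fit together for a datagram sender.  A UDP
 * sendmsg path (cf. udpv6_sendmsg() in net/ipv6/udp.c) follows roughly
 * this shape; the outline below is a hedged illustration, not a verbatim
 * quote:
 *
 *	err = ip6_append_data(sk, getfrag, msg, len, sizeof(struct udphdr),
 *			      hlimit, tclass, opt, &fl6, rt, flags, dontfrag);
 *	if (err)
 *		ip6_flush_pending_frames(sk);		(drop the partial queue)
 *	else if (!(flags & MSG_MORE))
 *		err = ip6_push_pending_frames(sk);	(build header and send)
 */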
v4.10.11
   1/*
   2 *	IPv6 output functions
   3 *	Linux INET6 implementation
   4 *
   5 *	Authors:
   6 *	Pedro Roque		<roque@di.fc.ul.pt>
   7 *
   8 *	Based on linux/net/ipv4/ip_output.c
   9 *
  10 *	This program is free software; you can redistribute it and/or
  11 *      modify it under the terms of the GNU General Public License
  12 *      as published by the Free Software Foundation; either version
  13 *      2 of the License, or (at your option) any later version.
  14 *
  15 *	Changes:
   16 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
  17 *				extension headers are implemented.
  18 *				route changes now work.
  19 *				ip6_forward does not confuse sniffers.
  20 *				etc.
  21 *
  22 *      H. von Brand    :       Added missing #include <linux/string.h>
  23 *	Imran Patel	:	frag id should be in NBO
  24 *      Kazunori MIYAZAWA @USAGI
  25 *			:       add ip6_append_data and related functions
  26 *				for datagram xmit
  27 */
  28
  29#include <linux/errno.h>
  30#include <linux/kernel.h>
  31#include <linux/string.h>
  32#include <linux/socket.h>
  33#include <linux/net.h>
  34#include <linux/netdevice.h>
  35#include <linux/if_arp.h>
  36#include <linux/in6.h>
  37#include <linux/tcp.h>
  38#include <linux/route.h>
  39#include <linux/module.h>
  40#include <linux/slab.h>
  41
  42#include <linux/bpf-cgroup.h>
  43#include <linux/netfilter.h>
  44#include <linux/netfilter_ipv6.h>
  45
  46#include <net/sock.h>
  47#include <net/snmp.h>
  48
  49#include <net/ipv6.h>
  50#include <net/ndisc.h>
  51#include <net/protocol.h>
  52#include <net/ip6_route.h>
  53#include <net/addrconf.h>
  54#include <net/rawv6.h>
  55#include <net/icmp.h>
  56#include <net/xfrm.h>
  57#include <net/checksum.h>
  58#include <linux/mroute6.h>
  59#include <net/l3mdev.h>
  60#include <net/lwtunnel.h>
  61
  62static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
  63{
  64	struct dst_entry *dst = skb_dst(skb);
  65	struct net_device *dev = dst->dev;
  66	struct neighbour *neigh;
  67	struct in6_addr *nexthop;
  68	int ret;
  69
  70	skb->protocol = htons(ETH_P_IPV6);
  71	skb->dev = dev;
  72
  73	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
  74		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
  75
  76		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) &&
  77		    ((mroute6_socket(net, skb) &&
  78		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
  79		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
  80					 &ipv6_hdr(skb)->saddr))) {
  81			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
  82
  83			/* Do not check for IFF_ALLMULTI; multicast routing
  84			   is not supported in any case.
  85			 */
  86			if (newskb)
  87				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
  88					net, sk, newskb, NULL, newskb->dev,
  89					dev_loopback_xmit);
  90
  91			if (ipv6_hdr(skb)->hop_limit == 0) {
  92				IP6_INC_STATS(net, idev,
  93					      IPSTATS_MIB_OUTDISCARDS);
  94				kfree_skb(skb);
  95				return 0;
  96			}
  97		}
  98
  99		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, skb->len);
 100
 101		if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
 102		    IPV6_ADDR_SCOPE_NODELOCAL &&
 103		    !(dev->flags & IFF_LOOPBACK)) {
 104			kfree_skb(skb);
 105			return 0;
 106		}
 107	}
 108
 109	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
 110		int res = lwtunnel_xmit(skb);
 111
 112		if (res < 0 || res == LWTUNNEL_XMIT_DONE)
 113			return res;
 114	}
 115
 116	rcu_read_lock_bh();
 117	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
 118	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
 119	if (unlikely(!neigh))
 120		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
 121	if (!IS_ERR(neigh)) {
 122		ret = dst_neigh_output(dst, neigh, skb);
 123		rcu_read_unlock_bh();
 124		return ret;
 125	}
 126	rcu_read_unlock_bh();
 127
 128	IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
 129	kfree_skb(skb);
 130	return -EINVAL;
 131}
 132
 133static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 134{
 135	int ret;
 136
 137	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
 138	if (ret) {
 139		kfree_skb(skb);
 140		return ret;
 141	}
 142
 143	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
 144	    dst_allfrag(skb_dst(skb)) ||
 145	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
 146		return ip6_fragment(net, sk, skb, ip6_finish_output2);
 147	else
 148		return ip6_finish_output2(net, sk, skb);
 149}
 150
 151int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 152{
 153	struct net_device *dev = skb_dst(skb)->dev;
 154	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
 155
 156	if (unlikely(idev->cnf.disable_ipv6)) {
 157		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
 158		kfree_skb(skb);
 159		return 0;
 160	}
 161
 162	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
 163			    net, sk, skb, NULL, dev,
 164			    ip6_finish_output,
 165			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 166}
 167
 168/*
 169 * xmit an sk_buff (used by TCP, SCTP and DCCP)
 170 * Note : socket lock is not held for SYNACK packets, but might be modified
 171 * by calls to skb_set_owner_w() and ipv6_local_error(),
 172 * which are using proper atomic operations or spinlocks.
 173 */
 174int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 175	     __u32 mark, struct ipv6_txoptions *opt, int tclass)
 176{
 177	struct net *net = sock_net(sk);
 178	const struct ipv6_pinfo *np = inet6_sk(sk);
 179	struct in6_addr *first_hop = &fl6->daddr;
 180	struct dst_entry *dst = skb_dst(skb);
 181	struct ipv6hdr *hdr;
 182	u8  proto = fl6->flowi6_proto;
 183	int seg_len = skb->len;
 184	int hlimit = -1;
 185	u32 mtu;
 186
 187	if (opt) {
 188		unsigned int head_room;
 189
  190		/* First: exthdrs may take lots of space (~8K for now);
 191		   MAX_HEADER is not enough.
 192		 */
 193		head_room = opt->opt_nflen + opt->opt_flen;
 194		seg_len += head_room;
 195		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
 196
 197		if (skb_headroom(skb) < head_room) {
 198			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
 199			if (!skb2) {
 200				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 201					      IPSTATS_MIB_OUTDISCARDS);
 202				kfree_skb(skb);
 203				return -ENOBUFS;
 204			}
 205			consume_skb(skb);
 206			skb = skb2;
 207			/* skb_set_owner_w() changes sk->sk_wmem_alloc atomically,
 208			 * it is safe to call in our context (socket lock not held)
 209			 */
 210			skb_set_owner_w(skb, (struct sock *)sk);
 211		}
 212		if (opt->opt_flen)
 213			ipv6_push_frag_opts(skb, opt, &proto);
 214		if (opt->opt_nflen)
 215			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop,
 216					     &fl6->saddr);
 217	}
 218
 219	skb_push(skb, sizeof(struct ipv6hdr));
 220	skb_reset_network_header(skb);
 221	hdr = ipv6_hdr(skb);
 222
 223	/*
 224	 *	Fill in the IPv6 header
 225	 */
 226	if (np)
 227		hlimit = np->hop_limit;
 228	if (hlimit < 0)
 229		hlimit = ip6_dst_hoplimit(dst);
 230
 231	ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
 232						     np->autoflowlabel, fl6));
 233
 234	hdr->payload_len = htons(seg_len);
 235	hdr->nexthdr = proto;
 236	hdr->hop_limit = hlimit;
 237
 238	hdr->saddr = fl6->saddr;
 239	hdr->daddr = *first_hop;
 240
 241	skb->protocol = htons(ETH_P_IPV6);
 242	skb->priority = sk->sk_priority;
 243	skb->mark = mark;
 244
 245	mtu = dst_mtu(dst);
 246	if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
 247		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
 248			      IPSTATS_MIB_OUT, skb->len);
 249
 250		/* If the egress device is enslaved to an L3 master device,
 251		 * pass the skb to its handler for processing.
 252		 */
 253		skb = l3mdev_ip6_out((struct sock *)sk, skb);
 254		if (unlikely(!skb))
 255			return 0;
 256
 257		/* Hooks should never assume the socket lock is held.
 258		 * We promote our socket to non-const.
 259		 */
 260		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
 261			       net, (struct sock *)sk, skb, NULL, dst->dev,
 262			       dst_output);
 263	}
 264
 265	skb->dev = dst->dev;
 266	/* ipv6_local_error() does not require the socket lock;
 267	 * we promote our socket to non-const.
 268	 */
 269	ipv6_local_error((struct sock *)sk, EMSGSIZE, fl6, mtu);
 270
 271	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
 272	kfree_skb(skb);
 273	return -EMSGSIZE;
 274}
 275EXPORT_SYMBOL(ip6_xmit);
 276
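/*
 * Deliver a Router Alert packet to every raw socket registered for this
 * RA value (IPV6_ROUTER_ALERT).  All matching sockets but the last get a
 * clone; the last one consumes the original skb.  Returns 1 if the
 * packet was delivered to at least one socket, 0 otherwise.
 */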
 277static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
 278{
 279	struct ip6_ra_chain *ra;
 280	struct sock *last = NULL;
 281
 282	read_lock(&ip6_ra_lock);
 283	for (ra = ip6_ra_chain; ra; ra = ra->next) {
 284		struct sock *sk = ra->sk;
 285		if (sk && ra->sel == sel &&
 286		    (!sk->sk_bound_dev_if ||
 287		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
 288			if (last) {
 289				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 290				if (skb2)
 291					rawv6_rcv(last, skb2);
 292			}
 293			last = sk;
 294		}
 295	}
 296
 297	if (last) {
 298		rawv6_rcv(last, skb);
 299		read_unlock(&ip6_ra_lock);
 300		return 1;
 301	}
 302	read_unlock(&ip6_ra_lock);
 303	return 0;
 304}
 305
 306static int ip6_forward_proxy_check(struct sk_buff *skb)
 307{
 308	struct ipv6hdr *hdr = ipv6_hdr(skb);
 309	u8 nexthdr = hdr->nexthdr;
 310	__be16 frag_off;
 311	int offset;
 312
 313	if (ipv6_ext_hdr(nexthdr)) {
 314		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
 315		if (offset < 0)
 316			return 0;
 317	} else
 318		offset = sizeof(struct ipv6hdr);
 319
 320	if (nexthdr == IPPROTO_ICMPV6) {
 321		struct icmp6hdr *icmp6;
 322
 323		if (!pskb_may_pull(skb, (skb_network_header(skb) +
 324					 offset + 1 - skb->data)))
 325			return 0;
 326
 327		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);
 328
 329		switch (icmp6->icmp6_type) {
 330		case NDISC_ROUTER_SOLICITATION:
 331		case NDISC_ROUTER_ADVERTISEMENT:
 332		case NDISC_NEIGHBOUR_SOLICITATION:
 333		case NDISC_NEIGHBOUR_ADVERTISEMENT:
 334		case NDISC_REDIRECT:
 335			/* For a reaction involving a unicast neighbor discovery
 336			 * message destined to the proxied address, pass it to
 337			 * the input function.
 338			 */
 339			return 1;
 340		default:
 341			break;
 342		}
 343	}
 344
 345	/*
 346	 * The proxying router can't forward traffic sent to a link-local
 347	 * address, so signal the sender and discard the packet. This
 348	 * behavior is clarified by the MIPv6 specification.
 349	 */
 350	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
 351		dst_link_failure(skb);
 352		return -1;
 353	}
 354
 355	return 0;
 356}
 357
 358static inline int ip6_forward_finish(struct net *net, struct sock *sk,
 359				     struct sk_buff *skb)
 360{
 361	return dst_output(net, sk, skb);
 362}
 363
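/*
 * MTU to honour when forwarding: a locked RTAX_MTU route metric wins;
 * otherwise use the egress device's IPv6 MTU (mtu6), falling back to
 * IPV6_MIN_MTU if the device has no inet6 state.
 */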
 364static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
 365{
 366	unsigned int mtu;
 367	struct inet6_dev *idev;
 368
 369	if (dst_metric_locked(dst, RTAX_MTU)) {
 370		mtu = dst_metric_raw(dst, RTAX_MTU);
 371		if (mtu)
 372			return mtu;
 373	}
 374
 375	mtu = IPV6_MIN_MTU;
 376	rcu_read_lock();
 377	idev = __in6_dev_get(dst->dev);
 378	if (idev)
 379		mtu = idev->cnf.mtu6;
 380	rcu_read_unlock();
 381
 382	return mtu;
 383}
 384
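/*
 * Decide whether a forwarded packet must bounce with ICMPV6_PKT_TOOBIG:
 * oversized packets are still let through when ignore_df is set or when
 * GSO can segment them to fit the MTU.
 */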
 385static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 386{
 387	if (skb->len <= mtu)
 388		return false;
 389
 390	/* ipv6 conntrack defrag sets max_frag_size + ignore_df */
 391	if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
 392		return true;
 393
 394	if (skb->ignore_df)
 395		return false;
 396
 397	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
 398		return false;
 399
 400	return true;
 401}
 402
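/*
 * The forwarding path proper: check that forwarding is enabled and the
 * packet is eligible (hop limit, xfrm policy, proxy NDP), emit a
 * redirect when the packet would leave on the interface it arrived on,
 * enforce the path MTU, then decrement the hop limit and run the packet
 * through the NF_INET_FORWARD hook to ip6_forward_finish().
 */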
 403int ip6_forward(struct sk_buff *skb)
 404{
 405	struct dst_entry *dst = skb_dst(skb);
 406	struct ipv6hdr *hdr = ipv6_hdr(skb);
 407	struct inet6_skb_parm *opt = IP6CB(skb);
 408	struct net *net = dev_net(dst->dev);
 409	u32 mtu;
 410
 411	if (net->ipv6.devconf_all->forwarding == 0)
 412		goto error;
 413
 414	if (skb->pkt_type != PACKET_HOST)
 415		goto drop;
 416
 417	if (unlikely(skb->sk))
 418		goto drop;
 419
 420	if (skb_warn_if_lro(skb))
 421		goto drop;
 422
 423	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
 424		__IP6_INC_STATS(net, ip6_dst_idev(dst),
 425				IPSTATS_MIB_INDISCARDS);
 426		goto drop;
 427	}
 428
 429	skb_forward_csum(skb);
 430
 431	/*
 432	 *	We DO NOT do any processing on
 433	 *	RA packets; we push them to user level AS IS
 434	 *	without any WARRANTY that the application will be
 435	 *	able to interpret them. The reason is that we
 436	 *	cannot do anything clever here.
 437	 *
 438	 *	We are not an end-node, so if the packet contains
 439	 *	AH/ESP we cannot do anything.
 440	 *	Defragmentation would also be a mistake; RA packets
 441	 *	cannot be fragmented, because there is no guarantee
 442	 *	that different fragments will go along one path. --ANK
 443	 */
 444	if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
 445		if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
 446			return 0;
 447	}
 448
 449	/*
 450	 *	check and decrement ttl
 451	 */
 452	if (hdr->hop_limit <= 1) {
 453		/* Force the output device to be used for the source address */
 454		skb->dev = dst->dev;
 455		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
 456		__IP6_INC_STATS(net, ip6_dst_idev(dst),
 457				IPSTATS_MIB_INHDRERRORS);
 458
 459		kfree_skb(skb);
 460		return -ETIMEDOUT;
 461	}
 462
 463	/* XXX: idev->cnf.proxy_ndp? */
 464	if (net->ipv6.devconf_all->proxy_ndp &&
 465	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
 466		int proxied = ip6_forward_proxy_check(skb);
 467		if (proxied > 0)
 468			return ip6_input(skb);
 469		else if (proxied < 0) {
 470			__IP6_INC_STATS(net, ip6_dst_idev(dst),
 471					IPSTATS_MIB_INDISCARDS);
 472			goto drop;
 473		}
 474	}
 475
 476	if (!xfrm6_route_forward(skb)) {
 477		__IP6_INC_STATS(net, ip6_dst_idev(dst),
 478				IPSTATS_MIB_INDISCARDS);
 479		goto drop;
 480	}
 481	dst = skb_dst(skb);
 482
 483	/* IPv6 specs say nothing about it, but it is clear that we cannot
 484	   send redirects to source routed frames.
 485	   We don't send redirects to frames decapsulated from IPsec.
 486	 */
 487	if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
 488		struct in6_addr *target = NULL;
 489		struct inet_peer *peer;
 490		struct rt6_info *rt;
 491
 492		/*
 493		 *	incoming and outgoing devices are the same;
 494		 *	send a redirect.
 495		 */
 496
 497		rt = (struct rt6_info *) dst;
 498		if (rt->rt6i_flags & RTF_GATEWAY)
 499			target = &rt->rt6i_gateway;
 500		else
 501			target = &hdr->daddr;
 502
 503		peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr, 1);
 504
 505		/* Limit redirects both by destination (here)
 506		   and by source (inside ndisc_send_redirect)
 507		 */
 508		if (inet_peer_xrlim_allow(peer, 1*HZ))
 509			ndisc_send_redirect(skb, target);
 510		if (peer)
 511			inet_putpeer(peer);
 512	} else {
 513		int addrtype = ipv6_addr_type(&hdr->saddr);
 514
 515		/* This check is security critical. */
 516		if (addrtype == IPV6_ADDR_ANY ||
 517		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
 518			goto error;
 519		if (addrtype & IPV6_ADDR_LINKLOCAL) {
 520			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
 521				    ICMPV6_NOT_NEIGHBOUR, 0);
 522			goto error;
 523		}
 524	}
 525
 526	mtu = ip6_dst_mtu_forward(dst);
 527	if (mtu < IPV6_MIN_MTU)
 528		mtu = IPV6_MIN_MTU;
 529
 530	if (ip6_pkt_too_big(skb, mtu)) {
 531		/* Again, force the output device to be used for the source address */
 532		skb->dev = dst->dev;
 533		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 534		__IP6_INC_STATS(net, ip6_dst_idev(dst),
 535				IPSTATS_MIB_INTOOBIGERRORS);
 536		__IP6_INC_STATS(net, ip6_dst_idev(dst),
 537				IPSTATS_MIB_FRAGFAILS);
 538		kfree_skb(skb);
 539		return -EMSGSIZE;
 540	}
 541
 542	if (skb_cow(skb, dst->dev->hard_header_len)) {
 543		__IP6_INC_STATS(net, ip6_dst_idev(dst),
 544				IPSTATS_MIB_OUTDISCARDS);
 545		goto drop;
 546	}
 547
 548	hdr = ipv6_hdr(skb);
 549
 550	/* Mangling the hop limit is delayed until after the skb COW */
 551
 552	hdr->hop_limit--;
 553
 554	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
 555	__IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
 556	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
 557		       net, NULL, skb, skb->dev, dst->dev,
 558		       ip6_forward_finish);
 559
 560error:
 561	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
 562drop:
 563	kfree_skb(skb);
 564	return -EINVAL;
 565}
 566
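/*
 * Propagate per-packet metadata (packet type, priority, dst, netfilter
 * and security state) from the original skb to a freshly built fragment.
 */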
 567static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
 568{
 569	to->pkt_type = from->pkt_type;
 570	to->priority = from->priority;
 571	to->protocol = from->protocol;
 572	skb_dst_drop(to);
 573	skb_dst_set(to, dst_clone(skb_dst(from)));
 574	to->dev = from->dev;
 575	to->mark = from->mark;
 576
 577#ifdef CONFIG_NET_SCHED
 578	to->tc_index = from->tc_index;
 579#endif
 580	nf_copy(to, from);
 581	skb_copy_secmark(to, from);
 582}
 583
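/*
 * Split an over-MTU packet into fragments and feed each one to @output.
 * The fast path reuses a well-formed frag_list, inserting a fragment
 * header into each element in place; anything else takes the slow path
 * below, which allocates a new skb per fragment and copies the data.
 */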
 584int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 585		 int (*output)(struct net *, struct sock *, struct sk_buff *))
 586{
 587	struct sk_buff *frag;
 588	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
 589	struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
 590				inet6_sk(skb->sk) : NULL;
 591	struct ipv6hdr *tmp_hdr;
 592	struct frag_hdr *fh;
 593	unsigned int mtu, hlen, left, len;
 594	int hroom, troom;
 595	__be32 frag_id;
 596	int ptr, offset = 0, err = 0;
 597	u8 *prevhdr, nexthdr = 0;
 598
 599	hlen = ip6_find_1stfragopt(skb, &prevhdr);
 600	nexthdr = *prevhdr;
 601
 602	mtu = ip6_skb_dst_mtu(skb);
 603
 604	/* We must not fragment if the socket is set to force MTU discovery
 605	 * or if the skb is not generated by a local socket.
 606	 */
 607	if (unlikely(!skb->ignore_df && skb->len > mtu))
 608		goto fail_toobig;
 609
 610	if (IP6CB(skb)->frag_max_size) {
 611		if (IP6CB(skb)->frag_max_size > mtu)
 612			goto fail_toobig;
 613
 614		/* don't send fragments larger than what we received */
 615		mtu = IP6CB(skb)->frag_max_size;
 616		if (mtu < IPV6_MIN_MTU)
 617			mtu = IPV6_MIN_MTU;
 618	}
 619
 620	if (np && np->frag_size < mtu) {
 621		if (np->frag_size)
 622			mtu = np->frag_size;
 623	}
 624	if (mtu < hlen + sizeof(struct frag_hdr) + 8)
 625		goto fail_toobig;
 626	mtu -= hlen + sizeof(struct frag_hdr);
 627
 628	frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
 629				    &ipv6_hdr(skb)->saddr);
 630
 631	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 632	    (err = skb_checksum_help(skb)))
 633		goto fail;
 634
 635	hroom = LL_RESERVED_SPACE(rt->dst.dev);
 636	if (skb_has_frag_list(skb)) {
 637		unsigned int first_len = skb_pagelen(skb);
 638		struct sk_buff *frag2;
 639
 640		if (first_len - hlen > mtu ||
 641		    ((first_len - hlen) & 7) ||
 642		    skb_cloned(skb) ||
 643		    skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
 644			goto slow_path;
 645
 646		skb_walk_frags(skb, frag) {
 647			/* Correct geometry. */
 648			if (frag->len > mtu ||
 649			    ((frag->len & 7) && frag->next) ||
 650			    skb_headroom(frag) < (hlen + hroom + sizeof(struct frag_hdr)))
 651				goto slow_path_clean;
 652
 653			/* Partially cloned skb? */
 654			if (skb_shared(frag))
 655				goto slow_path_clean;
 656
 657			BUG_ON(frag->sk);
 658			if (skb->sk) {
 659				frag->sk = skb->sk;
 660				frag->destructor = sock_wfree;
 661			}
 662			skb->truesize -= frag->truesize;
 663		}
 664
 665		err = 0;
 666		offset = 0;
 667		/* BUILD HEADER */
 668
 669		*prevhdr = NEXTHDR_FRAGMENT;
 670		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
 671		if (!tmp_hdr) {
 672			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 673				      IPSTATS_MIB_FRAGFAILS);
 674			err = -ENOMEM;
 675			goto fail;
 676		}
 677		frag = skb_shinfo(skb)->frag_list;
 678		skb_frag_list_init(skb);
 679
 680		__skb_pull(skb, hlen);
 681		fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
 682		__skb_push(skb, hlen);
 683		skb_reset_network_header(skb);
 684		memcpy(skb_network_header(skb), tmp_hdr, hlen);
 685
 686		fh->nexthdr = nexthdr;
 687		fh->reserved = 0;
 688		fh->frag_off = htons(IP6_MF);
 689		fh->identification = frag_id;
 690
 691		first_len = skb_pagelen(skb);
 692		skb->data_len = first_len - skb_headlen(skb);
 693		skb->len = first_len;
 694		ipv6_hdr(skb)->payload_len = htons(first_len -
 695						   sizeof(struct ipv6hdr));
 696
 697		dst_hold(&rt->dst);
 698
 699		for (;;) {
 700			/* Prepare the header of the next frame
 701			 * before the previous one goes down. */
 702			if (frag) {
 703				frag->ip_summed = CHECKSUM_NONE;
 704				skb_reset_transport_header(frag);
 705				fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
 706				__skb_push(frag, hlen);
 707				skb_reset_network_header(frag);
 708				memcpy(skb_network_header(frag), tmp_hdr,
 709				       hlen);
 710				offset += skb->len - hlen - sizeof(struct frag_hdr);
 711				fh->nexthdr = nexthdr;
 712				fh->reserved = 0;
 713				fh->frag_off = htons(offset);
 714				if (frag->next)
 715					fh->frag_off |= htons(IP6_MF);
 716				fh->identification = frag_id;
 717				ipv6_hdr(frag)->payload_len =
 718						htons(frag->len -
 719						      sizeof(struct ipv6hdr));
 720				ip6_copy_metadata(frag, skb);
 721			}
 722
 723			err = output(net, sk, skb);
 724			if (!err)
 725				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
 726					      IPSTATS_MIB_FRAGCREATES);
 727
 728			if (err || !frag)
 729				break;
 730
 731			skb = frag;
 732			frag = skb->next;
 733			skb->next = NULL;
 734		}
 735
 736		kfree(tmp_hdr);
 737
 738		if (err == 0) {
 739			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
 740				      IPSTATS_MIB_FRAGOKS);
 741			ip6_rt_put(rt);
 742			return 0;
 743		}
 744
 745		kfree_skb_list(frag);
 746
 747		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
 748			      IPSTATS_MIB_FRAGFAILS);
 749		ip6_rt_put(rt);
 750		return err;
 751
 752slow_path_clean:
 753		skb_walk_frags(skb, frag2) {
 754			if (frag2 == frag)
 755				break;
 756			frag2->sk = NULL;
 757			frag2->destructor = NULL;
 758			skb->truesize += frag2->truesize;
 759		}
 760	}
 761
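/*
 * Slow path: allocate a new skb for every fragment and copy the
 * replicated header plus a block of payload into each one.
 */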
 762slow_path:
 763	left = skb->len - hlen;		/* Space per frame */
 764	ptr = hlen;			/* Where to start from */
 765
 766	/*
 767	 *	Fragment the datagram.
 768	 */
 769
 770	troom = rt->dst.dev->needed_tailroom;
 771
 772	/*
 773	 *	Keep copying data until we run out.
 774	 */
 775	while (left > 0)	{
 776		u8 *fragnexthdr_offset;
 777
 778		len = left;
 779		/* IF: it doesn't fit, use 'mtu' - the data space left */
 780		if (len > mtu)
 781			len = mtu;
 782		/* IF: we are not sending up to and including the packet end,
 783		   then align the next start on an eight-byte boundary */
 784		if (len < left)	{
 785			len &= ~7;
 786		}
 787
 788		/* Allocate buffer */
 789		frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
 790				 hroom + troom, GFP_ATOMIC);
 791		if (!frag) {
 792			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 793				      IPSTATS_MIB_FRAGFAILS);
 794			err = -ENOMEM;
 795			goto fail;
 796		}
 797
 798		/*
 799		 *	Set up data on packet
 800		 */
 801
 802		ip6_copy_metadata(frag, skb);
 803		skb_reserve(frag, hroom);
 804		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
 805		skb_reset_network_header(frag);
 806		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
 807		frag->transport_header = (frag->network_header + hlen +
 808					  sizeof(struct frag_hdr));
 809
 810		/*
 811		 *	Charge the memory for the fragment to any owner
 812		 *	it might possess
 813		 */
 814		if (skb->sk)
 815			skb_set_owner_w(frag, skb->sk);
 816
 817		/*
 818		 *	Copy the packet header into the new buffer.
 819		 */
 820		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
 821
 822		fragnexthdr_offset = skb_network_header(frag);
 823		fragnexthdr_offset += prevhdr - skb_network_header(skb);
 824		*fragnexthdr_offset = NEXTHDR_FRAGMENT;
 825
 826		/*
 827		 *	Build fragment header.
 828		 */
 829		fh->nexthdr = nexthdr;
 830		fh->reserved = 0;
 831		fh->identification = frag_id;
 832
 833		/*
 834		 *	Copy a block of the IP datagram.
 835		 */
 836		BUG_ON(skb_copy_bits(skb, ptr, skb_transport_header(frag),
 837				     len));
 838		left -= len;
 839
 840		fh->frag_off = htons(offset);
 841		if (left > 0)
 842			fh->frag_off |= htons(IP6_MF);
 843		ipv6_hdr(frag)->payload_len = htons(frag->len -
 844						    sizeof(struct ipv6hdr));
 845
 846		ptr += len;
 847		offset += len;
 848
 849		/*
 850		 *	Put this fragment into the sending queue.
 851		 */
 852		err = output(net, sk, frag);
 853		if (err)
 854			goto fail;
 855
 856		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 857			      IPSTATS_MIB_FRAGCREATES);
 858	}
 859	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 860		      IPSTATS_MIB_FRAGOKS);
 861	consume_skb(skb);
 862	return err;
 863
 864fail_toobig:
 865	if (skb->sk && dst_allfrag(skb_dst(skb)))
 866		sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
 867
 868	skb->dev = skb_dst(skb)->dev;
 869	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 870	err = -EMSGSIZE;
 871
 872fail:
 873	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 874		      IPSTATS_MIB_FRAGFAILS);
 875	kfree_skb(skb);
 876	return err;
 877}
 878
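/*
 * Non-zero when the cached route is stale for this flow: the route is
 * not an exact (/128) host match for the flow address and the socket's
 * cached peer address does not match it either.
 */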
 879static inline int ip6_rt_check(const struct rt6key *rt_key,
 880			       const struct in6_addr *fl_addr,
 881			       const struct in6_addr *addr_cache)
 882{
 883	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
 884		(!addr_cache || !ipv6_addr_equal(fl_addr, addr_cache));
 885}
 886
 887static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
 888					  struct dst_entry *dst,
 889					  const struct flowi6 *fl6)
 890{
 891	struct ipv6_pinfo *np = inet6_sk(sk);
 892	struct rt6_info *rt;
 893
 894	if (!dst)
 895		goto out;
 896
 897	if (dst->ops->family != AF_INET6) {
 898		dst_release(dst);
 899		return NULL;
 900	}
 901
 902	rt = (struct rt6_info *)dst;
 903	/* Yes, checking route validity in the not-connected
 904	 * case is not very simple. Take into account
 905	 * that we do not support routing by source, TOS,
 906	 * and MSG_DONTROUTE		--ANK (980726)
 907	 *
 908	 * 1. ip6_rt_check(): If the route was a host route,
 909	 *    check that the cached destination is current.
 910	 *    If it is a network route, we may still
 911	 *    check its validity using a saved pointer
 912	 *    to the last used address: daddr_cache.
 913	 *    We do not want to save the whole address now,
 914	 *    (because the main consumer of this service
 915	 *    is tcp, which does not have this problem),
 916	 *    so the last trick works only on connected
 917	 *    sockets.
 918	 * 2. The oif should also be the same.
 919	 */
 920	if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
 921#ifdef CONFIG_IPV6_SUBTREES
 922	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
 923#endif
 924	   (!(fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) &&
 925	      (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex))) {
 926		dst_release(dst);
 927		dst = NULL;
 928	}
 929
 930out:
 931	return dst;
 932}
 933
 934static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
 935			       struct dst_entry **dst, struct flowi6 *fl6)
 936{
 937#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
 938	struct neighbour *n;
 939	struct rt6_info *rt;
 940#endif
 941	int err;
 942	int flags = 0;
 943
 944	/* The correct way to handle this would be to do
 945	 * ip6_route_get_saddr, and then ip6_route_output; however,
 946	 * the route-specific preferred source forces the
 947	 * ip6_route_output call _before_ ip6_route_get_saddr.
 948	 *
 949	 * In source specific routing (no src=any default route),
 950	 * ip6_route_output will fail given src=any saddr, though, so
 951	 * that's why we try it again later.
 952	 */
 953	if (ipv6_addr_any(&fl6->saddr) && (!*dst || !(*dst)->error)) {
 954		struct rt6_info *rt;
 955		bool had_dst = *dst != NULL;
 956
 957		if (!had_dst)
 958			*dst = ip6_route_output(net, sk, fl6);
 959		rt = (*dst)->error ? NULL : (struct rt6_info *)*dst;
 960		err = ip6_route_get_saddr(net, rt, &fl6->daddr,
 961					  sk ? inet6_sk(sk)->srcprefs : 0,
 962					  &fl6->saddr);
 963		if (err)
 964			goto out_err_release;
 965
 966		/* If we had an erroneous initial result, pretend it
 967		 * never existed and let the SA-enabled version take
 968		 * over.
 969		 */
 970		if (!had_dst && (*dst)->error) {
 971			dst_release(*dst);
 972			*dst = NULL;
 973		}
 974
 975		if (fl6->flowi6_oif)
 976			flags |= RT6_LOOKUP_F_IFACE;
 977	}
 978
 979	if (!*dst)
 980		*dst = ip6_route_output_flags(net, sk, fl6, flags);
 981
 982	err = (*dst)->error;
 983	if (err)
 984		goto out_err_release;
 985
 986#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
 987	/*
 988	 * Here if the dst entry we've looked up
 989	 * has a neighbour entry that is in the INCOMPLETE
 990	 * state and the src address from the flow is
 991	 * marked as OPTIMISTIC, we release the found
 992	 * dst entry and replace it instead with the
 993	 * dst entry of the nexthop router
 994	 */
 995	rt = (struct rt6_info *) *dst;
 996	rcu_read_lock_bh();
 997	n = __ipv6_neigh_lookup_noref(rt->dst.dev,
 998				      rt6_nexthop(rt, &fl6->daddr));
 999	err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
1000	rcu_read_unlock_bh();
1001
1002	if (err) {
1003		struct inet6_ifaddr *ifp;
1004		struct flowi6 fl_gw6;
1005		int redirect;
1006
1007		ifp = ipv6_get_ifaddr(net, &fl6->saddr,
1008				      (*dst)->dev, 1);
1009
1010		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
1011		if (ifp)
1012			in6_ifa_put(ifp);
1013
1014		if (redirect) {
1015			/*
1016			 * We need to get the dst entry for the
1017			 * default router instead
1018			 */
1019			dst_release(*dst);
1020			memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
1021			memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
1022			*dst = ip6_route_output(net, sk, &fl_gw6);
1023			err = (*dst)->error;
1024			if (err)
1025				goto out_err_release;
1026		}
1027	}
1028#endif
1029	if (ipv6_addr_v4mapped(&fl6->saddr) &&
1030	    !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) {
1031		err = -EAFNOSUPPORT;
1032		goto out_err_release;
1033	}
1034
1035	return 0;
1036
1037out_err_release:
1038	dst_release(*dst);
1039	*dst = NULL;
1040
1041	if (err == -ENETUNREACH)
1042		IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
1043	return err;
1044}
1045
1046/**
1047 *	ip6_dst_lookup - perform route lookup on flow
1048 *	@sk: socket which provides route info
1049 *	@dst: pointer to dst_entry * for result
1050 *	@fl6: flow to lookup
1051 *
1052 *	This function performs a route lookup on the given flow.
1053 *
1054 *	It returns zero on success, or a standard errno code on error.
1055 */
1056int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
1057		   struct flowi6 *fl6)
1058{
1059	*dst = NULL;
1060	return ip6_dst_lookup_tail(net, sk, dst, fl6);
1061}
1062EXPORT_SYMBOL_GPL(ip6_dst_lookup);
1063
1064/**
1065 *	ip6_dst_lookup_flow - perform route lookup on flow with ipsec
1066 *	@sk: socket which provides route info
1067 *	@fl6: flow to lookup
1068 *	@final_dst: final destination address for ipsec lookup
1069 *
1070 *	This function performs a route lookup on the given flow.
1071 *
1072 *	It returns a valid dst pointer on success, or a pointer encoded
1073 *	error code.
1074 */
1075struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
1076				      const struct in6_addr *final_dst)
1077{
1078	struct dst_entry *dst = NULL;
1079	int err;
1080
1081	err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6);
1082	if (err)
1083		return ERR_PTR(err);
1084	if (final_dst)
1085		fl6->daddr = *final_dst;
1086
1087	return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
1088}
1089EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
1090
1091/**
1092 *	ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
1093 *	@sk: socket which provides the dst cache and route info
1094 *	@fl6: flow to lookup
1095 *	@final_dst: final destination address for ipsec lookup
1096 *
1097 *	This function performs a route lookup on the given flow with the
1098 *	possibility of using the cached route in the socket if it is valid.
1099 *	It will take the socket dst lock when operating on the dst cache.
1100 *	As a result, this function can only be used in process context.
1101 *
1102 *	It returns a valid dst pointer on success, or a pointer encoded
1103 *	error code.
1104 */
1105struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
1106					 const struct in6_addr *final_dst)
1107{
1108	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
1109
1110	dst = ip6_sk_dst_check(sk, dst, fl6);
1111	if (!dst)
1112		dst = ip6_dst_lookup_flow(sk, fl6, final_dst);
1113
1114	return dst;
1115}
1116EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
1117
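/*
 * UDP fragmentation offload: rather than fragmenting in software, build
 * one large skb marked SKB_GSO_UDP and let the device split it into
 * 8-byte-aligned chunks of gso_size payload bytes.
 */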
1118static inline int ip6_ufo_append_data(struct sock *sk,
1119			struct sk_buff_head *queue,
1120			int getfrag(void *from, char *to, int offset, int len,
1121			int odd, struct sk_buff *skb),
1122			void *from, int length, int hh_len, int fragheaderlen,
1123			int exthdrlen, int transhdrlen, int mtu,
1124			unsigned int flags, const struct flowi6 *fl6)
1125
1126{
1127	struct sk_buff *skb;
1128	int err;
1129
1130	/* The network device supports UDP large send offload,
1131	 * so create one single skb containing the complete
1132	 * UDP datagram.
1133	 */
1134	skb = skb_peek_tail(queue);
1135	if (!skb) {
1136		skb = sock_alloc_send_skb(sk,
1137			hh_len + fragheaderlen + transhdrlen + 20,
1138			(flags & MSG_DONTWAIT), &err);
1139		if (!skb)
1140			return err;
1141
1142		/* reserve space for Hardware header */
1143		skb_reserve(skb, hh_len);
1144
1145		/* create space for UDP/IP header */
1146		skb_put(skb, fragheaderlen + transhdrlen);
1147
1148		/* initialize network header pointer */
1149		skb_set_network_header(skb, exthdrlen);
1150
1151		/* initialize protocol header pointer */
1152		skb->transport_header = skb->network_header + fragheaderlen;
1153
1154		skb->protocol = htons(ETH_P_IPV6);
1155		skb->csum = 0;
1156
1157		__skb_queue_tail(queue, skb);
1158	} else if (skb_is_gso(skb)) {
1159		goto append;
1160	}
1161
1162	skb->ip_summed = CHECKSUM_PARTIAL;
1163	/* Specify the length of each IPv6 datagram fragment.
1164	 * It has to be a multiple of 8.
1165	 */
1166	skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
1167				     sizeof(struct frag_hdr)) & ~7;
1168	skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1169	skb_shinfo(skb)->ip6_frag_id = ipv6_select_ident(sock_net(sk),
1170							 &fl6->daddr,
1171							 &fl6->saddr);
1172
1173append:
1174	return skb_append_datato_frags(sk, skb, getfrag, from,
1175				       (length - transhdrlen));
1176}
1177
1178static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
1179					       gfp_t gfp)
1180{
1181	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1182}
1183
1184static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
1185						gfp_t gfp)
1186{
1187	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1188}
1189
1190static void ip6_append_data_mtu(unsigned int *mtu,
1191				int *maxfraglen,
1192				unsigned int fragheaderlen,
1193				struct sk_buff *skb,
1194				struct rt6_info *rt,
1195				unsigned int orig_mtu)
1196{
1197	if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
1198		if (!skb) {
1199			/* first fragment, reserve header_len */
1200			*mtu = orig_mtu - rt->dst.header_len;
1201
1202		} else {
1203			/*
1204			 * this fragment is not the first; the header
1205			 * space is regarded as data space.
1206			 */
1207			*mtu = orig_mtu;
1208		}
1209		*maxfraglen = ((*mtu - fragheaderlen) & ~7)
1210			      + fragheaderlen - sizeof(struct frag_hdr);
1211	}
1212}
1213
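/*
 * Prepare the cork for an append sequence: duplicate the tx options so
 * they outlive the caller, pin the route, and record the hop limit,
 * traffic class and fragmentation MTU used for the whole corked message.
 */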
1214static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
1215			  struct inet6_cork *v6_cork, struct ipcm6_cookie *ipc6,
1216			  struct rt6_info *rt, struct flowi6 *fl6)
1217{
1218	struct ipv6_pinfo *np = inet6_sk(sk);
1219	unsigned int mtu;
1220	struct ipv6_txoptions *opt = ipc6->opt;
1221
1222	/*
1223	 * setup for corking
1224	 */
1225	if (opt) {
1226		if (WARN_ON(v6_cork->opt))
1227			return -EINVAL;
1228
1229		v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation);
1230		if (unlikely(!v6_cork->opt))
1231			return -ENOBUFS;
1232
1233		v6_cork->opt->tot_len = opt->tot_len;
1234		v6_cork->opt->opt_flen = opt->opt_flen;
1235		v6_cork->opt->opt_nflen = opt->opt_nflen;
1236
1237		v6_cork->opt->dst0opt = ip6_opt_dup(opt->dst0opt,
1238						    sk->sk_allocation);
1239		if (opt->dst0opt && !v6_cork->opt->dst0opt)
1240			return -ENOBUFS;
1241
1242		v6_cork->opt->dst1opt = ip6_opt_dup(opt->dst1opt,
1243						    sk->sk_allocation);
1244		if (opt->dst1opt && !v6_cork->opt->dst1opt)
1245			return -ENOBUFS;
1246
1247		v6_cork->opt->hopopt = ip6_opt_dup(opt->hopopt,
1248						   sk->sk_allocation);
1249		if (opt->hopopt && !v6_cork->opt->hopopt)
1250			return -ENOBUFS;
1251
1252		v6_cork->opt->srcrt = ip6_rthdr_dup(opt->srcrt,
1253						    sk->sk_allocation);
1254		if (opt->srcrt && !v6_cork->opt->srcrt)
1255			return -ENOBUFS;
1256
1257		/* need source address above --miyazawa */
1258	}
1259	dst_hold(&rt->dst);
1260	cork->base.dst = &rt->dst;
1261	cork->fl.u.ip6 = *fl6;
1262	v6_cork->hop_limit = ipc6->hlimit;
1263	v6_cork->tclass = ipc6->tclass;
1264	if (rt->dst.flags & DST_XFRM_TUNNEL)
1265		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1266		      rt->dst.dev->mtu : dst_mtu(&rt->dst);
1267	else
1268		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1269		      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
1270	if (np->frag_size < mtu) {
1271		if (np->frag_size)
1272			mtu = np->frag_size;
1273	}
1274	cork->base.fragsize = mtu;
1275	if (dst_allfrag(rt->dst.path))
1276		cork->base.flags |= IPCORK_ALLFRAG;
1277	cork->base.length = 0;
1278
1279	return 0;
1280}
1281
1282static int __ip6_append_data(struct sock *sk,
1283			     struct flowi6 *fl6,
1284			     struct sk_buff_head *queue,
1285			     struct inet_cork *cork,
1286			     struct inet6_cork *v6_cork,
1287			     struct page_frag *pfrag,
1288			     int getfrag(void *from, char *to, int offset,
1289					 int len, int odd, struct sk_buff *skb),
1290			     void *from, int length, int transhdrlen,
1291			     unsigned int flags, struct ipcm6_cookie *ipc6,
1292			     const struct sockcm_cookie *sockc)
1293{
1294	struct sk_buff *skb, *skb_prev = NULL;
1295	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
1296	int exthdrlen = 0;
1297	int dst_exthdrlen = 0;
1298	int hh_len;
1299	int copy;
1300	int err;
1301	int offset = 0;
1302	__u8 tx_flags = 0;
1303	u32 tskey = 0;
1304	struct rt6_info *rt = (struct rt6_info *)cork->dst;
1305	struct ipv6_txoptions *opt = v6_cork->opt;
1306	int csummode = CHECKSUM_NONE;
1307	unsigned int maxnonfragsize, headersize;
1308
1309	skb = skb_peek_tail(queue);
1310	if (!skb) {
1311		exthdrlen = opt ? opt->opt_flen : 0;
1312		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
1313	}
1314
1315	mtu = cork->fragsize;
1316	orig_mtu = mtu;
1317
1318	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
1319
1320	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
1321			(opt ? opt->opt_nflen : 0);
1322	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
1323		     sizeof(struct frag_hdr);
1324
1325	headersize = sizeof(struct ipv6hdr) +
1326		     (opt ? opt->opt_flen + opt->opt_nflen : 0) +
1327		     (dst_allfrag(&rt->dst) ?
1328		      sizeof(struct frag_hdr) : 0) +
1329		     rt->rt6i_nfheader_len;
1330
1331	if (cork->length + length > mtu - headersize && ipc6->dontfrag &&
1332	    (sk->sk_protocol == IPPROTO_UDP ||
1333	     sk->sk_protocol == IPPROTO_RAW)) {
1334		ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
1335				sizeof(struct ipv6hdr));
1336		goto emsgsize;
1337	}
1338
1339	if (ip6_sk_ignore_df(sk))
1340		maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
1341	else
1342		maxnonfragsize = mtu;
1343
1344	if (cork->length + length > maxnonfragsize - headersize) {
1345emsgsize:
1346		ipv6_local_error(sk, EMSGSIZE, fl6,
1347				 mtu - headersize +
1348				 sizeof(struct ipv6hdr));
1349		return -EMSGSIZE;
1350	}
1351
1352	/* CHECKSUM_PARTIAL only with no extension headers and when
1353	 * we are not going to fragment
1354	 */
1355	if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
1356	    headersize == sizeof(struct ipv6hdr) &&
1357	    length <= mtu - headersize &&
1358	    !(flags & MSG_MORE) &&
1359	    rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
1360		csummode = CHECKSUM_PARTIAL;
1361
1362	if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) {
1363		sock_tx_timestamp(sk, sockc->tsflags, &tx_flags);
1364		if (tx_flags & SKBTX_ANY_SW_TSTAMP &&
1365		    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
1366			tskey = sk->sk_tskey++;
1367	}
1368
1369	/*
1370	 * Let's try using as much space as possible.
1371	 * Use MTU if total length of the message fits into the MTU.
1372	 * Otherwise, we need to reserve fragment header and
1373	 * fragment alignment (= 8-15 octets, in total).
1374	 *
1375	 * Note that we may need to "move" the data from the tail
1376	 * of the buffer to the new fragment when we split
1377	 * the message.
1378	 *
1379	 * FIXME: It may be fragmented into multiple chunks
1380	 *        at once if non-fragmentable extension headers
1381	 *        are too large.
1382	 * --yoshfuji
1383	 */
1384
1385	cork->length += length;
1386	if ((((length + fragheaderlen) > mtu) ||
1387	     (skb && skb_is_gso(skb))) &&
1388	    (sk->sk_protocol == IPPROTO_UDP) &&
1389	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
1390	    (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
1391		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
1392					  hh_len, fragheaderlen, exthdrlen,
1393					  transhdrlen, mtu, flags, fl6);
1394		if (err)
1395			goto error;
1396		return 0;
1397	}
1398
1399	if (!skb)
1400		goto alloc_new_skb;
1401
1402	while (length > 0) {
1403		/* Check if the remaining data fits into the current packet. */
1404		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
1405		if (copy < length)
1406			copy = maxfraglen - skb->len;
1407
1408		if (copy <= 0) {
1409			char *data;
1410			unsigned int datalen;
1411			unsigned int fraglen;
1412			unsigned int fraggap;
1413			unsigned int alloclen;
1414alloc_new_skb:
1415			/* There's no room in the current skb */
1416			if (skb)
1417				fraggap = skb->len - maxfraglen;
1418			else
1419				fraggap = 0;
1420			/* update mtu and maxfraglen if necessary */
1421			if (!skb || !skb_prev)
1422				ip6_append_data_mtu(&mtu, &maxfraglen,
1423						    fragheaderlen, skb, rt,
1424						    orig_mtu);
1425
1426			skb_prev = skb;
1427
1428			/*
1429			 * If remaining data exceeds the mtu,
1430			 * we know we need more fragment(s).
1431			 */
1432			datalen = length + fraggap;
1433
1434			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
1435				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
1436			if ((flags & MSG_MORE) &&
1437			    !(rt->dst.dev->features&NETIF_F_SG))
1438				alloclen = mtu;
1439			else
1440				alloclen = datalen + fragheaderlen;
1441
1442			alloclen += dst_exthdrlen;
1443
1444			if (datalen != length + fraggap) {
1445				/*
1446				 * this is not the last fragment; the trailer
1447				 * space is regarded as data space.
1448				 */
1449				datalen += rt->dst.trailer_len;
1450			}
1451
1452			alloclen += rt->dst.trailer_len;
1453			fraglen = datalen + fragheaderlen;
1454
1455			/*
1456			 * We just reserve space for the fragment header.
1457			 * Note: this may be overallocation if the message
1458			 * (without MSG_MORE) fits into the MTU.
1459			 */
1460			alloclen += sizeof(struct frag_hdr);
1461
1462			if (transhdrlen) {
1463				skb = sock_alloc_send_skb(sk,
1464						alloclen + hh_len,
1465						(flags & MSG_DONTWAIT), &err);
1466			} else {
1467				skb = NULL;
1468				if (atomic_read(&sk->sk_wmem_alloc) <=
1469				    2 * sk->sk_sndbuf)
1470					skb = sock_wmalloc(sk,
1471							   alloclen + hh_len, 1,
1472							   sk->sk_allocation);
1473				if (unlikely(!skb))
1474					err = -ENOBUFS;
1475			}
1476			if (!skb)
1477				goto error;
1478			/*
1479			 *	Fill in the control structures
1480			 */
1481			skb->protocol = htons(ETH_P_IPV6);
1482			skb->ip_summed = csummode;
1483			skb->csum = 0;
1484			/* reserve for fragmentation and ipsec header */
1485			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
1486				    dst_exthdrlen);
1487
1488			/* Only the initial fragment is time stamped */
1489			skb_shinfo(skb)->tx_flags = tx_flags;
1490			tx_flags = 0;
1491			skb_shinfo(skb)->tskey = tskey;
1492			tskey = 0;
1493
1494			/*
1495			 *	Find where to start putting bytes
1496			 */
1497			data = skb_put(skb, fraglen);
1498			skb_set_network_header(skb, exthdrlen);
1499			data += fragheaderlen;
1500			skb->transport_header = (skb->network_header +
1501						 fragheaderlen);
1502			if (fraggap) {
1503				skb->csum = skb_copy_and_csum_bits(
1504					skb_prev, maxfraglen,
1505					data + transhdrlen, fraggap, 0);
1506				skb_prev->csum = csum_sub(skb_prev->csum,
1507							  skb->csum);
1508				data += fraggap;
1509				pskb_trim_unique(skb_prev, maxfraglen);
1510			}
1511			copy = datalen - transhdrlen - fraggap;
1512
1513			if (copy < 0) {
1514				err = -EINVAL;
1515				kfree_skb(skb);
1516				goto error;
1517			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
1518				err = -EFAULT;
1519				kfree_skb(skb);
1520				goto error;
1521			}
1522
1523			offset += copy;
1524			length -= datalen - fraggap;
1525			transhdrlen = 0;
1526			exthdrlen = 0;
1527			dst_exthdrlen = 0;
1528
1529			/*
1530			 * Put the packet on the pending queue
1531			 */
1532			__skb_queue_tail(queue, skb);
1533			continue;
1534		}
1535
1536		if (copy > length)
1537			copy = length;
1538
1539		if (!(rt->dst.dev->features&NETIF_F_SG)) {
1540			unsigned int off;
1541
1542			off = skb->len;
1543			if (getfrag(from, skb_put(skb, copy),
1544						offset, copy, off, skb) < 0) {
1545				__skb_trim(skb, off);
1546				err = -EFAULT;
1547				goto error;
1548			}
1549		} else {
1550			int i = skb_shinfo(skb)->nr_frags;
1551
1552			err = -ENOMEM;
1553			if (!sk_page_frag_refill(sk, pfrag))
1554				goto error;
1555
1556			if (!skb_can_coalesce(skb, i, pfrag->page,
1557					      pfrag->offset)) {
1558				err = -EMSGSIZE;
1559				if (i == MAX_SKB_FRAGS)
1560					goto error;
1561
1562				__skb_fill_page_desc(skb, i, pfrag->page,
1563						     pfrag->offset, 0);
1564				skb_shinfo(skb)->nr_frags = ++i;
1565				get_page(pfrag->page);
1566			}
1567			copy = min_t(int, copy, pfrag->size - pfrag->offset);
1568			if (getfrag(from,
1569				    page_address(pfrag->page) + pfrag->offset,
1570				    offset, copy, skb->len, skb) < 0)
1571				goto error_efault;
1572
1573			pfrag->offset += copy;
1574			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1575			skb->len += copy;
1576			skb->data_len += copy;
1577			skb->truesize += copy;
1578			atomic_add(copy, &sk->sk_wmem_alloc);
1579		}
1580		offset += copy;
1581		length -= copy;
1582	}
1583
1584	return 0;
1585
1586error_efault:
1587	err = -EFAULT;
1588error:
1589	cork->length -= length;
1590	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1591	return err;
1592}
1593
1594int ip6_append_data(struct sock *sk,
1595		    int getfrag(void *from, char *to, int offset, int len,
1596				int odd, struct sk_buff *skb),
1597		    void *from, int length, int transhdrlen,
1598		    struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
1599		    struct rt6_info *rt, unsigned int flags,
1600		    const struct sockcm_cookie *sockc)
1601{
1602	struct inet_sock *inet = inet_sk(sk);
1603	struct ipv6_pinfo *np = inet6_sk(sk);
1604	int exthdrlen;
1605	int err;
1606
1607	if (flags&MSG_PROBE)
1608		return 0;
1609	if (skb_queue_empty(&sk->sk_write_queue)) {
1610		/*
1611		 * setup for corking
1612		 */
1613		err = ip6_setup_cork(sk, &inet->cork, &np->cork,
1614				     ipc6, rt, fl6);
1615		if (err)
1616			return err;
1617
1618		exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
1619		length += exthdrlen;
1620		transhdrlen += exthdrlen;
1621	} else {
1622		fl6 = &inet->cork.fl.u.ip6;
1623		transhdrlen = 0;
1624	}
1625
1626	return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base,
1627				 &np->cork, sk_page_frag(sk), getfrag,
1628				 from, length, transhdrlen, flags, ipc6, sockc);
1629}
1630EXPORT_SYMBOL_GPL(ip6_append_data);
1631
1632static void ip6_cork_release(struct inet_cork_full *cork,
1633			     struct inet6_cork *v6_cork)
1634{
1635	if (v6_cork->opt) {
1636		kfree(v6_cork->opt->dst0opt);
1637		kfree(v6_cork->opt->dst1opt);
1638		kfree(v6_cork->opt->hopopt);
1639		kfree(v6_cork->opt->srcrt);
1640		kfree(v6_cork->opt);
1641		v6_cork->opt = NULL;
1642	}
1643
1644	if (cork->base.dst) {
1645		dst_release(cork->base.dst);
1646		cork->base.dst = NULL;
1647		cork->base.flags &= ~IPCORK_ALLFRAG;
1648	}
1649	memset(&cork->fl, 0, sizeof(cork->fl));
1650}
1651
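/*
 * Turn the queued fragments into one finished packet: chain the tail
 * skbs onto the head's frag_list, push the extension headers and the
 * IPv6 header, then release the cork.
 */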
1652struct sk_buff *__ip6_make_skb(struct sock *sk,
1653			       struct sk_buff_head *queue,
1654			       struct inet_cork_full *cork,
1655			       struct inet6_cork *v6_cork)
1656{
1657	struct sk_buff *skb, *tmp_skb;
1658	struct sk_buff **tail_skb;
1659	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
1660	struct ipv6_pinfo *np = inet6_sk(sk);
1661	struct net *net = sock_net(sk);
1662	struct ipv6hdr *hdr;
1663	struct ipv6_txoptions *opt = v6_cork->opt;
1664	struct rt6_info *rt = (struct rt6_info *)cork->base.dst;
1665	struct flowi6 *fl6 = &cork->fl.u.ip6;
1666	unsigned char proto = fl6->flowi6_proto;
1667
1668	skb = __skb_dequeue(queue);
1669	if (!skb)
1670		goto out;
1671	tail_skb = &(skb_shinfo(skb)->frag_list);
1672
1673	/* move skb->data to ip header from ext header */
1674	if (skb->data < skb_network_header(skb))
1675		__skb_pull(skb, skb_network_offset(skb));
1676	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
1677		__skb_pull(tmp_skb, skb_network_header_len(skb));
1678		*tail_skb = tmp_skb;
1679		tail_skb = &(tmp_skb->next);
1680		skb->len += tmp_skb->len;
1681		skb->data_len += tmp_skb->len;
1682		skb->truesize += tmp_skb->truesize;
1683		tmp_skb->destructor = NULL;
1684		tmp_skb->sk = NULL;
1685	}
1686
1687	/* Allow local fragmentation. */
1688	skb->ignore_df = ip6_sk_ignore_df(sk);
1689
1690	*final_dst = fl6->daddr;
1691	__skb_pull(skb, skb_network_header_len(skb));
1692	if (opt && opt->opt_flen)
1693		ipv6_push_frag_opts(skb, opt, &proto);
1694	if (opt && opt->opt_nflen)
1695		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst, &fl6->saddr);
1696
1697	skb_push(skb, sizeof(struct ipv6hdr));
1698	skb_reset_network_header(skb);
1699	hdr = ipv6_hdr(skb);
1700
1701	ip6_flow_hdr(hdr, v6_cork->tclass,
1702		     ip6_make_flowlabel(net, skb, fl6->flowlabel,
1703					np->autoflowlabel, fl6));
1704	hdr->hop_limit = v6_cork->hop_limit;
1705	hdr->nexthdr = proto;
1706	hdr->saddr = fl6->saddr;
1707	hdr->daddr = *final_dst;
1708
1709	skb->priority = sk->sk_priority;
1710	skb->mark = sk->sk_mark;
1711
1712	skb_dst_set(skb, dst_clone(&rt->dst));
1713	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
1714	if (proto == IPPROTO_ICMPV6) {
1715		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
1716
1717		ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
1718		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1719	}
1720
1721	ip6_cork_release(cork, v6_cork);
1722out:
1723	return skb;
1724}
1725
1726int ip6_send_skb(struct sk_buff *skb)
1727{
1728	struct net *net = sock_net(skb->sk);
1729	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
1730	int err;
1731
1732	err = ip6_local_out(net, skb->sk, skb);
1733	if (err) {
1734		if (err > 0)
1735			err = net_xmit_errno(err);
1736		if (err)
1737			IP6_INC_STATS(net, rt->rt6i_idev,
1738				      IPSTATS_MIB_OUTDISCARDS);
1739	}
1740
1741	return err;
1742}
1743
1744int ip6_push_pending_frames(struct sock *sk)
1745{
1746	struct sk_buff *skb;
1747
1748	skb = ip6_finish_skb(sk);
1749	if (!skb)
1750		return 0;
1751
1752	return ip6_send_skb(skb);
1753}
1754EXPORT_SYMBOL_GPL(ip6_push_pending_frames);
1755
1756static void __ip6_flush_pending_frames(struct sock *sk,
1757				       struct sk_buff_head *queue,
1758				       struct inet_cork_full *cork,
1759				       struct inet6_cork *v6_cork)
1760{
1761	struct sk_buff *skb;
1762
1763	while ((skb = __skb_dequeue_tail(queue)) != NULL) {
1764		if (skb_dst(skb))
1765			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
1766				      IPSTATS_MIB_OUTDISCARDS);
1767		kfree_skb(skb);
1768	}
1769
1770	ip6_cork_release(cork, v6_cork);
1771}
1772
1773void ip6_flush_pending_frames(struct sock *sk)
1774{
1775	__ip6_flush_pending_frames(sk, &sk->sk_write_queue,
1776				   &inet_sk(sk)->cork, &inet6_sk(sk)->cork);
1777}
1778EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);
1779
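/*
 * Non-corked counterpart of ip6_append_data() plus ip6_finish_skb():
 * build the whole datagram on a private queue with an on-stack cork in
 * one call and return the finished skb (or an ERR_PTR on failure).
 */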
1780struct sk_buff *ip6_make_skb(struct sock *sk,
1781			     int getfrag(void *from, char *to, int offset,
1782					 int len, int odd, struct sk_buff *skb),
1783			     void *from, int length, int transhdrlen,
1784			     struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
1785			     struct rt6_info *rt, unsigned int flags,
1786			     const struct sockcm_cookie *sockc)
1787{
1788	struct inet_cork_full cork;
1789	struct inet6_cork v6_cork;
1790	struct sk_buff_head queue;
1791	int exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
1792	int err;
1793
1794	if (flags & MSG_PROBE)
1795		return NULL;
1796
1797	__skb_queue_head_init(&queue);
1798
1799	cork.base.flags = 0;
1800	cork.base.addr = 0;
1801	cork.base.opt = NULL;
1802	v6_cork.opt = NULL;
1803	err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6);
1804	if (err)
1805		return ERR_PTR(err);
1806
1807	if (ipc6->dontfrag < 0)
1808		ipc6->dontfrag = inet6_sk(sk)->dontfrag;
1809
1810	err = __ip6_append_data(sk, fl6, &queue, &cork.base, &v6_cork,
1811				&current->task_frag, getfrag, from,
1812				length + exthdrlen, transhdrlen + exthdrlen,
1813				flags, ipc6, sockc);
1814	if (err) {
1815		__ip6_flush_pending_frames(sk, &queue, &cork, &v6_cork);
1816		return ERR_PTR(err);
1817	}
1818
1819	return __ip6_make_skb(sk, &queue, &cork, &v6_cork);
1820}