v3.1
   1/*
   2 *	IPv6 output functions
   3 *	Linux INET6 implementation
   4 *
   5 *	Authors:
   6 *	Pedro Roque		<roque@di.fc.ul.pt>
   7 *
   8 *	Based on linux/net/ipv4/ip_output.c
   9 *
  10 *	This program is free software; you can redistribute it and/or
  11 *      modify it under the terms of the GNU General Public License
  12 *      as published by the Free Software Foundation; either version
  13 *      2 of the License, or (at your option) any later version.
  14 *
  15 *	Changes:
  16 *	A.N.Kuznetsov	:	arithmetics in fragmentation.
  17 *				extension headers are implemented.
  18 *				route changes now work.
  19 *				ip6_forward does not confuse sniffers.
  20 *				etc.
  21 *
  22 *      H. von Brand    :       Added missing #include <linux/string.h>
  23 *	Imran Patel	: 	frag id should be in NBO
  24 *      Kazunori MIYAZAWA @USAGI
  25 *			:       add ip6_append_data and related functions
  26 *				for datagram xmit
  27 */
  28
  29#include <linux/errno.h>
  30#include <linux/kernel.h>
  31#include <linux/string.h>
  32#include <linux/socket.h>
  33#include <linux/net.h>
  34#include <linux/netdevice.h>
  35#include <linux/if_arp.h>
  36#include <linux/in6.h>
  37#include <linux/tcp.h>
  38#include <linux/route.h>
  39#include <linux/module.h>
  40#include <linux/slab.h>
  41
  42#include <linux/netfilter.h>
  43#include <linux/netfilter_ipv6.h>
  44
  45#include <net/sock.h>
  46#include <net/snmp.h>
  47
  48#include <net/ipv6.h>
  49#include <net/ndisc.h>
  50#include <net/protocol.h>
  51#include <net/ip6_route.h>
  52#include <net/addrconf.h>
  53#include <net/rawv6.h>
  54#include <net/icmp.h>
  55#include <net/xfrm.h>
  56#include <net/checksum.h>
  57#include <linux/mroute6.h>
  58
  59int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
  60
  61int __ip6_local_out(struct sk_buff *skb)
  62{
  63	int len;
  64
  65	len = skb->len - sizeof(struct ipv6hdr);
  66	if (len > IPV6_MAXPLEN)
  67		len = 0;
  68	ipv6_hdr(skb)->payload_len = htons(len);
  69
  70	return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
  71		       skb_dst(skb)->dev, dst_output);
  72}
  73
  74int ip6_local_out(struct sk_buff *skb)
  75{
  76	int err;
  77
  78	err = __ip6_local_out(skb);
  79	if (likely(err == 1))
  80		err = dst_output(skb);
  81
  82	return err;
  83}
  84EXPORT_SYMBOL_GPL(ip6_local_out);
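/* Editorial note (not part of the original source): nf_hook() returns 1
 * when every NF_INET_LOCAL_OUT hook accepted the packet and left it to the
 * caller, which is why ip6_local_out() treats err == 1 as "carry on" and
 * invokes dst_output() itself; any other return value means a hook already
 * stole, queued or dropped the skb.
 */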
  85
  86/* dev_loopback_xmit for use with netfilter. */
  87static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
  88{
  89	skb_reset_mac_header(newskb);
  90	__skb_pull(newskb, skb_network_offset(newskb));
  91	newskb->pkt_type = PACKET_LOOPBACK;
  92	newskb->ip_summed = CHECKSUM_UNNECESSARY;
  93	WARN_ON(!skb_dst(newskb));
  94
  95	netif_rx_ni(newskb);
  96	return 0;
  97}
  98
  99static int ip6_finish_output2(struct sk_buff *skb)
 100{
 101	struct dst_entry *dst = skb_dst(skb);
 102	struct net_device *dev = dst->dev;
 103	struct neighbour *neigh;
 104
 105	skb->protocol = htons(ETH_P_IPV6);
 106	skb->dev = dev;
 107
 108	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
 109		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
 110
 111		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
 112		    ((mroute6_socket(dev_net(dev), skb) &&
 113		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
 114		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
 115					 &ipv6_hdr(skb)->saddr))) {
 116			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
 117
 118			/* Do not check for IFF_ALLMULTI; multicast routing
 119			   is not supported in any case.
 120			 */
 121			if (newskb)
 122				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
 123					newskb, NULL, newskb->dev,
 124					ip6_dev_loopback_xmit);
 125
 126			if (ipv6_hdr(skb)->hop_limit == 0) {
 127				IP6_INC_STATS(dev_net(dev), idev,
 128					      IPSTATS_MIB_OUTDISCARDS);
 129				kfree_skb(skb);
 130				return 0;
 131			}
 132		}
 133
 134		IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
 135				skb->len);
 136	}
 137
 138	rcu_read_lock();
 139	neigh = dst_get_neighbour(dst);
 140	if (neigh) {
 141		int res = neigh_output(neigh, skb);
 142
 143		rcu_read_unlock();
 144		return res;
 145	}
 146	rcu_read_unlock();
 147	IP6_INC_STATS_BH(dev_net(dst->dev),
 148			 ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
 149	kfree_skb(skb);
 150	return -EINVAL;
 151}
 152
 153static int ip6_finish_output(struct sk_buff *skb)
 154{
 155	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
 156	    dst_allfrag(skb_dst(skb)))
 157		return ip6_fragment(skb, ip6_finish_output2);
 158	else
 159		return ip6_finish_output2(skb);
 160}
 161
 162int ip6_output(struct sk_buff *skb)
 163{
 164	struct net_device *dev = skb_dst(skb)->dev;
 165	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
 166	if (unlikely(idev->cnf.disable_ipv6)) {
 167		IP6_INC_STATS(dev_net(dev), idev,
 168			      IPSTATS_MIB_OUTDISCARDS);
 169		kfree_skb(skb);
 170		return 0;
 171	}
 172
 173	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
 174			    ip6_finish_output,
 175			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 176}
 177
 178/*
 179 *	xmit an sk_buff (used by TCP, SCTP and DCCP)
 180 */
 181
 182int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 183	     struct ipv6_txoptions *opt)
 184{
 185	struct net *net = sock_net(sk);
 186	struct ipv6_pinfo *np = inet6_sk(sk);
 187	struct in6_addr *first_hop = &fl6->daddr;
 188	struct dst_entry *dst = skb_dst(skb);
 189	struct ipv6hdr *hdr;
 190	u8  proto = fl6->flowi6_proto;
 191	int seg_len = skb->len;
 192	int hlimit = -1;
 193	int tclass = 0;
 194	u32 mtu;
 195
 196	if (opt) {
 197		unsigned int head_room;
 198
 199		/* First: exthdrs may take lots of space (~8K for now)
 200		   MAX_HEADER is not enough.
 201		 */
 202		head_room = opt->opt_nflen + opt->opt_flen;
 203		seg_len += head_room;
 204		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
 205
 206		if (skb_headroom(skb) < head_room) {
 207			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
 208			if (skb2 == NULL) {
 209				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 210					      IPSTATS_MIB_OUTDISCARDS);
 211				kfree_skb(skb);
 212				return -ENOBUFS;
 213			}
 214			kfree_skb(skb);
 215			skb = skb2;
 216			skb_set_owner_w(skb, sk);
 217		}
 218		if (opt->opt_flen)
 219			ipv6_push_frag_opts(skb, opt, &proto);
 220		if (opt->opt_nflen)
 221			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
 222	}
 223
 224	skb_push(skb, sizeof(struct ipv6hdr));
 225	skb_reset_network_header(skb);
 226	hdr = ipv6_hdr(skb);
 227
 228	/*
 229	 *	Fill in the IPv6 header
 230	 */
 231	if (np) {
 232		tclass = np->tclass;
 233		hlimit = np->hop_limit;
 234	}
 235	if (hlimit < 0)
 236		hlimit = ip6_dst_hoplimit(dst);
 237
 238	*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl6->flowlabel;
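	/* Editorial note: the first 32-bit word of the IPv6 header is
	 * version (4 bits) | traffic class (8 bits) | flow label (20 bits).
	 * htonl(0x60000000 | (tclass << 20)) therefore sets version 6 and
	 * places the traffic class in bits 27..20 of the host-order word,
	 * while fl6->flowlabel is already a network-order value occupying
	 * the low 20 bits, so it can be OR-ed in without conversion.
	 */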
 239
 240	hdr->payload_len = htons(seg_len);
 241	hdr->nexthdr = proto;
 242	hdr->hop_limit = hlimit;
 243
 244	ipv6_addr_copy(&hdr->saddr, &fl6->saddr);
 245	ipv6_addr_copy(&hdr->daddr, first_hop);
 246
 247	skb->priority = sk->sk_priority;
 248	skb->mark = sk->sk_mark;
 249
 250	mtu = dst_mtu(dst);
 251	if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
 252		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
 253			      IPSTATS_MIB_OUT, skb->len);
 254		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
 255			       dst->dev, dst_output);
 256	}
 257
 258	if (net_ratelimit())
 259		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
 260	skb->dev = dst->dev;
 261	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 262	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
 263	kfree_skb(skb);
 264	return -EMSGSIZE;
 265}
 266
 267EXPORT_SYMBOL(ip6_xmit);
 268
 269/*
  270 *	To avoid extra problems ND packets are sent through this
 271 *	routine. It's code duplication but I really want to avoid
 272 *	extra checks since ipv6_build_header is used by TCP (which
 273 *	is for us performance critical)
 274 */
 275
 276int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
 277	       const struct in6_addr *saddr, const struct in6_addr *daddr,
 278	       int proto, int len)
 279{
 280	struct ipv6_pinfo *np = inet6_sk(sk);
 281	struct ipv6hdr *hdr;
 282
 283	skb->protocol = htons(ETH_P_IPV6);
 284	skb->dev = dev;
 285
 286	skb_reset_network_header(skb);
 287	skb_put(skb, sizeof(struct ipv6hdr));
 288	hdr = ipv6_hdr(skb);
 289
 290	*(__be32*)hdr = htonl(0x60000000);
 291
 292	hdr->payload_len = htons(len);
 293	hdr->nexthdr = proto;
 294	hdr->hop_limit = np->hop_limit;
 295
 296	ipv6_addr_copy(&hdr->saddr, saddr);
 297	ipv6_addr_copy(&hdr->daddr, daddr);
 298
 299	return 0;
 300}
 301
 302static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
 303{
 304	struct ip6_ra_chain *ra;
 305	struct sock *last = NULL;
 306
 307	read_lock(&ip6_ra_lock);
 308	for (ra = ip6_ra_chain; ra; ra = ra->next) {
 309		struct sock *sk = ra->sk;
 310		if (sk && ra->sel == sel &&
 311		    (!sk->sk_bound_dev_if ||
 312		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
 313			if (last) {
 314				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 315				if (skb2)
 316					rawv6_rcv(last, skb2);
 317			}
 318			last = sk;
 319		}
 320	}
 321
 322	if (last) {
 323		rawv6_rcv(last, skb);
 324		read_unlock(&ip6_ra_lock);
 325		return 1;
 326	}
 327	read_unlock(&ip6_ra_lock);
 328	return 0;
 329}
 330
 331static int ip6_forward_proxy_check(struct sk_buff *skb)
 332{
 333	struct ipv6hdr *hdr = ipv6_hdr(skb);
 334	u8 nexthdr = hdr->nexthdr;
 335	int offset;
 336
 337	if (ipv6_ext_hdr(nexthdr)) {
 338		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr);
 339		if (offset < 0)
 340			return 0;
 341	} else
 342		offset = sizeof(struct ipv6hdr);
 343
 344	if (nexthdr == IPPROTO_ICMPV6) {
 345		struct icmp6hdr *icmp6;
 346
 347		if (!pskb_may_pull(skb, (skb_network_header(skb) +
 348					 offset + 1 - skb->data)))
 349			return 0;
 350
 351		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);
 352
 353		switch (icmp6->icmp6_type) {
 354		case NDISC_ROUTER_SOLICITATION:
 355		case NDISC_ROUTER_ADVERTISEMENT:
 356		case NDISC_NEIGHBOUR_SOLICITATION:
 357		case NDISC_NEIGHBOUR_ADVERTISEMENT:
 358		case NDISC_REDIRECT:
 359			/* For reaction involving unicast neighbor discovery
 360			 * message destined to the proxied address, pass it to
 361			 * input function.
 362			 */
 363			return 1;
 364		default:
 365			break;
 366		}
 367	}
 368
 369	/*
 370	 * The proxying router can't forward traffic sent to a link-local
 371	 * address, so signal the sender and discard the packet. This
 372	 * behavior is clarified by the MIPv6 specification.
 373	 */
 374	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
 375		dst_link_failure(skb);
 376		return -1;
 377	}
 378
 379	return 0;
 380}
 381
 382static inline int ip6_forward_finish(struct sk_buff *skb)
 383{
 384	return dst_output(skb);
 385}
 386
 387int ip6_forward(struct sk_buff *skb)
 388{
 389	struct dst_entry *dst = skb_dst(skb);
 390	struct ipv6hdr *hdr = ipv6_hdr(skb);
 391	struct inet6_skb_parm *opt = IP6CB(skb);
 392	struct net *net = dev_net(dst->dev);
 393	struct neighbour *n;
 394	u32 mtu;
 395
 396	if (net->ipv6.devconf_all->forwarding == 0)
 397		goto error;
 398
 399	if (skb_warn_if_lro(skb))
 400		goto drop;
 401
 402	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
 403		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
 404		goto drop;
 405	}
 406
 407	if (skb->pkt_type != PACKET_HOST)
 408		goto drop;
 409
 410	skb_forward_csum(skb);
 411
 412	/*
 413	 *	We DO NOT make any processing on
 414	 *	RA packets, pushing them to user level AS IS
  415 *	without any WARRANTY that application will be able
 416	 *	to interpret them. The reason is that we
 417	 *	cannot make anything clever here.
 418	 *
 419	 *	We are not end-node, so that if packet contains
 420	 *	AH/ESP, we cannot make anything.
 421	 *	Defragmentation also would be mistake, RA packets
 422	 *	cannot be fragmented, because there is no warranty
 423	 *	that different fragments will go along one path. --ANK
 424	 */
 425	if (opt->ra) {
 426		u8 *ptr = skb_network_header(skb) + opt->ra;
 427		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
 428			return 0;
 429	}
 430
 431	/*
 432	 *	check and decrement ttl
 433	 */
 434	if (hdr->hop_limit <= 1) {
 435		/* Force OUTPUT device used as source address */
 436		skb->dev = dst->dev;
 437		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
 438		IP6_INC_STATS_BH(net,
 439				 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
 440
 441		kfree_skb(skb);
 442		return -ETIMEDOUT;
 443	}
 444
 445	/* XXX: idev->cnf.proxy_ndp? */
 446	if (net->ipv6.devconf_all->proxy_ndp &&
 447	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
 448		int proxied = ip6_forward_proxy_check(skb);
 449		if (proxied > 0)
 450			return ip6_input(skb);
 451		else if (proxied < 0) {
 452			IP6_INC_STATS(net, ip6_dst_idev(dst),
 453				      IPSTATS_MIB_INDISCARDS);
 454			goto drop;
 455		}
 456	}
 457
 458	if (!xfrm6_route_forward(skb)) {
 459		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
 460		goto drop;
 461	}
 462	dst = skb_dst(skb);
 463
 464	/* IPv6 specs say nothing about it, but it is clear that we cannot
 465	   send redirects to source routed frames.
 466	   We don't send redirects to frames decapsulated from IPsec.
 467	 */
 468	n = dst_get_neighbour(dst);
 469	if (skb->dev == dst->dev && n && opt->srcrt == 0 && !skb_sec_path(skb)) {
 470		struct in6_addr *target = NULL;
 471		struct rt6_info *rt;
 472
 473		/*
 474		 *	incoming and outgoing devices are the same
 475		 *	send a redirect.
 476		 */
 477
 478		rt = (struct rt6_info *) dst;
 479		if ((rt->rt6i_flags & RTF_GATEWAY))
 480			target = (struct in6_addr*)&n->primary_key;
 481		else
 482			target = &hdr->daddr;
 483
 484		if (!rt->rt6i_peer)
 485			rt6_bind_peer(rt, 1);
 486
 487		/* Limit redirects both by destination (here)
 488		   and by source (inside ndisc_send_redirect)
 489		 */
 490		if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
 491			ndisc_send_redirect(skb, n, target);
 492	} else {
 493		int addrtype = ipv6_addr_type(&hdr->saddr);
 494
 495		/* This check is security critical. */
 496		if (addrtype == IPV6_ADDR_ANY ||
 497		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
 498			goto error;
 499		if (addrtype & IPV6_ADDR_LINKLOCAL) {
 500			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
 501				    ICMPV6_NOT_NEIGHBOUR, 0);
 502			goto error;
 503		}
 504	}
 505
 506	mtu = dst_mtu(dst);
 507	if (mtu < IPV6_MIN_MTU)
 508		mtu = IPV6_MIN_MTU;
 509
 510	if (skb->len > mtu && !skb_is_gso(skb)) {
 511		/* Again, force OUTPUT device used as source address */
 512		skb->dev = dst->dev;
 513		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 514		IP6_INC_STATS_BH(net,
 515				 ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
 516		IP6_INC_STATS_BH(net,
 517				 ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
 518		kfree_skb(skb);
 519		return -EMSGSIZE;
 520	}
 521
 522	if (skb_cow(skb, dst->dev->hard_header_len)) {
 523		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
 524		goto drop;
 525	}
 526
 527	hdr = ipv6_hdr(skb);
 528
 529	/* Mangling hops number delayed to point after skb COW */
 530
 531	hdr->hop_limit--;
 532
 533	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
 534	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
 535		       ip6_forward_finish);
 536
 537error:
 538	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
 539drop:
 540	kfree_skb(skb);
 541	return -EINVAL;
 542}
 543
 544static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
 545{
 546	to->pkt_type = from->pkt_type;
 547	to->priority = from->priority;
 548	to->protocol = from->protocol;
 549	skb_dst_drop(to);
 550	skb_dst_set(to, dst_clone(skb_dst(from)));
 551	to->dev = from->dev;
 552	to->mark = from->mark;
 553
 554#ifdef CONFIG_NET_SCHED
 555	to->tc_index = from->tc_index;
 556#endif
 557	nf_copy(to, from);
 558#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
 559    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
 560	to->nf_trace = from->nf_trace;
 561#endif
 562	skb_copy_secmark(to, from);
 563}
 564
 565int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 566{
 567	u16 offset = sizeof(struct ipv6hdr);
 568	struct ipv6_opt_hdr *exthdr =
 569				(struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
 570	unsigned int packet_len = skb->tail - skb->network_header;
 571	int found_rhdr = 0;
 572	*nexthdr = &ipv6_hdr(skb)->nexthdr;
 573
 574	while (offset + 1 <= packet_len) {
 575
 576		switch (**nexthdr) {
 577
 578		case NEXTHDR_HOP:
 579			break;
 580		case NEXTHDR_ROUTING:
 581			found_rhdr = 1;
 582			break;
 583		case NEXTHDR_DEST:
 584#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
 585			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
 586				break;
 587#endif
 588			if (found_rhdr)
 589				return offset;
 590			break;
 591		default :
 592			return offset;
 593		}
 594
 595		offset += ipv6_optlen(exthdr);
 596		*nexthdr = &exthdr->nexthdr;
 597		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
 598						 offset);
 599	}
 600
 601	return offset;
 602}
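/* Editorial note: ip6_find_1stfragopt() returns the byte offset at which a
 * Fragment header may be inserted.  Everything before that offset (the
 * Hop-by-Hop header, a Routing header and any Destination Options header
 * carrying a Home Address option) forms the unfragmentable part that must
 * be copied into every fragment, and *nexthdr is left pointing at the
 * "next header" byte the caller rewrites to NEXTHDR_FRAGMENT.
 */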
 603
 604void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
 605{
 606	static atomic_t ipv6_fragmentation_id;
 607	int old, new;
 608
 609	if (rt) {
 610		struct inet_peer *peer;
 611
 612		if (!rt->rt6i_peer)
 613			rt6_bind_peer(rt, 1);
 614		peer = rt->rt6i_peer;
 615		if (peer) {
 616			fhdr->identification = htonl(inet_getid(peer, 0));
 617			return;
 618		}
 619	}
 620	do {
 621		old = atomic_read(&ipv6_fragmentation_id);
 622		new = old + 1;
 623		if (!new)
 624			new = 1;
 625	} while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
 626	fhdr->identification = htonl(new);
 627}
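/* Editorial note: when no inet_peer is bound to the route, the fallback
 * above draws IDs from a global atomic counter and never yields 0, which
 * appears to be what lets ip6_fragment()'s slow path use frag_id == 0 to
 * mean "no identification chosen yet".
 */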
 628
 629int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 630{
 631	struct sk_buff *frag;
 632	struct rt6_info *rt = (struct rt6_info*)skb_dst(skb);
 633	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
 634	struct ipv6hdr *tmp_hdr;
 635	struct frag_hdr *fh;
 636	unsigned int mtu, hlen, left, len;
 637	__be32 frag_id = 0;
 638	int ptr, offset = 0, err=0;
 639	u8 *prevhdr, nexthdr = 0;
 640	struct net *net = dev_net(skb_dst(skb)->dev);
 641
 642	hlen = ip6_find_1stfragopt(skb, &prevhdr);
 643	nexthdr = *prevhdr;
 644
 645	mtu = ip6_skb_dst_mtu(skb);
 646
 647	/* We must not fragment if the socket is set to force MTU discovery
 648	 * or if the skb it not generated by a local socket.
 649	 */
 650	if (!skb->local_df && skb->len > mtu) {
 651		skb->dev = skb_dst(skb)->dev;
 652		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 653		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 654			      IPSTATS_MIB_FRAGFAILS);
 655		kfree_skb(skb);
 656		return -EMSGSIZE;
 657	}
 658
 659	if (np && np->frag_size < mtu) {
 660		if (np->frag_size)
 661			mtu = np->frag_size;
 662	}
 663	mtu -= hlen + sizeof(struct frag_hdr);
 664
 665	if (skb_has_frag_list(skb)) {
 666		int first_len = skb_pagelen(skb);
 667		struct sk_buff *frag2;
 668
 669		if (first_len - hlen > mtu ||
 670		    ((first_len - hlen) & 7) ||
 671		    skb_cloned(skb))
 672			goto slow_path;
 673
 674		skb_walk_frags(skb, frag) {
 675			/* Correct geometry. */
 676			if (frag->len > mtu ||
 677			    ((frag->len & 7) && frag->next) ||
 678			    skb_headroom(frag) < hlen)
 679				goto slow_path_clean;
 680
 681			/* Partially cloned skb? */
 682			if (skb_shared(frag))
 683				goto slow_path_clean;
 684
 685			BUG_ON(frag->sk);
 686			if (skb->sk) {
 687				frag->sk = skb->sk;
 688				frag->destructor = sock_wfree;
 689			}
 690			skb->truesize -= frag->truesize;
 691		}
 692
 693		err = 0;
 694		offset = 0;
 695		frag = skb_shinfo(skb)->frag_list;
 696		skb_frag_list_init(skb);
 697		/* BUILD HEADER */
 698
 699		*prevhdr = NEXTHDR_FRAGMENT;
 700		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
 701		if (!tmp_hdr) {
 702			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 703				      IPSTATS_MIB_FRAGFAILS);
 704			return -ENOMEM;
 705		}
 706
 707		__skb_pull(skb, hlen);
 708		fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr));
 709		__skb_push(skb, hlen);
 710		skb_reset_network_header(skb);
 711		memcpy(skb_network_header(skb), tmp_hdr, hlen);
 712
 713		ipv6_select_ident(fh, rt);
 714		fh->nexthdr = nexthdr;
 715		fh->reserved = 0;
 716		fh->frag_off = htons(IP6_MF);
 717		frag_id = fh->identification;
 718
 719		first_len = skb_pagelen(skb);
 720		skb->data_len = first_len - skb_headlen(skb);
 721		skb->len = first_len;
 722		ipv6_hdr(skb)->payload_len = htons(first_len -
 723						   sizeof(struct ipv6hdr));
 724
 725		dst_hold(&rt->dst);
 726
 727		for (;;) {
 728			/* Prepare header of the next frame,
 729			 * before previous one went down. */
 730			if (frag) {
 731				frag->ip_summed = CHECKSUM_NONE;
 732				skb_reset_transport_header(frag);
 733				fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr));
 734				__skb_push(frag, hlen);
 735				skb_reset_network_header(frag);
 736				memcpy(skb_network_header(frag), tmp_hdr,
 737				       hlen);
 738				offset += skb->len - hlen - sizeof(struct frag_hdr);
 739				fh->nexthdr = nexthdr;
 740				fh->reserved = 0;
 741				fh->frag_off = htons(offset);
 742				if (frag->next != NULL)
 743					fh->frag_off |= htons(IP6_MF);
 744				fh->identification = frag_id;
 745				ipv6_hdr(frag)->payload_len =
 746						htons(frag->len -
 747						      sizeof(struct ipv6hdr));
 748				ip6_copy_metadata(frag, skb);
 749			}
 750
 751			err = output(skb);
 752			if(!err)
 753				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
 754					      IPSTATS_MIB_FRAGCREATES);
 755
 756			if (err || !frag)
 757				break;
 758
 759			skb = frag;
 760			frag = skb->next;
 761			skb->next = NULL;
 762		}
 763
 764		kfree(tmp_hdr);
 765
 766		if (err == 0) {
 767			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
 768				      IPSTATS_MIB_FRAGOKS);
 769			dst_release(&rt->dst);
 770			return 0;
 771		}
 772
 773		while (frag) {
 774			skb = frag->next;
 775			kfree_skb(frag);
 776			frag = skb;
 777		}
 778
 779		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
 780			      IPSTATS_MIB_FRAGFAILS);
 781		dst_release(&rt->dst);
 782		return err;
 783
 784slow_path_clean:
 785		skb_walk_frags(skb, frag2) {
 786			if (frag2 == frag)
 787				break;
 788			frag2->sk = NULL;
 789			frag2->destructor = NULL;
 790			skb->truesize += frag2->truesize;
 791		}
 792	}
 793
 794slow_path:
 795	left = skb->len - hlen;		/* Space per frame */
 796	ptr = hlen;			/* Where to start from */
 797
 798	/*
 799	 *	Fragment the datagram.
 800	 */
 801
 802	*prevhdr = NEXTHDR_FRAGMENT;
 803
 804	/*
 805	 *	Keep copying data until we run out.
 806	 */
 807	while(left > 0)	{
 808		len = left;
 809		/* IF: it doesn't fit, use 'mtu' - the data space left */
 810		if (len > mtu)
 811			len = mtu;
 812		/* IF: we are not sending up to and including the packet end
 813		   then align the next start on an eight byte boundary */
 814		if (len < left)	{
 815			len &= ~7;
 816		}
 817		/*
 818		 *	Allocate buffer.
 819		 */
 820
 821		if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->dst.dev), GFP_ATOMIC)) == NULL) {
 822			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
 823			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 824				      IPSTATS_MIB_FRAGFAILS);
 825			err = -ENOMEM;
 826			goto fail;
 827		}
 828
 829		/*
 830		 *	Set up data on packet
 831		 */
 832
 833		ip6_copy_metadata(frag, skb);
 834		skb_reserve(frag, LL_RESERVED_SPACE(rt->dst.dev));
 835		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
 836		skb_reset_network_header(frag);
 837		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
 838		frag->transport_header = (frag->network_header + hlen +
 839					  sizeof(struct frag_hdr));
 840
 841		/*
 842		 *	Charge the memory for the fragment to any owner
 843		 *	it might possess
 844		 */
 845		if (skb->sk)
 846			skb_set_owner_w(frag, skb->sk);
 847
 848		/*
 849		 *	Copy the packet header into the new buffer.
 850		 */
 851		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
 852
 853		/*
 854		 *	Build fragment header.
 855		 */
 856		fh->nexthdr = nexthdr;
 857		fh->reserved = 0;
 858		if (!frag_id) {
 859			ipv6_select_ident(fh, rt);
 860			frag_id = fh->identification;
 861		} else
 862			fh->identification = frag_id;
 863
 864		/*
 865		 *	Copy a block of the IP datagram.
 866		 */
 867		if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
 868			BUG();
 869		left -= len;
 870
 871		fh->frag_off = htons(offset);
 872		if (left > 0)
 873			fh->frag_off |= htons(IP6_MF);
 874		ipv6_hdr(frag)->payload_len = htons(frag->len -
 875						    sizeof(struct ipv6hdr));
 876
 877		ptr += len;
 878		offset += len;
 879
 880		/*
 881		 *	Put this fragment into the sending queue.
 882		 */
 883		err = output(frag);
 884		if (err)
 885			goto fail;
 886
 887		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 888			      IPSTATS_MIB_FRAGCREATES);
 889	}
 890	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 891		      IPSTATS_MIB_FRAGOKS);
 892	kfree_skb(skb);
 893	return err;
 894
 895fail:
 896	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 897		      IPSTATS_MIB_FRAGFAILS);
 898	kfree_skb(skb);
 899	return err;
 900}
 901
 902static inline int ip6_rt_check(const struct rt6key *rt_key,
 903			       const struct in6_addr *fl_addr,
 904			       const struct in6_addr *addr_cache)
 905{
 906	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
 907		(addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
 908}
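/* Editorial note: ip6_rt_check() returns non-zero when the cached route can
 * no longer be trusted for this flow, i.e. the route is not a /128 host
 * route matching the flow address and there is also no matching cached
 * address (daddr_cache/saddr_cache) on the socket; zero means the cached
 * dst may still be used.
 */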
 909
 910static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
 911					  struct dst_entry *dst,
 912					  const struct flowi6 *fl6)
 913{
 914	struct ipv6_pinfo *np = inet6_sk(sk);
 915	struct rt6_info *rt = (struct rt6_info *)dst;
 916
 917	if (!dst)
 918		goto out;
 919
  920	/* Yes, checking route validity in the not connected
 921	 * case is not very simple. Take into account,
 922	 * that we do not support routing by source, TOS,
 923	 * and MSG_DONTROUTE 		--ANK (980726)
 924	 *
 925	 * 1. ip6_rt_check(): If route was host route,
 926	 *    check that cached destination is current.
 927	 *    If it is network route, we still may
 928	 *    check its validity using saved pointer
 929	 *    to the last used address: daddr_cache.
 930	 *    We do not want to save whole address now,
 931	 *    (because main consumer of this service
 932	 *    is tcp, which has not this problem),
 933	 *    so that the last trick works only on connected
 934	 *    sockets.
 935	 * 2. oif also should be the same.
 936	 */
 937	if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
 938#ifdef CONFIG_IPV6_SUBTREES
 939	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
 940#endif
 941	    (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
 942		dst_release(dst);
 943		dst = NULL;
 944	}
 945
 946out:
 947	return dst;
 948}
 949
 950static int ip6_dst_lookup_tail(struct sock *sk,
 951			       struct dst_entry **dst, struct flowi6 *fl6)
 952{
 953	struct net *net = sock_net(sk);
 954#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
 955	struct neighbour *n;
 956#endif
 957	int err;
 958
 959	if (*dst == NULL)
 960		*dst = ip6_route_output(net, sk, fl6);
 961
 962	if ((err = (*dst)->error))
 963		goto out_err_release;
 964
 965	if (ipv6_addr_any(&fl6->saddr)) {
 966		struct rt6_info *rt = (struct rt6_info *) *dst;
 967		err = ip6_route_get_saddr(net, rt, &fl6->daddr,
 968					  sk ? inet6_sk(sk)->srcprefs : 0,
 969					  &fl6->saddr);
 970		if (err)
 971			goto out_err_release;
 972	}
 973
 974#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
 975	/*
 976	 * Here if the dst entry we've looked up
 977	 * has a neighbour entry that is in the INCOMPLETE
 978	 * state and the src address from the flow is
 979	 * marked as OPTIMISTIC, we release the found
 980	 * dst entry and replace it instead with the
 981	 * dst entry of the nexthop router
 982	 */
 983	rcu_read_lock();
 984	n = dst_get_neighbour(*dst);
 985	if (n && !(n->nud_state & NUD_VALID)) {
 986		struct inet6_ifaddr *ifp;
 987		struct flowi6 fl_gw6;
 988		int redirect;
 989
 990		rcu_read_unlock();
 991		ifp = ipv6_get_ifaddr(net, &fl6->saddr,
 992				      (*dst)->dev, 1);
 993
 994		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
 995		if (ifp)
 996			in6_ifa_put(ifp);
 997
 998		if (redirect) {
 999			/*
1000			 * We need to get the dst entry for the
1001			 * default router instead
1002			 */
1003			dst_release(*dst);
1004			memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
1005			memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
1006			*dst = ip6_route_output(net, sk, &fl_gw6);
1007			if ((err = (*dst)->error))
1008				goto out_err_release;
1009		}
1010	} else {
1011		rcu_read_unlock();
1012	}
1013#endif
1014
1015	return 0;
1016
1017out_err_release:
1018	if (err == -ENETUNREACH)
1019		IP6_INC_STATS_BH(net, NULL, IPSTATS_MIB_OUTNOROUTES);
1020	dst_release(*dst);
1021	*dst = NULL;
1022	return err;
1023}
1024
1025/**
1026 *	ip6_dst_lookup - perform route lookup on flow
1027 *	@sk: socket which provides route info
1028 *	@dst: pointer to dst_entry * for result
1029 *	@fl6: flow to lookup
1030 *
1031 *	This function performs a route lookup on the given flow.
1032 *
1033 *	It returns zero on success, or a standard errno code on error.
1034 */
1035int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6)
1036{
1037	*dst = NULL;
1038	return ip6_dst_lookup_tail(sk, dst, fl6);
1039}
1040EXPORT_SYMBOL_GPL(ip6_dst_lookup);
1041
1042/**
1043 *	ip6_dst_lookup_flow - perform route lookup on flow with ipsec
1044 *	@sk: socket which provides route info
1045 *	@fl6: flow to lookup
1046 *	@final_dst: final destination address for ipsec lookup
1047 *	@can_sleep: we are in a sleepable context
1048 *
1049 *	This function performs a route lookup on the given flow.
1050 *
1051 *	It returns a valid dst pointer on success, or a pointer encoded
1052 *	error code.
1053 */
1054struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
1055				      const struct in6_addr *final_dst,
1056				      bool can_sleep)
1057{
1058	struct dst_entry *dst = NULL;
1059	int err;
1060
1061	err = ip6_dst_lookup_tail(sk, &dst, fl6);
1062	if (err)
1063		return ERR_PTR(err);
1064	if (final_dst)
1065		ipv6_addr_copy(&fl6->daddr, final_dst);
1066	if (can_sleep)
1067		fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
1068
1069	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
1070}
1071EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
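/* Editorial note - a minimal, hypothetical caller of ip6_dst_lookup_flow()
 * (remote_addr/local_addr below are illustrative names, not from this file):
 *
 *	struct flowi6 fl6;
 *	struct dst_entry *dst;
 *
 *	memset(&fl6, 0, sizeof(fl6));
 *	fl6.flowi6_proto = IPPROTO_TCP;
 *	fl6.flowi6_oif = sk->sk_bound_dev_if;
 *	ipv6_addr_copy(&fl6.daddr, &remote_addr);
 *	ipv6_addr_copy(&fl6.saddr, &local_addr);
 *
 *	dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);
 *
 * The returned dst (possibly wrapped by xfrm) is then typically attached to
 * outgoing skbs with skb_dst_set() before calling ip6_xmit().
 */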
1072
1073/**
1074 *	ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
1075 *	@sk: socket which provides the dst cache and route info
1076 *	@fl6: flow to lookup
1077 *	@final_dst: final destination address for ipsec lookup
1078 *	@can_sleep: we are in a sleepable context
1079 *
1080 *	This function performs a route lookup on the given flow with the
1081 *	possibility of using the cached route in the socket if it is valid.
1082 *	It will take the socket dst lock when operating on the dst cache.
1083 *	As a result, this function can only be used in process context.
1084 *
1085 *	It returns a valid dst pointer on success, or a pointer encoded
1086 *	error code.
1087 */
1088struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
1089					 const struct in6_addr *final_dst,
1090					 bool can_sleep)
1091{
1092	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
1093	int err;
1094
1095	dst = ip6_sk_dst_check(sk, dst, fl6);
1096
1097	err = ip6_dst_lookup_tail(sk, &dst, fl6);
1098	if (err)
1099		return ERR_PTR(err);
1100	if (final_dst)
1101		ipv6_addr_copy(&fl6->daddr, final_dst);
1102	if (can_sleep)
1103		fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
1104
1105	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
1106}
1107EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
1108
1109static inline int ip6_ufo_append_data(struct sock *sk,
1110			int getfrag(void *from, char *to, int offset, int len,
1111			int odd, struct sk_buff *skb),
1112			void *from, int length, int hh_len, int fragheaderlen,
1113			int transhdrlen, int mtu,unsigned int flags,
1114			struct rt6_info *rt)
1115
1116{
1117	struct sk_buff *skb;
1118	int err;
1119
1120	/* There is support for UDP large send offload by network
1121	 * device, so create one single skb packet containing complete
1122	 * udp datagram
1123	 */
1124	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
1125		skb = sock_alloc_send_skb(sk,
1126			hh_len + fragheaderlen + transhdrlen + 20,
1127			(flags & MSG_DONTWAIT), &err);
1128		if (skb == NULL)
1129			return -ENOMEM;
1130
1131		/* reserve space for Hardware header */
1132		skb_reserve(skb, hh_len);
1133
1134		/* create space for UDP/IP header */
1135		skb_put(skb,fragheaderlen + transhdrlen);
1136
1137		/* initialize network header pointer */
1138		skb_reset_network_header(skb);
1139
1140		/* initialize protocol header pointer */
1141		skb->transport_header = skb->network_header + fragheaderlen;
1142
1143		skb->ip_summed = CHECKSUM_PARTIAL;
1144		skb->csum = 0;
1145	}
1146
1147	err = skb_append_datato_frags(sk,skb, getfrag, from,
1148				      (length - transhdrlen));
1149	if (!err) {
1150		struct frag_hdr fhdr;
1151
1152		/* Specify the length of each IPv6 datagram fragment.
1153		 * It has to be a multiple of 8.
1154		 */
1155		skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
1156					     sizeof(struct frag_hdr)) & ~7;
1157		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1158		ipv6_select_ident(&fhdr, rt);
1159		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
1160		__skb_queue_tail(&sk->sk_write_queue, skb);
1161
1162		return 0;
1163	}
 1164	/* There is not enough support to do UDP LSO,
1165	 * so follow normal path
1166	 */
1167	kfree_skb(skb);
1168
1169	return err;
1170}
1171
1172static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
1173					       gfp_t gfp)
1174{
1175	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1176}
1177
1178static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
1179						gfp_t gfp)
1180{
1181	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1182}
1183
1184int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1185	int offset, int len, int odd, struct sk_buff *skb),
1186	void *from, int length, int transhdrlen,
1187	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
1188	struct rt6_info *rt, unsigned int flags, int dontfrag)
1189{
1190	struct inet_sock *inet = inet_sk(sk);
1191	struct ipv6_pinfo *np = inet6_sk(sk);
1192	struct inet_cork *cork;
1193	struct sk_buff *skb;
1194	unsigned int maxfraglen, fragheaderlen;
1195	int exthdrlen;
1196	int hh_len;
1197	int mtu;
1198	int copy;
1199	int err;
1200	int offset = 0;
1201	int csummode = CHECKSUM_NONE;
1202	__u8 tx_flags = 0;
1203
1204	if (flags&MSG_PROBE)
1205		return 0;
1206	cork = &inet->cork.base;
1207	if (skb_queue_empty(&sk->sk_write_queue)) {
1208		/*
1209		 * setup for corking
1210		 */
1211		if (opt) {
1212			if (WARN_ON(np->cork.opt))
1213				return -EINVAL;
1214
1215			np->cork.opt = kmalloc(opt->tot_len, sk->sk_allocation);
1216			if (unlikely(np->cork.opt == NULL))
1217				return -ENOBUFS;
1218
1219			np->cork.opt->tot_len = opt->tot_len;
1220			np->cork.opt->opt_flen = opt->opt_flen;
1221			np->cork.opt->opt_nflen = opt->opt_nflen;
1222
1223			np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
1224							    sk->sk_allocation);
1225			if (opt->dst0opt && !np->cork.opt->dst0opt)
1226				return -ENOBUFS;
1227
1228			np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
1229							    sk->sk_allocation);
1230			if (opt->dst1opt && !np->cork.opt->dst1opt)
1231				return -ENOBUFS;
1232
1233			np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
1234							   sk->sk_allocation);
1235			if (opt->hopopt && !np->cork.opt->hopopt)
1236				return -ENOBUFS;
1237
1238			np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
1239							    sk->sk_allocation);
1240			if (opt->srcrt && !np->cork.opt->srcrt)
1241				return -ENOBUFS;
1242
1243			/* need source address above miyazawa*/
1244		}
1245		dst_hold(&rt->dst);
1246		cork->dst = &rt->dst;
1247		inet->cork.fl.u.ip6 = *fl6;
1248		np->cork.hop_limit = hlimit;
1249		np->cork.tclass = tclass;
1250		mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
1251		      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
1252		if (np->frag_size < mtu) {
1253			if (np->frag_size)
1254				mtu = np->frag_size;
1255		}
1256		cork->fragsize = mtu;
1257		if (dst_allfrag(rt->dst.path))
1258			cork->flags |= IPCORK_ALLFRAG;
1259		cork->length = 0;
1260		sk->sk_sndmsg_page = NULL;
1261		sk->sk_sndmsg_off = 0;
1262		exthdrlen = rt->dst.header_len + (opt ? opt->opt_flen : 0) -
1263			    rt->rt6i_nfheader_len;
1264		length += exthdrlen;
1265		transhdrlen += exthdrlen;
1266	} else {
1267		rt = (struct rt6_info *)cork->dst;
1268		fl6 = &inet->cork.fl.u.ip6;
1269		opt = np->cork.opt;
1270		transhdrlen = 0;
1271		exthdrlen = 0;
1272		mtu = cork->fragsize;
1273	}
1274
1275	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
1276
1277	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
1278			(opt ? opt->opt_nflen : 0);
1279	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);
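	/* Editorial note: with the common case of mtu = 1500 and
	 * fragheaderlen = 40 (plain IPv6 header, no extension headers) this
	 * works out to maxfraglen = ((1500 - 40) & ~7) + 40 - 8 = 1488: the
	 * largest length each queued fragment may grow to while keeping the
	 * fragmentable part 8-byte aligned and leaving room for the Fragment
	 * header that is added later.
	 */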
1280
1281	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
1282		if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
1283			ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen);
1284			return -EMSGSIZE;
1285		}
1286	}
1287
1288	/* For UDP, check if TX timestamp is enabled */
1289	if (sk->sk_type == SOCK_DGRAM) {
1290		err = sock_tx_timestamp(sk, &tx_flags);
1291		if (err)
1292			goto error;
1293	}
1294
1295	/*
1296	 * Let's try using as much space as possible.
1297	 * Use MTU if total length of the message fits into the MTU.
1298	 * Otherwise, we need to reserve fragment header and
 1299	 * fragment alignment (= 8-15 octets, in total).
1300	 *
1301	 * Note that we may need to "move" the data from the tail of
1302	 * of the buffer to the new fragment when we split
1303	 * the message.
1304	 *
1305	 * FIXME: It may be fragmented into multiple chunks
1306	 *        at once if non-fragmentable extension headers
1307	 *        are too large.
1308	 * --yoshfuji
1309	 */
1310
1311	cork->length += length;
1312	if (length > mtu) {
1313		int proto = sk->sk_protocol;
1314		if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){
1315			ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
1316			return -EMSGSIZE;
1317		}
1318
1319		if (proto == IPPROTO_UDP &&
1320		    (rt->dst.dev->features & NETIF_F_UFO)) {
1321
1322			err = ip6_ufo_append_data(sk, getfrag, from, length,
1323						  hh_len, fragheaderlen,
1324						  transhdrlen, mtu, flags, rt);
1325			if (err)
1326				goto error;
1327			return 0;
1328		}
1329	}
1330
1331	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
1332		goto alloc_new_skb;
1333
1334	while (length > 0) {
1335		/* Check if the remaining data fits into current packet. */
1336		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
1337		if (copy < length)
1338			copy = maxfraglen - skb->len;
1339
1340		if (copy <= 0) {
1341			char *data;
1342			unsigned int datalen;
1343			unsigned int fraglen;
1344			unsigned int fraggap;
1345			unsigned int alloclen;
1346			struct sk_buff *skb_prev;
1347alloc_new_skb:
1348			skb_prev = skb;
1349
1350			/* There's no room in the current skb */
1351			if (skb_prev)
1352				fraggap = skb_prev->len - maxfraglen;
1353			else
1354				fraggap = 0;
1355
1356			/*
1357			 * If remaining data exceeds the mtu,
1358			 * we know we need more fragment(s).
1359			 */
1360			datalen = length + fraggap;
1361			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
1362				datalen = maxfraglen - fragheaderlen;
1363
1364			fraglen = datalen + fragheaderlen;
1365			if ((flags & MSG_MORE) &&
1366			    !(rt->dst.dev->features&NETIF_F_SG))
1367				alloclen = mtu;
1368			else
1369				alloclen = datalen + fragheaderlen;
1370
1371			/*
1372			 * The last fragment gets additional space at tail.
 1373	 * Note: we overallocate on fragments with MSG_MORE
1374			 * because we have no idea if we're the last one.
1375			 */
1376			if (datalen == length + fraggap)
1377				alloclen += rt->dst.trailer_len;
1378
1379			/*
1380			 * We just reserve space for fragment header.
1381			 * Note: this may be overallocation if the message
1382			 * (without MSG_MORE) fits into the MTU.
1383			 */
1384			alloclen += sizeof(struct frag_hdr);
1385
1386			if (transhdrlen) {
1387				skb = sock_alloc_send_skb(sk,
1388						alloclen + hh_len,
1389						(flags & MSG_DONTWAIT), &err);
1390			} else {
1391				skb = NULL;
1392				if (atomic_read(&sk->sk_wmem_alloc) <=
1393				    2 * sk->sk_sndbuf)
1394					skb = sock_wmalloc(sk,
1395							   alloclen + hh_len, 1,
1396							   sk->sk_allocation);
1397				if (unlikely(skb == NULL))
1398					err = -ENOBUFS;
1399				else {
1400					/* Only the initial fragment
1401					 * is time stamped.
1402					 */
1403					tx_flags = 0;
1404				}
1405			}
1406			if (skb == NULL)
1407				goto error;
1408			/*
1409			 *	Fill in the control structures
1410			 */
1411			skb->ip_summed = csummode;
1412			skb->csum = 0;
1413			/* reserve for fragmentation */
1414			skb_reserve(skb, hh_len+sizeof(struct frag_hdr));
1415
1416			if (sk->sk_type == SOCK_DGRAM)
1417				skb_shinfo(skb)->tx_flags = tx_flags;
1418
1419			/*
1420			 *	Find where to start putting bytes
1421			 */
1422			data = skb_put(skb, fraglen);
1423			skb_set_network_header(skb, exthdrlen);
1424			data += fragheaderlen;
1425			skb->transport_header = (skb->network_header +
1426						 fragheaderlen);
1427			if (fraggap) {
1428				skb->csum = skb_copy_and_csum_bits(
1429					skb_prev, maxfraglen,
1430					data + transhdrlen, fraggap, 0);
1431				skb_prev->csum = csum_sub(skb_prev->csum,
1432							  skb->csum);
1433				data += fraggap;
1434				pskb_trim_unique(skb_prev, maxfraglen);
1435			}
1436			copy = datalen - transhdrlen - fraggap;
1437			if (copy < 0) {
1438				err = -EINVAL;
1439				kfree_skb(skb);
1440				goto error;
1441			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
1442				err = -EFAULT;
1443				kfree_skb(skb);
1444				goto error;
1445			}
1446
1447			offset += copy;
1448			length -= datalen - fraggap;
1449			transhdrlen = 0;
1450			exthdrlen = 0;
1451			csummode = CHECKSUM_NONE;
1452
1453			/*
1454			 * Put the packet on the pending queue
1455			 */
1456			__skb_queue_tail(&sk->sk_write_queue, skb);
1457			continue;
1458		}
1459
1460		if (copy > length)
1461			copy = length;
1462
1463		if (!(rt->dst.dev->features&NETIF_F_SG)) {
1464			unsigned int off;
1465
1466			off = skb->len;
1467			if (getfrag(from, skb_put(skb, copy),
1468						offset, copy, off, skb) < 0) {
1469				__skb_trim(skb, off);
1470				err = -EFAULT;
1471				goto error;
1472			}
1473		} else {
1474			int i = skb_shinfo(skb)->nr_frags;
1475			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
1476			struct page *page = sk->sk_sndmsg_page;
1477			int off = sk->sk_sndmsg_off;
1478			unsigned int left;
1479
1480			if (page && (left = PAGE_SIZE - off) > 0) {
1481				if (copy >= left)
1482					copy = left;
1483				if (page != frag->page) {
1484					if (i == MAX_SKB_FRAGS) {
1485						err = -EMSGSIZE;
1486						goto error;
1487					}
1488					get_page(page);
1489					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
1490					frag = &skb_shinfo(skb)->frags[i];
1491				}
1492			} else if(i < MAX_SKB_FRAGS) {
1493				if (copy > PAGE_SIZE)
1494					copy = PAGE_SIZE;
1495				page = alloc_pages(sk->sk_allocation, 0);
1496				if (page == NULL) {
1497					err = -ENOMEM;
1498					goto error;
1499				}
1500				sk->sk_sndmsg_page = page;
1501				sk->sk_sndmsg_off = 0;
1502
1503				skb_fill_page_desc(skb, i, page, 0, 0);
1504				frag = &skb_shinfo(skb)->frags[i];
1505			} else {
1506				err = -EMSGSIZE;
1507				goto error;
1508			}
1509			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
1510				err = -EFAULT;
1511				goto error;
1512			}
1513			sk->sk_sndmsg_off += copy;
1514			frag->size += copy;
1515			skb->len += copy;
1516			skb->data_len += copy;
1517			skb->truesize += copy;
1518			atomic_add(copy, &sk->sk_wmem_alloc);
1519		}
1520		offset += copy;
1521		length -= copy;
1522	}
1523	return 0;
1524error:
1525	cork->length -= length;
1526	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1527	return err;
1528}
1529
1530static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
1531{
1532	if (np->cork.opt) {
1533		kfree(np->cork.opt->dst0opt);
1534		kfree(np->cork.opt->dst1opt);
1535		kfree(np->cork.opt->hopopt);
1536		kfree(np->cork.opt->srcrt);
1537		kfree(np->cork.opt);
1538		np->cork.opt = NULL;
1539	}
1540
1541	if (inet->cork.base.dst) {
1542		dst_release(inet->cork.base.dst);
1543		inet->cork.base.dst = NULL;
1544		inet->cork.base.flags &= ~IPCORK_ALLFRAG;
1545	}
1546	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
1547}
1548
1549int ip6_push_pending_frames(struct sock *sk)
1550{
1551	struct sk_buff *skb, *tmp_skb;
1552	struct sk_buff **tail_skb;
1553	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
1554	struct inet_sock *inet = inet_sk(sk);
1555	struct ipv6_pinfo *np = inet6_sk(sk);
1556	struct net *net = sock_net(sk);
1557	struct ipv6hdr *hdr;
1558	struct ipv6_txoptions *opt = np->cork.opt;
1559	struct rt6_info *rt = (struct rt6_info *)inet->cork.base.dst;
1560	struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
1561	unsigned char proto = fl6->flowi6_proto;
1562	int err = 0;
1563
1564	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
1565		goto out;
1566	tail_skb = &(skb_shinfo(skb)->frag_list);
1567
1568	/* move skb->data to ip header from ext header */
1569	if (skb->data < skb_network_header(skb))
1570		__skb_pull(skb, skb_network_offset(skb));
1571	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
1572		__skb_pull(tmp_skb, skb_network_header_len(skb));
1573		*tail_skb = tmp_skb;
1574		tail_skb = &(tmp_skb->next);
1575		skb->len += tmp_skb->len;
1576		skb->data_len += tmp_skb->len;
1577		skb->truesize += tmp_skb->truesize;
1578		tmp_skb->destructor = NULL;
1579		tmp_skb->sk = NULL;
1580	}
1581
1582	/* Allow local fragmentation. */
1583	if (np->pmtudisc < IPV6_PMTUDISC_DO)
1584		skb->local_df = 1;
1585
1586	ipv6_addr_copy(final_dst, &fl6->daddr);
1587	__skb_pull(skb, skb_network_header_len(skb));
1588	if (opt && opt->opt_flen)
1589		ipv6_push_frag_opts(skb, opt, &proto);
1590	if (opt && opt->opt_nflen)
1591		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);
1592
1593	skb_push(skb, sizeof(struct ipv6hdr));
1594	skb_reset_network_header(skb);
1595	hdr = ipv6_hdr(skb);
1596
1597	*(__be32*)hdr = fl6->flowlabel |
1598		     htonl(0x60000000 | ((int)np->cork.tclass << 20));
1599
1600	hdr->hop_limit = np->cork.hop_limit;
1601	hdr->nexthdr = proto;
1602	ipv6_addr_copy(&hdr->saddr, &fl6->saddr);
1603	ipv6_addr_copy(&hdr->daddr, final_dst);
1604
1605	skb->priority = sk->sk_priority;
1606	skb->mark = sk->sk_mark;
1607
1608	skb_dst_set(skb, dst_clone(&rt->dst));
1609	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
1610	if (proto == IPPROTO_ICMPV6) {
1611		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
1612
1613		ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type);
1614		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
1615	}
1616
1617	err = ip6_local_out(skb);
1618	if (err) {
1619		if (err > 0)
1620			err = net_xmit_errno(err);
1621		if (err)
1622			goto error;
1623	}
1624
1625out:
1626	ip6_cork_release(inet, np);
1627	return err;
1628error:
1629	IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1630	goto out;
1631}
1632
1633void ip6_flush_pending_frames(struct sock *sk)
1634{
1635	struct sk_buff *skb;
1636
1637	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
1638		if (skb_dst(skb))
1639			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
1640				      IPSTATS_MIB_OUTDISCARDS);
1641		kfree_skb(skb);
1642	}
1643
1644	ip6_cork_release(inet_sk(sk), inet6_sk(sk));
1645}
v3.15
   1/*
   2 *	IPv6 output functions
   3 *	Linux INET6 implementation
   4 *
   5 *	Authors:
   6 *	Pedro Roque		<roque@di.fc.ul.pt>
   7 *
   8 *	Based on linux/net/ipv4/ip_output.c
   9 *
  10 *	This program is free software; you can redistribute it and/or
  11 *      modify it under the terms of the GNU General Public License
  12 *      as published by the Free Software Foundation; either version
  13 *      2 of the License, or (at your option) any later version.
  14 *
  15 *	Changes:
   16 *	A.N.Kuznetsov	:	arithmetics in fragmentation.
  17 *				extension headers are implemented.
  18 *				route changes now work.
  19 *				ip6_forward does not confuse sniffers.
  20 *				etc.
  21 *
  22 *      H. von Brand    :       Added missing #include <linux/string.h>
  23 *	Imran Patel	: 	frag id should be in NBO
  24 *      Kazunori MIYAZAWA @USAGI
  25 *			:       add ip6_append_data and related functions
  26 *				for datagram xmit
  27 */
  28
  29#include <linux/errno.h>
  30#include <linux/kernel.h>
  31#include <linux/string.h>
  32#include <linux/socket.h>
  33#include <linux/net.h>
  34#include <linux/netdevice.h>
  35#include <linux/if_arp.h>
  36#include <linux/in6.h>
  37#include <linux/tcp.h>
  38#include <linux/route.h>
  39#include <linux/module.h>
  40#include <linux/slab.h>
  41
  42#include <linux/netfilter.h>
  43#include <linux/netfilter_ipv6.h>
  44
  45#include <net/sock.h>
  46#include <net/snmp.h>
  47
  48#include <net/ipv6.h>
  49#include <net/ndisc.h>
  50#include <net/protocol.h>
  51#include <net/ip6_route.h>
  52#include <net/addrconf.h>
  53#include <net/rawv6.h>
  54#include <net/icmp.h>
  55#include <net/xfrm.h>
  56#include <net/checksum.h>
  57#include <linux/mroute6.h>
  58
  59static int ip6_finish_output2(struct sk_buff *skb)
  60{
  61	struct dst_entry *dst = skb_dst(skb);
  62	struct net_device *dev = dst->dev;
  63	struct neighbour *neigh;
  64	struct in6_addr *nexthop;
  65	int ret;
  66
  67	skb->protocol = htons(ETH_P_IPV6);
  68	skb->dev = dev;
  69
  70	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
  71		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
  72
  73		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
  74		    ((mroute6_socket(dev_net(dev), skb) &&
  75		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
  76		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
  77					 &ipv6_hdr(skb)->saddr))) {
  78			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
  79
  80			/* Do not check for IFF_ALLMULTI; multicast routing
  81			   is not supported in any case.
  82			 */
  83			if (newskb)
  84				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
  85					newskb, NULL, newskb->dev,
  86					dev_loopback_xmit);
  87
  88			if (ipv6_hdr(skb)->hop_limit == 0) {
  89				IP6_INC_STATS(dev_net(dev), idev,
  90					      IPSTATS_MIB_OUTDISCARDS);
  91				kfree_skb(skb);
  92				return 0;
  93			}
  94		}
  95
  96		IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
  97				skb->len);
  98
  99		if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
 100		    IPV6_ADDR_SCOPE_NODELOCAL &&
 101		    !(dev->flags & IFF_LOOPBACK)) {
 102			kfree_skb(skb);
 103			return 0;
 104		}
 105	}
 106
 107	rcu_read_lock_bh();
 108	nexthop = rt6_nexthop((struct rt6_info *)dst);
 109	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
 110	if (unlikely(!neigh))
 111		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
 112	if (!IS_ERR(neigh)) {
 113		ret = dst_neigh_output(dst, neigh, skb);
 114		rcu_read_unlock_bh();
 115		return ret;
 116	}
 117	rcu_read_unlock_bh();
 118
 119	IP6_INC_STATS(dev_net(dst->dev),
 120		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
 121	kfree_skb(skb);
 122	return -EINVAL;
 123}
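/* Editorial note on the v3.15 variant above: instead of reading a neighbour
 * pointer cached in the dst (as the v3.1 code earlier on this page does via
 * dst_get_neighbour()), it derives the next hop with rt6_nexthop(), looks
 * the neighbour up under rcu_read_lock_bh() and, if none exists yet,
 * creates it on demand with __neigh_create() before transmitting.
 */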
 124
 125static int ip6_finish_output(struct sk_buff *skb)
 126{
 127	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
 128	    dst_allfrag(skb_dst(skb)) ||
 129	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
 130		return ip6_fragment(skb, ip6_finish_output2);
 131	else
 132		return ip6_finish_output2(skb);
 133}
 134
 135int ip6_output(struct sock *sk, struct sk_buff *skb)
 136{
 137	struct net_device *dev = skb_dst(skb)->dev;
 138	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
 139	if (unlikely(idev->cnf.disable_ipv6)) {
 140		IP6_INC_STATS(dev_net(dev), idev,
 141			      IPSTATS_MIB_OUTDISCARDS);
 142		kfree_skb(skb);
 143		return 0;
 144	}
 145
 146	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
 147			    ip6_finish_output,
 148			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 149}
 150
 151/*
 152 *	xmit an sk_buff (used by TCP, SCTP and DCCP)
 153 */
 154
 155int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 156	     struct ipv6_txoptions *opt, int tclass)
 157{
 158	struct net *net = sock_net(sk);
 159	struct ipv6_pinfo *np = inet6_sk(sk);
 160	struct in6_addr *first_hop = &fl6->daddr;
 161	struct dst_entry *dst = skb_dst(skb);
 162	struct ipv6hdr *hdr;
 163	u8  proto = fl6->flowi6_proto;
 164	int seg_len = skb->len;
 165	int hlimit = -1;
 166	u32 mtu;
 167
 168	if (opt) {
 169		unsigned int head_room;
 170
 171		/* First: exthdrs may take lots of space (~8K for now)
 172		   MAX_HEADER is not enough.
 173		 */
 174		head_room = opt->opt_nflen + opt->opt_flen;
 175		seg_len += head_room;
 176		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
 177
 178		if (skb_headroom(skb) < head_room) {
 179			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
 180			if (skb2 == NULL) {
 181				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 182					      IPSTATS_MIB_OUTDISCARDS);
 183				kfree_skb(skb);
 184				return -ENOBUFS;
 185			}
 186			consume_skb(skb);
 187			skb = skb2;
 188			skb_set_owner_w(skb, sk);
 189		}
 190		if (opt->opt_flen)
 191			ipv6_push_frag_opts(skb, opt, &proto);
 192		if (opt->opt_nflen)
 193			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
 194	}
 195
 196	skb_push(skb, sizeof(struct ipv6hdr));
 197	skb_reset_network_header(skb);
 198	hdr = ipv6_hdr(skb);
 199
 200	/*
 201	 *	Fill in the IPv6 header
 202	 */
 203	if (np)
 204		hlimit = np->hop_limit;
 205	if (hlimit < 0)
 206		hlimit = ip6_dst_hoplimit(dst);
 207
 208	ip6_flow_hdr(hdr, tclass, fl6->flowlabel);
 209
 210	hdr->payload_len = htons(seg_len);
 211	hdr->nexthdr = proto;
 212	hdr->hop_limit = hlimit;
 213
 214	hdr->saddr = fl6->saddr;
 215	hdr->daddr = *first_hop;
 216
 217	skb->protocol = htons(ETH_P_IPV6);
 218	skb->priority = sk->sk_priority;
 219	skb->mark = sk->sk_mark;
 220
 221	mtu = dst_mtu(dst);
 222	if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
 223		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
 224			      IPSTATS_MIB_OUT, skb->len);
 225		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
 226			       dst->dev, dst_output);
 227	}
 228
 229	skb->dev = dst->dev;
 230	ipv6_local_error(sk, EMSGSIZE, fl6, mtu);
 231	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
 232	kfree_skb(skb);
 233	return -EMSGSIZE;
 234}
 235
 236EXPORT_SYMBOL(ip6_xmit);
 237
 238static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
 239{
 240	struct ip6_ra_chain *ra;
 241	struct sock *last = NULL;
 242
 243	read_lock(&ip6_ra_lock);
 244	for (ra = ip6_ra_chain; ra; ra = ra->next) {
 245		struct sock *sk = ra->sk;
 246		if (sk && ra->sel == sel &&
 247		    (!sk->sk_bound_dev_if ||
 248		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
 249			if (last) {
 250				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 251				if (skb2)
 252					rawv6_rcv(last, skb2);
 253			}
 254			last = sk;
 255		}
 256	}
 257
 258	if (last) {
 259		rawv6_rcv(last, skb);
 260		read_unlock(&ip6_ra_lock);
 261		return 1;
 262	}
 263	read_unlock(&ip6_ra_lock);
 264	return 0;
 265}
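/*
 * Router Alert delivery (see ip6_call_ra_chain() above): every raw socket
 * registered on ip6_ra_chain with a matching selector (and, if bound to a
 * device, matching the ingress device) gets a copy of the packet; all but
 * the last match receive a clone, the last one receives the original skb.
 * A return value of 1 tells ip6_forward() that the skb was consumed.
 */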
 266
 267static int ip6_forward_proxy_check(struct sk_buff *skb)
 268{
 269	struct ipv6hdr *hdr = ipv6_hdr(skb);
 270	u8 nexthdr = hdr->nexthdr;
 271	__be16 frag_off;
 272	int offset;
 273
 274	if (ipv6_ext_hdr(nexthdr)) {
 275		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
 276		if (offset < 0)
 277			return 0;
 278	} else
 279		offset = sizeof(struct ipv6hdr);
 280
 281	if (nexthdr == IPPROTO_ICMPV6) {
 282		struct icmp6hdr *icmp6;
 283
 284		if (!pskb_may_pull(skb, (skb_network_header(skb) +
 285					 offset + 1 - skb->data)))
 286			return 0;
 287
 288		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);
 289
 290		switch (icmp6->icmp6_type) {
 291		case NDISC_ROUTER_SOLICITATION:
 292		case NDISC_ROUTER_ADVERTISEMENT:
 293		case NDISC_NEIGHBOUR_SOLICITATION:
 294		case NDISC_NEIGHBOUR_ADVERTISEMENT:
 295		case NDISC_REDIRECT:
 296			/* A unicast neighbour discovery message destined
 297			 * to the proxied address is passed to the input
 298			 * function.
 299			 */
 300			return 1;
 301		default:
 302			break;
 303		}
 304	}
 305
 306	/*
 307	 * The proxying router can't forward traffic sent to a link-local
 308	 * address, so signal the sender and discard the packet. This
 309	 * behavior is clarified by the MIPv6 specification.
 310	 */
 311	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
 312		dst_link_failure(skb);
 313		return -1;
 314	}
 315
 316	return 0;
 317}
 318
 319static inline int ip6_forward_finish(struct sk_buff *skb)
 320{
 321	return dst_output(skb);
 322}
 323
 324static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
 325{
 326	unsigned int mtu;
 327	struct inet6_dev *idev;
 328
 329	if (dst_metric_locked(dst, RTAX_MTU)) {
 330		mtu = dst_metric_raw(dst, RTAX_MTU);
 331		if (mtu)
 332			return mtu;
 333	}
 334
 335	mtu = IPV6_MIN_MTU;
 336	rcu_read_lock();
 337	idev = __in6_dev_get(dst->dev);
 338	if (idev)
 339		mtu = idev->cnf.mtu6;
 340	rcu_read_unlock();
 341
 342	return mtu;
 343}
 344
 345static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 346{
 347	if (skb->len <= mtu)
 348		return false;
 349
 350	/* ipv6 conntrack defrag sets max_frag_size + local_df */
 351	if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
 352		return true;
 353
 354	if (skb->local_df)
 355		return false;
 356
 357	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
 358		return false;
 359
 360	return true;
 361}
 362
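/*
 * Forwarding path, in outline: the packet is dropped unless forwarding is
 * enabled and it arrived as PACKET_HOST; Router Alert packets are diverted
 * to ip6_call_ra_chain(); a hop limit of 1 or less triggers an ICMPv6 Time
 * Exceeded; destinations covered by proxy ND may be handed back to
 * ip6_input(); a redirect may be sent when the packet would leave on the
 * interface it arrived on; packets larger than the forwarding MTU get
 * ICMPV6_PKT_TOOBIG; anything that survives has its hop_limit decremented
 * and is passed to the NF_INET_FORWARD hook with ip6_forward_finish() as
 * the continuation.
 */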
 363int ip6_forward(struct sk_buff *skb)
 364{
 365	struct dst_entry *dst = skb_dst(skb);
 366	struct ipv6hdr *hdr = ipv6_hdr(skb);
 367	struct inet6_skb_parm *opt = IP6CB(skb);
 368	struct net *net = dev_net(dst->dev);
 369	u32 mtu;
 370
 371	if (net->ipv6.devconf_all->forwarding == 0)
 372		goto error;
 373
 374	if (skb->pkt_type != PACKET_HOST)
 375		goto drop;
 376
 377	if (skb_warn_if_lro(skb))
 378		goto drop;
 379
 380	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
 381		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
 382				 IPSTATS_MIB_INDISCARDS);
 383		goto drop;
 384	}
 385
 386	skb_forward_csum(skb);
 387
 388	/*
 389	 *	We do no processing on RA packets; we push
 390	 *	them to user level AS IS, with no guarantee
 391	 *	that the application will be able
 392	 *	to interpret them. The reason is that we
 393	 *	cannot do anything clever here.
 394	 *
 395	 *	We are not the end node, so if a packet contains
 396	 *	AH/ESP we cannot do anything with it.
 397	 *	Defragmentation would also be a mistake: RA packets
 398	 *	cannot be fragmented, because there is no guarantee
 399	 *	that different fragments will travel along one path. --ANK
 400	 */
 401	if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
 402		if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
 403			return 0;
 404	}
 405
 406	/*
 407	 *	check and decrement ttl
 408	 */
 409	if (hdr->hop_limit <= 1) {
 410		/* Force OUTPUT device used as source address */
 411		skb->dev = dst->dev;
 412		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
 413		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
 414				 IPSTATS_MIB_INHDRERRORS);
 415
 416		kfree_skb(skb);
 417		return -ETIMEDOUT;
 418	}
 419
 420	/* XXX: idev->cnf.proxy_ndp? */
 421	if (net->ipv6.devconf_all->proxy_ndp &&
 422	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
 423		int proxied = ip6_forward_proxy_check(skb);
 424		if (proxied > 0)
 425			return ip6_input(skb);
 426		else if (proxied < 0) {
 427			IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
 428					 IPSTATS_MIB_INDISCARDS);
 429			goto drop;
 430		}
 431	}
 432
 433	if (!xfrm6_route_forward(skb)) {
 434		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
 435				 IPSTATS_MIB_INDISCARDS);
 436		goto drop;
 437	}
 438	dst = skb_dst(skb);
 439
 440	/* IPv6 specs say nothing about it, but it is clear that we cannot
 441	   send redirects to source routed frames.
 442	   We don't send redirects to frames decapsulated from IPsec.
 443	 */
 444	if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
 445		struct in6_addr *target = NULL;
 446		struct inet_peer *peer;
 447		struct rt6_info *rt;
 448
 449		/*
 450		 *	incoming and outgoing devices are the same
 451		 *	send a redirect.
 452		 */
 453
 454		rt = (struct rt6_info *) dst;
 455		if (rt->rt6i_flags & RTF_GATEWAY)
 456			target = &rt->rt6i_gateway;
 457		else
 458			target = &hdr->daddr;
 459
 460		peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
 461
 462		/* Limit redirects both by destination (here)
 463		   and by source (inside ndisc_send_redirect)
 464		 */
 465		if (inet_peer_xrlim_allow(peer, 1*HZ))
 466			ndisc_send_redirect(skb, target);
 467		if (peer)
 468			inet_putpeer(peer);
 469	} else {
 470		int addrtype = ipv6_addr_type(&hdr->saddr);
 471
 472		/* This check is security critical. */
 473		if (addrtype == IPV6_ADDR_ANY ||
 474		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
 475			goto error;
 476		if (addrtype & IPV6_ADDR_LINKLOCAL) {
 477			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
 478				    ICMPV6_NOT_NEIGHBOUR, 0);
 479			goto error;
 480		}
 481	}
 482
 483	mtu = ip6_dst_mtu_forward(dst);
 484	if (mtu < IPV6_MIN_MTU)
 485		mtu = IPV6_MIN_MTU;
 486
 487	if (ip6_pkt_too_big(skb, mtu)) {
 488		/* Again, force OUTPUT device used as source address */
 489		skb->dev = dst->dev;
 490		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 491		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
 492				 IPSTATS_MIB_INTOOBIGERRORS);
 493		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
 494				 IPSTATS_MIB_FRAGFAILS);
 495		kfree_skb(skb);
 496		return -EMSGSIZE;
 497	}
 498
 499	if (skb_cow(skb, dst->dev->hard_header_len)) {
 500		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
 501				 IPSTATS_MIB_OUTDISCARDS);
 502		goto drop;
 503	}
 504
 505	hdr = ipv6_hdr(skb);
 506
 507	/* Mangling hops number delayed to point after skb COW */
 508
 509	hdr->hop_limit--;
 510
 511	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
 512	IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
 513	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
 514		       ip6_forward_finish);
 515
 516error:
 517	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
 518drop:
 519	kfree_skb(skb);
 520	return -EINVAL;
 521}
 522
 523static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
 524{
 525	to->pkt_type = from->pkt_type;
 526	to->priority = from->priority;
 527	to->protocol = from->protocol;
 528	skb_dst_drop(to);
 529	skb_dst_set(to, dst_clone(skb_dst(from)));
 530	to->dev = from->dev;
 531	to->mark = from->mark;
 532
 533#ifdef CONFIG_NET_SCHED
 534	to->tc_index = from->tc_index;
 535#endif
 536	nf_copy(to, from);
 537	skb_copy_secmark(to, from);
 538}
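/*
 * ip6_fragment() below has two paths.  The fast path is taken when the skb
 * already carries a suitable frag_list (every fragment except the last a
 * multiple of 8 bytes, enough headroom, nothing shared or cloned): the
 * existing buffers are reused and only a fragment header plus a copy of
 * the network headers is pushed in front of each.  Otherwise the slow path
 * allocates a fresh skb per fragment and copies the payload into it with
 * skb_copy_bits().
 */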
 539
 540int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 541{
 542	struct sk_buff *frag;
 543	struct rt6_info *rt = (struct rt6_info*)skb_dst(skb);
 544	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
 545	struct ipv6hdr *tmp_hdr;
 546	struct frag_hdr *fh;
 547	unsigned int mtu, hlen, left, len;
 548	int hroom, troom;
 549	__be32 frag_id = 0;
 550	int ptr, offset = 0, err = 0;
 551	u8 *prevhdr, nexthdr = 0;
 552	struct net *net = dev_net(skb_dst(skb)->dev);
 553
 554	hlen = ip6_find_1stfragopt(skb, &prevhdr);
 555	nexthdr = *prevhdr;
 556
 557	mtu = ip6_skb_dst_mtu(skb);
 558
 559	/* We must not fragment if the socket is set to force MTU discovery
 560	 * or if the skb is not generated by a local socket.
 561	 */
 562	if (unlikely(!skb->local_df && skb->len > mtu) ||
 563		     (IP6CB(skb)->frag_max_size &&
 564		      IP6CB(skb)->frag_max_size > mtu)) {
 565		if (skb->sk && dst_allfrag(skb_dst(skb)))
 566			sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
 567
 568		skb->dev = skb_dst(skb)->dev;
 569		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 570		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 571			      IPSTATS_MIB_FRAGFAILS);
 572		kfree_skb(skb);
 573		return -EMSGSIZE;
 574	}
 575
 576	if (np && np->frag_size < mtu) {
 577		if (np->frag_size)
 578			mtu = np->frag_size;
 579	}
 580	mtu -= hlen + sizeof(struct frag_hdr);
 581
 582	if (skb_has_frag_list(skb)) {
 583		int first_len = skb_pagelen(skb);
 584		struct sk_buff *frag2;
 585
 586		if (first_len - hlen > mtu ||
 587		    ((first_len - hlen) & 7) ||
 588		    skb_cloned(skb))
 589			goto slow_path;
 590
 591		skb_walk_frags(skb, frag) {
 592			/* Correct geometry. */
 593			if (frag->len > mtu ||
 594			    ((frag->len & 7) && frag->next) ||
 595			    skb_headroom(frag) < hlen)
 596				goto slow_path_clean;
 597
 598			/* Partially cloned skb? */
 599			if (skb_shared(frag))
 600				goto slow_path_clean;
 601
 602			BUG_ON(frag->sk);
 603			if (skb->sk) {
 604				frag->sk = skb->sk;
 605				frag->destructor = sock_wfree;
 606			}
 607			skb->truesize -= frag->truesize;
 608		}
 609
 610		err = 0;
 611		offset = 0;
 612		frag = skb_shinfo(skb)->frag_list;
 613		skb_frag_list_init(skb);
 614		/* BUILD HEADER */
 615
 616		*prevhdr = NEXTHDR_FRAGMENT;
 617		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
 618		if (!tmp_hdr) {
 619			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 620				      IPSTATS_MIB_FRAGFAILS);
 621			return -ENOMEM;
 622		}
 623
 624		__skb_pull(skb, hlen);
 625		fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr));
 626		__skb_push(skb, hlen);
 627		skb_reset_network_header(skb);
 628		memcpy(skb_network_header(skb), tmp_hdr, hlen);
 629
 630		ipv6_select_ident(fh, rt);
 631		fh->nexthdr = nexthdr;
 632		fh->reserved = 0;
 633		fh->frag_off = htons(IP6_MF);
 634		frag_id = fh->identification;
 635
 636		first_len = skb_pagelen(skb);
 637		skb->data_len = first_len - skb_headlen(skb);
 638		skb->len = first_len;
 639		ipv6_hdr(skb)->payload_len = htons(first_len -
 640						   sizeof(struct ipv6hdr));
 641
 642		dst_hold(&rt->dst);
 643
 644		for (;;) {
 645			/* Prepare the header of the next frame
 646			 * before the previous one is sent. */
 647			if (frag) {
 648				frag->ip_summed = CHECKSUM_NONE;
 649				skb_reset_transport_header(frag);
 650				fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr));
 651				__skb_push(frag, hlen);
 652				skb_reset_network_header(frag);
 653				memcpy(skb_network_header(frag), tmp_hdr,
 654				       hlen);
 655				offset += skb->len - hlen - sizeof(struct frag_hdr);
 656				fh->nexthdr = nexthdr;
 657				fh->reserved = 0;
 658				fh->frag_off = htons(offset);
 659				if (frag->next != NULL)
 660					fh->frag_off |= htons(IP6_MF);
 661				fh->identification = frag_id;
 662				ipv6_hdr(frag)->payload_len =
 663						htons(frag->len -
 664						      sizeof(struct ipv6hdr));
 665				ip6_copy_metadata(frag, skb);
 666			}
 667
 668			err = output(skb);
 669			if (!err)
 670				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
 671					      IPSTATS_MIB_FRAGCREATES);
 672
 673			if (err || !frag)
 674				break;
 675
 676			skb = frag;
 677			frag = skb->next;
 678			skb->next = NULL;
 679		}
 680
 681		kfree(tmp_hdr);
 682
 683		if (err == 0) {
 684			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
 685				      IPSTATS_MIB_FRAGOKS);
 686			ip6_rt_put(rt);
 687			return 0;
 688		}
 689
 690		while (frag) {
 691			skb = frag->next;
 692			kfree_skb(frag);
 693			frag = skb;
 694		}
 695
 696		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
 697			      IPSTATS_MIB_FRAGFAILS);
 698		ip6_rt_put(rt);
 699		return err;
 700
 701slow_path_clean:
 702		skb_walk_frags(skb, frag2) {
 703			if (frag2 == frag)
 704				break;
 705			frag2->sk = NULL;
 706			frag2->destructor = NULL;
 707			skb->truesize += frag2->truesize;
 708		}
 709	}
 710
 711slow_path:
 712	if ((skb->ip_summed == CHECKSUM_PARTIAL) &&
 713	    skb_checksum_help(skb))
 714		goto fail;
 715
 716	left = skb->len - hlen;		/* Space per frame */
 717	ptr = hlen;			/* Where to start from */
 718
 719	/*
 720	 *	Fragment the datagram.
 721	 */
 722
 723	*prevhdr = NEXTHDR_FRAGMENT;
 724	hroom = LL_RESERVED_SPACE(rt->dst.dev);
 725	troom = rt->dst.dev->needed_tailroom;
 726
 727	/*
 728	 *	Keep copying data until we run out.
 729	 */
 730	while (left > 0) {
 731		len = left;
 732		/* IF: it doesn't fit, use 'mtu' - the data space left */
 733		if (len > mtu)
 734			len = mtu;
 735		/* IF: we are not sending up to and including the packet end
 736		   then align the next start on an eight byte boundary */
 737		if (len < left)	{
 738			len &= ~7;
 739		}
 740		/*
 741		 *	Allocate buffer.
 742		 */
 743
 744		if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
 745				      hroom + troom, GFP_ATOMIC)) == NULL) {
 746			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
 747			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 748				      IPSTATS_MIB_FRAGFAILS);
 749			err = -ENOMEM;
 750			goto fail;
 751		}
 752
 753		/*
 754		 *	Set up data on packet
 755		 */
 756
 757		ip6_copy_metadata(frag, skb);
 758		skb_reserve(frag, hroom);
 759		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
 760		skb_reset_network_header(frag);
 761		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
 762		frag->transport_header = (frag->network_header + hlen +
 763					  sizeof(struct frag_hdr));
 764
 765		/*
 766		 *	Charge the memory for the fragment to any owner
 767		 *	it might possess
 768		 */
 769		if (skb->sk)
 770			skb_set_owner_w(frag, skb->sk);
 771
 772		/*
 773		 *	Copy the packet header into the new buffer.
 774		 */
 775		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
 776
 777		/*
 778		 *	Build fragment header.
 779		 */
 780		fh->nexthdr = nexthdr;
 781		fh->reserved = 0;
 782		if (!frag_id) {
 783			ipv6_select_ident(fh, rt);
 784			frag_id = fh->identification;
 785		} else
 786			fh->identification = frag_id;
 787
 788		/*
 789		 *	Copy a block of the IP datagram.
 790		 */
 791		if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
 792			BUG();
 793		left -= len;
 794
 795		fh->frag_off = htons(offset);
 796		if (left > 0)
 797			fh->frag_off |= htons(IP6_MF);
 798		ipv6_hdr(frag)->payload_len = htons(frag->len -
 799						    sizeof(struct ipv6hdr));
 800
 801		ptr += len;
 802		offset += len;
 803
 804		/*
 805		 *	Put this fragment into the sending queue.
 806		 */
 807		err = output(frag);
 808		if (err)
 809			goto fail;
 810
 811		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 812			      IPSTATS_MIB_FRAGCREATES);
 813	}
 814	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 815		      IPSTATS_MIB_FRAGOKS);
 816	consume_skb(skb);
 817	return err;
 818
 819fail:
 820	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 821		      IPSTATS_MIB_FRAGFAILS);
 822	kfree_skb(skb);
 823	return err;
 824}
 825
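/*
 * ip6_rt_check() returns 0 ("cached route still usable") when either the
 * cached route is a /128 host route for fl_addr, or fl_addr equals the
 * address the socket last used (addr_cache); any other combination returns
 * nonzero, which makes ip6_sk_dst_check() below drop the cached dst.
 */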
 826static inline int ip6_rt_check(const struct rt6key *rt_key,
 827			       const struct in6_addr *fl_addr,
 828			       const struct in6_addr *addr_cache)
 829{
 830	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
 831		(addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
 832}
 833
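/*
 * Validate a socket's cached dst against the flow about to be sent: the
 * cached entry is released and NULL is returned (forcing the caller to do
 * a fresh lookup) when it is not an IPv6 route, when ip6_rt_check() says
 * the destination (or, with CONFIG_IPV6_SUBTREES, the source) no longer
 * matches, or when the flow's oif differs from the cached device.
 */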
 834static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
 835					  struct dst_entry *dst,
 836					  const struct flowi6 *fl6)
 837{
 838	struct ipv6_pinfo *np = inet6_sk(sk);
 839	struct rt6_info *rt;
 840
 841	if (!dst)
 842		goto out;
 843
 844	if (dst->ops->family != AF_INET6) {
 845		dst_release(dst);
 846		return NULL;
 847	}
 848
 849	rt = (struct rt6_info *)dst;
 850	/* Yes, checking route validity in the not-connected
 851	 * case is not very simple. Take into account
 852	 * that we do not support routing by source, TOS,
 853	 * or MSG_DONTROUTE		--ANK (980726)
 854	 *
 855	 * 1. ip6_rt_check(): if the route is a host route,
 856	 *    check that the cached destination is current.
 857	 *    If it is a network route, we can still
 858	 *    check its validity using a saved pointer
 859	 *    to the last used address: daddr_cache.
 860	 *    We do not want to save the whole address now,
 861	 *    (because the main consumer of this service
 862	 *    is TCP, which does not have this problem),
 863	 *    so this last trick works only on connected
 864	 *    sockets.
 865	 * 2. oif should also be the same.
 866	 */
 867	if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
 868#ifdef CONFIG_IPV6_SUBTREES
 869	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
 870#endif
 871	    (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
 872		dst_release(dst);
 873		dst = NULL;
 874	}
 875
 876out:
 877	return dst;
 878}
 879
 880static int ip6_dst_lookup_tail(struct sock *sk,
 881			       struct dst_entry **dst, struct flowi6 *fl6)
 882{
 883	struct net *net = sock_net(sk);
 884#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
 885	struct neighbour *n;
 886	struct rt6_info *rt;
 887#endif
 888	int err;
 889
 890	if (*dst == NULL)
 891		*dst = ip6_route_output(net, sk, fl6);
 892
 893	if ((err = (*dst)->error))
 894		goto out_err_release;
 895
 896	if (ipv6_addr_any(&fl6->saddr)) {
 897		struct rt6_info *rt = (struct rt6_info *) *dst;
 898		err = ip6_route_get_saddr(net, rt, &fl6->daddr,
 899					  sk ? inet6_sk(sk)->srcprefs : 0,
 900					  &fl6->saddr);
 901		if (err)
 902			goto out_err_release;
 903	}
 904
 905#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
 906	/*
 907	 * Here, if the dst entry we've looked up
 908	 * has a neighbour entry that is in the INCOMPLETE
 909	 * state and the source address from the flow is
 910	 * marked as OPTIMISTIC, we release the found
 911	 * dst entry and replace it with the dst entry
 912	 * of the nexthop router instead.
 913	 */
 914	rt = (struct rt6_info *) *dst;
 915	rcu_read_lock_bh();
 916	n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt));
 917	err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
 918	rcu_read_unlock_bh();
 919
 920	if (err) {
 921		struct inet6_ifaddr *ifp;
 922		struct flowi6 fl_gw6;
 923		int redirect;
 924
 925		ifp = ipv6_get_ifaddr(net, &fl6->saddr,
 926				      (*dst)->dev, 1);
 927
 928		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
 929		if (ifp)
 930			in6_ifa_put(ifp);
 931
 932		if (redirect) {
 933			/*
 934			 * We need to get the dst entry for the
 935			 * default router instead
 936			 */
 937			dst_release(*dst);
 938			memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
 939			memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
 940			*dst = ip6_route_output(net, sk, &fl_gw6);
 941			if ((err = (*dst)->error))
 942				goto out_err_release;
 943		}
 944	}
 945#endif
 946
 947	return 0;
 948
 949out_err_release:
 950	if (err == -ENETUNREACH)
 951		IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
 952	dst_release(*dst);
 953	*dst = NULL;
 954	return err;
 955}
 956
 957/**
 958 *	ip6_dst_lookup - perform route lookup on flow
 959 *	@sk: socket which provides route info
 960 *	@dst: pointer to dst_entry * for result
 961 *	@fl6: flow to lookup
 962 *
 963 *	This function performs a route lookup on the given flow.
 964 *
 965 *	It returns zero on success, or a standard errno code on error.
 966 */
 967int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6)
 968{
 969	*dst = NULL;
 970	return ip6_dst_lookup_tail(sk, dst, fl6);
 971}
 972EXPORT_SYMBOL_GPL(ip6_dst_lookup);
 973
 974/**
 975 *	ip6_dst_lookup_flow - perform route lookup on flow with ipsec
 976 *	@sk: socket which provides route info
 977 *	@fl6: flow to lookup
 978 *	@final_dst: final destination address for ipsec lookup
 979 *
 980 *	This function performs a route lookup on the given flow.
 981 *
 982 *	It returns a valid dst pointer on success, or a pointer-encoded
 983 *	error code.
 984 */
 985struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
 986				      const struct in6_addr *final_dst)
 987{
 988	struct dst_entry *dst = NULL;
 989	int err;
 990
 991	err = ip6_dst_lookup_tail(sk, &dst, fl6);
 992	if (err)
 993		return ERR_PTR(err);
 994	if (final_dst)
 995		fl6->daddr = *final_dst;
 996
 997	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
 998}
 999EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
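/*
 * Callers check the result with IS_ERR() rather than comparing against
 * NULL; an illustrative (not verbatim) calling sequence:
 *
 *	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
 *	if (IS_ERR(dst)) {
 *		err = PTR_ERR(dst);
 *		goto out;
 *	}
 *
 * On success the returned entry has already been through xfrm_lookup(),
 * so it may be an IPsec bundle rather than a plain routing entry.
 */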
1000
1001/**
1002 *	ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
1003 *	@sk: socket which provides the dst cache and route info
1004 *	@fl6: flow to lookup
1005 *	@final_dst: final destination address for ipsec lookup
1006 *
1007 *	This function performs a route lookup on the given flow with the
1008 *	possibility of using the cached route in the socket if it is valid.
1009 *	It will take the socket dst lock when operating on the dst cache.
1010 *	As a result, this function can only be used in process context.
1011 *
1012 *	It returns a valid dst pointer on success, or a pointer-encoded
1013 *	error code.
1014 */
1015struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
1016					 const struct in6_addr *final_dst)
1017{
1018	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
1019	int err;
1020
1021	dst = ip6_sk_dst_check(sk, dst, fl6);
1022
1023	err = ip6_dst_lookup_tail(sk, &dst, fl6);
1024	if (err)
1025		return ERR_PTR(err);
1026	if (final_dst)
1027		fl6->daddr = *final_dst;
1028
1029	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
1030}
1031EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
1032
1033static inline int ip6_ufo_append_data(struct sock *sk,
1034			int getfrag(void *from, char *to, int offset, int len,
1035			int odd, struct sk_buff *skb),
1036			void *from, int length, int hh_len, int fragheaderlen,
1037			int transhdrlen, int mtu, unsigned int flags,
1038			struct rt6_info *rt)
1039
1040{
1041	struct sk_buff *skb;
1042	struct frag_hdr fhdr;
1043	int err;
1044
1045	/* The network device supports UDP large send offload,
1046	 * so create one single skb containing the complete
1047	 * UDP datagram.
1048	 */
1049	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
1050		skb = sock_alloc_send_skb(sk,
1051			hh_len + fragheaderlen + transhdrlen + 20,
1052			(flags & MSG_DONTWAIT), &err);
1053		if (skb == NULL)
1054			return err;
1055
1056		/* reserve space for Hardware header */
1057		skb_reserve(skb, hh_len);
1058
1059		/* create space for UDP/IP header */
1060		skb_put(skb, fragheaderlen + transhdrlen);
1061
1062		/* initialize network header pointer */
1063		skb_reset_network_header(skb);
1064
1065		/* initialize protocol header pointer */
1066		skb->transport_header = skb->network_header + fragheaderlen;
1067
1068		skb->protocol = htons(ETH_P_IPV6);
1069		skb->csum = 0;
1070
1071		__skb_queue_tail(&sk->sk_write_queue, skb);
1072	} else if (skb_is_gso(skb)) {
1073		goto append;
1074	}
1075
1076	skb->ip_summed = CHECKSUM_PARTIAL;
1077	/* Specify the length of each IPv6 datagram fragment.
1078	 * It has to be a multiple of 8.
1079	 */
1080	skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
1081				     sizeof(struct frag_hdr)) & ~7;
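	/* For example, with a hypothetical mtu of 1500 and a fragheaderlen of
	 * 40 (a bare IPv6 header), gso_size becomes (1500 - 40 - 8) & ~7 =
	 * 1448: the largest multiple of 8 that still leaves room for the
	 * 8-byte fragment header in every fragment.
	 */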
1082	skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1083	ipv6_select_ident(&fhdr, rt);
1084	skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
1085
1086append:
1087	return skb_append_datato_frags(sk, skb, getfrag, from,
1088				       (length - transhdrlen));
1089}
1090
1091static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
1092					       gfp_t gfp)
1093{
1094	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1095}
1096
1097static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
1098						gfp_t gfp)
1099{
1100	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1101}
1102
1103static void ip6_append_data_mtu(unsigned int *mtu,
1104				int *maxfraglen,
1105				unsigned int fragheaderlen,
1106				struct sk_buff *skb,
1107				struct rt6_info *rt,
1108				unsigned int orig_mtu)
1109{
1110	if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
1111		if (skb == NULL) {
1112			/* first fragment, reserve header_len */
1113			*mtu = orig_mtu - rt->dst.header_len;
1114
1115		} else {
1116			/*
1117			 * this fragment is not the first; the header
1118			 * space is regarded as data space.
1119			 */
1120			*mtu = orig_mtu;
1121		}
1122		*maxfraglen = ((*mtu - fragheaderlen) & ~7)
1123			      + fragheaderlen - sizeof(struct frag_hdr);
1124	}
1125}
1126
1127int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1128	int offset, int len, int odd, struct sk_buff *skb),
1129	void *from, int length, int transhdrlen,
1130	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
1131	struct rt6_info *rt, unsigned int flags, int dontfrag)
1132{
1133	struct inet_sock *inet = inet_sk(sk);
1134	struct ipv6_pinfo *np = inet6_sk(sk);
1135	struct inet_cork *cork;
1136	struct sk_buff *skb, *skb_prev = NULL;
1137	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
1138	int exthdrlen;
1139	int dst_exthdrlen;
1140	int hh_len;
1141	int copy;
1142	int err;
1143	int offset = 0;
1144	__u8 tx_flags = 0;
1145
1146	if (flags&MSG_PROBE)
1147		return 0;
1148	cork = &inet->cork.base;
1149	if (skb_queue_empty(&sk->sk_write_queue)) {
1150		/*
1151		 * setup for corking
1152		 */
1153		if (opt) {
1154			if (WARN_ON(np->cork.opt))
1155				return -EINVAL;
1156
1157			np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation);
1158			if (unlikely(np->cork.opt == NULL))
1159				return -ENOBUFS;
1160
1161			np->cork.opt->tot_len = opt->tot_len;
1162			np->cork.opt->opt_flen = opt->opt_flen;
1163			np->cork.opt->opt_nflen = opt->opt_nflen;
1164
1165			np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
1166							    sk->sk_allocation);
1167			if (opt->dst0opt && !np->cork.opt->dst0opt)
1168				return -ENOBUFS;
1169
1170			np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
1171							    sk->sk_allocation);
1172			if (opt->dst1opt && !np->cork.opt->dst1opt)
1173				return -ENOBUFS;
1174
1175			np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
1176							   sk->sk_allocation);
1177			if (opt->hopopt && !np->cork.opt->hopopt)
1178				return -ENOBUFS;
1179
1180			np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
1181							    sk->sk_allocation);
1182			if (opt->srcrt && !np->cork.opt->srcrt)
1183				return -ENOBUFS;
1184
1185			/* need source address above - miyazawa */
1186		}
1187		dst_hold(&rt->dst);
1188		cork->dst = &rt->dst;
1189		inet->cork.fl.u.ip6 = *fl6;
1190		np->cork.hop_limit = hlimit;
1191		np->cork.tclass = tclass;
1192		if (rt->dst.flags & DST_XFRM_TUNNEL)
1193			mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1194			      rt->dst.dev->mtu : dst_mtu(&rt->dst);
1195		else
1196			mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1197			      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
1198		if (np->frag_size < mtu) {
1199			if (np->frag_size)
1200				mtu = np->frag_size;
1201		}
1202		cork->fragsize = mtu;
1203		if (dst_allfrag(rt->dst.path))
1204			cork->flags |= IPCORK_ALLFRAG;
1205		cork->length = 0;
1206		exthdrlen = (opt ? opt->opt_flen : 0);
1207		length += exthdrlen;
1208		transhdrlen += exthdrlen;
1209		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
1210	} else {
1211		rt = (struct rt6_info *)cork->dst;
1212		fl6 = &inet->cork.fl.u.ip6;
1213		opt = np->cork.opt;
1214		transhdrlen = 0;
1215		exthdrlen = 0;
1216		dst_exthdrlen = 0;
1217		mtu = cork->fragsize;
1218	}
1219	orig_mtu = mtu;
1220
1221	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
1222
1223	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
1224			(opt ? opt->opt_nflen : 0);
1225	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
1226		     sizeof(struct frag_hdr);
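	/* For example, with a hypothetical mtu of 1500 and fragheaderlen of 40,
	 * maxfraglen = ((1500 - 40) & ~7) + 40 - 8 = 1488; each non-final
	 * fragment then carries 1448 bytes of payload (a multiple of 8), so
	 * with the 8-byte fragment header added the packet stays within the
	 * 1500-byte MTU.
	 */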
1227
1228	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
1229		unsigned int maxnonfragsize, headersize;
1230
1231		headersize = sizeof(struct ipv6hdr) +
1232			     (opt ? opt->opt_flen + opt->opt_nflen : 0) +
1233			     (dst_allfrag(&rt->dst) ?
1234			      sizeof(struct frag_hdr) : 0) +
1235			     rt->rt6i_nfheader_len;
1236
1237		if (ip6_sk_local_df(sk))
1238			maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
1239		else
1240			maxnonfragsize = mtu;
1241
1242		/* dontfrag active */
1243		if ((cork->length + length > mtu - headersize) && dontfrag &&
1244		    (sk->sk_protocol == IPPROTO_UDP ||
1245		     sk->sk_protocol == IPPROTO_RAW)) {
1246			ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
1247						   sizeof(struct ipv6hdr));
1248			goto emsgsize;
1249		}
1250
1251		if (cork->length + length > maxnonfragsize - headersize) {
1252emsgsize:
1253			ipv6_local_error(sk, EMSGSIZE, fl6,
1254					 mtu - headersize +
1255					 sizeof(struct ipv6hdr));
1256			return -EMSGSIZE;
1257		}
1258	}
1259
1260	/* For UDP, check if TX timestamp is enabled */
1261	if (sk->sk_type == SOCK_DGRAM)
1262		sock_tx_timestamp(sk, &tx_flags);
1263
1264	/*
1265	 * Let's try using as much space as possible.
1266	 * Use MTU if total length of the message fits into the MTU.
1267	 * Otherwise, we need to reserve fragment header and
1268	 * fragment alignment (= 8-15 octets, in total).
1269	 *
1270	 * Note that we may need to "move" the data from the tail
1271	 * of the buffer to the new fragment when we split
1272	 * the message.
1273	 *
1274	 * FIXME: It may be fragmented into multiple chunks
1275	 *        at once if non-fragmentable extension headers
1276	 *        are too large.
1277	 * --yoshfuji
1278	 */
1279
1280	skb = skb_peek_tail(&sk->sk_write_queue);
1281	cork->length += length;
1282	if (((length > mtu) ||
1283	     (skb && skb_is_gso(skb))) &&
1284	    (sk->sk_protocol == IPPROTO_UDP) &&
1285	    (rt->dst.dev->features & NETIF_F_UFO)) {
1286		err = ip6_ufo_append_data(sk, getfrag, from, length,
1287					  hh_len, fragheaderlen,
1288					  transhdrlen, mtu, flags, rt);
1289		if (err)
1290			goto error;
1291		return 0;
1292	}
1293
1294	if (!skb)
1295		goto alloc_new_skb;
1296
1297	while (length > 0) {
1298		/* Check if the remaining data fits into current packet. */
1299		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
1300		if (copy < length)
1301			copy = maxfraglen - skb->len;
1302
1303		if (copy <= 0) {
1304			char *data;
1305			unsigned int datalen;
1306			unsigned int fraglen;
1307			unsigned int fraggap;
1308			unsigned int alloclen;
1309alloc_new_skb:
1310			/* There's no room in the current skb */
1311			if (skb)
1312				fraggap = skb->len - maxfraglen;
1313			else
1314				fraggap = 0;
1315			/* update mtu and maxfraglen if necessary */
1316			if (skb == NULL || skb_prev == NULL)
1317				ip6_append_data_mtu(&mtu, &maxfraglen,
1318						    fragheaderlen, skb, rt,
1319						    orig_mtu);
1320
1321			skb_prev = skb;
1322
1323			/*
1324			 * If remaining data exceeds the mtu,
1325			 * we know we need more fragment(s).
1326			 */
1327			datalen = length + fraggap;
1328
1329			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
1330				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
1331			if ((flags & MSG_MORE) &&
1332			    !(rt->dst.dev->features&NETIF_F_SG))
1333				alloclen = mtu;
1334			else
1335				alloclen = datalen + fragheaderlen;
1336
1337			alloclen += dst_exthdrlen;
1338
1339			if (datalen != length + fraggap) {
1340				/*
1341				 * this is not the last fragment; the trailer
1342				 * space is regarded as data space.
1343				 */
1344				datalen += rt->dst.trailer_len;
1345			}
1346
1347			alloclen += rt->dst.trailer_len;
1348			fraglen = datalen + fragheaderlen;
1349
1350			/*
1351			 * We just reserve space for the fragment header.
1352			 * Note: this may be an overallocation if the message
1353			 * (without MSG_MORE) fits into the MTU.
1354			 */
1355			alloclen += sizeof(struct frag_hdr);
1356
1357			if (transhdrlen) {
1358				skb = sock_alloc_send_skb(sk,
1359						alloclen + hh_len,
1360						(flags & MSG_DONTWAIT), &err);
1361			} else {
1362				skb = NULL;
1363				if (atomic_read(&sk->sk_wmem_alloc) <=
1364				    2 * sk->sk_sndbuf)
1365					skb = sock_wmalloc(sk,
1366							   alloclen + hh_len, 1,
1367							   sk->sk_allocation);
1368				if (unlikely(skb == NULL))
1369					err = -ENOBUFS;
1370				else {
1371					/* Only the initial fragment
1372					 * is time stamped.
1373					 */
1374					tx_flags = 0;
1375				}
1376			}
1377			if (skb == NULL)
1378				goto error;
1379			/*
1380			 *	Fill in the control structures
1381			 */
1382			skb->protocol = htons(ETH_P_IPV6);
1383			skb->ip_summed = CHECKSUM_NONE;
1384			skb->csum = 0;
1385			/* reserve for fragmentation and ipsec header */
1386			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
1387				    dst_exthdrlen);
1388
1389			if (sk->sk_type == SOCK_DGRAM)
1390				skb_shinfo(skb)->tx_flags = tx_flags;
1391
1392			/*
1393			 *	Find where to start putting bytes
1394			 */
1395			data = skb_put(skb, fraglen);
1396			skb_set_network_header(skb, exthdrlen);
1397			data += fragheaderlen;
1398			skb->transport_header = (skb->network_header +
1399						 fragheaderlen);
1400			if (fraggap) {
1401				skb->csum = skb_copy_and_csum_bits(
1402					skb_prev, maxfraglen,
1403					data + transhdrlen, fraggap, 0);
1404				skb_prev->csum = csum_sub(skb_prev->csum,
1405							  skb->csum);
1406				data += fraggap;
1407				pskb_trim_unique(skb_prev, maxfraglen);
1408			}
1409			copy = datalen - transhdrlen - fraggap;
1410
1411			if (copy < 0) {
1412				err = -EINVAL;
1413				kfree_skb(skb);
1414				goto error;
1415			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
1416				err = -EFAULT;
1417				kfree_skb(skb);
1418				goto error;
1419			}
1420
1421			offset += copy;
1422			length -= datalen - fraggap;
1423			transhdrlen = 0;
1424			exthdrlen = 0;
1425			dst_exthdrlen = 0;
1426
1427			/*
1428			 * Put the packet on the pending queue
1429			 */
1430			__skb_queue_tail(&sk->sk_write_queue, skb);
1431			continue;
1432		}
1433
1434		if (copy > length)
1435			copy = length;
1436
1437		if (!(rt->dst.dev->features&NETIF_F_SG)) {
1438			unsigned int off;
1439
1440			off = skb->len;
1441			if (getfrag(from, skb_put(skb, copy),
1442						offset, copy, off, skb) < 0) {
1443				__skb_trim(skb, off);
1444				err = -EFAULT;
1445				goto error;
1446			}
1447		} else {
1448			int i = skb_shinfo(skb)->nr_frags;
1449			struct page_frag *pfrag = sk_page_frag(sk);
1450
1451			err = -ENOMEM;
1452			if (!sk_page_frag_refill(sk, pfrag))
1453				goto error;
1454
1455			if (!skb_can_coalesce(skb, i, pfrag->page,
1456					      pfrag->offset)) {
1457				err = -EMSGSIZE;
1458				if (i == MAX_SKB_FRAGS)
1459					goto error;
1460
1461				__skb_fill_page_desc(skb, i, pfrag->page,
1462						     pfrag->offset, 0);
1463				skb_shinfo(skb)->nr_frags = ++i;
1464				get_page(pfrag->page);
1465			}
1466			copy = min_t(int, copy, pfrag->size - pfrag->offset);
1467			if (getfrag(from,
1468				    page_address(pfrag->page) + pfrag->offset,
1469				    offset, copy, skb->len, skb) < 0)
1470				goto error_efault;
1471
1472			pfrag->offset += copy;
1473			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1474			skb->len += copy;
1475			skb->data_len += copy;
1476			skb->truesize += copy;
1477			atomic_add(copy, &sk->sk_wmem_alloc);
1478		}
1479		offset += copy;
1480		length -= copy;
1481	}
1482
1483	return 0;
1484
1485error_efault:
1486	err = -EFAULT;
1487error:
1488	cork->length -= length;
1489	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1490	return err;
1491}
1492EXPORT_SYMBOL_GPL(ip6_append_data);
1493
1494static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
1495{
1496	if (np->cork.opt) {
1497		kfree(np->cork.opt->dst0opt);
1498		kfree(np->cork.opt->dst1opt);
1499		kfree(np->cork.opt->hopopt);
1500		kfree(np->cork.opt->srcrt);
1501		kfree(np->cork.opt);
1502		np->cork.opt = NULL;
1503	}
1504
1505	if (inet->cork.base.dst) {
1506		dst_release(inet->cork.base.dst);
1507		inet->cork.base.dst = NULL;
1508		inet->cork.base.flags &= ~IPCORK_ALLFRAG;
1509	}
1510	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
1511}
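/*
 * The corked-output flow: ip6_append_data() queues one or more partially
 * built skbs on sk->sk_write_queue; ip6_push_pending_frames() below splices
 * them into a single skb (extra buffers become its frag_list), prepends the
 * IPv6 header and any corked extension headers, and hands the result to
 * ip6_local_out().  ip6_flush_pending_frames() discards whatever is queued
 * without sending it.  A datagram sender typically does, roughly (sketch
 * only, real callers pass many more parameters):
 *
 *	err = ip6_append_data(sk, getfrag, msg, len, ...);
 *	if (err)
 *		ip6_flush_pending_frames(sk);
 *	else if (!(msg->msg_flags & MSG_MORE))
 *		err = ip6_push_pending_frames(sk);
 */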
1512
1513int ip6_push_pending_frames(struct sock *sk)
1514{
1515	struct sk_buff *skb, *tmp_skb;
1516	struct sk_buff **tail_skb;
1517	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
1518	struct inet_sock *inet = inet_sk(sk);
1519	struct ipv6_pinfo *np = inet6_sk(sk);
1520	struct net *net = sock_net(sk);
1521	struct ipv6hdr *hdr;
1522	struct ipv6_txoptions *opt = np->cork.opt;
1523	struct rt6_info *rt = (struct rt6_info *)inet->cork.base.dst;
1524	struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
1525	unsigned char proto = fl6->flowi6_proto;
1526	int err = 0;
1527
1528	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
1529		goto out;
1530	tail_skb = &(skb_shinfo(skb)->frag_list);
1531
1532	/* move skb->data to ip header from ext header */
1533	if (skb->data < skb_network_header(skb))
1534		__skb_pull(skb, skb_network_offset(skb));
1535	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
1536		__skb_pull(tmp_skb, skb_network_header_len(skb));
1537		*tail_skb = tmp_skb;
1538		tail_skb = &(tmp_skb->next);
1539		skb->len += tmp_skb->len;
1540		skb->data_len += tmp_skb->len;
1541		skb->truesize += tmp_skb->truesize;
1542		tmp_skb->destructor = NULL;
1543		tmp_skb->sk = NULL;
1544	}
1545
1546	/* Allow local fragmentation. */
1547	skb->local_df = ip6_sk_local_df(sk);
1548
1549	*final_dst = fl6->daddr;
1550	__skb_pull(skb, skb_network_header_len(skb));
1551	if (opt && opt->opt_flen)
1552		ipv6_push_frag_opts(skb, opt, &proto);
1553	if (opt && opt->opt_nflen)
1554		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);
1555
1556	skb_push(skb, sizeof(struct ipv6hdr));
1557	skb_reset_network_header(skb);
1558	hdr = ipv6_hdr(skb);
1559
1560	ip6_flow_hdr(hdr, np->cork.tclass, fl6->flowlabel);
1561	hdr->hop_limit = np->cork.hop_limit;
1562	hdr->nexthdr = proto;
1563	hdr->saddr = fl6->saddr;
1564	hdr->daddr = *final_dst;
1565
1566	skb->priority = sk->sk_priority;
1567	skb->mark = sk->sk_mark;
1568
1569	skb_dst_set(skb, dst_clone(&rt->dst));
1570	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
1571	if (proto == IPPROTO_ICMPV6) {
1572		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
1573
1574		ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
1575		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1576	}
1577
1578	err = ip6_local_out(skb);
1579	if (err) {
1580		if (err > 0)
1581			err = net_xmit_errno(err);
1582		if (err)
1583			goto error;
1584	}
1585
1586out:
1587	ip6_cork_release(inet, np);
1588	return err;
1589error:
1590	IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1591	goto out;
1592}
1593EXPORT_SYMBOL_GPL(ip6_push_pending_frames);
1594
1595void ip6_flush_pending_frames(struct sock *sk)
1596{
1597	struct sk_buff *skb;
1598
1599	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
1600		if (skb_dst(skb))
1601			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
1602				      IPSTATS_MIB_OUTDISCARDS);
1603		kfree_skb(skb);
1604	}
1605
1606	ip6_cork_release(inet_sk(sk), inet6_sk(sk));
1607}
1608EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);