v3.1 (net/ipv6/ip6_output.c)
   1/*
   2 *	IPv6 output functions
   3 *	Linux INET6 implementation
   4 *
   5 *	Authors:
   6 *	Pedro Roque		<roque@di.fc.ul.pt>
   7 *
   8 *	Based on linux/net/ipv4/ip_output.c
   9 *
  10 *	This program is free software; you can redistribute it and/or
  11 *      modify it under the terms of the GNU General Public License
  12 *      as published by the Free Software Foundation; either version
  13 *      2 of the License, or (at your option) any later version.
  14 *
  15 *	Changes:
   16 *	A.N.Kuznetsov	:	arithmetics in fragmentation.
  17 *				extension headers are implemented.
  18 *				route changes now work.
  19 *				ip6_forward does not confuse sniffers.
  20 *				etc.
  21 *
  22 *      H. von Brand    :       Added missing #include <linux/string.h>
  23 *	Imran Patel	: 	frag id should be in NBO
  24 *      Kazunori MIYAZAWA @USAGI
  25 *			:       add ip6_append_data and related functions
  26 *				for datagram xmit
  27 */
  28
  29#include <linux/errno.h>
  30#include <linux/kernel.h>
  31#include <linux/string.h>
  32#include <linux/socket.h>
  33#include <linux/net.h>
  34#include <linux/netdevice.h>
  35#include <linux/if_arp.h>
  36#include <linux/in6.h>
  37#include <linux/tcp.h>
  38#include <linux/route.h>
  39#include <linux/module.h>
  40#include <linux/slab.h>
  41
  42#include <linux/netfilter.h>
  43#include <linux/netfilter_ipv6.h>
  44
  45#include <net/sock.h>
  46#include <net/snmp.h>
  47
  48#include <net/ipv6.h>
  49#include <net/ndisc.h>
  50#include <net/protocol.h>
  51#include <net/ip6_route.h>
  52#include <net/addrconf.h>
  53#include <net/rawv6.h>
  54#include <net/icmp.h>
  55#include <net/xfrm.h>
  56#include <net/checksum.h>
  57#include <linux/mroute6.h>
  58
  59int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
  60
  61int __ip6_local_out(struct sk_buff *skb)
  62{
  63	int len;
  64
  65	len = skb->len - sizeof(struct ipv6hdr);
  66	if (len > IPV6_MAXPLEN)
  67		len = 0;
  68	ipv6_hdr(skb)->payload_len = htons(len);
  69
  70	return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
  71		       skb_dst(skb)->dev, dst_output);
  72}
  73
  74int ip6_local_out(struct sk_buff *skb)
  75{
  76	int err;
  77
  78	err = __ip6_local_out(skb);
  79	if (likely(err == 1))
  80		err = dst_output(skb);
  81
  82	return err;
  83}
  84EXPORT_SYMBOL_GPL(ip6_local_out);
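/*
 * Editor's note — illustrative sketch, not part of this file: how the
 * 16-bit payload_len written by __ip6_local_out() above is derived.
 * The constants mirror sizeof(struct ipv6hdr) == 40 and
 * IPV6_MAXPLEN == 65535; the function name is hypothetical.
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

static uint16_t example_payload_len(unsigned int skb_len)
{
	unsigned int len = skb_len - 40;	/* strip the fixed IPv6 header */

	if (len > 65535)	/* IPV6_MAXPLEN: too big for the 16-bit field */
		len = 0;	/* 0 signals a jumbogram (RFC 2675) */
	return htons(len);	/* the wire field is big-endian */
}

int main(void)
{
	printf("%u\n", ntohs(example_payload_len(1540)));	/* prints 1500 */
	return 0;
}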
  85
  86/* dev_loopback_xmit for use with netfilter. */
  87static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
  88{
  89	skb_reset_mac_header(newskb);
  90	__skb_pull(newskb, skb_network_offset(newskb));
  91	newskb->pkt_type = PACKET_LOOPBACK;
  92	newskb->ip_summed = CHECKSUM_UNNECESSARY;
  93	WARN_ON(!skb_dst(newskb));
  94
  95	netif_rx_ni(newskb);
  96	return 0;
  97}
  98
  99static int ip6_finish_output2(struct sk_buff *skb)
 100{
 101	struct dst_entry *dst = skb_dst(skb);
 102	struct net_device *dev = dst->dev;
 103	struct neighbour *neigh;
 104
 105	skb->protocol = htons(ETH_P_IPV6);
 106	skb->dev = dev;
 107
 108	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
 109		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
 110
 111		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
 112		    ((mroute6_socket(dev_net(dev), skb) &&
 113		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
 114		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
 115					 &ipv6_hdr(skb)->saddr))) {
 116			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
 117
 118			/* Do not check for IFF_ALLMULTI; multicast routing
 119			   is not supported in any case.
 120			 */
 121			if (newskb)
 122				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
 123					newskb, NULL, newskb->dev,
 124					ip6_dev_loopback_xmit);
 125
 126			if (ipv6_hdr(skb)->hop_limit == 0) {
 127				IP6_INC_STATS(dev_net(dev), idev,
 128					      IPSTATS_MIB_OUTDISCARDS);
 129				kfree_skb(skb);
 130				return 0;
 131			}
 132		}
 133
 134		IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
 135				skb->len);
 136	}
 137
 138	rcu_read_lock();
 139	neigh = dst_get_neighbour(dst);
 140	if (neigh) {
 141		int res = neigh_output(neigh, skb);
 142
 143		rcu_read_unlock();
 144		return res;
 145	}
 146	rcu_read_unlock();
 147	IP6_INC_STATS_BH(dev_net(dst->dev),
 148			 ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
 149	kfree_skb(skb);
 150	return -EINVAL;
 151}
 152
 153static int ip6_finish_output(struct sk_buff *skb)
 154{
 155	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
 156	    dst_allfrag(skb_dst(skb)))
 157		return ip6_fragment(skb, ip6_finish_output2);
 158	else
 159		return ip6_finish_output2(skb);
 160}
 161
 162int ip6_output(struct sk_buff *skb)
 163{
 164	struct net_device *dev = skb_dst(skb)->dev;
 165	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
 166	if (unlikely(idev->cnf.disable_ipv6)) {
 167		IP6_INC_STATS(dev_net(dev), idev,
 168			      IPSTATS_MIB_OUTDISCARDS);
 169		kfree_skb(skb);
 170		return 0;
 171	}
 172
 173	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
 174			    ip6_finish_output,
 175			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 176}
 177
 178/*
 179 *	xmit an sk_buff (used by TCP, SCTP and DCCP)
 180 */
 181
 182int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 183	     struct ipv6_txoptions *opt)
 184{
 185	struct net *net = sock_net(sk);
 186	struct ipv6_pinfo *np = inet6_sk(sk);
 187	struct in6_addr *first_hop = &fl6->daddr;
 188	struct dst_entry *dst = skb_dst(skb);
 189	struct ipv6hdr *hdr;
 190	u8  proto = fl6->flowi6_proto;
 191	int seg_len = skb->len;
 192	int hlimit = -1;
 193	int tclass = 0;
 194	u32 mtu;
 195
 196	if (opt) {
 197		unsigned int head_room;
 198
 199		/* First: exthdrs may take lots of space (~8K for now)
 200		   MAX_HEADER is not enough.
 201		 */
 202		head_room = opt->opt_nflen + opt->opt_flen;
 203		seg_len += head_room;
 204		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
 205
 206		if (skb_headroom(skb) < head_room) {
 207			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
 208			if (skb2 == NULL) {
 209				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 210					      IPSTATS_MIB_OUTDISCARDS);
 211				kfree_skb(skb);
 212				return -ENOBUFS;
 213			}
 214			kfree_skb(skb);
 215			skb = skb2;
 216			skb_set_owner_w(skb, sk);
 217		}
 218		if (opt->opt_flen)
 219			ipv6_push_frag_opts(skb, opt, &proto);
 220		if (opt->opt_nflen)
 221			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
 222	}
 223
 224	skb_push(skb, sizeof(struct ipv6hdr));
 225	skb_reset_network_header(skb);
 226	hdr = ipv6_hdr(skb);
 227
 228	/*
 229	 *	Fill in the IPv6 header
 230	 */
 231	if (np) {
 232		tclass = np->tclass;
 233		hlimit = np->hop_limit;
 234	}
 235	if (hlimit < 0)
 236		hlimit = ip6_dst_hoplimit(dst);
 237
 238	*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl6->flowlabel;
 239
 240	hdr->payload_len = htons(seg_len);
 241	hdr->nexthdr = proto;
 242	hdr->hop_limit = hlimit;
 243
 244	ipv6_addr_copy(&hdr->saddr, &fl6->saddr);
 245	ipv6_addr_copy(&hdr->daddr, first_hop);
 246
 247	skb->priority = sk->sk_priority;
 248	skb->mark = sk->sk_mark;
 249
 250	mtu = dst_mtu(dst);
 251	if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
 252		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
 253			      IPSTATS_MIB_OUT, skb->len);
 254		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
 255			       dst->dev, dst_output);
 256	}
 257
 258	if (net_ratelimit())
 259		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
 260	skb->dev = dst->dev;
 261	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 262	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
 263	kfree_skb(skb);
 264	return -EMSGSIZE;
 265}
 266
 267EXPORT_SYMBOL(ip6_xmit);
 268
 269/*
  270 *	To avoid extra problems, ND packets are sent through this
  271 *	routine. It is code duplication, but we really want to avoid
  272 *	extra checks, since ipv6_build_header is used by TCP (which
  273 *	is performance-critical for us).
 274 */
 275
 276int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
 277	       const struct in6_addr *saddr, const struct in6_addr *daddr,
 278	       int proto, int len)
 279{
 280	struct ipv6_pinfo *np = inet6_sk(sk);
 281	struct ipv6hdr *hdr;
 282
 283	skb->protocol = htons(ETH_P_IPV6);
 284	skb->dev = dev;
 285
 286	skb_reset_network_header(skb);
 287	skb_put(skb, sizeof(struct ipv6hdr));
 288	hdr = ipv6_hdr(skb);
 289
 290	*(__be32*)hdr = htonl(0x60000000);
 291
 292	hdr->payload_len = htons(len);
 293	hdr->nexthdr = proto;
 294	hdr->hop_limit = np->hop_limit;
 295
 296	ipv6_addr_copy(&hdr->saddr, saddr);
 297	ipv6_addr_copy(&hdr->daddr, daddr);
 298
 299	return 0;
 300}
 301
 302static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
 303{
 304	struct ip6_ra_chain *ra;
 305	struct sock *last = NULL;
 306
 307	read_lock(&ip6_ra_lock);
 308	for (ra = ip6_ra_chain; ra; ra = ra->next) {
 309		struct sock *sk = ra->sk;
 310		if (sk && ra->sel == sel &&
 311		    (!sk->sk_bound_dev_if ||
 312		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
 313			if (last) {
 314				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 315				if (skb2)
 316					rawv6_rcv(last, skb2);
 317			}
 318			last = sk;
 319		}
 320	}
 321
 322	if (last) {
 323		rawv6_rcv(last, skb);
 324		read_unlock(&ip6_ra_lock);
 325		return 1;
 326	}
 327	read_unlock(&ip6_ra_lock);
 328	return 0;
 329}
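/*
 * Editor's note — illustrative sketch, not part of this file: the "sel"
 * value matched above is the 16-bit Router Alert option value, assembled
 * big-endian from the option's two data bytes, exactly as the caller in
 * ip6_forward() does with (ptr[2]<<8) + ptr[3]. Names are hypothetical.
 */
#include <assert.h>

static int example_ra_selector(const unsigned char *opt)
{
	/* opt[0] = option type (5), opt[1] = data length (2),
	 * opt[2..3] = big-endian value; 0 means MLD (RFC 2711) */
	return (opt[2] << 8) + opt[3];
}

int main(void)
{
	const unsigned char ra_mld[4] = { 5, 2, 0, 0 };

	assert(example_ra_selector(ra_mld) == 0);
	return 0;
}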
 330
 331static int ip6_forward_proxy_check(struct sk_buff *skb)
 332{
 333	struct ipv6hdr *hdr = ipv6_hdr(skb);
 334	u8 nexthdr = hdr->nexthdr;
 335	int offset;
 336
 337	if (ipv6_ext_hdr(nexthdr)) {
 338		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr);
 339		if (offset < 0)
 340			return 0;
 341	} else
 342		offset = sizeof(struct ipv6hdr);
 343
 344	if (nexthdr == IPPROTO_ICMPV6) {
 345		struct icmp6hdr *icmp6;
 346
 347		if (!pskb_may_pull(skb, (skb_network_header(skb) +
 348					 offset + 1 - skb->data)))
 349			return 0;
 350
 351		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);
 352
 353		switch (icmp6->icmp6_type) {
 354		case NDISC_ROUTER_SOLICITATION:
 355		case NDISC_ROUTER_ADVERTISEMENT:
 356		case NDISC_NEIGHBOUR_SOLICITATION:
 357		case NDISC_NEIGHBOUR_ADVERTISEMENT:
 358		case NDISC_REDIRECT:
 359			/* For reaction involving unicast neighbor discovery
 360			 * message destined to the proxied address, pass it to
 361			 * input function.
 362			 */
 363			return 1;
 364		default:
 365			break;
 366		}
 367	}
 368
 369	/*
 370	 * The proxying router can't forward traffic sent to a link-local
 371	 * address, so signal the sender and discard the packet. This
 372	 * behavior is clarified by the MIPv6 specification.
 373	 */
 374	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
 375		dst_link_failure(skb);
 376		return -1;
 377	}
 378
 379	return 0;
 380}
 381
 382static inline int ip6_forward_finish(struct sk_buff *skb)
 383{
 384	return dst_output(skb);
 385}
 386
 387int ip6_forward(struct sk_buff *skb)
 388{
 389	struct dst_entry *dst = skb_dst(skb);
 390	struct ipv6hdr *hdr = ipv6_hdr(skb);
 391	struct inet6_skb_parm *opt = IP6CB(skb);
 392	struct net *net = dev_net(dst->dev);
 393	struct neighbour *n;
 394	u32 mtu;
 395
 396	if (net->ipv6.devconf_all->forwarding == 0)
 397		goto error;
 398
 399	if (skb_warn_if_lro(skb))
 400		goto drop;
 401
 402	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
 403		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
 404		goto drop;
 405	}
 406
 407	if (skb->pkt_type != PACKET_HOST)
 408		goto drop;
 409
 410	skb_forward_csum(skb);
 411
 412	/*
  413	 *	We do NOT process RA packets, pushing them to user
  414	 *	level AS IS, without any warranty that the application
  415	 *	will be able to interpret them. The reason is that we
  416	 *	cannot do anything clever here.
  417	 *
  418	 *	We are not the end node, so if the packet contains
  419	 *	AH/ESP we cannot do anything with it.
  420	 *	Defragmentation would also be a mistake: RA packets
  421	 *	cannot be fragmented, because there is no guarantee
  422	 *	that different fragments will follow the same
  423	 *	path. --ANK
  424	 */
 425	if (opt->ra) {
 426		u8 *ptr = skb_network_header(skb) + opt->ra;
 427		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
 428			return 0;
 429	}
 430
 431	/*
 432	 *	check and decrement ttl
 433	 */
 434	if (hdr->hop_limit <= 1) {
 435		/* Force OUTPUT device used as source address */
 436		skb->dev = dst->dev;
 437		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
 438		IP6_INC_STATS_BH(net,
 439				 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
 440
 441		kfree_skb(skb);
 442		return -ETIMEDOUT;
 443	}
 444
 445	/* XXX: idev->cnf.proxy_ndp? */
 446	if (net->ipv6.devconf_all->proxy_ndp &&
 447	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
 448		int proxied = ip6_forward_proxy_check(skb);
 449		if (proxied > 0)
 450			return ip6_input(skb);
 451		else if (proxied < 0) {
 452			IP6_INC_STATS(net, ip6_dst_idev(dst),
 453				      IPSTATS_MIB_INDISCARDS);
 454			goto drop;
 455		}
 456	}
 457
 458	if (!xfrm6_route_forward(skb)) {
 459		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
 460		goto drop;
 461	}
 462	dst = skb_dst(skb);
 463
 464	/* IPv6 specs say nothing about it, but it is clear that we cannot
 465	   send redirects to source routed frames.
 466	   We don't send redirects to frames decapsulated from IPsec.
 467	 */
 468	n = dst_get_neighbour(dst);
 469	if (skb->dev == dst->dev && n && opt->srcrt == 0 && !skb_sec_path(skb)) {
 470		struct in6_addr *target = NULL;
 471		struct rt6_info *rt;
 472
 473		/*
 474		 *	incoming and outgoing devices are the same
 475		 *	send a redirect.
 476		 */
 477
 478		rt = (struct rt6_info *) dst;
 479		if ((rt->rt6i_flags & RTF_GATEWAY))
 480			target = (struct in6_addr*)&n->primary_key;
 481		else
 482			target = &hdr->daddr;
 483
 484		if (!rt->rt6i_peer)
 485			rt6_bind_peer(rt, 1);
 486
 487		/* Limit redirects both by destination (here)
 488		   and by source (inside ndisc_send_redirect)
 489		 */
 490		if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
 491			ndisc_send_redirect(skb, n, target);
 492	} else {
 493		int addrtype = ipv6_addr_type(&hdr->saddr);
 494
 495		/* This check is security critical. */
 496		if (addrtype == IPV6_ADDR_ANY ||
 497		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
 498			goto error;
 499		if (addrtype & IPV6_ADDR_LINKLOCAL) {
 500			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
 501				    ICMPV6_NOT_NEIGHBOUR, 0);
 502			goto error;
 503		}
 504	}
 505
 506	mtu = dst_mtu(dst);
 507	if (mtu < IPV6_MIN_MTU)
 508		mtu = IPV6_MIN_MTU;
 509
 510	if (skb->len > mtu && !skb_is_gso(skb)) {
 511		/* Again, force OUTPUT device used as source address */
 512		skb->dev = dst->dev;
 513		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 514		IP6_INC_STATS_BH(net,
 515				 ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
 516		IP6_INC_STATS_BH(net,
 517				 ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
 518		kfree_skb(skb);
 519		return -EMSGSIZE;
 520	}
 521
 522	if (skb_cow(skb, dst->dev->hard_header_len)) {
 523		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
 524		goto drop;
 525	}
 526
 527	hdr = ipv6_hdr(skb);
 528
 529	/* Mangling hops number delayed to point after skb COW */
 530
 531	hdr->hop_limit--;
 532
 533	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
 534	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
 535		       ip6_forward_finish);
 536
 537error:
 538	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
 539drop:
 540	kfree_skb(skb);
 541	return -EINVAL;
 542}
 543
 544static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
 545{
 546	to->pkt_type = from->pkt_type;
 547	to->priority = from->priority;
 548	to->protocol = from->protocol;
 549	skb_dst_drop(to);
 550	skb_dst_set(to, dst_clone(skb_dst(from)));
 551	to->dev = from->dev;
 552	to->mark = from->mark;
 553
 554#ifdef CONFIG_NET_SCHED
 555	to->tc_index = from->tc_index;
 556#endif
 557	nf_copy(to, from);
 558#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
 559    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
 560	to->nf_trace = from->nf_trace;
 561#endif
 562	skb_copy_secmark(to, from);
 563}
 564
 565int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 566{
 567	u16 offset = sizeof(struct ipv6hdr);
 568	struct ipv6_opt_hdr *exthdr =
 569				(struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
 570	unsigned int packet_len = skb->tail - skb->network_header;
 571	int found_rhdr = 0;
 572	*nexthdr = &ipv6_hdr(skb)->nexthdr;
 573
 574	while (offset + 1 <= packet_len) {
 575
 576		switch (**nexthdr) {
 577
 578		case NEXTHDR_HOP:
 579			break;
 580		case NEXTHDR_ROUTING:
 581			found_rhdr = 1;
 582			break;
 583		case NEXTHDR_DEST:
 584#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
 585			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
 586				break;
 587#endif
 588			if (found_rhdr)
 589				return offset;
 590			break;
  591		default:
 592			return offset;
 593		}
 594
 595		offset += ipv6_optlen(exthdr);
 596		*nexthdr = &exthdr->nexthdr;
 597		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
 598						 offset);
 599	}
 600
 601	return offset;
 602}
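/*
 * Editor's note — illustrative sketch, not part of this file: the walk
 * above advances by ipv6_optlen(exthdr), which for generic extension
 * headers is (hdrlen + 1) * 8 — the hdrlen byte counts 8-octet units
 * beyond the mandatory first 8. Struct and function names here are
 * assumptions for illustration only.
 */
#include <stdio.h>

struct example_opt_hdr {
	unsigned char nexthdr;	/* protocol of the following header */
	unsigned char hdrlen;	/* extra length in 8-octet units */
};

static unsigned int example_optlen(const struct example_opt_hdr *h)
{
	return ((unsigned int)h->hdrlen + 1) * 8;
}

int main(void)
{
	struct example_opt_hdr hbh = { .nexthdr = 58, .hdrlen = 0 };

	printf("%u\n", example_optlen(&hbh));	/* minimal header: 8 bytes */
	return 0;
}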
 603
 604void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
 605{
 606	static atomic_t ipv6_fragmentation_id;
 607	int old, new;
 608
 609	if (rt) {
 610		struct inet_peer *peer;
 611
 612		if (!rt->rt6i_peer)
 613			rt6_bind_peer(rt, 1);
 614		peer = rt->rt6i_peer;
 615		if (peer) {
 616			fhdr->identification = htonl(inet_getid(peer, 0));
 617			return;
 618		}
 619	}
 620	do {
 621		old = atomic_read(&ipv6_fragmentation_id);
 622		new = old + 1;
 623		if (!new)
 624			new = 1;
 625	} while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
 626	fhdr->identification = htonl(new);
 627}
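/*
 * Editor's note — illustrative sketch, not part of this file: the
 * fallback above is a lock-free counter that never yields 0, built on
 * compare-and-swap. A C11 rendering of the same pattern, with
 * hypothetical names:
 */
#include <stdatomic.h>

static atomic_int example_frag_id;

static int example_next_frag_id(void)
{
	int old, new;

	do {
		old = atomic_load(&example_frag_id);
		new = old + 1;
		if (!new)	/* skip 0, as the kernel loop does */
			new = 1;
	} while (!atomic_compare_exchange_weak(&example_frag_id, &old, new));
	return new;
}

int main(void)
{
	return example_next_frag_id() == 1 ? 0 : 1;	/* first ID is 1 */
}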
 628
 629int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 630{
 631	struct sk_buff *frag;
 632	struct rt6_info *rt = (struct rt6_info*)skb_dst(skb);
 633	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
 634	struct ipv6hdr *tmp_hdr;
 635	struct frag_hdr *fh;
 636	unsigned int mtu, hlen, left, len;
 637	__be32 frag_id = 0;
  638	int ptr, offset = 0, err = 0;
 639	u8 *prevhdr, nexthdr = 0;
 640	struct net *net = dev_net(skb_dst(skb)->dev);
 641
 642	hlen = ip6_find_1stfragopt(skb, &prevhdr);
 643	nexthdr = *prevhdr;
 644
 645	mtu = ip6_skb_dst_mtu(skb);
 646
 647	/* We must not fragment if the socket is set to force MTU discovery
  648	 * or if the skb was not generated by a local socket.
 649	 */
 650	if (!skb->local_df && skb->len > mtu) {
 651		skb->dev = skb_dst(skb)->dev;
 652		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 653		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 654			      IPSTATS_MIB_FRAGFAILS);
 655		kfree_skb(skb);
 656		return -EMSGSIZE;
 657	}
 658
 659	if (np && np->frag_size < mtu) {
 660		if (np->frag_size)
 661			mtu = np->frag_size;
 662	}
 663	mtu -= hlen + sizeof(struct frag_hdr);
 664
 665	if (skb_has_frag_list(skb)) {
 666		int first_len = skb_pagelen(skb);
 667		struct sk_buff *frag2;
 668
 669		if (first_len - hlen > mtu ||
 670		    ((first_len - hlen) & 7) ||
 671		    skb_cloned(skb))
 672			goto slow_path;
 673
 674		skb_walk_frags(skb, frag) {
 675			/* Correct geometry. */
 676			if (frag->len > mtu ||
 677			    ((frag->len & 7) && frag->next) ||
 678			    skb_headroom(frag) < hlen)
 679				goto slow_path_clean;
 680
 681			/* Partially cloned skb? */
 682			if (skb_shared(frag))
 683				goto slow_path_clean;
 684
 685			BUG_ON(frag->sk);
 686			if (skb->sk) {
 687				frag->sk = skb->sk;
 688				frag->destructor = sock_wfree;
 689			}
 690			skb->truesize -= frag->truesize;
 691		}
 692
 693		err = 0;
 694		offset = 0;
 695		frag = skb_shinfo(skb)->frag_list;
 696		skb_frag_list_init(skb);
 697		/* BUILD HEADER */
 698
 699		*prevhdr = NEXTHDR_FRAGMENT;
 700		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
 701		if (!tmp_hdr) {
 702			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 703				      IPSTATS_MIB_FRAGFAILS);
 704			return -ENOMEM;
 705		}
 706
 707		__skb_pull(skb, hlen);
 708		fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr));
 709		__skb_push(skb, hlen);
 710		skb_reset_network_header(skb);
 711		memcpy(skb_network_header(skb), tmp_hdr, hlen);
 712
 713		ipv6_select_ident(fh, rt);
 714		fh->nexthdr = nexthdr;
 715		fh->reserved = 0;
 716		fh->frag_off = htons(IP6_MF);
 717		frag_id = fh->identification;
 718
 719		first_len = skb_pagelen(skb);
 720		skb->data_len = first_len - skb_headlen(skb);
 721		skb->len = first_len;
 722		ipv6_hdr(skb)->payload_len = htons(first_len -
 723						   sizeof(struct ipv6hdr));
 724
 725		dst_hold(&rt->dst);
 726
 727		for (;;) {
  728			/* Prepare the header of the next frame
  729			 * before the previous one is sent. */
 730			if (frag) {
 731				frag->ip_summed = CHECKSUM_NONE;
 732				skb_reset_transport_header(frag);
 733				fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr));
 734				__skb_push(frag, hlen);
 735				skb_reset_network_header(frag);
 736				memcpy(skb_network_header(frag), tmp_hdr,
 737				       hlen);
 738				offset += skb->len - hlen - sizeof(struct frag_hdr);
 739				fh->nexthdr = nexthdr;
 740				fh->reserved = 0;
 741				fh->frag_off = htons(offset);
 742				if (frag->next != NULL)
 743					fh->frag_off |= htons(IP6_MF);
 744				fh->identification = frag_id;
 745				ipv6_hdr(frag)->payload_len =
 746						htons(frag->len -
 747						      sizeof(struct ipv6hdr));
 748				ip6_copy_metadata(frag, skb);
 749			}
 750
 751			err = output(skb);
  752		if (!err)
 753				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
 754					      IPSTATS_MIB_FRAGCREATES);
 755
 756			if (err || !frag)
 757				break;
 758
 759			skb = frag;
 760			frag = skb->next;
 761			skb->next = NULL;
 762		}
 763
 764		kfree(tmp_hdr);
 765
 766		if (err == 0) {
 767			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
 768				      IPSTATS_MIB_FRAGOKS);
 769			dst_release(&rt->dst);
 770			return 0;
 771		}
 772
 773		while (frag) {
 774			skb = frag->next;
 775			kfree_skb(frag);
 776			frag = skb;
 777		}
 778
 779		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
 780			      IPSTATS_MIB_FRAGFAILS);
 781		dst_release(&rt->dst);
 782		return err;
 783
 784slow_path_clean:
 785		skb_walk_frags(skb, frag2) {
 786			if (frag2 == frag)
 787				break;
 788			frag2->sk = NULL;
 789			frag2->destructor = NULL;
 790			skb->truesize += frag2->truesize;
 791		}
 792	}
 793
 794slow_path:
 795	left = skb->len - hlen;		/* Space per frame */
 796	ptr = hlen;			/* Where to start from */
 797
 798	/*
 799	 *	Fragment the datagram.
 800	 */
 801
 802	*prevhdr = NEXTHDR_FRAGMENT;
 803
 804	/*
 805	 *	Keep copying data until we run out.
 806	 */
  807	while (left > 0) {
 808		len = left;
 809		/* IF: it doesn't fit, use 'mtu' - the data space left */
 810		if (len > mtu)
 811			len = mtu;
 812		/* IF: we are not sending up to and including the packet end
 813		   then align the next start on an eight byte boundary */
 814		if (len < left)	{
 815			len &= ~7;
 816		}
 817		/*
 818		 *	Allocate buffer.
 819		 */
 820
 821		if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->dst.dev), GFP_ATOMIC)) == NULL) {
 822			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
 823			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 824				      IPSTATS_MIB_FRAGFAILS);
 825			err = -ENOMEM;
 826			goto fail;
 827		}
 828
 829		/*
 830		 *	Set up data on packet
 831		 */
 832
 833		ip6_copy_metadata(frag, skb);
 834		skb_reserve(frag, LL_RESERVED_SPACE(rt->dst.dev));
 835		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
 836		skb_reset_network_header(frag);
 837		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
 838		frag->transport_header = (frag->network_header + hlen +
 839					  sizeof(struct frag_hdr));
 840
 841		/*
 842		 *	Charge the memory for the fragment to any owner
 843		 *	it might possess
 844		 */
 845		if (skb->sk)
 846			skb_set_owner_w(frag, skb->sk);
 847
 848		/*
 849		 *	Copy the packet header into the new buffer.
 850		 */
 851		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);
 852
 853		/*
 854		 *	Build fragment header.
 855		 */
 856		fh->nexthdr = nexthdr;
 857		fh->reserved = 0;
 858		if (!frag_id) {
 859			ipv6_select_ident(fh, rt);
 860			frag_id = fh->identification;
 861		} else
 862			fh->identification = frag_id;
 863
 864		/*
 865		 *	Copy a block of the IP datagram.
 866		 */
 867		if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
 868			BUG();
 869		left -= len;
 870
 871		fh->frag_off = htons(offset);
 872		if (left > 0)
 873			fh->frag_off |= htons(IP6_MF);
 874		ipv6_hdr(frag)->payload_len = htons(frag->len -
 875						    sizeof(struct ipv6hdr));
 876
 877		ptr += len;
 878		offset += len;
 879
 880		/*
 881		 *	Put this fragment into the sending queue.
 882		 */
 883		err = output(frag);
 884		if (err)
 885			goto fail;
 886
 887		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 888			      IPSTATS_MIB_FRAGCREATES);
 889	}
 890	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 891		      IPSTATS_MIB_FRAGOKS);
 892	kfree_skb(skb);
 893	return err;
 894
 895fail:
 896	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 897		      IPSTATS_MIB_FRAGFAILS);
 898	kfree_skb(skb);
 899	return err;
 900}
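/*
 * Editor's note — illustrative sketch, not part of this file: the slow
 * path above sizes every non-final fragment down to a multiple of 8,
 * because the fragment header's offset field counts 8-octet units.
 * Assuming an MTU of 1500 and a 40-byte unfragmentable part (just the
 * IPv6 header):
 */
#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1500, hlen = 40, frag_hdr = 8;
	unsigned int space = mtu - hlen - frag_hdr;	/* 1452 bytes for data */
	unsigned int per_frag = space & ~7u;		/* 1448, 8-byte aligned */

	printf("payload per non-final fragment: %u\n", per_frag);
	return 0;
}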
 901
 902static inline int ip6_rt_check(const struct rt6key *rt_key,
 903			       const struct in6_addr *fl_addr,
 904			       const struct in6_addr *addr_cache)
 905{
 906	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
 907		(addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
 908}
 909
 910static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
 911					  struct dst_entry *dst,
 912					  const struct flowi6 *fl6)
 913{
 914	struct ipv6_pinfo *np = inet6_sk(sk);
 915	struct rt6_info *rt = (struct rt6_info *)dst;
 916
 917	if (!dst)
 918		goto out;
 919
  920	/* Yes, checking route validity in the unconnected
  921	 * case is not very simple. Take into account
  922	 * that we do not support routing by source, TOS,
 923	 * and MSG_DONTROUTE 		--ANK (980726)
 924	 *
 925	 * 1. ip6_rt_check(): If route was host route,
 926	 *    check that cached destination is current.
 927	 *    If it is network route, we still may
 928	 *    check its validity using saved pointer
 929	 *    to the last used address: daddr_cache.
  930	 *    We do not want to save the whole address now,
  931	 *    (because the main consumer of this service
  932	 *    is tcp, which does not have this problem),
 933	 *    so that the last trick works only on connected
 934	 *    sockets.
 935	 * 2. oif also should be the same.
 936	 */
 937	if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
 938#ifdef CONFIG_IPV6_SUBTREES
 939	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
 940#endif
 941	    (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
 942		dst_release(dst);
 943		dst = NULL;
 944	}
 945
 946out:
 947	return dst;
 948}
 949
 950static int ip6_dst_lookup_tail(struct sock *sk,
 951			       struct dst_entry **dst, struct flowi6 *fl6)
 952{
 953	struct net *net = sock_net(sk);
 954#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
 955	struct neighbour *n;
 956#endif
 957	int err;
 958
 959	if (*dst == NULL)
 960		*dst = ip6_route_output(net, sk, fl6);
 961
 962	if ((err = (*dst)->error))
 963		goto out_err_release;
 964
 965	if (ipv6_addr_any(&fl6->saddr)) {
 966		struct rt6_info *rt = (struct rt6_info *) *dst;
 967		err = ip6_route_get_saddr(net, rt, &fl6->daddr,
 968					  sk ? inet6_sk(sk)->srcprefs : 0,
 969					  &fl6->saddr);
 970		if (err)
 971			goto out_err_release;
 972	}
 973
 974#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
 975	/*
 976	 * Here if the dst entry we've looked up
 977	 * has a neighbour entry that is in the INCOMPLETE
 978	 * state and the src address from the flow is
 979	 * marked as OPTIMISTIC, we release the found
 980	 * dst entry and replace it instead with the
 981	 * dst entry of the nexthop router
 982	 */
 983	rcu_read_lock();
 984	n = dst_get_neighbour(*dst);
 985	if (n && !(n->nud_state & NUD_VALID)) {
 986		struct inet6_ifaddr *ifp;
 987		struct flowi6 fl_gw6;
 988		int redirect;
 989
 990		rcu_read_unlock();
 991		ifp = ipv6_get_ifaddr(net, &fl6->saddr,
 992				      (*dst)->dev, 1);
 993
 994		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
 995		if (ifp)
 996			in6_ifa_put(ifp);
 997
 998		if (redirect) {
 999			/*
1000			 * We need to get the dst entry for the
1001			 * default router instead
1002			 */
1003			dst_release(*dst);
1004			memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
1005			memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
1006			*dst = ip6_route_output(net, sk, &fl_gw6);
1007			if ((err = (*dst)->error))
1008				goto out_err_release;
1009		}
1010	} else {
1011		rcu_read_unlock();
1012	}
1013#endif
1014
1015	return 0;
1016
1017out_err_release:
1018	if (err == -ENETUNREACH)
1019		IP6_INC_STATS_BH(net, NULL, IPSTATS_MIB_OUTNOROUTES);
1020	dst_release(*dst);
1021	*dst = NULL;
1022	return err;
1023}
1024
1025/**
1026 *	ip6_dst_lookup - perform route lookup on flow
1027 *	@sk: socket which provides route info
1028 *	@dst: pointer to dst_entry * for result
1029 *	@fl6: flow to lookup
1030 *
1031 *	This function performs a route lookup on the given flow.
1032 *
1033 *	It returns zero on success, or a standard errno code on error.
1034 */
1035int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6)
1036{
1037	*dst = NULL;
1038	return ip6_dst_lookup_tail(sk, dst, fl6);
1039}
1040EXPORT_SYMBOL_GPL(ip6_dst_lookup);
1041
1042/**
1043 *	ip6_dst_lookup_flow - perform route lookup on flow with ipsec
1044 *	@sk: socket which provides route info
1045 *	@fl6: flow to lookup
1046 *	@final_dst: final destination address for ipsec lookup
1047 *	@can_sleep: we are in a sleepable context
1048 *
1049 *	This function performs a route lookup on the given flow.
1050 *
1051 *	It returns a valid dst pointer on success, or a pointer encoded
1052 *	error code.
1053 */
1054struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
1055				      const struct in6_addr *final_dst,
1056				      bool can_sleep)
1057{
1058	struct dst_entry *dst = NULL;
1059	int err;
1060
1061	err = ip6_dst_lookup_tail(sk, &dst, fl6);
1062	if (err)
1063		return ERR_PTR(err);
1064	if (final_dst)
1065		ipv6_addr_copy(&fl6->daddr, final_dst);
1066	if (can_sleep)
1067		fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
1068
1069	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
1070}
1071EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
1072
1073/**
1074 *	ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
1075 *	@sk: socket which provides the dst cache and route info
1076 *	@fl6: flow to lookup
1077 *	@final_dst: final destination address for ipsec lookup
1078 *	@can_sleep: we are in a sleepable context
1079 *
1080 *	This function performs a route lookup on the given flow with the
1081 *	possibility of using the cached route in the socket if it is valid.
1082 *	It will take the socket dst lock when operating on the dst cache.
1083 *	As a result, this function can only be used in process context.
1084 *
1085 *	It returns a valid dst pointer on success, or a pointer encoded
1086 *	error code.
1087 */
1088struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
1089					 const struct in6_addr *final_dst,
1090					 bool can_sleep)
1091{
1092	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
1093	int err;
1094
1095	dst = ip6_sk_dst_check(sk, dst, fl6);
1096
1097	err = ip6_dst_lookup_tail(sk, &dst, fl6);
1098	if (err)
1099		return ERR_PTR(err);
1100	if (final_dst)
1101		ipv6_addr_copy(&fl6->daddr, final_dst);
1102	if (can_sleep)
1103		fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
1104
1105	return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
1106}
1107EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
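/*
 * Editor's note — illustrative sketch, not part of this file: both
 * *_lookup_flow() helpers above report failure through a pointer-encoded
 * errno, the kernel's ERR_PTR()/IS_ERR() idiom, instead of returning
 * NULL. A minimal userspace rendering, with hypothetical names:
 */
#include <stdio.h>
#include <stdint.h>

#define EX_MAX_ERRNO 4095L

static void *ex_err_ptr(long err)
{
	return (void *)(intptr_t)err;	/* -1..-4095 land in the top page */
}

static int ex_is_err(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-EX_MAX_ERRNO;
}

int main(void)
{
	void *dst = ex_err_ptr(-22);	/* as if the lookup failed: -EINVAL */

	if (ex_is_err(dst))
		printf("lookup failed: %ld\n", (long)(intptr_t)dst);
	return 0;
}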
1108
1109static inline int ip6_ufo_append_data(struct sock *sk,
1110			int getfrag(void *from, char *to, int offset, int len,
1111			int odd, struct sk_buff *skb),
1112			void *from, int length, int hh_len, int fragheaderlen,
 1113			int transhdrlen, int mtu, unsigned int flags,
1114			struct rt6_info *rt)
1115
1116{
1117	struct sk_buff *skb;
1118	int err;
1119
 1120	/* The network device supports UDP large send offload,
 1121	 * so create one single skb containing the complete
 1122	 * UDP datagram.
1123	 */
1124	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
1125		skb = sock_alloc_send_skb(sk,
1126			hh_len + fragheaderlen + transhdrlen + 20,
1127			(flags & MSG_DONTWAIT), &err);
1128		if (skb == NULL)
1129			return -ENOMEM;
1130
1131		/* reserve space for Hardware header */
1132		skb_reserve(skb, hh_len);
1133
1134		/* create space for UDP/IP header */
 1135		skb_put(skb, fragheaderlen + transhdrlen);
1136
1137		/* initialize network header pointer */
1138		skb_reset_network_header(skb);
1139
1140		/* initialize protocol header pointer */
1141		skb->transport_header = skb->network_header + fragheaderlen;
1142
1143		skb->ip_summed = CHECKSUM_PARTIAL;
1144		skb->csum = 0;
1145	}
1146
 1147	err = skb_append_datato_frags(sk, skb, getfrag, from,
1148				      (length - transhdrlen));
1149	if (!err) {
1150		struct frag_hdr fhdr;
1151
1152		/* Specify the length of each IPv6 datagram fragment.
1153		 * It has to be a multiple of 8.
1154		 */
1155		skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
1156					     sizeof(struct frag_hdr)) & ~7;
1157		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1158		ipv6_select_ident(&fhdr, rt);
1159		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
1160		__skb_queue_tail(&sk->sk_write_queue, skb);
1161
1162		return 0;
1163	}
 1164	/* There is not enough support to do UDP LSO,
 1165	 * so follow the normal path.
1166	 */
1167	kfree_skb(skb);
1168
1169	return err;
1170}
1171
1172static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
1173					       gfp_t gfp)
1174{
1175	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1176}
1177
1178static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
1179						gfp_t gfp)
1180{
1181	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1182}
1183
1184int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1185	int offset, int len, int odd, struct sk_buff *skb),
1186	void *from, int length, int transhdrlen,
1187	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
1188	struct rt6_info *rt, unsigned int flags, int dontfrag)
1189{
1190	struct inet_sock *inet = inet_sk(sk);
1191	struct ipv6_pinfo *np = inet6_sk(sk);
1192	struct inet_cork *cork;
1193	struct sk_buff *skb;
1194	unsigned int maxfraglen, fragheaderlen;
1195	int exthdrlen;
1196	int hh_len;
1197	int mtu;
1198	int copy;
1199	int err;
1200	int offset = 0;
1201	int csummode = CHECKSUM_NONE;
1202	__u8 tx_flags = 0;
1203
1204	if (flags&MSG_PROBE)
1205		return 0;
1206	cork = &inet->cork.base;
1207	if (skb_queue_empty(&sk->sk_write_queue)) {
1208		/*
1209		 * setup for corking
1210		 */
1211		if (opt) {
1212			if (WARN_ON(np->cork.opt))
1213				return -EINVAL;
1214
1215			np->cork.opt = kmalloc(opt->tot_len, sk->sk_allocation);
1216			if (unlikely(np->cork.opt == NULL))
1217				return -ENOBUFS;
1218
1219			np->cork.opt->tot_len = opt->tot_len;
1220			np->cork.opt->opt_flen = opt->opt_flen;
1221			np->cork.opt->opt_nflen = opt->opt_nflen;
1222
1223			np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
1224							    sk->sk_allocation);
1225			if (opt->dst0opt && !np->cork.opt->dst0opt)
1226				return -ENOBUFS;
1227
1228			np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
1229							    sk->sk_allocation);
1230			if (opt->dst1opt && !np->cork.opt->dst1opt)
1231				return -ENOBUFS;
1232
1233			np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
1234							   sk->sk_allocation);
1235			if (opt->hopopt && !np->cork.opt->hopopt)
1236				return -ENOBUFS;
1237
1238			np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
1239							    sk->sk_allocation);
1240			if (opt->srcrt && !np->cork.opt->srcrt)
1241				return -ENOBUFS;
1242
 1243			/* need source address above. --miyazawa */
1244		}
1245		dst_hold(&rt->dst);
1246		cork->dst = &rt->dst;
1247		inet->cork.fl.u.ip6 = *fl6;
1248		np->cork.hop_limit = hlimit;
1249		np->cork.tclass = tclass;
1250		mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
1251		      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
1252		if (np->frag_size < mtu) {
1253			if (np->frag_size)
1254				mtu = np->frag_size;
1255		}
1256		cork->fragsize = mtu;
1257		if (dst_allfrag(rt->dst.path))
1258			cork->flags |= IPCORK_ALLFRAG;
1259		cork->length = 0;
1260		sk->sk_sndmsg_page = NULL;
1261		sk->sk_sndmsg_off = 0;
1262		exthdrlen = rt->dst.header_len + (opt ? opt->opt_flen : 0) -
1263			    rt->rt6i_nfheader_len;
1264		length += exthdrlen;
1265		transhdrlen += exthdrlen;
1266	} else {
1267		rt = (struct rt6_info *)cork->dst;
1268		fl6 = &inet->cork.fl.u.ip6;
1269		opt = np->cork.opt;
1270		transhdrlen = 0;
1271		exthdrlen = 0;
1272		mtu = cork->fragsize;
1273	}
1274
1275	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
1276
1277	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
1278			(opt ? opt->opt_nflen : 0);
1279	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);
1280
1281	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
1282		if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
1283			ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen);
1284			return -EMSGSIZE;
1285		}
1286	}
1287
1288	/* For UDP, check if TX timestamp is enabled */
1289	if (sk->sk_type == SOCK_DGRAM) {
1290		err = sock_tx_timestamp(sk, &tx_flags);
1291		if (err)
1292			goto error;
1293	}
1294
1295	/*
1296	 * Let's try using as much space as possible.
1297	 * Use MTU if total length of the message fits into the MTU.
1298	 * Otherwise, we need to reserve fragment header and
 1299	 * fragment alignment (= 8-15 octets, in total).
1300	 *
 1301	 * Note that we may need to "move" the data from the tail
 1302	 * of the buffer to the new fragment when we split
1303	 * the message.
1304	 *
1305	 * FIXME: It may be fragmented into multiple chunks
1306	 *        at once if non-fragmentable extension headers
1307	 *        are too large.
1308	 * --yoshfuji
1309	 */
1310
1311	cork->length += length;
1312	if (length > mtu) {
1313		int proto = sk->sk_protocol;
 1314		if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)) {
1315			ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
1316			return -EMSGSIZE;
1317		}
1318
1319		if (proto == IPPROTO_UDP &&
1320		    (rt->dst.dev->features & NETIF_F_UFO)) {
1321
1322			err = ip6_ufo_append_data(sk, getfrag, from, length,
1323						  hh_len, fragheaderlen,
1324						  transhdrlen, mtu, flags, rt);
1325			if (err)
1326				goto error;
1327			return 0;
1328		}
1329	}
1330
1331	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
1332		goto alloc_new_skb;
1333
1334	while (length > 0) {
1335		/* Check if the remaining data fits into current packet. */
1336		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
1337		if (copy < length)
1338			copy = maxfraglen - skb->len;
1339
1340		if (copy <= 0) {
1341			char *data;
1342			unsigned int datalen;
1343			unsigned int fraglen;
1344			unsigned int fraggap;
1345			unsigned int alloclen;
1346			struct sk_buff *skb_prev;
1347alloc_new_skb:
1348			skb_prev = skb;
1349
1350			/* There's no room in the current skb */
1351			if (skb_prev)
1352				fraggap = skb_prev->len - maxfraglen;
1353			else
1354				fraggap = 0;
1355
1356			/*
1357			 * If remaining data exceeds the mtu,
1358			 * we know we need more fragment(s).
1359			 */
1360			datalen = length + fraggap;
1361			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
1362				datalen = maxfraglen - fragheaderlen;
1363
1364			fraglen = datalen + fragheaderlen;
1365			if ((flags & MSG_MORE) &&
1366			    !(rt->dst.dev->features&NETIF_F_SG))
1367				alloclen = mtu;
1368			else
1369				alloclen = datalen + fragheaderlen;
1370
1371			/*
1372			 * The last fragment gets additional space at tail.
 1373			 * Note: we overallocate on fragments with MSG_MORE
1374			 * because we have no idea if we're the last one.
1375			 */
1376			if (datalen == length + fraggap)
1377				alloclen += rt->dst.trailer_len;
1378
1379			/*
1380			 * We just reserve space for fragment header.
1381			 * Note: this may be overallocation if the message
1382			 * (without MSG_MORE) fits into the MTU.
1383			 */
1384			alloclen += sizeof(struct frag_hdr);
1385
1386			if (transhdrlen) {
1387				skb = sock_alloc_send_skb(sk,
1388						alloclen + hh_len,
1389						(flags & MSG_DONTWAIT), &err);
1390			} else {
1391				skb = NULL;
1392				if (atomic_read(&sk->sk_wmem_alloc) <=
1393				    2 * sk->sk_sndbuf)
1394					skb = sock_wmalloc(sk,
1395							   alloclen + hh_len, 1,
1396							   sk->sk_allocation);
1397				if (unlikely(skb == NULL))
1398					err = -ENOBUFS;
1399				else {
1400					/* Only the initial fragment
1401					 * is time stamped.
1402					 */
1403					tx_flags = 0;
1404				}
1405			}
1406			if (skb == NULL)
1407				goto error;
1408			/*
1409			 *	Fill in the control structures
1410			 */
1411			skb->ip_summed = csummode;
1412			skb->csum = 0;
1413			/* reserve for fragmentation */
1414			skb_reserve(skb, hh_len+sizeof(struct frag_hdr));
1415
1416			if (sk->sk_type == SOCK_DGRAM)
1417				skb_shinfo(skb)->tx_flags = tx_flags;
1418
1419			/*
1420			 *	Find where to start putting bytes
1421			 */
1422			data = skb_put(skb, fraglen);
1423			skb_set_network_header(skb, exthdrlen);
1424			data += fragheaderlen;
1425			skb->transport_header = (skb->network_header +
1426						 fragheaderlen);
1427			if (fraggap) {
1428				skb->csum = skb_copy_and_csum_bits(
1429					skb_prev, maxfraglen,
1430					data + transhdrlen, fraggap, 0);
1431				skb_prev->csum = csum_sub(skb_prev->csum,
1432							  skb->csum);
1433				data += fraggap;
1434				pskb_trim_unique(skb_prev, maxfraglen);
1435			}
1436			copy = datalen - transhdrlen - fraggap;
1437			if (copy < 0) {
1438				err = -EINVAL;
1439				kfree_skb(skb);
1440				goto error;
1441			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
1442				err = -EFAULT;
1443				kfree_skb(skb);
1444				goto error;
1445			}
1446
1447			offset += copy;
1448			length -= datalen - fraggap;
1449			transhdrlen = 0;
1450			exthdrlen = 0;
1451			csummode = CHECKSUM_NONE;
1452
1453			/*
1454			 * Put the packet on the pending queue
1455			 */
1456			__skb_queue_tail(&sk->sk_write_queue, skb);
1457			continue;
1458		}
1459
1460		if (copy > length)
1461			copy = length;
1462
1463		if (!(rt->dst.dev->features&NETIF_F_SG)) {
1464			unsigned int off;
1465
1466			off = skb->len;
1467			if (getfrag(from, skb_put(skb, copy),
1468						offset, copy, off, skb) < 0) {
1469				__skb_trim(skb, off);
1470				err = -EFAULT;
1471				goto error;
1472			}
1473		} else {
1474			int i = skb_shinfo(skb)->nr_frags;
1475			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
1476			struct page *page = sk->sk_sndmsg_page;
1477			int off = sk->sk_sndmsg_off;
1478			unsigned int left;
1479
1480			if (page && (left = PAGE_SIZE - off) > 0) {
1481				if (copy >= left)
1482					copy = left;
1483				if (page != frag->page) {
1484					if (i == MAX_SKB_FRAGS) {
1485						err = -EMSGSIZE;
1486						goto error;
1487					}
1488					get_page(page);
1489					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
1490					frag = &skb_shinfo(skb)->frags[i];
1491				}
 1492			} else if (i < MAX_SKB_FRAGS) {
1493				if (copy > PAGE_SIZE)
1494					copy = PAGE_SIZE;
1495				page = alloc_pages(sk->sk_allocation, 0);
1496				if (page == NULL) {
1497					err = -ENOMEM;
1498					goto error;
1499				}
1500				sk->sk_sndmsg_page = page;
1501				sk->sk_sndmsg_off = 0;
1502
1503				skb_fill_page_desc(skb, i, page, 0, 0);
1504				frag = &skb_shinfo(skb)->frags[i];
1505			} else {
1506				err = -EMSGSIZE;
1507				goto error;
1508			}
1509			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
1510				err = -EFAULT;
1511				goto error;
1512			}
1513			sk->sk_sndmsg_off += copy;
1514			frag->size += copy;
1515			skb->len += copy;
1516			skb->data_len += copy;
1517			skb->truesize += copy;
1518			atomic_add(copy, &sk->sk_wmem_alloc);
1519		}
1520		offset += copy;
1521		length -= copy;
1522	}
1523	return 0;
1524error:
1525	cork->length -= length;
1526	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1527	return err;
1528}
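/*
 * Editor's note — illustrative sketch, not part of this file: the
 * maxfraglen computed near the top of ip6_append_data() caps each queued
 * fragment. For a 1500-byte MTU, no extension headers (fragheaderlen
 * = 40) and an 8-byte fragment header:
 */
#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1500, fragheaderlen = 40, frag_hdr = 8;
	unsigned int maxfraglen =
		((mtu - fragheaderlen) & ~7u) + fragheaderlen - frag_hdr;

	printf("maxfraglen = %u\n", maxfraglen);	/* 1488 */
	return 0;
}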
1529
1530static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
1531{
1532	if (np->cork.opt) {
1533		kfree(np->cork.opt->dst0opt);
1534		kfree(np->cork.opt->dst1opt);
1535		kfree(np->cork.opt->hopopt);
1536		kfree(np->cork.opt->srcrt);
1537		kfree(np->cork.opt);
1538		np->cork.opt = NULL;
1539	}
1540
1541	if (inet->cork.base.dst) {
1542		dst_release(inet->cork.base.dst);
1543		inet->cork.base.dst = NULL;
1544		inet->cork.base.flags &= ~IPCORK_ALLFRAG;
1545	}
1546	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
1547}
1548
1549int ip6_push_pending_frames(struct sock *sk)
1550{
1551	struct sk_buff *skb, *tmp_skb;
1552	struct sk_buff **tail_skb;
1553	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
1554	struct inet_sock *inet = inet_sk(sk);
1555	struct ipv6_pinfo *np = inet6_sk(sk);
1556	struct net *net = sock_net(sk);
1557	struct ipv6hdr *hdr;
1558	struct ipv6_txoptions *opt = np->cork.opt;
1559	struct rt6_info *rt = (struct rt6_info *)inet->cork.base.dst;
1560	struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
1561	unsigned char proto = fl6->flowi6_proto;
1562	int err = 0;
1563
1564	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
1565		goto out;
1566	tail_skb = &(skb_shinfo(skb)->frag_list);
1567
1568	/* move skb->data to ip header from ext header */
1569	if (skb->data < skb_network_header(skb))
1570		__skb_pull(skb, skb_network_offset(skb));
1571	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
1572		__skb_pull(tmp_skb, skb_network_header_len(skb));
1573		*tail_skb = tmp_skb;
1574		tail_skb = &(tmp_skb->next);
1575		skb->len += tmp_skb->len;
1576		skb->data_len += tmp_skb->len;
1577		skb->truesize += tmp_skb->truesize;
1578		tmp_skb->destructor = NULL;
1579		tmp_skb->sk = NULL;
1580	}
1581
1582	/* Allow local fragmentation. */
1583	if (np->pmtudisc < IPV6_PMTUDISC_DO)
1584		skb->local_df = 1;
1585
1586	ipv6_addr_copy(final_dst, &fl6->daddr);
1587	__skb_pull(skb, skb_network_header_len(skb));
1588	if (opt && opt->opt_flen)
1589		ipv6_push_frag_opts(skb, opt, &proto);
1590	if (opt && opt->opt_nflen)
1591		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);
1592
1593	skb_push(skb, sizeof(struct ipv6hdr));
1594	skb_reset_network_header(skb);
1595	hdr = ipv6_hdr(skb);
1596
1597	*(__be32*)hdr = fl6->flowlabel |
1598		     htonl(0x60000000 | ((int)np->cork.tclass << 20));
1599
1600	hdr->hop_limit = np->cork.hop_limit;
1601	hdr->nexthdr = proto;
1602	ipv6_addr_copy(&hdr->saddr, &fl6->saddr);
1603	ipv6_addr_copy(&hdr->daddr, final_dst);
1604
1605	skb->priority = sk->sk_priority;
1606	skb->mark = sk->sk_mark;
1607
1608	skb_dst_set(skb, dst_clone(&rt->dst));
1609	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
1610	if (proto == IPPROTO_ICMPV6) {
1611		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
1612
1613		ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type);
1614		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
1615	}
1616
1617	err = ip6_local_out(skb);
1618	if (err) {
1619		if (err > 0)
1620			err = net_xmit_errno(err);
1621		if (err)
1622			goto error;
1623	}
1624
1625out:
1626	ip6_cork_release(inet, np);
1627	return err;
1628error:
1629	IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1630	goto out;
1631}
1632
1633void ip6_flush_pending_frames(struct sock *sk)
1634{
1635	struct sk_buff *skb;
1636
1637	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
1638		if (skb_dst(skb))
1639			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
1640				      IPSTATS_MIB_OUTDISCARDS);
1641		kfree_skb(skb);
1642	}
1643
1644	ip6_cork_release(inet_sk(sk), inet6_sk(sk));
1645}
v5.9 (net/ipv6/ip6_output.c)
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *	IPv6 output functions
   4 *	Linux INET6 implementation
   5 *
   6 *	Authors:
   7 *	Pedro Roque		<roque@di.fc.ul.pt>
   8 *
   9 *	Based on linux/net/ipv4/ip_output.c
  10 *
  11 *	Changes:
   12 *	A.N.Kuznetsov	:	arithmetics in fragmentation.
  13 *				extension headers are implemented.
  14 *				route changes now work.
  15 *				ip6_forward does not confuse sniffers.
  16 *				etc.
  17 *
  18 *      H. von Brand    :       Added missing #include <linux/string.h>
  19 *	Imran Patel	:	frag id should be in NBO
  20 *      Kazunori MIYAZAWA @USAGI
  21 *			:       add ip6_append_data and related functions
  22 *				for datagram xmit
  23 */
  24
  25#include <linux/errno.h>
  26#include <linux/kernel.h>
  27#include <linux/string.h>
  28#include <linux/socket.h>
  29#include <linux/net.h>
  30#include <linux/netdevice.h>
  31#include <linux/if_arp.h>
  32#include <linux/in6.h>
  33#include <linux/tcp.h>
  34#include <linux/route.h>
  35#include <linux/module.h>
  36#include <linux/slab.h>
  37
  38#include <linux/bpf-cgroup.h>
  39#include <linux/netfilter.h>
  40#include <linux/netfilter_ipv6.h>
  41
  42#include <net/sock.h>
  43#include <net/snmp.h>
  44
  45#include <net/ipv6.h>
  46#include <net/ndisc.h>
  47#include <net/protocol.h>
  48#include <net/ip6_route.h>
  49#include <net/addrconf.h>
  50#include <net/rawv6.h>
  51#include <net/icmp.h>
  52#include <net/xfrm.h>
  53#include <net/checksum.h>
  54#include <linux/mroute6.h>
  55#include <net/l3mdev.h>
  56#include <net/lwtunnel.h>
  57#include <net/ip_tunnels.h>
  58
  59static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
  60{
  61	struct dst_entry *dst = skb_dst(skb);
  62	struct net_device *dev = dst->dev;
  63	const struct in6_addr *nexthop;
  64	struct neighbour *neigh;
  65	int ret;
  66
  67	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
  68		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
  69
  70		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) &&
  71		    ((mroute6_is_socket(net, skb) &&
  72		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
  73		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
  74					 &ipv6_hdr(skb)->saddr))) {
  75			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
  76
  77			/* Do not check for IFF_ALLMULTI; multicast routing
  78			   is not supported in any case.
  79			 */
  80			if (newskb)
  81				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
  82					net, sk, newskb, NULL, newskb->dev,
  83					dev_loopback_xmit);
  84
  85			if (ipv6_hdr(skb)->hop_limit == 0) {
  86				IP6_INC_STATS(net, idev,
  87					      IPSTATS_MIB_OUTDISCARDS);
  88				kfree_skb(skb);
  89				return 0;
  90			}
  91		}
  92
  93		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, skb->len);
  94
  95		if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
  96		    IPV6_ADDR_SCOPE_NODELOCAL &&
  97		    !(dev->flags & IFF_LOOPBACK)) {
  98			kfree_skb(skb);
  99			return 0;
 100		}
 101	}
 102
 103	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
 104		int res = lwtunnel_xmit(skb);
 105
 106		if (res < 0 || res == LWTUNNEL_XMIT_DONE)
 107			return res;
 108	}
 109
 110	rcu_read_lock_bh();
 111	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
 112	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
 113	if (unlikely(!neigh))
 114		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
 115	if (!IS_ERR(neigh)) {
 116		sock_confirm_neigh(skb, neigh);
 117		ret = neigh_output(neigh, skb, false);
 118		rcu_read_unlock_bh();
 119		return ret;
 120	}
 121	rcu_read_unlock_bh();
 122
 123	IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
 124	kfree_skb(skb);
 125	return -EINVAL;
 126}
 127
 128static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 129{
 130#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
 131	/* Policy lookup after SNAT yielded a new policy */
 132	if (skb_dst(skb)->xfrm) {
 133		IPCB(skb)->flags |= IPSKB_REROUTED;
 134		return dst_output(net, sk, skb);
 135	}
 136#endif
 137
 138	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
 139	    dst_allfrag(skb_dst(skb)) ||
 140	    (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
 141		return ip6_fragment(net, sk, skb, ip6_finish_output2);
 142	else
 143		return ip6_finish_output2(net, sk, skb);
 144}
 145
 146static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 147{
 148	int ret;
 149
 150	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
 151	switch (ret) {
 152	case NET_XMIT_SUCCESS:
 153		return __ip6_finish_output(net, sk, skb);
 154	case NET_XMIT_CN:
 155		return __ip6_finish_output(net, sk, skb) ? : ret;
 156	default:
 157		kfree_skb(skb);
 158		return ret;
 159	}
 160}
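/*
 * Editor's note — illustrative sketch, not part of this file: the
 * "x ? : y" in the NET_XMIT_CN case above is the GNU C conditional with
 * an omitted middle operand — it yields x when x is nonzero, else y.
 * Here that keeps any real error from the output path but still reports
 * congestion when the send itself returned 0. Hypothetical names:
 */
static int example_cn_result(int output_ret, int cn)
{
	return output_ret ? : cn;	/* GNU extension, as used above */
}

int main(void)
{
	/* 0 (success) becomes NET_XMIT_CN; a real error passes through */
	return example_cn_result(0, 2) == 2 && example_cn_result(-1, 2) == -1
		? 0 : 1;
}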
 161
 162int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 163{
 164	struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;
 165	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
 166
 167	skb->protocol = htons(ETH_P_IPV6);
 168	skb->dev = dev;
 169
 170	if (unlikely(idev->cnf.disable_ipv6)) {
 171		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
 172		kfree_skb(skb);
 173		return 0;
 174	}
 175
 176	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
 177			    net, sk, skb, indev, dev,
 178			    ip6_finish_output,
 179			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
 180}
 181
 182bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
 183{
 184	if (!np->autoflowlabel_set)
 185		return ip6_default_np_autolabel(net);
 186	else
 187		return np->autoflowlabel;
 188}
 189
 190/*
 191 * xmit an sk_buff (used by TCP, SCTP and DCCP)
  192 * Note: socket lock is not held for SYNACK packets, but might be modified
 193 * by calls to skb_set_owner_w() and ipv6_local_error(),
 194 * which are using proper atomic operations or spinlocks.
 195 */
 196int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 197	     __u32 mark, struct ipv6_txoptions *opt, int tclass, u32 priority)
 198{
 199	struct net *net = sock_net(sk);
 200	const struct ipv6_pinfo *np = inet6_sk(sk);
 201	struct in6_addr *first_hop = &fl6->daddr;
 202	struct dst_entry *dst = skb_dst(skb);
 203	unsigned int head_room;
 204	struct ipv6hdr *hdr;
 205	u8  proto = fl6->flowi6_proto;
 206	int seg_len = skb->len;
 207	int hlimit = -1;
 208	u32 mtu;
 209
 210	head_room = sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
 211	if (opt)
 212		head_room += opt->opt_nflen + opt->opt_flen;
 213
 214	if (unlikely(skb_headroom(skb) < head_room)) {
 215		struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
 216		if (!skb2) {
 217			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 218				      IPSTATS_MIB_OUTDISCARDS);
 219			kfree_skb(skb);
 220			return -ENOBUFS;
 221		}
 222		if (skb->sk)
 223			skb_set_owner_w(skb2, skb->sk);
 224		consume_skb(skb);
 225		skb = skb2;
 226	}
 227
 228	if (opt) {
 229		seg_len += opt->opt_nflen + opt->opt_flen;
 230
 231		if (opt->opt_flen)
 232			ipv6_push_frag_opts(skb, opt, &proto);
 233
 234		if (opt->opt_nflen)
 235			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop,
 236					     &fl6->saddr);
 237	}
 238
 239	skb_push(skb, sizeof(struct ipv6hdr));
 240	skb_reset_network_header(skb);
 241	hdr = ipv6_hdr(skb);
 242
 243	/*
 244	 *	Fill in the IPv6 header
 245	 */
 246	if (np)
 247		hlimit = np->hop_limit;
 248	if (hlimit < 0)
 249		hlimit = ip6_dst_hoplimit(dst);
 250
 251	ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
 252				ip6_autoflowlabel(net, np), fl6));
 253
 254	hdr->payload_len = htons(seg_len);
 255	hdr->nexthdr = proto;
 256	hdr->hop_limit = hlimit;
 257
 258	hdr->saddr = fl6->saddr;
 259	hdr->daddr = *first_hop;
 260
 261	skb->protocol = htons(ETH_P_IPV6);
 262	skb->priority = priority;
 263	skb->mark = mark;
 264
 265	mtu = dst_mtu(dst);
 266	if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
 267		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
 268			      IPSTATS_MIB_OUT, skb->len);
 269
  270		/* if the egress device is enslaved to an L3 master device, pass the
 271		 * skb to its handler for processing
 272		 */
 273		skb = l3mdev_ip6_out((struct sock *)sk, skb);
 274		if (unlikely(!skb))
 275			return 0;
 276
 277		/* hooks should never assume socket lock is held.
 278		 * we promote our socket to non const
 279		 */
 280		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
 281			       net, (struct sock *)sk, skb, NULL, dst->dev,
 282			       dst_output);
 283	}
 284
 285	skb->dev = dst->dev;
 286	/* ipv6_local_error() does not require socket lock,
 287	 * we promote our socket to non const
 288	 */
 289	ipv6_local_error((struct sock *)sk, EMSGSIZE, fl6, mtu);
 290
 291	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
 292	kfree_skb(skb);
 293	return -EMSGSIZE;
 294}
 295EXPORT_SYMBOL(ip6_xmit);
 296
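/* Editor's note: a hedged sketch, not part of the original file, showing
 * how a transport typically drives ip6_xmit().  The caller must already
 * have routed the packet (skb_dst_set()) and filled in the flow; the
 * helper below is hypothetical and passes no extension headers (opt ==
 * NULL), letting ip6_xmit() build the bare IPv6 header.
 */
static int example_xmit_one(struct sock *sk, struct sk_buff *skb,
			    struct flowi6 *fl6)
{
	/* tclass and priority choices mirror what TCP does for data skbs */
	return ip6_xmit(sk, skb, fl6, sk->sk_mark, NULL,
			inet6_sk(sk)->tclass, sk->sk_priority);
}
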
 297static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
 298{
 299	struct ip6_ra_chain *ra;
 300	struct sock *last = NULL;
 301
 302	read_lock(&ip6_ra_lock);
 303	for (ra = ip6_ra_chain; ra; ra = ra->next) {
 304		struct sock *sk = ra->sk;
 305		if (sk && ra->sel == sel &&
 306		    (!sk->sk_bound_dev_if ||
 307		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
 308			struct ipv6_pinfo *np = inet6_sk(sk);
 309
 310			if (np && np->rtalert_isolate &&
 311			    !net_eq(sock_net(sk), dev_net(skb->dev))) {
 312				continue;
 313			}
 314			if (last) {
 315				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 316				if (skb2)
 317					rawv6_rcv(last, skb2);
 318			}
 319			last = sk;
 320		}
 321	}
 322
 323	if (last) {
 324		rawv6_rcv(last, skb);
 325		read_unlock(&ip6_ra_lock);
 326		return 1;
 327	}
 328	read_unlock(&ip6_ra_lock);
 329	return 0;
 330}
 331
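/* Editor's note: ip6_call_ra_chain() above uses the classic "clone for
 * every listener but the last" pattern: each intermediate match gets a
 * clone, and the original skb is consumed by the final match, saving one
 * copy.  A hedged, self-contained sketch of the same idiom (the function
 * and its array argument are hypothetical):
 */
static void example_deliver_all(struct sock **socks, int n, struct sk_buff *skb)
{
	struct sock *last = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (last) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

			if (skb2)
				rawv6_rcv(last, skb2);
		}
		last = socks[i];
	}
	if (last)
		rawv6_rcv(last, skb);	/* original skb consumed here */
	else
		kfree_skb(skb);		/* nobody matched */
}
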
 332static int ip6_forward_proxy_check(struct sk_buff *skb)
 333{
 334	struct ipv6hdr *hdr = ipv6_hdr(skb);
 335	u8 nexthdr = hdr->nexthdr;
 336	__be16 frag_off;
 337	int offset;
 338
 339	if (ipv6_ext_hdr(nexthdr)) {
 340		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
 341		if (offset < 0)
 342			return 0;
 343	} else
 344		offset = sizeof(struct ipv6hdr);
 345
 346	if (nexthdr == IPPROTO_ICMPV6) {
 347		struct icmp6hdr *icmp6;
 348
 349		if (!pskb_may_pull(skb, (skb_network_header(skb) +
 350					 offset + 1 - skb->data)))
 351			return 0;
 352
 353		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);
 354
 355		switch (icmp6->icmp6_type) {
 356		case NDISC_ROUTER_SOLICITATION:
 357		case NDISC_ROUTER_ADVERTISEMENT:
 358		case NDISC_NEIGHBOUR_SOLICITATION:
 359		case NDISC_NEIGHBOUR_ADVERTISEMENT:
 360		case NDISC_REDIRECT:
 361			/* For reaction involving unicast neighbor discovery
 362			 * message destined to the proxied address, pass it to
 363			 * input function.
 364			 */
 365			return 1;
 366		default:
 367			break;
 368		}
 369	}
 370
 371	/*
 372	 * The proxying router can't forward traffic sent to a link-local
 373	 * address, so signal the sender and discard the packet. This
 374	 * behavior is clarified by the MIPv6 specification.
 375	 */
 376	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
 377		dst_link_failure(skb);
 378		return -1;
 379	}
 380
 381	return 0;
 382}
 383
 384static inline int ip6_forward_finish(struct net *net, struct sock *sk,
 385				     struct sk_buff *skb)
 386{
 387	struct dst_entry *dst = skb_dst(skb);
 388
 389	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
 390	__IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
 391
 392#ifdef CONFIG_NET_SWITCHDEV
 393	if (skb->offload_l3_fwd_mark) {
 394		consume_skb(skb);
 395		return 0;
 396	}
 397#endif
 398
 399	skb->tstamp = 0;
 400	return dst_output(net, sk, skb);
 401}
 402
 403static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
 404{
 405	if (skb->len <= mtu)
 406		return false;
 407
 408	/* ipv6 conntrack defrag sets max_frag_size + ignore_df */
 409	if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
 410		return true;
 411
 412	if (skb->ignore_df)
 413		return false;
 414
 415	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
 416		return false;
 417
 418	return true;
 419}
 420
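/* Editor's note (not in the original source): worked values for
 * ip6_pkt_too_big() with mtu == 1280:
 *   - skb->len 1200                                    -> fits, false
 *   - skb->len 1500, conntrack-defragmented with
 *     frag_max_size == 1400 (> mtu)                    -> true, even
 *     though the defrag code also sets ignore_df
 *   - skb->len 1500, locally generated, ignore_df set  -> false, will be
 *     refragmented on output
 *   - GSO skb whose individual segments fit in 1280    -> false
 */
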
 421int ip6_forward(struct sk_buff *skb)
 422{
 423	struct inet6_dev *idev = __in6_dev_get_safely(skb->dev);
 424	struct dst_entry *dst = skb_dst(skb);
 425	struct ipv6hdr *hdr = ipv6_hdr(skb);
 426	struct inet6_skb_parm *opt = IP6CB(skb);
 427	struct net *net = dev_net(dst->dev);
 428	u32 mtu;
 429
 430	if (net->ipv6.devconf_all->forwarding == 0)
 431		goto error;
 432
 433	if (skb->pkt_type != PACKET_HOST)
 434		goto drop;
 435
 436	if (unlikely(skb->sk))
 437		goto drop;
 438
 439	if (skb_warn_if_lro(skb))
 440		goto drop;
 441
 442	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
 443		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
 444		goto drop;
 445	}
 446
 447	skb_forward_csum(skb);
 448
  449	/*
  450	 *	We do NOT do any processing on RA packets;
  451	 *	they are pushed to user level AS IS, with no
  452	 *	guarantee that the application will be able
  453	 *	to interpret them. The reason is that we
  454	 *	cannot do anything clever here.
  455	 *
  456	 *	We are not the end node, so if the packet contains
  457	 *	AH/ESP we cannot do anything.
  458	 *	Defragmentation would also be a mistake: RA packets
  459	 *	cannot be fragmented, because there is no guarantee
  460	 *	that different fragments will follow the same path. --ANK
  461	 */
 462	if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
 463		if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
 464			return 0;
 465	}
 466
 467	/*
  468	 *	check and decrement the hop limit
 469	 */
 470	if (hdr->hop_limit <= 1) {
 471		/* Force OUTPUT device used as source address */
 472		skb->dev = dst->dev;
 473		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
 474		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
 475
 476		kfree_skb(skb);
 477		return -ETIMEDOUT;
 478	}
 479
 480	/* XXX: idev->cnf.proxy_ndp? */
 481	if (net->ipv6.devconf_all->proxy_ndp &&
 482	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
 483		int proxied = ip6_forward_proxy_check(skb);
 484		if (proxied > 0)
 485			return ip6_input(skb);
 486		else if (proxied < 0) {
 487			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
 488			goto drop;
 489		}
 490	}
 491
 492	if (!xfrm6_route_forward(skb)) {
 493		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
 494		goto drop;
 495	}
 496	dst = skb_dst(skb);
 497
 498	/* IPv6 specs say nothing about it, but it is clear that we cannot
 499	   send redirects to source routed frames.
 500	   We don't send redirects to frames decapsulated from IPsec.
 501	 */
 502	if (IP6CB(skb)->iif == dst->dev->ifindex &&
 503	    opt->srcrt == 0 && !skb_sec_path(skb)) {
 504		struct in6_addr *target = NULL;
 505		struct inet_peer *peer;
 506		struct rt6_info *rt;
 507
 508		/*
 509		 *	incoming and outgoing devices are the same
 510		 *	send a redirect.
 511		 */
 512
 513		rt = (struct rt6_info *) dst;
 514		if (rt->rt6i_flags & RTF_GATEWAY)
 515			target = &rt->rt6i_gateway;
 516		else
 517			target = &hdr->daddr;
 518
 519		peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr, 1);
 520
 521		/* Limit redirects both by destination (here)
 522		   and by source (inside ndisc_send_redirect)
 523		 */
 524		if (inet_peer_xrlim_allow(peer, 1*HZ))
 525			ndisc_send_redirect(skb, target);
 526		if (peer)
 527			inet_putpeer(peer);
 528	} else {
 529		int addrtype = ipv6_addr_type(&hdr->saddr);
 530
 531		/* This check is security critical. */
 532		if (addrtype == IPV6_ADDR_ANY ||
 533		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
 534			goto error;
 535		if (addrtype & IPV6_ADDR_LINKLOCAL) {
 536			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
 537				    ICMPV6_NOT_NEIGHBOUR, 0);
 538			goto error;
 539		}
 540	}
 541
 542	mtu = ip6_dst_mtu_forward(dst);
 543	if (mtu < IPV6_MIN_MTU)
 544		mtu = IPV6_MIN_MTU;
 545
 546	if (ip6_pkt_too_big(skb, mtu)) {
 547		/* Again, force OUTPUT device used as source address */
 548		skb->dev = dst->dev;
 549		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 550		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INTOOBIGERRORS);
 551		__IP6_INC_STATS(net, ip6_dst_idev(dst),
 552				IPSTATS_MIB_FRAGFAILS);
 553		kfree_skb(skb);
 554		return -EMSGSIZE;
 555	}
 556
 557	if (skb_cow(skb, dst->dev->hard_header_len)) {
 558		__IP6_INC_STATS(net, ip6_dst_idev(dst),
 559				IPSTATS_MIB_OUTDISCARDS);
 560		goto drop;
 561	}
 562
 563	hdr = ipv6_hdr(skb);
 564
  565	/* Decrementing the hop limit is delayed until after the skb COW */
 566
 567	hdr->hop_limit--;
 568
 569	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
 570		       net, NULL, skb, skb->dev, dst->dev,
 571		       ip6_forward_finish);
 572
 573error:
 574	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
 575drop:
 576	kfree_skb(skb);
 577	return -EINVAL;
 578}
 579
 580static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
 581{
 582	to->pkt_type = from->pkt_type;
 583	to->priority = from->priority;
 584	to->protocol = from->protocol;
 585	skb_dst_drop(to);
 586	skb_dst_set(to, dst_clone(skb_dst(from)));
 587	to->dev = from->dev;
 588	to->mark = from->mark;
 589
 590	skb_copy_hash(to, from);
 591
 592#ifdef CONFIG_NET_SCHED
 593	to->tc_index = from->tc_index;
 594#endif
 595	nf_copy(to, from);
 596	skb_ext_copy(to, from);
 597	skb_copy_secmark(to, from);
 598}
 599
 600int ip6_fraglist_init(struct sk_buff *skb, unsigned int hlen, u8 *prevhdr,
 601		      u8 nexthdr, __be32 frag_id,
 602		      struct ip6_fraglist_iter *iter)
 603{
 604	unsigned int first_len;
 605	struct frag_hdr *fh;
 606
 607	/* BUILD HEADER */
 608	*prevhdr = NEXTHDR_FRAGMENT;
 609	iter->tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
 610	if (!iter->tmp_hdr)
 611		return -ENOMEM;
 612
 613	iter->frag = skb_shinfo(skb)->frag_list;
 614	skb_frag_list_init(skb);
 615
 616	iter->offset = 0;
 617	iter->hlen = hlen;
 618	iter->frag_id = frag_id;
 619	iter->nexthdr = nexthdr;
 620
 621	__skb_pull(skb, hlen);
 622	fh = __skb_push(skb, sizeof(struct frag_hdr));
 623	__skb_push(skb, hlen);
 624	skb_reset_network_header(skb);
 625	memcpy(skb_network_header(skb), iter->tmp_hdr, hlen);
 626
 627	fh->nexthdr = nexthdr;
 628	fh->reserved = 0;
 629	fh->frag_off = htons(IP6_MF);
 630	fh->identification = frag_id;
 631
 632	first_len = skb_pagelen(skb);
 633	skb->data_len = first_len - skb_headlen(skb);
 634	skb->len = first_len;
 635	ipv6_hdr(skb)->payload_len = htons(first_len - sizeof(struct ipv6hdr));
 636
 637	return 0;
 638}
 639EXPORT_SYMBOL(ip6_fraglist_init);
 640
 641void ip6_fraglist_prepare(struct sk_buff *skb,
 642			  struct ip6_fraglist_iter *iter)
 643{
 644	struct sk_buff *frag = iter->frag;
 645	unsigned int hlen = iter->hlen;
 646	struct frag_hdr *fh;
 647
 648	frag->ip_summed = CHECKSUM_NONE;
 649	skb_reset_transport_header(frag);
 650	fh = __skb_push(frag, sizeof(struct frag_hdr));
 651	__skb_push(frag, hlen);
 652	skb_reset_network_header(frag);
 653	memcpy(skb_network_header(frag), iter->tmp_hdr, hlen);
 654	iter->offset += skb->len - hlen - sizeof(struct frag_hdr);
 655	fh->nexthdr = iter->nexthdr;
 656	fh->reserved = 0;
 657	fh->frag_off = htons(iter->offset);
 658	if (frag->next)
 659		fh->frag_off |= htons(IP6_MF);
 660	fh->identification = iter->frag_id;
 661	ipv6_hdr(frag)->payload_len = htons(frag->len - sizeof(struct ipv6hdr));
 662	ip6_copy_metadata(frag, skb);
 663}
 664EXPORT_SYMBOL(ip6_fraglist_prepare);
 665
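/* Editor's note: a hedged sketch, not in the original file, of the
 * exported fraglist-iterator API above; it is the same loop shape that
 * ip6_fragment() uses for its fast path further down.  "out" stands for
 * any caller-chosen output callback.
 */
static int example_fraglist_xmit(struct net *net, struct sock *sk,
				 struct sk_buff *skb, unsigned int hlen,
				 u8 *prevhdr, u8 nexthdr, __be32 frag_id,
				 int (*out)(struct net *, struct sock *,
					    struct sk_buff *))
{
	struct ip6_fraglist_iter iter;
	int err;

	err = ip6_fraglist_init(skb, hlen, prevhdr, nexthdr, frag_id, &iter);
	if (err < 0)
		return err;

	for (;;) {
		/* fix up the next fragment before the current one is sent */
		if (iter.frag)
			ip6_fraglist_prepare(skb, &iter);

		err = out(net, sk, skb);
		if (err || !iter.frag)
			break;

		skb = ip6_fraglist_next(&iter);
	}

	kfree(iter.tmp_hdr);	/* copy of the original header chain */
	return err;
}
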
 666void ip6_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int mtu,
 667		   unsigned short needed_tailroom, int hdr_room, u8 *prevhdr,
 668		   u8 nexthdr, __be32 frag_id, struct ip6_frag_state *state)
 669{
 670	state->prevhdr = prevhdr;
 671	state->nexthdr = nexthdr;
 672	state->frag_id = frag_id;
 673
 674	state->hlen = hlen;
 675	state->mtu = mtu;
 676
 677	state->left = skb->len - hlen;	/* Space per frame */
 678	state->ptr = hlen;		/* Where to start from */
 679
 680	state->hroom = hdr_room;
 681	state->troom = needed_tailroom;
 682
 683	state->offset = 0;
 684}
 685EXPORT_SYMBOL(ip6_frag_init);
 686
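/* Editor's note (not in the original source): worked numbers for the
 * state initialized above, assuming hlen == 40 (a bare IPv6 header as the
 * unfragmentable part) and a 1500-byte path MTU.  ip6_fragment() computes
 * the per-fragment data budget as
 *
 *	mtu = 1500 - 40 - sizeof(struct frag_hdr) = 1452
 *
 * and ip6_frag_next() rounds every non-final fragment down to a multiple
 * of 8 (len &= ~7), giving 1448 payload bytes per fragment; only the last
 * fragment may carry an unaligned tail.
 */
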
 687struct sk_buff *ip6_frag_next(struct sk_buff *skb, struct ip6_frag_state *state)
 688{
 689	u8 *prevhdr = state->prevhdr, *fragnexthdr_offset;
 690	struct sk_buff *frag;
 691	struct frag_hdr *fh;
 692	unsigned int len;
 693
 694	len = state->left;
 695	/* IF: it doesn't fit, use 'mtu' - the data space left */
 696	if (len > state->mtu)
 697		len = state->mtu;
 698	/* IF: we are not sending up to and including the packet end
 699	   then align the next start on an eight byte boundary */
 700	if (len < state->left)
 701		len &= ~7;
 702
 703	/* Allocate buffer */
 704	frag = alloc_skb(len + state->hlen + sizeof(struct frag_hdr) +
 705			 state->hroom + state->troom, GFP_ATOMIC);
 706	if (!frag)
 707		return ERR_PTR(-ENOMEM);
 708
 709	/*
 710	 *	Set up data on packet
 711	 */
 712
 713	ip6_copy_metadata(frag, skb);
 714	skb_reserve(frag, state->hroom);
 715	skb_put(frag, len + state->hlen + sizeof(struct frag_hdr));
 716	skb_reset_network_header(frag);
 717	fh = (struct frag_hdr *)(skb_network_header(frag) + state->hlen);
 718	frag->transport_header = (frag->network_header + state->hlen +
 719				  sizeof(struct frag_hdr));
 720
 721	/*
 722	 *	Charge the memory for the fragment to any owner
 723	 *	it might possess
 724	 */
 725	if (skb->sk)
 726		skb_set_owner_w(frag, skb->sk);
 727
 728	/*
 729	 *	Copy the packet header into the new buffer.
 730	 */
 731	skb_copy_from_linear_data(skb, skb_network_header(frag), state->hlen);
 732
 733	fragnexthdr_offset = skb_network_header(frag);
 734	fragnexthdr_offset += prevhdr - skb_network_header(skb);
 735	*fragnexthdr_offset = NEXTHDR_FRAGMENT;
 736
 737	/*
 738	 *	Build fragment header.
 739	 */
 740	fh->nexthdr = state->nexthdr;
 741	fh->reserved = 0;
 742	fh->identification = state->frag_id;
 743
 744	/*
 745	 *	Copy a block of the IP datagram.
 746	 */
 747	BUG_ON(skb_copy_bits(skb, state->ptr, skb_transport_header(frag),
 748			     len));
 749	state->left -= len;
 750
 751	fh->frag_off = htons(state->offset);
 752	if (state->left > 0)
 753		fh->frag_off |= htons(IP6_MF);
 754	ipv6_hdr(frag)->payload_len = htons(frag->len - sizeof(struct ipv6hdr));
 755
 756	state->ptr += len;
 757	state->offset += len;
 758
 759	return frag;
 760}
 761EXPORT_SYMBOL(ip6_frag_next);
 762
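/* Editor's note: a hedged sketch, not in the original file, of the
 * exported slow-path state machine above, mirroring the loop at the end
 * of ip6_fragment(): keep pulling fully built fragments out of the state
 * until the payload is exhausted.
 */
static int example_slow_frag(struct net *net, struct sock *sk,
			     struct sk_buff *skb,
			     struct ip6_frag_state *state,
			     int (*out)(struct net *, struct sock *,
					struct sk_buff *))
{
	struct sk_buff *frag;
	int err;

	while (state->left > 0) {
		frag = ip6_frag_next(skb, state);
		if (IS_ERR(frag))
			return PTR_ERR(frag);

		err = out(net, sk, frag);
		if (err)
			return err;
	}

	consume_skb(skb);	/* all payload has been copied out */
	return 0;
}
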
 763int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 764		 int (*output)(struct net *, struct sock *, struct sk_buff *))
 765{
 766	struct sk_buff *frag;
 767	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
 768	struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
 769				inet6_sk(skb->sk) : NULL;
 770	struct ip6_frag_state state;
 771	unsigned int mtu, hlen, nexthdr_offset;
 772	ktime_t tstamp = skb->tstamp;
 773	int hroom, err = 0;
 774	__be32 frag_id;
 775	u8 *prevhdr, nexthdr = 0;
 776
 777	err = ip6_find_1stfragopt(skb, &prevhdr);
 778	if (err < 0)
 779		goto fail;
 780	hlen = err;
 781	nexthdr = *prevhdr;
 782	nexthdr_offset = prevhdr - skb_network_header(skb);
 783
 784	mtu = ip6_skb_dst_mtu(skb);
 785
 786	/* We must not fragment if the socket is set to force MTU discovery
  787	 * or if the skb is not generated by a local socket.
 788	 */
 789	if (unlikely(!skb->ignore_df && skb->len > mtu))
 790		goto fail_toobig;
 791
 792	if (IP6CB(skb)->frag_max_size) {
 793		if (IP6CB(skb)->frag_max_size > mtu)
 794			goto fail_toobig;
 795
 796		/* don't send fragments larger than what we received */
 797		mtu = IP6CB(skb)->frag_max_size;
 798		if (mtu < IPV6_MIN_MTU)
 799			mtu = IPV6_MIN_MTU;
 800	}
 801
 802	if (np && np->frag_size < mtu) {
 803		if (np->frag_size)
 804			mtu = np->frag_size;
 805	}
 806	if (mtu < hlen + sizeof(struct frag_hdr) + 8)
 807		goto fail_toobig;
 808	mtu -= hlen + sizeof(struct frag_hdr);
 809
 810	frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
 811				    &ipv6_hdr(skb)->saddr);
 812
 813	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 814	    (err = skb_checksum_help(skb)))
 815		goto fail;
 816
 817	prevhdr = skb_network_header(skb) + nexthdr_offset;
 818	hroom = LL_RESERVED_SPACE(rt->dst.dev);
 819	if (skb_has_frag_list(skb)) {
 820		unsigned int first_len = skb_pagelen(skb);
 821		struct ip6_fraglist_iter iter;
 822		struct sk_buff *frag2;
 823
 824		if (first_len - hlen > mtu ||
 825		    ((first_len - hlen) & 7) ||
 826		    skb_cloned(skb) ||
 827		    skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
 828			goto slow_path;
 829
 830		skb_walk_frags(skb, frag) {
 831			/* Correct geometry. */
 832			if (frag->len > mtu ||
 833			    ((frag->len & 7) && frag->next) ||
 834			    skb_headroom(frag) < (hlen + hroom + sizeof(struct frag_hdr)))
 835				goto slow_path_clean;
 836
 837			/* Partially cloned skb? */
 838			if (skb_shared(frag))
 839				goto slow_path_clean;
 840
 841			BUG_ON(frag->sk);
 842			if (skb->sk) {
 843				frag->sk = skb->sk;
 844				frag->destructor = sock_wfree;
 845			}
 846			skb->truesize -= frag->truesize;
 847		}
 848
 849		err = ip6_fraglist_init(skb, hlen, prevhdr, nexthdr, frag_id,
 850					&iter);
 851		if (err < 0)
 852			goto fail;
 853
 854		for (;;) {
  855			/* Prepare the header of the next frame
  856			 * before the previous one is sent. */
 857			if (iter.frag)
 858				ip6_fraglist_prepare(skb, &iter);
 859
 860			skb->tstamp = tstamp;
 861			err = output(net, sk, skb);
 862			if (!err)
 863				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
 864					      IPSTATS_MIB_FRAGCREATES);
 865
 866			if (err || !iter.frag)
 867				break;
 868
 869			skb = ip6_fraglist_next(&iter);
 870		}
 871
 872		kfree(iter.tmp_hdr);
 873
 874		if (err == 0) {
 875			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
 876				      IPSTATS_MIB_FRAGOKS);
 877			return 0;
 878		}
 879
 880		kfree_skb_list(iter.frag);
 881
 882		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
 883			      IPSTATS_MIB_FRAGFAILS);
 884		return err;
 885
 886slow_path_clean:
 887		skb_walk_frags(skb, frag2) {
 888			if (frag2 == frag)
 889				break;
 890			frag2->sk = NULL;
 891			frag2->destructor = NULL;
 892			skb->truesize += frag2->truesize;
 893		}
 894	}
 895
 896slow_path:
 897	/*
 898	 *	Fragment the datagram.
 899	 */
 900
 901	ip6_frag_init(skb, hlen, mtu, rt->dst.dev->needed_tailroom,
 902		      LL_RESERVED_SPACE(rt->dst.dev), prevhdr, nexthdr, frag_id,
 903		      &state);
 904
 905	/*
 906	 *	Keep copying data until we run out.
 907	 */
 908
 909	while (state.left > 0) {
 910		frag = ip6_frag_next(skb, &state);
 911		if (IS_ERR(frag)) {
 912			err = PTR_ERR(frag);
 913			goto fail;
 914		}
 915
 916		/*
 917		 *	Put this fragment into the sending queue.
 918		 */
 919		frag->tstamp = tstamp;
 920		err = output(net, sk, frag);
 921		if (err)
 922			goto fail;
 923
 924		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 925			      IPSTATS_MIB_FRAGCREATES);
 926	}
 927	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 928		      IPSTATS_MIB_FRAGOKS);
 929	consume_skb(skb);
 930	return err;
 931
 932fail_toobig:
 933	if (skb->sk && dst_allfrag(skb_dst(skb)))
 934		sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);
 935
 936	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 937	err = -EMSGSIZE;
 938
 939fail:
 940	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 941		      IPSTATS_MIB_FRAGFAILS);
 942	kfree_skb(skb);
 943	return err;
 944}
 945
 946static inline int ip6_rt_check(const struct rt6key *rt_key,
 947			       const struct in6_addr *fl_addr,
 948			       const struct in6_addr *addr_cache)
 949{
 950	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
 951		(!addr_cache || !ipv6_addr_equal(fl_addr, addr_cache));
 952}
 953
 954static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
 955					  struct dst_entry *dst,
 956					  const struct flowi6 *fl6)
 957{
 958	struct ipv6_pinfo *np = inet6_sk(sk);
 959	struct rt6_info *rt;
 960
 961	if (!dst)
 962		goto out;
 963
 964	if (dst->ops->family != AF_INET6) {
 965		dst_release(dst);
 966		return NULL;
 967	}
 968
 969	rt = (struct rt6_info *)dst;
  970	/* Yes, checking route validity in the not-connected
  971	 * case is not very simple. Take into account
  972	 * that we do not support routing by source, TOS,
 973	 * and MSG_DONTROUTE		--ANK (980726)
 974	 *
 975	 * 1. ip6_rt_check(): If route was host route,
 976	 *    check that cached destination is current.
 977	 *    If it is network route, we still may
 978	 *    check its validity using saved pointer
 979	 *    to the last used address: daddr_cache.
 980	 *    We do not want to save whole address now,
  981	 *    (because the main consumer of this service
  982	 *    is TCP, which does not have this problem),
 983	 *    so that the last trick works only on connected
 984	 *    sockets.
 985	 * 2. oif also should be the same.
 986	 */
 987	if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
 988#ifdef CONFIG_IPV6_SUBTREES
 989	    ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
 990#endif
 991	   (!(fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) &&
 992	      (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex))) {
 993		dst_release(dst);
 994		dst = NULL;
 995	}
 996
 997out:
 998	return dst;
 999}
1000
1001static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
1002			       struct dst_entry **dst, struct flowi6 *fl6)
1003{
1004#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1005	struct neighbour *n;
1006	struct rt6_info *rt;
1007#endif
1008	int err;
1009	int flags = 0;
1010
1011	/* The correct way to handle this would be to do
1012	 * ip6_route_get_saddr, and then ip6_route_output; however,
1013	 * the route-specific preferred source forces the
1014	 * ip6_route_output call _before_ ip6_route_get_saddr.
1015	 *
1016	 * In source specific routing (no src=any default route),
1017	 * ip6_route_output will fail given src=any saddr, though, so
1018	 * that's why we try it again later.
1019	 */
1020	if (ipv6_addr_any(&fl6->saddr) && (!*dst || !(*dst)->error)) {
1021		struct fib6_info *from;
1022		struct rt6_info *rt;
1023		bool had_dst = *dst != NULL;
1024
1025		if (!had_dst)
1026			*dst = ip6_route_output(net, sk, fl6);
1027		rt = (*dst)->error ? NULL : (struct rt6_info *)*dst;
1028
1029		rcu_read_lock();
1030		from = rt ? rcu_dereference(rt->from) : NULL;
1031		err = ip6_route_get_saddr(net, from, &fl6->daddr,
1032					  sk ? inet6_sk(sk)->srcprefs : 0,
1033					  &fl6->saddr);
1034		rcu_read_unlock();
1035
1036		if (err)
1037			goto out_err_release;
1038
1039		/* If we had an erroneous initial result, pretend it
1040		 * never existed and let the SA-enabled version take
1041		 * over.
1042		 */
1043		if (!had_dst && (*dst)->error) {
1044			dst_release(*dst);
1045			*dst = NULL;
1046		}
1047
1048		if (fl6->flowi6_oif)
1049			flags |= RT6_LOOKUP_F_IFACE;
1050	}
1051
1052	if (!*dst)
1053		*dst = ip6_route_output_flags(net, sk, fl6, flags);
1054
1055	err = (*dst)->error;
1056	if (err)
1057		goto out_err_release;
1058
1059#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1060	/*
1061	 * Here if the dst entry we've looked up
1062	 * has a neighbour entry that is in the INCOMPLETE
1063	 * state and the src address from the flow is
1064	 * marked as OPTIMISTIC, we release the found
 1065	 * dst entry and replace it with the
1066	 * dst entry of the nexthop router
1067	 */
1068	rt = (struct rt6_info *) *dst;
1069	rcu_read_lock_bh();
1070	n = __ipv6_neigh_lookup_noref(rt->dst.dev,
1071				      rt6_nexthop(rt, &fl6->daddr));
1072	err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
1073	rcu_read_unlock_bh();
1074
1075	if (err) {
1076		struct inet6_ifaddr *ifp;
1077		struct flowi6 fl_gw6;
1078		int redirect;
1079
1080		ifp = ipv6_get_ifaddr(net, &fl6->saddr,
1081				      (*dst)->dev, 1);
1082
1083		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
1084		if (ifp)
1085			in6_ifa_put(ifp);
1086
1087		if (redirect) {
1088			/*
1089			 * We need to get the dst entry for the
1090			 * default router instead
1091			 */
1092			dst_release(*dst);
1093			memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
1094			memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
1095			*dst = ip6_route_output(net, sk, &fl_gw6);
1096			err = (*dst)->error;
1097			if (err)
1098				goto out_err_release;
1099		}
1100	}
1101#endif
1102	if (ipv6_addr_v4mapped(&fl6->saddr) &&
1103	    !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) {
1104		err = -EAFNOSUPPORT;
1105		goto out_err_release;
1106	}
1107
1108	return 0;
1109
1110out_err_release:
1111	dst_release(*dst);
1112	*dst = NULL;
1113
1114	if (err == -ENETUNREACH)
1115		IP6_INC_STATS(net, NULL, IPSTATS_MIB_OUTNOROUTES);
1116	return err;
1117}
1118
1119/**
1120 *	ip6_dst_lookup - perform route lookup on flow
1121 *	@net: Network namespace to perform lookup in
1122 *	@sk: socket which provides route info
1123 *	@dst: pointer to dst_entry * for result
1124 *	@fl6: flow to lookup
1125 *
1126 *	This function performs a route lookup on the given flow.
1127 *
1128 *	It returns zero on success, or a standard errno code on error.
1129 */
1130int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
1131		   struct flowi6 *fl6)
1132{
1133	*dst = NULL;
1134	return ip6_dst_lookup_tail(net, sk, dst, fl6);
1135}
1136EXPORT_SYMBOL_GPL(ip6_dst_lookup);
1137
1138/**
1139 *	ip6_dst_lookup_flow - perform route lookup on flow with ipsec
1140 *	@net: Network namespace to perform lookup in
1141 *	@sk: socket which provides route info
1142 *	@fl6: flow to lookup
1143 *	@final_dst: final destination address for ipsec lookup
1144 *
1145 *	This function performs a route lookup on the given flow.
1146 *
1147 *	It returns a valid dst pointer on success, or a pointer encoded
1148 *	error code.
1149 */
1150struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6,
1151				      const struct in6_addr *final_dst)
1152{
1153	struct dst_entry *dst = NULL;
1154	int err;
1155
1156	err = ip6_dst_lookup_tail(net, sk, &dst, fl6);
1157	if (err)
1158		return ERR_PTR(err);
1159	if (final_dst)
1160		fl6->daddr = *final_dst;
1161
1162	return xfrm_lookup_route(net, dst, flowi6_to_flowi(fl6), sk, 0);
1163}
1164EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
1165
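/* Editor's note: a hedged usage sketch (not in the original file).  The
 * ERR_PTR convention above means callers test the result with IS_ERR()
 * rather than for NULL, and must drop their reference with dst_release()
 * unless something like skb_dst_set() consumes it.
 */
static int example_route_flow(struct net *net, struct sock *sk,
			      struct flowi6 *fl6)
{
	struct dst_entry *dst;

	dst = ip6_dst_lookup_flow(net, sk, fl6, NULL);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	/* ... use the route, e.g. skb_dst_set(skb, dst) ... */

	dst_release(dst);
	return 0;
}
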
1166/**
1167 *	ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
1168 *	@sk: socket which provides the dst cache and route info
1169 *	@fl6: flow to lookup
1170 *	@final_dst: final destination address for ipsec lookup
1171 *	@connected: whether @sk is connected or not
1172 *
1173 *	This function performs a route lookup on the given flow with the
1174 *	possibility of using the cached route in the socket if it is valid.
1175 *	It will take the socket dst lock when operating on the dst cache.
1176 *	As a result, this function can only be used in process context.
1177 *
1178 *	In addition, for a connected socket, cache the dst in the socket
1179 *	if the current cache is not valid.
1180 *
1181 *	It returns a valid dst pointer on success, or a pointer encoded
1182 *	error code.
1183 */
1184struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
1185					 const struct in6_addr *final_dst,
1186					 bool connected)
1187{
1188	struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
1189
1190	dst = ip6_sk_dst_check(sk, dst, fl6);
1191	if (dst)
1192		return dst;
1193
1194	dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_dst);
1195	if (connected && !IS_ERR(dst))
1196		ip6_sk_dst_store_flow(sk, dst_clone(dst), fl6);
1197
1198	return dst;
1199}
1200EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
1201
1202/**
1203 *      ip6_dst_lookup_tunnel - perform route lookup on tunnel
1204 *      @skb: Packet for which lookup is done
1205 *      @dev: Tunnel device
1206 *      @net: Network namespace of tunnel device
1207 *      @sock: Socket which provides route info
1208 *      @saddr: Memory to store the src ip address
1209 *      @info: Tunnel information
1210 *      @protocol: IP protocol
1211 *      @use_cache: Flag to enable cache usage
1212 *      This function performs a route lookup on a tunnel
1213 *
1214 *      It returns a valid dst pointer and stores src address to be used in
1215 *      tunnel in param saddr on success, else a pointer encoded error code.
1216 */
1217
1218struct dst_entry *ip6_dst_lookup_tunnel(struct sk_buff *skb,
1219					struct net_device *dev,
1220					struct net *net,
1221					struct socket *sock,
1222					struct in6_addr *saddr,
1223					const struct ip_tunnel_info *info,
1224					u8 protocol,
1225					bool use_cache)
1226{
1227	struct dst_entry *dst = NULL;
1228#ifdef CONFIG_DST_CACHE
1229	struct dst_cache *dst_cache;
1230#endif
1231	struct flowi6 fl6;
1232	__u8 prio;
1233
1234#ifdef CONFIG_DST_CACHE
1235	dst_cache = (struct dst_cache *)&info->dst_cache;
1236	if (use_cache) {
1237		dst = dst_cache_get_ip6(dst_cache, saddr);
1238		if (dst)
1239			return dst;
1240	}
1241#endif
1242	memset(&fl6, 0, sizeof(fl6));
1243	fl6.flowi6_mark = skb->mark;
1244	fl6.flowi6_proto = protocol;
1245	fl6.daddr = info->key.u.ipv6.dst;
1246	fl6.saddr = info->key.u.ipv6.src;
1247	prio = info->key.tos;
1248	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(prio),
1249					  info->key.label);
1250
1251	dst = ipv6_stub->ipv6_dst_lookup_flow(net, sock->sk, &fl6,
1252					      NULL);
1253	if (IS_ERR(dst)) {
1254		netdev_dbg(dev, "no route to %pI6\n", &fl6.daddr);
1255		return ERR_PTR(-ENETUNREACH);
1256	}
1257	if (dst->dev == dev) { /* is this necessary? */
1258		netdev_dbg(dev, "circular route to %pI6\n", &fl6.daddr);
1259		dst_release(dst);
1260		return ERR_PTR(-ELOOP);
1261	}
1262#ifdef CONFIG_DST_CACHE
1263	if (use_cache)
1264		dst_cache_set_ip6(dst_cache, dst, &fl6.saddr);
1265#endif
1266	*saddr = fl6.saddr;
1267	return dst;
1268}
1269EXPORT_SYMBOL_GPL(ip6_dst_lookup_tunnel);
1270
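/* Editor's note: a hedged sketch (not in the original file) of a UDP
 * tunnel transmit path driving the helper above, in the style of the
 * vxlan/geneve drivers; use_cache is forced off here to keep the example
 * minimal.  All names below are illustrative.
 */
static int example_tnl_xmit(struct sk_buff *skb, struct net_device *dev,
			    struct net *net, struct socket *sock,
			    const struct ip_tunnel_info *info)
{
	struct in6_addr saddr;
	struct dst_entry *dst;

	dst = ip6_dst_lookup_tunnel(skb, dev, net, sock, &saddr, info,
				    IPPROTO_UDP, false);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	skb_dst_set(skb, dst);	/* consumes the dst reference */
	/* ... push the outer UDP and IPv6 headers using saddr ... */
	return 0;
}
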
1271static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
1272					       gfp_t gfp)
1273{
1274	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1275}
1276
1277static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
1278						gfp_t gfp)
1279{
1280	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1281}
1282
1283static void ip6_append_data_mtu(unsigned int *mtu,
1284				int *maxfraglen,
1285				unsigned int fragheaderlen,
1286				struct sk_buff *skb,
1287				struct rt6_info *rt,
1288				unsigned int orig_mtu)
1289{
1290	if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
1291		if (!skb) {
1292			/* first fragment, reserve header_len */
1293			*mtu = orig_mtu - rt->dst.header_len;
1294
1295		} else {
1296			/*
 1297			 * this fragment is not the first; the header
 1298			 * space is regarded as data space.
1299			 */
1300			*mtu = orig_mtu;
1301		}
1302		*maxfraglen = ((*mtu - fragheaderlen) & ~7)
1303			      + fragheaderlen - sizeof(struct frag_hdr);
1304	}
1305}
1306
1307static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
1308			  struct inet6_cork *v6_cork, struct ipcm6_cookie *ipc6,
1309			  struct rt6_info *rt, struct flowi6 *fl6)
1310{
1311	struct ipv6_pinfo *np = inet6_sk(sk);
1312	unsigned int mtu;
1313	struct ipv6_txoptions *opt = ipc6->opt;
1314
1315	/*
1316	 * setup for corking
1317	 */
1318	if (opt) {
1319		if (WARN_ON(v6_cork->opt))
1320			return -EINVAL;
1321
1322		v6_cork->opt = kzalloc(sizeof(*opt), sk->sk_allocation);
1323		if (unlikely(!v6_cork->opt))
1324			return -ENOBUFS;
1325
1326		v6_cork->opt->tot_len = sizeof(*opt);
1327		v6_cork->opt->opt_flen = opt->opt_flen;
1328		v6_cork->opt->opt_nflen = opt->opt_nflen;
1329
1330		v6_cork->opt->dst0opt = ip6_opt_dup(opt->dst0opt,
1331						    sk->sk_allocation);
1332		if (opt->dst0opt && !v6_cork->opt->dst0opt)
1333			return -ENOBUFS;
1334
1335		v6_cork->opt->dst1opt = ip6_opt_dup(opt->dst1opt,
1336						    sk->sk_allocation);
1337		if (opt->dst1opt && !v6_cork->opt->dst1opt)
1338			return -ENOBUFS;
1339
1340		v6_cork->opt->hopopt = ip6_opt_dup(opt->hopopt,
1341						   sk->sk_allocation);
1342		if (opt->hopopt && !v6_cork->opt->hopopt)
1343			return -ENOBUFS;
1344
1345		v6_cork->opt->srcrt = ip6_rthdr_dup(opt->srcrt,
1346						    sk->sk_allocation);
1347		if (opt->srcrt && !v6_cork->opt->srcrt)
1348			return -ENOBUFS;
1349
 1350		/* need source address above --miyazawa */
1351	}
1352	dst_hold(&rt->dst);
1353	cork->base.dst = &rt->dst;
1354	cork->fl.u.ip6 = *fl6;
1355	v6_cork->hop_limit = ipc6->hlimit;
1356	v6_cork->tclass = ipc6->tclass;
1357	if (rt->dst.flags & DST_XFRM_TUNNEL)
1358		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1359		      READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst);
1360	else
1361		mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1362			READ_ONCE(rt->dst.dev->mtu) : dst_mtu(xfrm_dst_path(&rt->dst));
1363	if (np->frag_size < mtu) {
1364		if (np->frag_size)
1365			mtu = np->frag_size;
1366	}
1367	if (mtu < IPV6_MIN_MTU)
1368		return -EINVAL;
1369	cork->base.fragsize = mtu;
1370	cork->base.gso_size = ipc6->gso_size;
1371	cork->base.tx_flags = 0;
1372	cork->base.mark = ipc6->sockc.mark;
1373	sock_tx_timestamp(sk, ipc6->sockc.tsflags, &cork->base.tx_flags);
1374
1375	if (dst_allfrag(xfrm_dst_path(&rt->dst)))
1376		cork->base.flags |= IPCORK_ALLFRAG;
1377	cork->base.length = 0;
1378
1379	cork->base.transmit_time = ipc6->sockc.transmit_time;
1380
1381	return 0;
1382}
1383
1384static int __ip6_append_data(struct sock *sk,
1385			     struct flowi6 *fl6,
1386			     struct sk_buff_head *queue,
1387			     struct inet_cork *cork,
1388			     struct inet6_cork *v6_cork,
1389			     struct page_frag *pfrag,
1390			     int getfrag(void *from, char *to, int offset,
1391					 int len, int odd, struct sk_buff *skb),
1392			     void *from, int length, int transhdrlen,
1393			     unsigned int flags, struct ipcm6_cookie *ipc6)
1394{
1395	struct sk_buff *skb, *skb_prev = NULL;
1396	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu, pmtu;
1397	struct ubuf_info *uarg = NULL;
1398	int exthdrlen = 0;
1399	int dst_exthdrlen = 0;
1400	int hh_len;
1401	int copy;
1402	int err;
1403	int offset = 0;
1404	u32 tskey = 0;
1405	struct rt6_info *rt = (struct rt6_info *)cork->dst;
1406	struct ipv6_txoptions *opt = v6_cork->opt;
1407	int csummode = CHECKSUM_NONE;
1408	unsigned int maxnonfragsize, headersize;
1409	unsigned int wmem_alloc_delta = 0;
1410	bool paged, extra_uref = false;
1411
1412	skb = skb_peek_tail(queue);
1413	if (!skb) {
1414		exthdrlen = opt ? opt->opt_flen : 0;
1415		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
1416	}
1417
1418	paged = !!cork->gso_size;
1419	mtu = cork->gso_size ? IP6_MAX_MTU : cork->fragsize;
1420	orig_mtu = mtu;
1421
1422	if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
1423	    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
1424		tskey = sk->sk_tskey++;
1425
1426	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
1427
1428	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
1429			(opt ? opt->opt_nflen : 0);
1430	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
1431		     sizeof(struct frag_hdr);
1432
1433	headersize = sizeof(struct ipv6hdr) +
1434		     (opt ? opt->opt_flen + opt->opt_nflen : 0) +
1435		     (dst_allfrag(&rt->dst) ?
1436		      sizeof(struct frag_hdr) : 0) +
1437		     rt->rt6i_nfheader_len;
1438
1439	/* as per RFC 7112 section 5, the entire IPv6 Header Chain must fit
1440	 * the first fragment
1441	 */
1442	if (headersize + transhdrlen > mtu)
1443		goto emsgsize;
1444
1445	if (cork->length + length > mtu - headersize && ipc6->dontfrag &&
1446	    (sk->sk_protocol == IPPROTO_UDP ||
1447	     sk->sk_protocol == IPPROTO_RAW)) {
1448		ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
1449				sizeof(struct ipv6hdr));
1450		goto emsgsize;
1451	}
1452
1453	if (ip6_sk_ignore_df(sk))
1454		maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
1455	else
1456		maxnonfragsize = mtu;
1457
1458	if (cork->length + length > maxnonfragsize - headersize) {
1459emsgsize:
1460		pmtu = max_t(int, mtu - headersize + sizeof(struct ipv6hdr), 0);
1461		ipv6_local_error(sk, EMSGSIZE, fl6, pmtu);
1462		return -EMSGSIZE;
1463	}
1464
1465	/* CHECKSUM_PARTIAL only with no extension headers and when
1466	 * we are not going to fragment
1467	 */
1468	if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
1469	    headersize == sizeof(struct ipv6hdr) &&
1470	    length <= mtu - headersize &&
1471	    (!(flags & MSG_MORE) || cork->gso_size) &&
1472	    rt->dst.dev->features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
1473		csummode = CHECKSUM_PARTIAL;
1474
1475	if (flags & MSG_ZEROCOPY && length && sock_flag(sk, SOCK_ZEROCOPY)) {
1476		uarg = sock_zerocopy_realloc(sk, length, skb_zcopy(skb));
1477		if (!uarg)
1478			return -ENOBUFS;
1479		extra_uref = !skb_zcopy(skb);	/* only ref on new uarg */
1480		if (rt->dst.dev->features & NETIF_F_SG &&
1481		    csummode == CHECKSUM_PARTIAL) {
1482			paged = true;
1483		} else {
1484			uarg->zerocopy = 0;
1485			skb_zcopy_set(skb, uarg, &extra_uref);
1486		}
1487	}
1488
1489	/*
1490	 * Let's try using as much space as possible.
1491	 * Use MTU if total length of the message fits into the MTU.
1492	 * Otherwise, we need to reserve fragment header and
 1493	 * fragment alignment (= 8-15 octets, in total).
1494	 *
1495	 * Note that we may need to "move" the data from the tail of
1496	 * of the buffer to the new fragment when we split
1497	 * the message.
1498	 *
1499	 * FIXME: It may be fragmented into multiple chunks
1500	 *        at once if non-fragmentable extension headers
1501	 *        are too large.
1502	 * --yoshfuji
1503	 */
1504
1505	cork->length += length;
1506	if (!skb)
1507		goto alloc_new_skb;
1508
1509	while (length > 0) {
1510		/* Check if the remaining data fits into current packet. */
1511		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
1512		if (copy < length)
1513			copy = maxfraglen - skb->len;
1514
1515		if (copy <= 0) {
1516			char *data;
1517			unsigned int datalen;
1518			unsigned int fraglen;
1519			unsigned int fraggap;
1520			unsigned int alloclen;
1521			unsigned int pagedlen;
1522alloc_new_skb:
1523			/* There's no room in the current skb */
1524			if (skb)
1525				fraggap = skb->len - maxfraglen;
1526			else
1527				fraggap = 0;
1528			/* update mtu and maxfraglen if necessary */
1529			if (!skb || !skb_prev)
1530				ip6_append_data_mtu(&mtu, &maxfraglen,
1531						    fragheaderlen, skb, rt,
1532						    orig_mtu);
1533
1534			skb_prev = skb;
1535
1536			/*
1537			 * If remaining data exceeds the mtu,
1538			 * we know we need more fragment(s).
1539			 */
1540			datalen = length + fraggap;
1541
1542			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
1543				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
1544			fraglen = datalen + fragheaderlen;
1545			pagedlen = 0;
1546
1547			if ((flags & MSG_MORE) &&
1548			    !(rt->dst.dev->features&NETIF_F_SG))
1549				alloclen = mtu;
1550			else if (!paged)
1551				alloclen = fraglen;
1552			else {
1553				alloclen = min_t(int, fraglen, MAX_HEADER);
1554				pagedlen = fraglen - alloclen;
1555			}
1556
1557			alloclen += dst_exthdrlen;
1558
1559			if (datalen != length + fraggap) {
1560				/*
 1561				 * this is not the last fragment; the trailer
1562				 * space is regarded as data space.
1563				 */
1564				datalen += rt->dst.trailer_len;
1565			}
1566
1567			alloclen += rt->dst.trailer_len;
1568			fraglen = datalen + fragheaderlen;
1569
1570			/*
1571			 * We just reserve space for fragment header.
1572			 * Note: this may be overallocation if the message
1573			 * (without MSG_MORE) fits into the MTU.
1574			 */
1575			alloclen += sizeof(struct frag_hdr);
1576
1577			copy = datalen - transhdrlen - fraggap - pagedlen;
1578			if (copy < 0) {
1579				err = -EINVAL;
1580				goto error;
1581			}
1582			if (transhdrlen) {
1583				skb = sock_alloc_send_skb(sk,
1584						alloclen + hh_len,
1585						(flags & MSG_DONTWAIT), &err);
1586			} else {
1587				skb = NULL;
1588				if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
1589				    2 * sk->sk_sndbuf)
1590					skb = alloc_skb(alloclen + hh_len,
1591							sk->sk_allocation);
1592				if (unlikely(!skb))
1593					err = -ENOBUFS;
1594			}
1595			if (!skb)
1596				goto error;
1597			/*
1598			 *	Fill in the control structures
1599			 */
1600			skb->protocol = htons(ETH_P_IPV6);
1601			skb->ip_summed = csummode;
1602			skb->csum = 0;
1603			/* reserve for fragmentation and ipsec header */
1604			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
1605				    dst_exthdrlen);
1606
1607			/*
1608			 *	Find where to start putting bytes
1609			 */
1610			data = skb_put(skb, fraglen - pagedlen);
1611			skb_set_network_header(skb, exthdrlen);
1612			data += fragheaderlen;
1613			skb->transport_header = (skb->network_header +
1614						 fragheaderlen);
1615			if (fraggap) {
1616				skb->csum = skb_copy_and_csum_bits(
1617					skb_prev, maxfraglen,
1618					data + transhdrlen, fraggap, 0);
1619				skb_prev->csum = csum_sub(skb_prev->csum,
1620							  skb->csum);
1621				data += fraggap;
1622				pskb_trim_unique(skb_prev, maxfraglen);
1623			}
1624			if (copy > 0 &&
1625			    getfrag(from, data + transhdrlen, offset,
1626				    copy, fraggap, skb) < 0) {
1627				err = -EFAULT;
1628				kfree_skb(skb);
1629				goto error;
1630			}
1631
1632			offset += copy;
1633			length -= copy + transhdrlen;
1634			transhdrlen = 0;
1635			exthdrlen = 0;
1636			dst_exthdrlen = 0;
1637
1638			/* Only the initial fragment is time stamped */
1639			skb_shinfo(skb)->tx_flags = cork->tx_flags;
1640			cork->tx_flags = 0;
1641			skb_shinfo(skb)->tskey = tskey;
1642			tskey = 0;
1643			skb_zcopy_set(skb, uarg, &extra_uref);
1644
1645			if ((flags & MSG_CONFIRM) && !skb_prev)
1646				skb_set_dst_pending_confirm(skb, 1);
1647
1648			/*
1649			 * Put the packet on the pending queue
1650			 */
1651			if (!skb->destructor) {
1652				skb->destructor = sock_wfree;
1653				skb->sk = sk;
1654				wmem_alloc_delta += skb->truesize;
1655			}
1656			__skb_queue_tail(queue, skb);
1657			continue;
1658		}
1659
1660		if (copy > length)
1661			copy = length;
1662
1663		if (!(rt->dst.dev->features&NETIF_F_SG) &&
1664		    skb_tailroom(skb) >= copy) {
1665			unsigned int off;
1666
1667			off = skb->len;
1668			if (getfrag(from, skb_put(skb, copy),
1669						offset, copy, off, skb) < 0) {
1670				__skb_trim(skb, off);
1671				err = -EFAULT;
1672				goto error;
1673			}
1674		} else if (!uarg || !uarg->zerocopy) {
1675			int i = skb_shinfo(skb)->nr_frags;
1676
1677			err = -ENOMEM;
1678			if (!sk_page_frag_refill(sk, pfrag))
1679				goto error;
1680
1681			if (!skb_can_coalesce(skb, i, pfrag->page,
1682					      pfrag->offset)) {
1683				err = -EMSGSIZE;
1684				if (i == MAX_SKB_FRAGS)
1685					goto error;
1686
1687				__skb_fill_page_desc(skb, i, pfrag->page,
1688						     pfrag->offset, 0);
1689				skb_shinfo(skb)->nr_frags = ++i;
1690				get_page(pfrag->page);
1691			}
1692			copy = min_t(int, copy, pfrag->size - pfrag->offset);
1693			if (getfrag(from,
1694				    page_address(pfrag->page) + pfrag->offset,
1695				    offset, copy, skb->len, skb) < 0)
1696				goto error_efault;
1697
1698			pfrag->offset += copy;
1699			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1700			skb->len += copy;
1701			skb->data_len += copy;
1702			skb->truesize += copy;
1703			wmem_alloc_delta += copy;
1704		} else {
1705			err = skb_zerocopy_iter_dgram(skb, from, copy);
1706			if (err < 0)
1707				goto error;
1708		}
1709		offset += copy;
1710		length -= copy;
1711	}
1712
1713	if (wmem_alloc_delta)
1714		refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
1715	return 0;
1716
1717error_efault:
1718	err = -EFAULT;
1719error:
1720	if (uarg)
1721		sock_zerocopy_put_abort(uarg, extra_uref);
1722	cork->length -= length;
1723	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1724	refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
1725	return err;
1726}
1727
1728int ip6_append_data(struct sock *sk,
1729		    int getfrag(void *from, char *to, int offset, int len,
1730				int odd, struct sk_buff *skb),
1731		    void *from, int length, int transhdrlen,
1732		    struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
1733		    struct rt6_info *rt, unsigned int flags)
1734{
1735	struct inet_sock *inet = inet_sk(sk);
1736	struct ipv6_pinfo *np = inet6_sk(sk);
1737	int exthdrlen;
1738	int err;
1739
1740	if (flags&MSG_PROBE)
1741		return 0;
1742	if (skb_queue_empty(&sk->sk_write_queue)) {
1743		/*
1744		 * setup for corking
1745		 */
1746		err = ip6_setup_cork(sk, &inet->cork, &np->cork,
1747				     ipc6, rt, fl6);
1748		if (err)
1749			return err;
1750
1751		exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
1752		length += exthdrlen;
1753		transhdrlen += exthdrlen;
1754	} else {
1755		fl6 = &inet->cork.fl.u.ip6;
1756		transhdrlen = 0;
1757	}
1758
1759	return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base,
1760				 &np->cork, sk_page_frag(sk), getfrag,
1761				 from, length, transhdrlen, flags, ipc6);
1762}
1763EXPORT_SYMBOL_GPL(ip6_append_data);
1764
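/* Editor's note: a hedged sketch (not in the original file) of the corked
 * datagram pattern built on ip6_append_data(), shaped like the UDPv6 and
 * raw socket paths: append under the socket lock, then push on success or
 * flush on failure.  ip_generic_getfrag() is the stock iov_iter copy
 * helper the datagram protocols pass in; transhdrlen is 0 here because
 * this sketch reserves no transport header.
 */
static int example_dgram_send(struct sock *sk, struct msghdr *msg, int len,
			      struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
			      struct rt6_info *rt)
{
	int err;

	lock_sock(sk);
	err = ip6_append_data(sk, ip_generic_getfrag, msg, len, 0,
			      ipc6, fl6, rt, MSG_DONTWAIT);
	if (err)
		ip6_flush_pending_frames(sk);
	else
		err = ip6_push_pending_frames(sk);
	release_sock(sk);

	return err;
}
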
1765static void ip6_cork_release(struct inet_cork_full *cork,
1766			     struct inet6_cork *v6_cork)
1767{
1768	if (v6_cork->opt) {
1769		kfree(v6_cork->opt->dst0opt);
1770		kfree(v6_cork->opt->dst1opt);
1771		kfree(v6_cork->opt->hopopt);
1772		kfree(v6_cork->opt->srcrt);
1773		kfree(v6_cork->opt);
1774		v6_cork->opt = NULL;
1775	}
1776
1777	if (cork->base.dst) {
1778		dst_release(cork->base.dst);
1779		cork->base.dst = NULL;
1780		cork->base.flags &= ~IPCORK_ALLFRAG;
1781	}
1782	memset(&cork->fl, 0, sizeof(cork->fl));
1783}
1784
1785struct sk_buff *__ip6_make_skb(struct sock *sk,
1786			       struct sk_buff_head *queue,
1787			       struct inet_cork_full *cork,
1788			       struct inet6_cork *v6_cork)
1789{
1790	struct sk_buff *skb, *tmp_skb;
1791	struct sk_buff **tail_skb;
1792	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
1793	struct ipv6_pinfo *np = inet6_sk(sk);
1794	struct net *net = sock_net(sk);
1795	struct ipv6hdr *hdr;
1796	struct ipv6_txoptions *opt = v6_cork->opt;
1797	struct rt6_info *rt = (struct rt6_info *)cork->base.dst;
1798	struct flowi6 *fl6 = &cork->fl.u.ip6;
1799	unsigned char proto = fl6->flowi6_proto;
1800
1801	skb = __skb_dequeue(queue);
1802	if (!skb)
1803		goto out;
1804	tail_skb = &(skb_shinfo(skb)->frag_list);
1805
1806	/* move skb->data to ip header from ext header */
1807	if (skb->data < skb_network_header(skb))
1808		__skb_pull(skb, skb_network_offset(skb));
1809	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
1810		__skb_pull(tmp_skb, skb_network_header_len(skb));
1811		*tail_skb = tmp_skb;
1812		tail_skb = &(tmp_skb->next);
1813		skb->len += tmp_skb->len;
1814		skb->data_len += tmp_skb->len;
1815		skb->truesize += tmp_skb->truesize;
1816		tmp_skb->destructor = NULL;
1817		tmp_skb->sk = NULL;
1818	}
1819
1820	/* Allow local fragmentation. */
1821	skb->ignore_df = ip6_sk_ignore_df(sk);
1822
1823	*final_dst = fl6->daddr;
1824	__skb_pull(skb, skb_network_header_len(skb));
1825	if (opt && opt->opt_flen)
1826		ipv6_push_frag_opts(skb, opt, &proto);
1827	if (opt && opt->opt_nflen)
1828		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst, &fl6->saddr);
1829
1830	skb_push(skb, sizeof(struct ipv6hdr));
1831	skb_reset_network_header(skb);
1832	hdr = ipv6_hdr(skb);
1833
1834	ip6_flow_hdr(hdr, v6_cork->tclass,
1835		     ip6_make_flowlabel(net, skb, fl6->flowlabel,
1836					ip6_autoflowlabel(net, np), fl6));
1837	hdr->hop_limit = v6_cork->hop_limit;
1838	hdr->nexthdr = proto;
1839	hdr->saddr = fl6->saddr;
1840	hdr->daddr = *final_dst;
1841
1842	skb->priority = sk->sk_priority;
1843	skb->mark = cork->base.mark;
1844
1845	skb->tstamp = cork->base.transmit_time;
1846
1847	skb_dst_set(skb, dst_clone(&rt->dst));
1848	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
1849	if (proto == IPPROTO_ICMPV6) {
1850		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
1851
1852		ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
1853		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
1854	}
1855
1856	ip6_cork_release(cork, v6_cork);
1857out:
1858	return skb;
1859}
1860
1861int ip6_send_skb(struct sk_buff *skb)
1862{
1863	struct net *net = sock_net(skb->sk);
1864	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
1865	int err;
1866
1867	err = ip6_local_out(net, skb->sk, skb);
1868	if (err) {
1869		if (err > 0)
1870			err = net_xmit_errno(err);
1871		if (err)
1872			IP6_INC_STATS(net, rt->rt6i_idev,
1873				      IPSTATS_MIB_OUTDISCARDS);
1874	}
1875
1876	return err;
1877}
1878
1879int ip6_push_pending_frames(struct sock *sk)
1880{
1881	struct sk_buff *skb;
1882
1883	skb = ip6_finish_skb(sk);
1884	if (!skb)
1885		return 0;
1886
1887	return ip6_send_skb(skb);
1888}
1889EXPORT_SYMBOL_GPL(ip6_push_pending_frames);
1890
1891static void __ip6_flush_pending_frames(struct sock *sk,
1892				       struct sk_buff_head *queue,
1893				       struct inet_cork_full *cork,
1894				       struct inet6_cork *v6_cork)
1895{
1896	struct sk_buff *skb;
1897
1898	while ((skb = __skb_dequeue_tail(queue)) != NULL) {
1899		if (skb_dst(skb))
1900			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
1901				      IPSTATS_MIB_OUTDISCARDS);
1902		kfree_skb(skb);
1903	}
1904
1905	ip6_cork_release(cork, v6_cork);
1906}
1907
1908void ip6_flush_pending_frames(struct sock *sk)
1909{
1910	__ip6_flush_pending_frames(sk, &sk->sk_write_queue,
1911				   &inet_sk(sk)->cork, &inet6_sk(sk)->cork);
1912}
1913EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);
1914
1915struct sk_buff *ip6_make_skb(struct sock *sk,
1916			     int getfrag(void *from, char *to, int offset,
1917					 int len, int odd, struct sk_buff *skb),
1918			     void *from, int length, int transhdrlen,
1919			     struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
1920			     struct rt6_info *rt, unsigned int flags,
1921			     struct inet_cork_full *cork)
1922{
1923	struct inet6_cork v6_cork;
1924	struct sk_buff_head queue;
1925	int exthdrlen = (ipc6->opt ? ipc6->opt->opt_flen : 0);
1926	int err;
1927
1928	if (flags & MSG_PROBE)
1929		return NULL;
1930
1931	__skb_queue_head_init(&queue);
1932
1933	cork->base.flags = 0;
1934	cork->base.addr = 0;
1935	cork->base.opt = NULL;
1936	cork->base.dst = NULL;
1937	v6_cork.opt = NULL;
1938	err = ip6_setup_cork(sk, cork, &v6_cork, ipc6, rt, fl6);
1939	if (err) {
1940		ip6_cork_release(cork, &v6_cork);
1941		return ERR_PTR(err);
1942	}
1943	if (ipc6->dontfrag < 0)
1944		ipc6->dontfrag = inet6_sk(sk)->dontfrag;
1945
1946	err = __ip6_append_data(sk, fl6, &queue, &cork->base, &v6_cork,
1947				&current->task_frag, getfrag, from,
1948				length + exthdrlen, transhdrlen + exthdrlen,
1949				flags, ipc6);
1950	if (err) {
1951		__ip6_flush_pending_frames(sk, &queue, cork, &v6_cork);
1952		return ERR_PTR(err);
1953	}
1954
1955	return __ip6_make_skb(sk, &queue, cork, &v6_cork);
1956}
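
/* Editor's note: a hedged sketch (not in the original file) of the
 * lockless one-shot transmit path built on ip6_make_skb() above, shaped
 * like udp_v6_send_skb()'s caller: cork, append and build in a single
 * call, then hand the finished skb to ip6_send_skb(), which consumes it.
 * A NULL return only happens for MSG_PROBE and counts as success
 * (PTR_ERR(NULL) == 0).
 */
static int example_make_and_send(struct sock *sk, struct msghdr *msg,
				 int len, struct ipcm6_cookie *ipc6,
				 struct flowi6 *fl6, struct rt6_info *rt)
{
	struct inet_cork_full cork;
	struct sk_buff *skb;

	skb = ip6_make_skb(sk, ip_generic_getfrag, msg, len, 0,
			   ipc6, fl6, rt, MSG_DONTWAIT, &cork);
	if (IS_ERR_OR_NULL(skb))
		return PTR_ERR(skb);

	return ip6_send_skb(skb);
}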