v3.1 (net/ipv6/reassembly.c)
  1/*
  2 *	IPv6 fragment reassembly
  3 *	Linux INET6 implementation
  4 *
  5 *	Authors:
  6 *	Pedro Roque		<roque@di.fc.ul.pt>
  7 *
  8 *	Based on: net/ipv4/ip_fragment.c
  9 *
 10 *	This program is free software; you can redistribute it and/or
 11 *      modify it under the terms of the GNU General Public License
 12 *      as published by the Free Software Foundation; either version
 13 *      2 of the License, or (at your option) any later version.
 14 */
 15
 16/*
 17 *	Fixes:
 18 *	Andi Kleen	Make it work with multiple hosts.
 19 *			More RFC compliance.
 20 *
 21 *      Horst von Brand Add missing #include <linux/string.h>
 22 *	Alexey Kuznetsov	SMP races, threading, cleanup.
 23 *	Patrick McHardy		LRU queue of frag heads for evictor.
 24 *	Mitsuru KANDA @USAGI	Register inet6_protocol{}.
 25 *	David Stevens and
 26 *	YOSHIFUJI,H. @USAGI	Always remove fragment header to
 27 *				calculate ICV correctly.
 28 */
 29#include <linux/errno.h>
 30#include <linux/types.h>
 31#include <linux/string.h>
 32#include <linux/socket.h>
 33#include <linux/sockios.h>
 34#include <linux/jiffies.h>
 35#include <linux/net.h>
 36#include <linux/list.h>
 37#include <linux/netdevice.h>
 38#include <linux/in6.h>
 39#include <linux/ipv6.h>
 40#include <linux/icmpv6.h>
 41#include <linux/random.h>
 42#include <linux/jhash.h>
 43#include <linux/skbuff.h>
 44#include <linux/slab.h>
 45
 46#include <net/sock.h>
 47#include <net/snmp.h>
 48
 49#include <net/ipv6.h>
 50#include <net/ip6_route.h>
 51#include <net/protocol.h>
 52#include <net/transp_v6.h>
 53#include <net/rawv6.h>
 54#include <net/ndisc.h>
 55#include <net/addrconf.h>
 56#include <net/inet_frag.h>
 57
 58struct ip6frag_skb_cb
 59{
 60	struct inet6_skb_parm	h;
 61	int			offset;
 62};
 63
 64#define FRAG6_CB(skb)	((struct ip6frag_skb_cb*)((skb)->cb))
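/*
 * Per-fragment reassembly state lives in the skb control block: the
 * inet6_skb_parm comes first, so IP6CB() stays usable, and the byte
 * offset of this fragment within the original datagram is stored right
 * after it.  ip6_frag_queue() below fills it in roughly as:
 *
 *	FRAG6_CB(skb)->offset = ntohs(fhdr->frag_off) & ~0x7;
 */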
 65
 66
 67/*
 68 *	Equivalent of ipv4 struct ipq
 69 */
 70
 71struct frag_queue
 72{
 73	struct inet_frag_queue	q;
 74
 75	__be32			id;		/* fragment id		*/
 76	u32			user;
 77	struct in6_addr		saddr;
 78	struct in6_addr		daddr;
 79
 80	int			iif;
 81	unsigned int		csum;
 82	__u16			nhoffset;
 83};
 84
 85static struct inet_frags ip6_frags;
 86
 87int ip6_frag_nqueues(struct net *net)
 88{
 89	return net->ipv6.frags.nqueues;
 90}
 91
 92int ip6_frag_mem(struct net *net)
 93{
 94	return atomic_read(&net->ipv6.frags.mem);
 95}
 96
 97static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 98			  struct net_device *dev);
 99
100/*
101 * callers should be careful not to use the hash value outside the ipfrag_lock
102 * as doing so could race with ipfrag_hash_rnd being recalculated.
103 */
104unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
105			     const struct in6_addr *daddr, u32 rnd)
106{
107	u32 c;
108
109	c = jhash_3words((__force u32)saddr->s6_addr32[0],
110			 (__force u32)saddr->s6_addr32[1],
111			 (__force u32)saddr->s6_addr32[2],
112			 rnd);
113
114	c = jhash_3words((__force u32)saddr->s6_addr32[3],
115			 (__force u32)daddr->s6_addr32[0],
116			 (__force u32)daddr->s6_addr32[1],
117			 c);
118
119	c =  jhash_3words((__force u32)daddr->s6_addr32[2],
120			  (__force u32)daddr->s6_addr32[3],
121			  (__force u32)id,
122			  c);
123
124	return c & (INETFRAGS_HASHSZ - 1);
125}
126EXPORT_SYMBOL_GPL(inet6_hash_frag);
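/*
 * Note: the three jhash_3words() rounds above fold the eight 32-bit words
 * of the source and destination addresses plus the fragment id into one
 * 32-bit value, keyed with the per-table random seed (ip6_frags.rnd).
 * INETFRAGS_HASHSZ is a power of two, so masking with
 * (INETFRAGS_HASHSZ - 1) picks the hash bucket directly.
 */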
127
128static unsigned int ip6_hashfn(struct inet_frag_queue *q)
129{
130	struct frag_queue *fq;
131
132	fq = container_of(q, struct frag_queue, q);
133	return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
134}
135
136int ip6_frag_match(struct inet_frag_queue *q, void *a)
137{
138	struct frag_queue *fq;
139	struct ip6_create_arg *arg = a;
140
141	fq = container_of(q, struct frag_queue, q);
142	return (fq->id == arg->id && fq->user == arg->user &&
143			ipv6_addr_equal(&fq->saddr, arg->src) &&
144			ipv6_addr_equal(&fq->daddr, arg->dst));
145}
146EXPORT_SYMBOL(ip6_frag_match);
147
148void ip6_frag_init(struct inet_frag_queue *q, void *a)
149{
150	struct frag_queue *fq = container_of(q, struct frag_queue, q);
151	struct ip6_create_arg *arg = a;
152
153	fq->id = arg->id;
154	fq->user = arg->user;
155	ipv6_addr_copy(&fq->saddr, arg->src);
156	ipv6_addr_copy(&fq->daddr, arg->dst);
157}
158EXPORT_SYMBOL(ip6_frag_init);
159
160/* Destruction primitives. */
161
162static __inline__ void fq_put(struct frag_queue *fq)
163{
164	inet_frag_put(&fq->q, &ip6_frags);
165}
166
 167/* Kill fq entry. It is not destroyed immediately,
 168 * because the caller (and possibly others) still holds a reference.
 169 */
170static __inline__ void fq_kill(struct frag_queue *fq)
171{
172	inet_frag_kill(&fq->q, &ip6_frags);
173}
174
175static void ip6_evictor(struct net *net, struct inet6_dev *idev)
176{
177	int evicted;
178
179	evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags);
180	if (evicted)
181		IP6_ADD_STATS_BH(net, idev, IPSTATS_MIB_REASMFAILS, evicted);
182}
183
184static void ip6_frag_expire(unsigned long data)
185{
186	struct frag_queue *fq;
187	struct net_device *dev = NULL;
188	struct net *net;
189
190	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
191
192	spin_lock(&fq->q.lock);
193
194	if (fq->q.last_in & INET_FRAG_COMPLETE)
195		goto out;
196
197	fq_kill(fq);
198
199	net = container_of(fq->q.net, struct net, ipv6.frags);
200	rcu_read_lock();
201	dev = dev_get_by_index_rcu(net, fq->iif);
202	if (!dev)
203		goto out_rcu_unlock;
204
205	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
206	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
207
208	/* Don't send error if the first segment did not arrive. */
209	if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
210		goto out_rcu_unlock;
211
 212	/*
 213	   But use as the source the device on which the LAST
 214	   segment arrived. And do not use the fq->dev pointer
 215	   directly; the device might already have disappeared.
 216	 */
217	fq->q.fragments->dev = dev;
218	icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
219out_rcu_unlock:
220	rcu_read_unlock();
221out:
222	spin_unlock(&fq->q.lock);
223	fq_put(fq);
224}
225
226static __inline__ struct frag_queue *
227fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6_addr *dst)
228{
229	struct inet_frag_queue *q;
230	struct ip6_create_arg arg;
231	unsigned int hash;
232
233	arg.id = id;
234	arg.user = IP6_DEFRAG_LOCAL_DELIVER;
235	arg.src = src;
236	arg.dst = dst;
237
238	read_lock(&ip6_frags.lock);
239	hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);
240
241	q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
242	if (q == NULL)
243		return NULL;
244
245	return container_of(q, struct frag_queue, q);
246}
247
248static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
249			   struct frag_hdr *fhdr, int nhoff)
250{
251	struct sk_buff *prev, *next;
252	struct net_device *dev;
253	int offset, end;
254	struct net *net = dev_net(skb_dst(skb)->dev);
255
256	if (fq->q.last_in & INET_FRAG_COMPLETE)
257		goto err;
258
259	offset = ntohs(fhdr->frag_off) & ~0x7;
260	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
261			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
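	/*
	 * Illustrative example: a fragment header carrying
	 * frag_off = htons(0x00b1) encodes an offset of 22 eight-byte
	 * units plus the M flag, so ntohs(fhdr->frag_off) & ~0x7 gives
	 * a byte offset of 176; 'end' is that offset plus the payload
	 * bytes that follow this fragment header (payload_len minus the
	 * extension headers, fragment header included, that precede it).
	 */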
262
263	if ((unsigned int)end > IPV6_MAXPLEN) {
264		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
265				 IPSTATS_MIB_INHDRERRORS);
266		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
267				  ((u8 *)&fhdr->frag_off -
268				   skb_network_header(skb)));
269		return -1;
270	}
271
272	if (skb->ip_summed == CHECKSUM_COMPLETE) {
273		const unsigned char *nh = skb_network_header(skb);
274		skb->csum = csum_sub(skb->csum,
275				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
276						  0));
277	}
278
279	/* Is this the final fragment? */
280	if (!(fhdr->frag_off & htons(IP6_MF))) {
281		/* If we already have some bits beyond end
282		 * or have different end, the segment is corrupted.
283		 */
284		if (end < fq->q.len ||
285		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
286			goto err;
287		fq->q.last_in |= INET_FRAG_LAST_IN;
288		fq->q.len = end;
289	} else {
290		/* Check if the fragment is rounded to 8 bytes.
291		 * Required by the RFC.
292		 */
293		if (end & 0x7) {
294			/* RFC2460 says always send parameter problem in
295			 * this case. -DaveM
296			 */
297			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
298					 IPSTATS_MIB_INHDRERRORS);
299			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
300					  offsetof(struct ipv6hdr, payload_len));
301			return -1;
302		}
303		if (end > fq->q.len) {
304			/* Some bits beyond end -> corruption. */
305			if (fq->q.last_in & INET_FRAG_LAST_IN)
306				goto err;
307			fq->q.len = end;
308		}
309	}
310
311	if (end == offset)
312		goto err;
313
314	/* Point into the IP datagram 'data' part. */
315	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
316		goto err;
317
318	if (pskb_trim_rcsum(skb, end - offset))
319		goto err;
320
321	/* Find out which fragments are in front and at the back of us
322	 * in the chain of fragments so far.  We must know where to put
323	 * this fragment, right?
324	 */
325	prev = fq->q.fragments_tail;
326	if (!prev || FRAG6_CB(prev)->offset < offset) {
327		next = NULL;
328		goto found;
329	}
330	prev = NULL;
331	for (next = fq->q.fragments; next != NULL; next = next->next) {
332		if (FRAG6_CB(next)->offset >= offset)
333			break;	/* bingo! */
334		prev = next;
335	}
336
337found:
338	/* RFC5722, Section 4:
339	 *                                  When reassembling an IPv6 datagram, if
340	 *   one or more its constituent fragments is determined to be an
341	 *   overlapping fragment, the entire datagram (and any constituent
342	 *   fragments, including those not yet received) MUST be silently
343	 *   discarded.
344	 */
345
346	/* Check for overlap with preceding fragment. */
347	if (prev &&
348	    (FRAG6_CB(prev)->offset + prev->len) > offset)
349		goto discard_fq;
350
351	/* Look for overlap with succeeding segment. */
352	if (next && FRAG6_CB(next)->offset < end)
353		goto discard_fq;
354
355	FRAG6_CB(skb)->offset = offset;
356
357	/* Insert this fragment in the chain of fragments. */
358	skb->next = next;
359	if (!next)
360		fq->q.fragments_tail = skb;
361	if (prev)
362		prev->next = skb;
363	else
364		fq->q.fragments = skb;
365
366	dev = skb->dev;
367	if (dev) {
368		fq->iif = dev->ifindex;
369		skb->dev = NULL;
370	}
371	fq->q.stamp = skb->tstamp;
372	fq->q.meat += skb->len;
373	atomic_add(skb->truesize, &fq->q.net->mem);
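	/* Fragment memory is accounted per namespace in truesize units;
	 * once net->ipv6.frags.mem exceeds ip6frag_high_thresh,
	 * ipv6_frag_rcv() runs ip6_evictor() to reclaim old queues.
	 */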
374
375	/* The first fragment.
376	 * nhoffset is obtained from the first fragment, of course.
377	 */
378	if (offset == 0) {
379		fq->nhoffset = nhoff;
380		fq->q.last_in |= INET_FRAG_FIRST_IN;
381	}
382
383	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
384	    fq->q.meat == fq->q.len)
385		return ip6_frag_reasm(fq, prev, dev);
386
387	write_lock(&ip6_frags.lock);
388	list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
389	write_unlock(&ip6_frags.lock);
390	return -1;
391
392discard_fq:
393	fq_kill(fq);
394err:
395	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
396		      IPSTATS_MIB_REASMFAILS);
397	kfree_skb(skb);
398	return -1;
399}
400
401/*
 402 *	Check if this packet is complete.
 403 *	Returns -1 on failure for any reason, and 1 once the frame has been
 404 *	reassembled (nhoff then indexes the current nexthdr field).
 405 *
 406 *	It is called with the fq locked, and the caller must check that the
 407 *	queue is eligible for reassembly, i.e. it is not COMPLETE and
 408 *	the last and the first fragments arrived and all the bits are here.
 409 */
410static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
411			  struct net_device *dev)
412{
413	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
414	struct sk_buff *fp, *head = fq->q.fragments;
415	int    payload_len;
416	unsigned int nhoff;
417
418	fq_kill(fq);
419
420	/* Make the one we just received the head. */
421	if (prev) {
422		head = prev->next;
423		fp = skb_clone(head, GFP_ATOMIC);
424
425		if (!fp)
426			goto out_oom;
427
428		fp->next = head->next;
429		if (!fp->next)
430			fq->q.fragments_tail = fp;
431		prev->next = fp;
432
433		skb_morph(head, fq->q.fragments);
434		head->next = fq->q.fragments->next;
435
436		kfree_skb(fq->q.fragments);
437		fq->q.fragments = head;
438	}
439
440	WARN_ON(head == NULL);
441	WARN_ON(FRAG6_CB(head)->offset != 0);
442
443	/* Unfragmented part is taken from the first segment. */
444	payload_len = ((head->data - skb_network_header(head)) -
445		       sizeof(struct ipv6hdr) + fq->q.len -
446		       sizeof(struct frag_hdr));
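	/* head->data already points just past the fragment header here, so
	 * the sum above is: the extension headers kept from the first
	 * fragment (fragment header included) plus the total data length
	 * fq->q.len, minus the fragment header that is stripped below;
	 * i.e. the payload length of the reassembled datagram.
	 */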
447	if (payload_len > IPV6_MAXPLEN)
448		goto out_oversize;
449
450	/* Head of list must not be cloned. */
451	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
452		goto out_oom;
453
454	/* If the first fragment is fragmented itself, we split
 455	 * it into two chunks: the first with the data and paged part
 456	 * and the second holding only the fragment list. */
457	if (skb_has_frag_list(head)) {
458		struct sk_buff *clone;
459		int i, plen = 0;
460
461		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
462			goto out_oom;
463		clone->next = head->next;
464		head->next = clone;
465		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
466		skb_frag_list_init(head);
467		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
468			plen += skb_shinfo(head)->frags[i].size;
469		clone->len = clone->data_len = head->data_len - plen;
470		head->data_len -= clone->len;
471		head->len -= clone->len;
472		clone->csum = 0;
473		clone->ip_summed = head->ip_summed;
474		atomic_add(clone->truesize, &fq->q.net->mem);
475	}
476
477	/* We have to remove the fragment header from the datagram and
 478	 * relocate the remaining headers in order to calculate the ICV correctly. */
479	nhoff = fq->nhoffset;
480	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
481	memmove(head->head + sizeof(struct frag_hdr), head->head,
482		(head->data - head->head) - sizeof(struct frag_hdr));
483	head->mac_header += sizeof(struct frag_hdr);
484	head->network_header += sizeof(struct frag_hdr);
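	/* Everything that precedes the payload, except the 8-byte fragment
	 * header itself, has just been shifted forward over that header by
	 * the memmove() above; the mac and network header offsets are bumped
	 * to match, and the nexthdr byte of the preceding header was patched
	 * first so the header chain stays consistent.
	 */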
485
486	skb_shinfo(head)->frag_list = head->next;
487	skb_reset_transport_header(head);
488	skb_push(head, head->data - skb_network_header(head));
489
490	for (fp = head->next; fp; fp = fp->next) {
491		head->data_len += fp->len;
492		head->len += fp->len;
493		if (head->ip_summed != fp->ip_summed)
494			head->ip_summed = CHECKSUM_NONE;
495		else if (head->ip_summed == CHECKSUM_COMPLETE)
496			head->csum = csum_add(head->csum, fp->csum);
497		head->truesize += fp->truesize;
498	}
499	atomic_sub(head->truesize, &fq->q.net->mem);
500
501	head->next = NULL;
502	head->dev = dev;
503	head->tstamp = fq->q.stamp;
504	ipv6_hdr(head)->payload_len = htons(payload_len);
505	IP6CB(head)->nhoff = nhoff;
506
507	/* Yes, and fold redundant checksum back. 8) */
508	if (head->ip_summed == CHECKSUM_COMPLETE)
509		head->csum = csum_partial(skb_network_header(head),
510					  skb_network_header_len(head),
511					  head->csum);
512
513	rcu_read_lock();
514	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
515	rcu_read_unlock();
516	fq->q.fragments = NULL;
517	fq->q.fragments_tail = NULL;
518	return 1;
519
520out_oversize:
521	if (net_ratelimit())
522		printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
523	goto out_fail;
524out_oom:
525	if (net_ratelimit())
526		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
527out_fail:
528	rcu_read_lock();
529	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
530	rcu_read_unlock();
531	return -1;
532}
533
534static int ipv6_frag_rcv(struct sk_buff *skb)
535{
536	struct frag_hdr *fhdr;
537	struct frag_queue *fq;
538	const struct ipv6hdr *hdr = ipv6_hdr(skb);
539	struct net *net = dev_net(skb_dst(skb)->dev);
540
541	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
542
543	/* Jumbo payload inhibits frag. header */
544	if (hdr->payload_len == 0)
545		goto fail_hdr;
546
547	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
548				 sizeof(struct frag_hdr))))
549		goto fail_hdr;
550
551	hdr = ipv6_hdr(skb);
552	fhdr = (struct frag_hdr *)skb_transport_header(skb);
553
554	if (!(fhdr->frag_off & htons(0xFFF9))) {
555		/* It is not a fragmented frame */
556		skb->transport_header += sizeof(struct frag_hdr);
557		IP6_INC_STATS_BH(net,
558				 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);
559
560		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
561		return 1;
562	}
563
564	if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
565		ip6_evictor(net, ip6_dst_idev(skb_dst(skb)));
566
567	fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr);
568	if (fq != NULL) {
569		int ret;
570
571		spin_lock(&fq->q.lock);
572
573		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
574
575		spin_unlock(&fq->q.lock);
576		fq_put(fq);
577		return ret;
578	}
579
580	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
581	kfree_skb(skb);
582	return -1;
583
584fail_hdr:
585	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
586	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
587	return -1;
588}
589
590static const struct inet6_protocol frag_protocol =
591{
592	.handler	=	ipv6_frag_rcv,
593	.flags		=	INET6_PROTO_NOPOLICY,
594};
595
596#ifdef CONFIG_SYSCTL
597static struct ctl_table ip6_frags_ns_ctl_table[] = {
598	{
599		.procname	= "ip6frag_high_thresh",
600		.data		= &init_net.ipv6.frags.high_thresh,
601		.maxlen		= sizeof(int),
602		.mode		= 0644,
603		.proc_handler	= proc_dointvec
604	},
605	{
606		.procname	= "ip6frag_low_thresh",
607		.data		= &init_net.ipv6.frags.low_thresh,
608		.maxlen		= sizeof(int),
609		.mode		= 0644,
610		.proc_handler	= proc_dointvec
611	},
612	{
613		.procname	= "ip6frag_time",
614		.data		= &init_net.ipv6.frags.timeout,
615		.maxlen		= sizeof(int),
616		.mode		= 0644,
617		.proc_handler	= proc_dointvec_jiffies,
618	},
619	{ }
620};
621
622static struct ctl_table ip6_frags_ctl_table[] = {
623	{
624		.procname	= "ip6frag_secret_interval",
625		.data		= &ip6_frags.secret_interval,
626		.maxlen		= sizeof(int),
627		.mode		= 0644,
628		.proc_handler	= proc_dointvec_jiffies,
629	},
630	{ }
631};
632
633static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
634{
635	struct ctl_table *table;
636	struct ctl_table_header *hdr;
637
638	table = ip6_frags_ns_ctl_table;
639	if (!net_eq(net, &init_net)) {
640		table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
641		if (table == NULL)
642			goto err_alloc;
643
644		table[0].data = &net->ipv6.frags.high_thresh;
645		table[1].data = &net->ipv6.frags.low_thresh;
646		table[2].data = &net->ipv6.frags.timeout;
647	}
648
649	hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table);
650	if (hdr == NULL)
651		goto err_reg;
652
653	net->ipv6.sysctl.frags_hdr = hdr;
654	return 0;
655
656err_reg:
657	if (!net_eq(net, &init_net))
658		kfree(table);
659err_alloc:
660	return -ENOMEM;
661}
662
663static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
664{
665	struct ctl_table *table;
666
667	table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
668	unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
669	if (!net_eq(net, &init_net))
670		kfree(table);
671}
672
673static struct ctl_table_header *ip6_ctl_header;
674
675static int ip6_frags_sysctl_register(void)
676{
677	ip6_ctl_header = register_net_sysctl_rotable(net_ipv6_ctl_path,
678			ip6_frags_ctl_table);
679	return ip6_ctl_header == NULL ? -ENOMEM : 0;
680}
681
682static void ip6_frags_sysctl_unregister(void)
683{
684	unregister_net_sysctl_table(ip6_ctl_header);
685}
686#else
687static inline int ip6_frags_ns_sysctl_register(struct net *net)
688{
689	return 0;
690}
691
692static inline void ip6_frags_ns_sysctl_unregister(struct net *net)
693{
694}
695
696static inline int ip6_frags_sysctl_register(void)
697{
698	return 0;
699}
700
701static inline void ip6_frags_sysctl_unregister(void)
702{
703}
704#endif
705
706static int __net_init ipv6_frags_init_net(struct net *net)
707{
708	net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
709	net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
710	net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;
711
712	inet_frags_init_net(&net->ipv6.frags);
713
714	return ip6_frags_ns_sysctl_register(net);
715}
716
717static void __net_exit ipv6_frags_exit_net(struct net *net)
718{
719	ip6_frags_ns_sysctl_unregister(net);
720	inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
721}
722
723static struct pernet_operations ip6_frags_ops = {
724	.init = ipv6_frags_init_net,
725	.exit = ipv6_frags_exit_net,
726};
727
728int __init ipv6_frag_init(void)
729{
730	int ret;
731
732	ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
733	if (ret)
734		goto out;
735
736	ret = ip6_frags_sysctl_register();
737	if (ret)
738		goto err_sysctl;
739
740	ret = register_pernet_subsys(&ip6_frags_ops);
741	if (ret)
742		goto err_pernet;
743
744	ip6_frags.hashfn = ip6_hashfn;
745	ip6_frags.constructor = ip6_frag_init;
746	ip6_frags.destructor = NULL;
747	ip6_frags.skb_free = NULL;
748	ip6_frags.qsize = sizeof(struct frag_queue);
749	ip6_frags.match = ip6_frag_match;
750	ip6_frags.frag_expire = ip6_frag_expire;
751	ip6_frags.secret_interval = 10 * 60 * HZ;
752	inet_frags_init(&ip6_frags);
753out:
754	return ret;
755
756err_pernet:
757	ip6_frags_sysctl_unregister();
758err_sysctl:
759	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
760	goto out;
761}
762
763void ipv6_frag_exit(void)
764{
765	inet_frags_fini(&ip6_frags);
766	ip6_frags_sysctl_unregister();
767	unregister_pernet_subsys(&ip6_frags_ops);
768	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
769}
v3.15 (net/ipv6/reassembly.c)
  1/*
  2 *	IPv6 fragment reassembly
  3 *	Linux INET6 implementation
  4 *
  5 *	Authors:
  6 *	Pedro Roque		<roque@di.fc.ul.pt>
  7 *
  8 *	Based on: net/ipv4/ip_fragment.c
  9 *
 10 *	This program is free software; you can redistribute it and/or
 11 *      modify it under the terms of the GNU General Public License
 12 *      as published by the Free Software Foundation; either version
 13 *      2 of the License, or (at your option) any later version.
 14 */
 15
 16/*
 17 *	Fixes:
 18 *	Andi Kleen	Make it work with multiple hosts.
 19 *			More RFC compliance.
 20 *
 21 *      Horst von Brand Add missing #include <linux/string.h>
 22 *	Alexey Kuznetsov	SMP races, threading, cleanup.
 23 *	Patrick McHardy		LRU queue of frag heads for evictor.
 24 *	Mitsuru KANDA @USAGI	Register inet6_protocol{}.
 25 *	David Stevens and
 26 *	YOSHIFUJI,H. @USAGI	Always remove fragment header to
 27 *				calculate ICV correctly.
 28 */
 29
 30#define pr_fmt(fmt) "IPv6: " fmt
 31
 32#include <linux/errno.h>
 33#include <linux/types.h>
 34#include <linux/string.h>
 35#include <linux/socket.h>
 36#include <linux/sockios.h>
 37#include <linux/jiffies.h>
 38#include <linux/net.h>
 39#include <linux/list.h>
 40#include <linux/netdevice.h>
 41#include <linux/in6.h>
 42#include <linux/ipv6.h>
 43#include <linux/icmpv6.h>
 44#include <linux/random.h>
 45#include <linux/jhash.h>
 46#include <linux/skbuff.h>
 47#include <linux/slab.h>
 48#include <linux/export.h>
 49
 50#include <net/sock.h>
 51#include <net/snmp.h>
 52
 53#include <net/ipv6.h>
 54#include <net/ip6_route.h>
 55#include <net/protocol.h>
 56#include <net/transp_v6.h>
 57#include <net/rawv6.h>
 58#include <net/ndisc.h>
 59#include <net/addrconf.h>
 60#include <net/inet_frag.h>
 61#include <net/inet_ecn.h>
 62
 63struct ip6frag_skb_cb
 64{
 65	struct inet6_skb_parm	h;
 66	int			offset;
 67};
 68
 69#define FRAG6_CB(skb)	((struct ip6frag_skb_cb*)((skb)->cb))
 70
 71static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
 72{
 73	return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
 74}
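/*
 * Each fragment's ECN codepoint is recorded as a single bit,
 * 1 << codepoint, so the per-queue fq->ecn field accumulates the set of
 * codepoints seen across all fragments of the datagram.
 */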
 75
 76static struct inet_frags ip6_frags;
 77
 78static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 79			  struct net_device *dev);
 80
 81/*
 82 * callers should be careful not to use the hash value outside the ipfrag_lock
 83 * as doing so could race with ipfrag_hash_rnd being recalculated.
 84 */
 85static unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
 86				    const struct in6_addr *daddr)
 87{
 88	u32 c;
 89
 90	net_get_random_once(&ip6_frags.rnd, sizeof(ip6_frags.rnd));
 91	c = jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
 92			 (__force u32)id, ip6_frags.rnd);
 93
 94	return c & (INETFRAGS_HASHSZ - 1);
 95}
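/*
 * Compared with the older three-round hash, a single jhash_3words() now
 * suffices: ipv6_addr_hash() first folds each 128-bit address down to
 * 32 bits, and net_get_random_once() lazily initialises the hash seed on
 * first use.  The result is again masked to the (power-of-two) table size.
 */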
 96
 97static unsigned int ip6_hashfn(struct inet_frag_queue *q)
 98{
 99	struct frag_queue *fq;
100
101	fq = container_of(q, struct frag_queue, q);
102	return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr);
103}
104
105bool ip6_frag_match(struct inet_frag_queue *q, void *a)
106{
107	struct frag_queue *fq;
108	struct ip6_create_arg *arg = a;
109
110	fq = container_of(q, struct frag_queue, q);
111	return	fq->id == arg->id &&
112		fq->user == arg->user &&
113		ipv6_addr_equal(&fq->saddr, arg->src) &&
114		ipv6_addr_equal(&fq->daddr, arg->dst);
115}
116EXPORT_SYMBOL(ip6_frag_match);
117
118void ip6_frag_init(struct inet_frag_queue *q, void *a)
119{
120	struct frag_queue *fq = container_of(q, struct frag_queue, q);
121	struct ip6_create_arg *arg = a;
122
123	fq->id = arg->id;
124	fq->user = arg->user;
125	fq->saddr = *arg->src;
126	fq->daddr = *arg->dst;
127	fq->ecn = arg->ecn;
128}
129EXPORT_SYMBOL(ip6_frag_init);
130
131void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
132			   struct inet_frags *frags)
133{
134	struct net_device *dev = NULL;
135
136	spin_lock(&fq->q.lock);
137
138	if (fq->q.last_in & INET_FRAG_COMPLETE)
139		goto out;
140
141	inet_frag_kill(&fq->q, frags);
142
143	rcu_read_lock();
144	dev = dev_get_by_index_rcu(net, fq->iif);
145	if (!dev)
146		goto out_rcu_unlock;
147
148	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
149	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
150
151	/* Don't send error if the first segment did not arrive. */
152	if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
153		goto out_rcu_unlock;
154
155	/*
 156	   But use as the source the device on which the LAST
 157	   segment arrived. And do not use the fq->dev pointer
 158	   directly; the device might already have disappeared.
 159	 */
160	fq->q.fragments->dev = dev;
161	icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
162out_rcu_unlock:
163	rcu_read_unlock();
164out:
165	spin_unlock(&fq->q.lock);
166	inet_frag_put(&fq->q, frags);
167}
168EXPORT_SYMBOL(ip6_expire_frag_queue);
169
170static void ip6_frag_expire(unsigned long data)
171{
172	struct frag_queue *fq;
173	struct net *net;
174
175	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
176	net = container_of(fq->q.net, struct net, ipv6.frags);
177
178	ip6_expire_frag_queue(net, fq, &ip6_frags);
179}
180
181static __inline__ struct frag_queue *
182fq_find(struct net *net, __be32 id, const struct in6_addr *src,
183	const struct in6_addr *dst, u8 ecn)
184{
185	struct inet_frag_queue *q;
186	struct ip6_create_arg arg;
187	unsigned int hash;
188
189	arg.id = id;
190	arg.user = IP6_DEFRAG_LOCAL_DELIVER;
191	arg.src = src;
192	arg.dst = dst;
193	arg.ecn = ecn;
194
195	read_lock(&ip6_frags.lock);
196	hash = inet6_hash_frag(id, src, dst);
197
198	q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
199	if (IS_ERR_OR_NULL(q)) {
200		inet_frag_maybe_warn_overflow(q, pr_fmt());
201		return NULL;
202	}
203	return container_of(q, struct frag_queue, q);
204}
205
206static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
207			   struct frag_hdr *fhdr, int nhoff)
208{
209	struct sk_buff *prev, *next;
210	struct net_device *dev;
211	int offset, end;
212	struct net *net = dev_net(skb_dst(skb)->dev);
213	u8 ecn;
214
215	if (fq->q.last_in & INET_FRAG_COMPLETE)
216		goto err;
217
218	offset = ntohs(fhdr->frag_off) & ~0x7;
219	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
220			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
221
222	if ((unsigned int)end > IPV6_MAXPLEN) {
223		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
224				 IPSTATS_MIB_INHDRERRORS);
225		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
226				  ((u8 *)&fhdr->frag_off -
227				   skb_network_header(skb)));
228		return -1;
229	}
230
231	ecn = ip6_frag_ecn(ipv6_hdr(skb));
232
233	if (skb->ip_summed == CHECKSUM_COMPLETE) {
234		const unsigned char *nh = skb_network_header(skb);
235		skb->csum = csum_sub(skb->csum,
236				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
237						  0));
238	}
239
240	/* Is this the final fragment? */
241	if (!(fhdr->frag_off & htons(IP6_MF))) {
242		/* If we already have some bits beyond end
243		 * or have different end, the segment is corrupted.
244		 */
245		if (end < fq->q.len ||
246		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
247			goto err;
248		fq->q.last_in |= INET_FRAG_LAST_IN;
249		fq->q.len = end;
250	} else {
251		/* Check if the fragment is rounded to 8 bytes.
252		 * Required by the RFC.
253		 */
254		if (end & 0x7) {
255			/* RFC2460 says always send parameter problem in
256			 * this case. -DaveM
257			 */
258			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
259					 IPSTATS_MIB_INHDRERRORS);
260			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
261					  offsetof(struct ipv6hdr, payload_len));
262			return -1;
263		}
264		if (end > fq->q.len) {
265			/* Some bits beyond end -> corruption. */
266			if (fq->q.last_in & INET_FRAG_LAST_IN)
267				goto err;
268			fq->q.len = end;
269		}
270	}
271
272	if (end == offset)
273		goto err;
274
275	/* Point into the IP datagram 'data' part. */
276	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
277		goto err;
278
279	if (pskb_trim_rcsum(skb, end - offset))
280		goto err;
281
282	/* Find out which fragments are in front and at the back of us
283	 * in the chain of fragments so far.  We must know where to put
284	 * this fragment, right?
285	 */
286	prev = fq->q.fragments_tail;
287	if (!prev || FRAG6_CB(prev)->offset < offset) {
288		next = NULL;
289		goto found;
290	}
291	prev = NULL;
292	for (next = fq->q.fragments; next != NULL; next = next->next) {
293		if (FRAG6_CB(next)->offset >= offset)
294			break;	/* bingo! */
295		prev = next;
296	}
297
298found:
299	/* RFC5722, Section 4, amended by Errata ID : 3089
300	 *                          When reassembling an IPv6 datagram, if
301	 *   one or more its constituent fragments is determined to be an
302	 *   overlapping fragment, the entire datagram (and any constituent
303	 *   fragments) MUST be silently discarded.
304	 */
305
306	/* Check for overlap with preceding fragment. */
307	if (prev &&
308	    (FRAG6_CB(prev)->offset + prev->len) > offset)
309		goto discard_fq;
310
311	/* Look for overlap with succeeding segment. */
312	if (next && FRAG6_CB(next)->offset < end)
313		goto discard_fq;
314
315	FRAG6_CB(skb)->offset = offset;
316
317	/* Insert this fragment in the chain of fragments. */
318	skb->next = next;
319	if (!next)
320		fq->q.fragments_tail = skb;
321	if (prev)
322		prev->next = skb;
323	else
324		fq->q.fragments = skb;
325
326	dev = skb->dev;
327	if (dev) {
328		fq->iif = dev->ifindex;
329		skb->dev = NULL;
330	}
331	fq->q.stamp = skb->tstamp;
332	fq->q.meat += skb->len;
333	fq->ecn |= ecn;
334	add_frag_mem_limit(&fq->q, skb->truesize);
335
336	/* The first fragment.
337	 * nhoffset is obtained from the first fragment, of course.
338	 */
339	if (offset == 0) {
340		fq->nhoffset = nhoff;
341		fq->q.last_in |= INET_FRAG_FIRST_IN;
342	}
343
344	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
345	    fq->q.meat == fq->q.len) {
346		int res;
347		unsigned long orefdst = skb->_skb_refdst;
348
349		skb->_skb_refdst = 0UL;
350		res = ip6_frag_reasm(fq, prev, dev);
351		skb->_skb_refdst = orefdst;
352		return res;
353	}
354
355	skb_dst_drop(skb);
356	inet_frag_lru_move(&fq->q);
357	return -1;
358
359discard_fq:
360	inet_frag_kill(&fq->q, &ip6_frags);
361err:
362	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
363		      IPSTATS_MIB_REASMFAILS);
364	kfree_skb(skb);
365	return -1;
366}
367
368/*
 369 *	Check if this packet is complete.
 370 *	Returns -1 on failure for any reason, and 1 once the frame has been
 371 *	reassembled (nhoff then indexes the current nexthdr field).
 372 *
 373 *	It is called with the fq locked, and the caller must check that the
 374 *	queue is eligible for reassembly, i.e. it is not COMPLETE and
 375 *	the last and the first fragments arrived and all the bits are here.
 376 */
377static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
378			  struct net_device *dev)
379{
380	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
381	struct sk_buff *fp, *head = fq->q.fragments;
382	int    payload_len;
383	unsigned int nhoff;
384	int sum_truesize;
385	u8 ecn;
386
387	inet_frag_kill(&fq->q, &ip6_frags);
388
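	/* ip_frag_ecn_table[] maps the set of ECN codepoints seen on the
	 * fragments to the codepoint to restore on the reassembled packet;
	 * 0xff marks an invalid combination (e.g. Not-ECT mixed with CE),
	 * in which case the whole datagram is dropped.
	 */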
389	ecn = ip_frag_ecn_table[fq->ecn];
390	if (unlikely(ecn == 0xff))
391		goto out_fail;
392
393	/* Make the one we just received the head. */
394	if (prev) {
395		head = prev->next;
396		fp = skb_clone(head, GFP_ATOMIC);
397
398		if (!fp)
399			goto out_oom;
400
401		fp->next = head->next;
402		if (!fp->next)
403			fq->q.fragments_tail = fp;
404		prev->next = fp;
405
406		skb_morph(head, fq->q.fragments);
407		head->next = fq->q.fragments->next;
408
409		consume_skb(fq->q.fragments);
410		fq->q.fragments = head;
411	}
412
413	WARN_ON(head == NULL);
414	WARN_ON(FRAG6_CB(head)->offset != 0);
415
416	/* Unfragmented part is taken from the first segment. */
417	payload_len = ((head->data - skb_network_header(head)) -
418		       sizeof(struct ipv6hdr) + fq->q.len -
419		       sizeof(struct frag_hdr));
420	if (payload_len > IPV6_MAXPLEN)
421		goto out_oversize;
422
423	/* Head of list must not be cloned. */
424	if (skb_unclone(head, GFP_ATOMIC))
425		goto out_oom;
426
427	/* If the first fragment is fragmented itself, we split
 428	 * it into two chunks: the first with the data and paged part
 429	 * and the second holding only the fragment list. */
430	if (skb_has_frag_list(head)) {
431		struct sk_buff *clone;
432		int i, plen = 0;
433
434		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
435			goto out_oom;
436		clone->next = head->next;
437		head->next = clone;
438		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
439		skb_frag_list_init(head);
440		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
441			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
442		clone->len = clone->data_len = head->data_len - plen;
443		head->data_len -= clone->len;
444		head->len -= clone->len;
445		clone->csum = 0;
446		clone->ip_summed = head->ip_summed;
447		add_frag_mem_limit(&fq->q, clone->truesize);
448	}
449
450	/* We have to remove the fragment header from the datagram and
 451	 * relocate the remaining headers in order to calculate the ICV correctly. */
452	nhoff = fq->nhoffset;
453	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
454	memmove(head->head + sizeof(struct frag_hdr), head->head,
455		(head->data - head->head) - sizeof(struct frag_hdr));
456	head->mac_header += sizeof(struct frag_hdr);
457	head->network_header += sizeof(struct frag_hdr);
458
459	skb_reset_transport_header(head);
460	skb_push(head, head->data - skb_network_header(head));
461
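	/* Account all fragment truesize in one go and try to merge each
	 * fragment straight into head's paged data with skb_try_coalesce();
	 * fragments that cannot be coalesced are linked onto head's
	 * frag_list instead.
	 */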
462	sum_truesize = head->truesize;
463	for (fp = head->next; fp;) {
464		bool headstolen;
465		int delta;
466		struct sk_buff *next = fp->next;
467
468		sum_truesize += fp->truesize;
469		if (head->ip_summed != fp->ip_summed)
470			head->ip_summed = CHECKSUM_NONE;
471		else if (head->ip_summed == CHECKSUM_COMPLETE)
472			head->csum = csum_add(head->csum, fp->csum);
473
474		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
475			kfree_skb_partial(fp, headstolen);
476		} else {
477			if (!skb_shinfo(head)->frag_list)
478				skb_shinfo(head)->frag_list = fp;
479			head->data_len += fp->len;
480			head->len += fp->len;
481			head->truesize += fp->truesize;
482		}
483		fp = next;
484	}
485	sub_frag_mem_limit(&fq->q, sum_truesize);
486
487	head->next = NULL;
488	head->dev = dev;
489	head->tstamp = fq->q.stamp;
490	ipv6_hdr(head)->payload_len = htons(payload_len);
491	ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
492	IP6CB(head)->nhoff = nhoff;
493	IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
494
495	/* Yes, and fold redundant checksum back. 8) */
496	if (head->ip_summed == CHECKSUM_COMPLETE)
497		head->csum = csum_partial(skb_network_header(head),
498					  skb_network_header_len(head),
499					  head->csum);
500
501	rcu_read_lock();
502	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
503	rcu_read_unlock();
504	fq->q.fragments = NULL;
505	fq->q.fragments_tail = NULL;
506	return 1;
507
508out_oversize:
509	net_dbg_ratelimited("ip6_frag_reasm: payload len = %d\n", payload_len);
510	goto out_fail;
511out_oom:
512	net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n");
513out_fail:
514	rcu_read_lock();
515	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
516	rcu_read_unlock();
517	return -1;
518}
519
520static int ipv6_frag_rcv(struct sk_buff *skb)
521{
522	struct frag_hdr *fhdr;
523	struct frag_queue *fq;
524	const struct ipv6hdr *hdr = ipv6_hdr(skb);
525	struct net *net = dev_net(skb_dst(skb)->dev);
526	int evicted;
527
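	/* IP6SKB_FRAGMENTED is set once a fragment header has been handled
	 * (see below), so a packet that arrives here with the flag already
	 * set, e.g. one carrying a second fragment header, is treated as a
	 * header error instead of being run through reassembly again.
	 */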
528	if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
529		goto fail_hdr;
530
531	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
532
533	/* Jumbo payload inhibits frag. header */
534	if (hdr->payload_len == 0)
535		goto fail_hdr;
536
537	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
538				 sizeof(struct frag_hdr))))
539		goto fail_hdr;
540
541	hdr = ipv6_hdr(skb);
542	fhdr = (struct frag_hdr *)skb_transport_header(skb);
543
544	if (!(fhdr->frag_off & htons(0xFFF9))) {
545		/* It is not a fragmented frame */
546		skb->transport_header += sizeof(struct frag_hdr);
547		IP6_INC_STATS_BH(net,
548				 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);
549
550		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
551		IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
552		return 1;
553	}
554
555	evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags, false);
556	if (evicted)
557		IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
558				 IPSTATS_MIB_REASMFAILS, evicted);
559
560	fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
561		     ip6_frag_ecn(hdr));
562	if (fq != NULL) {
563		int ret;
564
565		spin_lock(&fq->q.lock);
566
567		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
568
569		spin_unlock(&fq->q.lock);
570		inet_frag_put(&fq->q, &ip6_frags);
571		return ret;
572	}
573
574	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
575	kfree_skb(skb);
576	return -1;
577
578fail_hdr:
579	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
580	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
581	return -1;
582}
583
584static const struct inet6_protocol frag_protocol =
585{
586	.handler	=	ipv6_frag_rcv,
587	.flags		=	INET6_PROTO_NOPOLICY,
588};
589
590#ifdef CONFIG_SYSCTL
591static struct ctl_table ip6_frags_ns_ctl_table[] = {
592	{
593		.procname	= "ip6frag_high_thresh",
594		.data		= &init_net.ipv6.frags.high_thresh,
595		.maxlen		= sizeof(int),
596		.mode		= 0644,
597		.proc_handler	= proc_dointvec
598	},
599	{
600		.procname	= "ip6frag_low_thresh",
601		.data		= &init_net.ipv6.frags.low_thresh,
602		.maxlen		= sizeof(int),
603		.mode		= 0644,
604		.proc_handler	= proc_dointvec
605	},
606	{
607		.procname	= "ip6frag_time",
608		.data		= &init_net.ipv6.frags.timeout,
609		.maxlen		= sizeof(int),
610		.mode		= 0644,
611		.proc_handler	= proc_dointvec_jiffies,
612	},
613	{ }
614};
615
616static struct ctl_table ip6_frags_ctl_table[] = {
617	{
618		.procname	= "ip6frag_secret_interval",
619		.data		= &ip6_frags.secret_interval,
620		.maxlen		= sizeof(int),
621		.mode		= 0644,
622		.proc_handler	= proc_dointvec_jiffies,
623	},
624	{ }
625};
626
627static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
628{
629	struct ctl_table *table;
630	struct ctl_table_header *hdr;
631
632	table = ip6_frags_ns_ctl_table;
633	if (!net_eq(net, &init_net)) {
634		table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
635		if (table == NULL)
636			goto err_alloc;
637
638		table[0].data = &net->ipv6.frags.high_thresh;
639		table[1].data = &net->ipv6.frags.low_thresh;
640		table[2].data = &net->ipv6.frags.timeout;
641
642		/* Don't export sysctls to unprivileged users */
643		if (net->user_ns != &init_user_ns)
644			table[0].procname = NULL;
645	}
646
647	hdr = register_net_sysctl(net, "net/ipv6", table);
648	if (hdr == NULL)
649		goto err_reg;
650
651	net->ipv6.sysctl.frags_hdr = hdr;
652	return 0;
653
654err_reg:
655	if (!net_eq(net, &init_net))
656		kfree(table);
657err_alloc:
658	return -ENOMEM;
659}
660
661static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
662{
663	struct ctl_table *table;
664
665	table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
666	unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
667	if (!net_eq(net, &init_net))
668		kfree(table);
669}
670
671static struct ctl_table_header *ip6_ctl_header;
672
673static int ip6_frags_sysctl_register(void)
674{
675	ip6_ctl_header = register_net_sysctl(&init_net, "net/ipv6",
676			ip6_frags_ctl_table);
677	return ip6_ctl_header == NULL ? -ENOMEM : 0;
678}
679
680static void ip6_frags_sysctl_unregister(void)
681{
682	unregister_net_sysctl_table(ip6_ctl_header);
683}
684#else
685static inline int ip6_frags_ns_sysctl_register(struct net *net)
686{
687	return 0;
688}
689
690static inline void ip6_frags_ns_sysctl_unregister(struct net *net)
691{
692}
693
694static inline int ip6_frags_sysctl_register(void)
695{
696	return 0;
697}
698
699static inline void ip6_frags_sysctl_unregister(void)
700{
701}
702#endif
703
704static int __net_init ipv6_frags_init_net(struct net *net)
705{
706	net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
707	net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
708	net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;
709
710	inet_frags_init_net(&net->ipv6.frags);
711
712	return ip6_frags_ns_sysctl_register(net);
713}
714
715static void __net_exit ipv6_frags_exit_net(struct net *net)
716{
717	ip6_frags_ns_sysctl_unregister(net);
718	inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
719}
720
721static struct pernet_operations ip6_frags_ops = {
722	.init = ipv6_frags_init_net,
723	.exit = ipv6_frags_exit_net,
724};
725
726int __init ipv6_frag_init(void)
727{
728	int ret;
729
730	ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
731	if (ret)
732		goto out;
733
734	ret = ip6_frags_sysctl_register();
735	if (ret)
736		goto err_sysctl;
737
738	ret = register_pernet_subsys(&ip6_frags_ops);
739	if (ret)
740		goto err_pernet;
741
742	ip6_frags.hashfn = ip6_hashfn;
743	ip6_frags.constructor = ip6_frag_init;
744	ip6_frags.destructor = NULL;
745	ip6_frags.skb_free = NULL;
746	ip6_frags.qsize = sizeof(struct frag_queue);
747	ip6_frags.match = ip6_frag_match;
748	ip6_frags.frag_expire = ip6_frag_expire;
749	ip6_frags.secret_interval = 10 * 60 * HZ;
750	inet_frags_init(&ip6_frags);
751out:
752	return ret;
753
754err_pernet:
755	ip6_frags_sysctl_unregister();
756err_sysctl:
757	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
758	goto out;
759}
760
761void ipv6_frag_exit(void)
762{
763	inet_frags_fini(&ip6_frags);
764	ip6_frags_sysctl_unregister();
765	unregister_pernet_subsys(&ip6_frags_ops);
766	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
767}