v3.15 (net/ipv4/ip_fragment.c)
  1/*
  2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
  3 *		operating system.  INET is implemented using the  BSD Socket
  4 *		interface as the means of communication with the user level.
  5 *
  6 *		The IP fragmentation functionality.
  7 *
  8 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
  9 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
 10 *
 11 * Fixes:
 12 *		Alan Cox	:	Split from ip.c , see ip_input.c for history.
 13 *		David S. Miller :	Begin massive cleanup...
 14 *		Andi Kleen	:	Add sysctls.
 15 *		xxxx		:	Overlapfrag bug.
 16 *		Ultima          :       ip_expire() kernel panic.
 17 *		Bill Hawes	:	Frag accounting and evictor fixes.
 18 *		John McDonald	:	0 length frag bug.
 19 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 20 *		Patrick McHardy :	LRU queue of frag heads for evictor.
 21 */
 22
 23#define pr_fmt(fmt) "IPv4: " fmt
 24
 25#include <linux/compiler.h>
 26#include <linux/module.h>
 27#include <linux/types.h>
 28#include <linux/mm.h>
 29#include <linux/jiffies.h>
 30#include <linux/skbuff.h>
 31#include <linux/list.h>
 32#include <linux/ip.h>
 33#include <linux/icmp.h>
 34#include <linux/netdevice.h>
 35#include <linux/jhash.h>
 36#include <linux/random.h>
 37#include <linux/slab.h>
 38#include <net/route.h>
 39#include <net/dst.h>
 40#include <net/sock.h>
 41#include <net/ip.h>
 42#include <net/icmp.h>
 43#include <net/checksum.h>
 44#include <net/inetpeer.h>
 45#include <net/inet_frag.h>
 46#include <linux/tcp.h>
 47#include <linux/udp.h>
 48#include <linux/inet.h>
 49#include <linux/netfilter_ipv4.h>
 50#include <net/inet_ecn.h>
 51
 52/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 53 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 54 * as well. Or notify me, at least. --ANK
 55 */
 56
 57static int sysctl_ipfrag_max_dist __read_mostly = 64;
 58
 59struct ipfrag_skb_cb
 60{
 61	struct inet_skb_parm	h;
 62	int			offset;
 63};
 64
 65#define FRAG_CB(skb)	((struct ipfrag_skb_cb *)((skb)->cb))
 66
 67/* Describe an entry in the "incomplete datagrams" queue. */
 68struct ipq {
 69	struct inet_frag_queue q;
 70
 71	u32		user;
 72	__be32		saddr;
 73	__be32		daddr;
 74	__be16		id;
 75	u8		protocol;
 76	u8		ecn; /* RFC3168 support */
 77	int             iif;
 78	unsigned int    rid;
 79	struct inet_peer *peer;
 80};
 81
 82static inline u8 ip4_frag_ecn(u8 tos)
 83{
 84	return 1 << (tos & INET_ECN_MASK);
 85}
 86
 87static struct inet_frags ip4_frags;
 88
 89int ip_frag_nqueues(struct net *net)
 90{
 91	return net->ipv4.frags.nqueues;
 92}
 93
 94int ip_frag_mem(struct net *net)
 95{
 96	return sum_frag_mem_limit(&net->ipv4.frags);
 97}
 98
 99static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
100			 struct net_device *dev);
101
102struct ip4_create_arg {
103	struct iphdr *iph;
104	u32 user;
105};
106
107static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
108{
109	net_get_random_once(&ip4_frags.rnd, sizeof(ip4_frags.rnd));
110	return jhash_3words((__force u32)id << 16 | prot,
111			    (__force u32)saddr, (__force u32)daddr,
112			    ip4_frags.rnd) & (INETFRAGS_HASHSZ - 1);
113}
114
115static unsigned int ip4_hashfn(struct inet_frag_queue *q)
116{
117	struct ipq *ipq;
118
119	ipq = container_of(q, struct ipq, q);
120	return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
121}
122
123static bool ip4_frag_match(struct inet_frag_queue *q, void *a)
124{
125	struct ipq *qp;
126	struct ip4_create_arg *arg = a;
127
128	qp = container_of(q, struct ipq, q);
129	return	qp->id == arg->iph->id &&
130		qp->saddr == arg->iph->saddr &&
131		qp->daddr == arg->iph->daddr &&
132		qp->protocol == arg->iph->protocol &&
133		qp->user == arg->user;
134}
135
136static void ip4_frag_init(struct inet_frag_queue *q, void *a)
137{
138	struct ipq *qp = container_of(q, struct ipq, q);
139	struct netns_ipv4 *ipv4 = container_of(q->net, struct netns_ipv4,
140					       frags);
141	struct net *net = container_of(ipv4, struct net, ipv4);
142
143	struct ip4_create_arg *arg = a;
144
145	qp->protocol = arg->iph->protocol;
146	qp->id = arg->iph->id;
147	qp->ecn = ip4_frag_ecn(arg->iph->tos);
148	qp->saddr = arg->iph->saddr;
149	qp->daddr = arg->iph->daddr;
150	qp->user = arg->user;
151	qp->peer = sysctl_ipfrag_max_dist ?
152		inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, 1) : NULL;
153}
154
155static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
156{
157	struct ipq *qp;
158
159	qp = container_of(q, struct ipq, q);
160	if (qp->peer)
161		inet_putpeer(qp->peer);
162}
163
164
165/* Destruction primitives. */
166
167static __inline__ void ipq_put(struct ipq *ipq)
168{
169	inet_frag_put(&ipq->q, &ip4_frags);
170}
171
172/* Kill ipq entry. It is not destroyed immediately,
173 * because caller (and someone more) holds reference count.
174 */
175static void ipq_kill(struct ipq *ipq)
176{
177	inet_frag_kill(&ipq->q, &ip4_frags);
178}
179
180/* Memory limiting on fragments.  Evictor trashes the oldest
181 * fragment queue until we are back under the threshold.
182 */
183static void ip_evictor(struct net *net)
184{
185	int evicted;
186
187	evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags, false);
188	if (evicted)
189		IP_ADD_STATS_BH(net, IPSTATS_MIB_REASMFAILS, evicted);
190}
191
192/*
193 * Oops, a fragment queue timed out.  Kill it and send an ICMP reply.
194 */
195static void ip_expire(unsigned long arg)
196{
197	struct ipq *qp;
198	struct net *net;
199
200	qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
201	net = container_of(qp->q.net, struct net, ipv4.frags);
202
203	spin_lock(&qp->q.lock);
204
205	if (qp->q.last_in & INET_FRAG_COMPLETE)
206		goto out;
207
208	ipq_kill(qp);
209
210	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);
211	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
212
213	if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
214		struct sk_buff *head = qp->q.fragments;
215		const struct iphdr *iph;
216		int err;
217
218		rcu_read_lock();
219		head->dev = dev_get_by_index_rcu(net, qp->iif);
220		if (!head->dev)
221			goto out_rcu_unlock;
222
223		/* skb has no dst, perform route lookup again */
224		iph = ip_hdr(head);
225		err = ip_route_input_noref(head, iph->daddr, iph->saddr,
226					   iph->tos, head->dev);
227		if (err)
228			goto out_rcu_unlock;
229
230		/*
231		 * Only an end host needs to send an ICMP
232		 * "Fragment Reassembly Timeout" message, per RFC792.
233		 */
234		if (qp->user == IP_DEFRAG_AF_PACKET ||
235		    ((qp->user >= IP_DEFRAG_CONNTRACK_IN) &&
236		     (qp->user <= __IP_DEFRAG_CONNTRACK_IN_END) &&
237		     (skb_rtable(head)->rt_type != RTN_LOCAL)))
238			goto out_rcu_unlock;
239
240
241		/* Send an ICMP "Fragment Reassembly Timeout" message. */
242		icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
243out_rcu_unlock:
244		rcu_read_unlock();
245	}
246out:
247	spin_unlock(&qp->q.lock);
248	ipq_put(qp);
249}
250
251/* Find the correct entry in the "incomplete datagrams" queue for
252 * this IP datagram, and create a new one if nothing is found.
253 */
254static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
255{
256	struct inet_frag_queue *q;
257	struct ip4_create_arg arg;
258	unsigned int hash;
259
260	arg.iph = iph;
261	arg.user = user;
262
263	read_lock(&ip4_frags.lock);
264	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
265
266	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
267	if (IS_ERR_OR_NULL(q)) {
268		inet_frag_maybe_warn_overflow(q, pr_fmt());
269		return NULL;
270	}
271	return container_of(q, struct ipq, q);
272}
273
274/* Is the fragment too far ahead to be part of ipq? */
275static inline int ip_frag_too_far(struct ipq *qp)
276{
277	struct inet_peer *peer = qp->peer;
278	unsigned int max = sysctl_ipfrag_max_dist;
279	unsigned int start, end;
280
281	int rc;
282
283	if (!peer || !max)
284		return 0;
285
286	start = qp->rid;
287	end = atomic_inc_return(&peer->rid);
288	qp->rid = end;
289
290	rc = qp->q.fragments && (end - start) > max;
291
292	if (rc) {
293		struct net *net;
294
295		net = container_of(qp->q.net, struct net, ipv4.frags);
296		IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
297	}
298
299	return rc;
300}
301
302static int ip_frag_reinit(struct ipq *qp)
303{
304	struct sk_buff *fp;
305	unsigned int sum_truesize = 0;
306
307	if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
308		atomic_inc(&qp->q.refcnt);
309		return -ETIMEDOUT;
310	}
311
312	fp = qp->q.fragments;
313	do {
314		struct sk_buff *xp = fp->next;
315
316		sum_truesize += fp->truesize;
317		kfree_skb(fp);
318		fp = xp;
319	} while (fp);
320	sub_frag_mem_limit(&qp->q, sum_truesize);
321
322	qp->q.last_in = 0;
323	qp->q.len = 0;
324	qp->q.meat = 0;
325	qp->q.fragments = NULL;
326	qp->q.fragments_tail = NULL;
327	qp->iif = 0;
328	qp->ecn = 0;
329
330	return 0;
331}
332
333/* Add new segment to existing queue. */
334static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
335{
336	struct sk_buff *prev, *next;
337	struct net_device *dev;
338	int flags, offset;
339	int ihl, end;
340	int err = -ENOENT;
341	u8 ecn;
342
343	if (qp->q.last_in & INET_FRAG_COMPLETE)
344		goto err;
345
346	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
347	    unlikely(ip_frag_too_far(qp)) &&
348	    unlikely(err = ip_frag_reinit(qp))) {
349		ipq_kill(qp);
350		goto err;
351	}
352
353	ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
354	offset = ntohs(ip_hdr(skb)->frag_off);
355	flags = offset & ~IP_OFFSET;
356	offset &= IP_OFFSET;
357	offset <<= 3;		/* offset is in 8-byte chunks */
358	ihl = ip_hdrlen(skb);
359
360	/* Determine the position of this fragment. */
361	end = offset + skb->len - ihl;
362	err = -EINVAL;
363
364	/* Is this the final fragment? */
365	if ((flags & IP_MF) == 0) {
366		/* If we already have some bits beyond end
367		 * or have different end, the segment is corrupted.
368		 */
369		if (end < qp->q.len ||
370		    ((qp->q.last_in & INET_FRAG_LAST_IN) && end != qp->q.len))
371			goto err;
372		qp->q.last_in |= INET_FRAG_LAST_IN;
373		qp->q.len = end;
374	} else {
375		if (end&7) {
376			end &= ~7;
377			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
378				skb->ip_summed = CHECKSUM_NONE;
379		}
380		if (end > qp->q.len) {
381			/* Some bits beyond end -> corruption. */
382			if (qp->q.last_in & INET_FRAG_LAST_IN)
383				goto err;
384			qp->q.len = end;
385		}
386	}
387	if (end == offset)
388		goto err;
389
390	err = -ENOMEM;
391	if (pskb_pull(skb, ihl) == NULL)
392		goto err;
393
394	err = pskb_trim_rcsum(skb, end - offset);
395	if (err)
396		goto err;
397
398	/* Find out which fragments are in front and at the back of us
399	 * in the chain of fragments so far.  We must know where to put
400	 * this fragment, right?
401	 */
402	prev = qp->q.fragments_tail;
403	if (!prev || FRAG_CB(prev)->offset < offset) {
404		next = NULL;
405		goto found;
406	}
407	prev = NULL;
408	for (next = qp->q.fragments; next != NULL; next = next->next) {
409		if (FRAG_CB(next)->offset >= offset)
410			break;	/* bingo! */
411		prev = next;
412	}
413
414found:
415	/* We found where to put this one.  Check for overlap with
416	 * preceding fragment, and, if needed, align things so that
417	 * any overlaps are eliminated.
418	 */
419	if (prev) {
420		int i = (FRAG_CB(prev)->offset + prev->len) - offset;
421
422		if (i > 0) {
423			offset += i;
424			err = -EINVAL;
425			if (end <= offset)
426				goto err;
427			err = -ENOMEM;
428			if (!pskb_pull(skb, i))
429				goto err;
430			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
431				skb->ip_summed = CHECKSUM_NONE;
432		}
433	}
434
435	err = -ENOMEM;
436
437	while (next && FRAG_CB(next)->offset < end) {
438		int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */
439
440		if (i < next->len) {
441			/* Eat head of the next overlapped fragment
442			 * and leave the loop. The next ones cannot overlap.
443			 */
444			if (!pskb_pull(next, i))
445				goto err;
446			FRAG_CB(next)->offset += i;
447			qp->q.meat -= i;
448			if (next->ip_summed != CHECKSUM_UNNECESSARY)
449				next->ip_summed = CHECKSUM_NONE;
450			break;
451		} else {
452			struct sk_buff *free_it = next;
453
454			/* Old fragment is completely overridden with
455			 * the new one, drop it.
456			 */
457			next = next->next;
458
459			if (prev)
460				prev->next = next;
461			else
462				qp->q.fragments = next;
463
464			qp->q.meat -= free_it->len;
465			sub_frag_mem_limit(&qp->q, free_it->truesize);
466			kfree_skb(free_it);
467		}
468	}
469
470	FRAG_CB(skb)->offset = offset;
471
472	/* Insert this fragment in the chain of fragments. */
473	skb->next = next;
474	if (!next)
475		qp->q.fragments_tail = skb;
476	if (prev)
477		prev->next = skb;
478	else
479		qp->q.fragments = skb;
480
481	dev = skb->dev;
482	if (dev) {
483		qp->iif = dev->ifindex;
484		skb->dev = NULL;
485	}
486	qp->q.stamp = skb->tstamp;
487	qp->q.meat += skb->len;
488	qp->ecn |= ecn;
489	add_frag_mem_limit(&qp->q, skb->truesize);
490	if (offset == 0)
491		qp->q.last_in |= INET_FRAG_FIRST_IN;
492
493	if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
494	    skb->len + ihl > qp->q.max_size)
495		qp->q.max_size = skb->len + ihl;
496
497	if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
498	    qp->q.meat == qp->q.len) {
499		unsigned long orefdst = skb->_skb_refdst;
500
501		skb->_skb_refdst = 0UL;
502		err = ip_frag_reasm(qp, prev, dev);
503		skb->_skb_refdst = orefdst;
504		return err;
505	}
506
507	skb_dst_drop(skb);
508	inet_frag_lru_move(&qp->q);
509	return -EINPROGRESS;
510
511err:
512	kfree_skb(skb);
513	return err;
514}
515
516
517/* Build a new IP datagram from all its fragments. */
518
519static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
520			 struct net_device *dev)
521{
522	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
523	struct iphdr *iph;
524	struct sk_buff *fp, *head = qp->q.fragments;
525	int len;
526	int ihlen;
527	int err;
528	int sum_truesize;
529	u8 ecn;
530
531	ipq_kill(qp);
532
533	ecn = ip_frag_ecn_table[qp->ecn];
534	if (unlikely(ecn == 0xff)) {
535		err = -EINVAL;
536		goto out_fail;
537	}
538	/* Make the one we just received the head. */
539	if (prev) {
540		head = prev->next;
541		fp = skb_clone(head, GFP_ATOMIC);
542		if (!fp)
543			goto out_nomem;
544
545		fp->next = head->next;
546		if (!fp->next)
547			qp->q.fragments_tail = fp;
548		prev->next = fp;
549
550		skb_morph(head, qp->q.fragments);
551		head->next = qp->q.fragments->next;
552
553		consume_skb(qp->q.fragments);
554		qp->q.fragments = head;
555	}
556
557	WARN_ON(head == NULL);
558	WARN_ON(FRAG_CB(head)->offset != 0);
559
560	/* Allocate a new buffer for the datagram. */
561	ihlen = ip_hdrlen(head);
562	len = ihlen + qp->q.len;
563
564	err = -E2BIG;
565	if (len > 65535)
566		goto out_oversize;
567
568	/* Head of list must not be cloned. */
569	if (skb_unclone(head, GFP_ATOMIC))
570		goto out_nomem;
571
572	/* If the first fragment is fragmented itself, we split
573	 * it into two chunks: the first with data and paged part
574	 * and the second, holding only fragments. */
575	if (skb_has_frag_list(head)) {
576		struct sk_buff *clone;
577		int i, plen = 0;
578
579		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
580			goto out_nomem;
581		clone->next = head->next;
582		head->next = clone;
583		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
584		skb_frag_list_init(head);
585		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
586			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
587		clone->len = clone->data_len = head->data_len - plen;
588		head->data_len -= clone->len;
589		head->len -= clone->len;
590		clone->csum = 0;
591		clone->ip_summed = head->ip_summed;
592		add_frag_mem_limit(&qp->q, clone->truesize);
593	}
594
595	skb_push(head, head->data - skb_network_header(head));
596
597	sum_truesize = head->truesize;
598	for (fp = head->next; fp;) {
599		bool headstolen;
600		int delta;
601		struct sk_buff *next = fp->next;
602
603		sum_truesize += fp->truesize;
604		if (head->ip_summed != fp->ip_summed)
605			head->ip_summed = CHECKSUM_NONE;
606		else if (head->ip_summed == CHECKSUM_COMPLETE)
607			head->csum = csum_add(head->csum, fp->csum);
608
609		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
610			kfree_skb_partial(fp, headstolen);
611		} else {
612			if (!skb_shinfo(head)->frag_list)
613				skb_shinfo(head)->frag_list = fp;
614			head->data_len += fp->len;
615			head->len += fp->len;
616			head->truesize += fp->truesize;
617		}
618		fp = next;
619	}
620	sub_frag_mem_limit(&qp->q, sum_truesize);
621
622	head->next = NULL;
623	head->dev = dev;
624	head->tstamp = qp->q.stamp;
625	IPCB(head)->frag_max_size = qp->q.max_size;
626
627	iph = ip_hdr(head);
628	/* max_size != 0 implies at least one fragment had IP_DF set */
629	iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0;
630	iph->tot_len = htons(len);
631	iph->tos |= ecn;
632	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
633	qp->q.fragments = NULL;
634	qp->q.fragments_tail = NULL;
635	return 0;
636
637out_nomem:
638	LIMIT_NETDEBUG(KERN_ERR pr_fmt("queue_glue: no memory for gluing queue %p\n"),
639		       qp);
640	err = -ENOMEM;
641	goto out_fail;
642out_oversize:
643	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
644out_fail:
645	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
646	return err;
647}
648
649/* Process an incoming IP datagram fragment. */
650int ip_defrag(struct sk_buff *skb, u32 user)
651{
652	struct ipq *qp;
653	struct net *net;
654
655	net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev);
656	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
657
658	/* Start by cleaning up the memory. */
659	ip_evictor(net);
660
661	/* Lookup (or create) queue header */
662	if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) {
663		int ret;
664
665		spin_lock(&qp->q.lock);
666
667		ret = ip_frag_queue(qp, skb);
668
669		spin_unlock(&qp->q.lock);
670		ipq_put(qp);
671		return ret;
672	}
673
674	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
675	kfree_skb(skb);
676	return -ENOMEM;
677}
678EXPORT_SYMBOL(ip_defrag);
679
680struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
681{
682	struct iphdr iph;
683	u32 len;
684
685	if (skb->protocol != htons(ETH_P_IP))
686		return skb;
687
688	if (!skb_copy_bits(skb, 0, &iph, sizeof(iph)))
689		return skb;
690
691	if (iph.ihl < 5 || iph.version != 4)
692		return skb;
693
694	len = ntohs(iph.tot_len);
695	if (skb->len < len || len < (iph.ihl * 4))
696		return skb;
697
698	if (ip_is_fragment(&iph)) {
699		skb = skb_share_check(skb, GFP_ATOMIC);
700		if (skb) {
701			if (!pskb_may_pull(skb, iph.ihl*4))
702				return skb;
703			if (pskb_trim_rcsum(skb, len))
704				return skb;
705			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
706			if (ip_defrag(skb, user))
707				return NULL;
708			skb_clear_hash(skb);
709		}
710	}
711	return skb;
712}
713EXPORT_SYMBOL(ip_check_defrag);
714
715#ifdef CONFIG_SYSCTL
716static int zero;
717
718static struct ctl_table ip4_frags_ns_ctl_table[] = {
719	{
720		.procname	= "ipfrag_high_thresh",
721		.data		= &init_net.ipv4.frags.high_thresh,
722		.maxlen		= sizeof(int),
723		.mode		= 0644,
724		.proc_handler	= proc_dointvec
725	},
726	{
727		.procname	= "ipfrag_low_thresh",
728		.data		= &init_net.ipv4.frags.low_thresh,
729		.maxlen		= sizeof(int),
730		.mode		= 0644,
731		.proc_handler	= proc_dointvec
732	},
733	{
734		.procname	= "ipfrag_time",
735		.data		= &init_net.ipv4.frags.timeout,
736		.maxlen		= sizeof(int),
737		.mode		= 0644,
738		.proc_handler	= proc_dointvec_jiffies,
739	},
740	{ }
741};
742
743static struct ctl_table ip4_frags_ctl_table[] = {
744	{
745		.procname	= "ipfrag_secret_interval",
746		.data		= &ip4_frags.secret_interval,
747		.maxlen		= sizeof(int),
748		.mode		= 0644,
749		.proc_handler	= proc_dointvec_jiffies,
750	},
751	{
752		.procname	= "ipfrag_max_dist",
753		.data		= &sysctl_ipfrag_max_dist,
754		.maxlen		= sizeof(int),
755		.mode		= 0644,
756		.proc_handler	= proc_dointvec_minmax,
757		.extra1		= &zero
758	},
759	{ }
760};
761
762static int __net_init ip4_frags_ns_ctl_register(struct net *net)
763{
764	struct ctl_table *table;
765	struct ctl_table_header *hdr;
766
767	table = ip4_frags_ns_ctl_table;
768	if (!net_eq(net, &init_net)) {
769		table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
770		if (table == NULL)
771			goto err_alloc;
772
773		table[0].data = &net->ipv4.frags.high_thresh;
774		table[1].data = &net->ipv4.frags.low_thresh;
775		table[2].data = &net->ipv4.frags.timeout;
776
777		/* Don't export sysctls to unprivileged users */
778		if (net->user_ns != &init_user_ns)
779			table[0].procname = NULL;
780	}
781
782	hdr = register_net_sysctl(net, "net/ipv4", table);
783	if (hdr == NULL)
784		goto err_reg;
785
786	net->ipv4.frags_hdr = hdr;
787	return 0;
788
789err_reg:
790	if (!net_eq(net, &init_net))
791		kfree(table);
792err_alloc:
793	return -ENOMEM;
794}
795
796static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
797{
798	struct ctl_table *table;
799
800	table = net->ipv4.frags_hdr->ctl_table_arg;
801	unregister_net_sysctl_table(net->ipv4.frags_hdr);
802	kfree(table);
803}
804
805static void ip4_frags_ctl_register(void)
806{
807	register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
808}
809#else
810static inline int ip4_frags_ns_ctl_register(struct net *net)
811{
812	return 0;
813}
814
815static inline void ip4_frags_ns_ctl_unregister(struct net *net)
816{
817}
818
819static inline void ip4_frags_ctl_register(void)
820{
821}
822#endif
823
824static int __net_init ipv4_frags_init_net(struct net *net)
825{
826	/* Fragment cache limits.
827	 *
828	 * The fragment memory accounting code (tries to) account for
829	 * the real memory usage, by measuring both the size of frag
830	 * queue struct (inet_frag_queue (ipv4:ipq/ipv6:frag_queue))
831	 * and the SKB's truesize.
832	 *
833	 * A 64K fragment consumes 129736 bytes (44*2944)+200
834	 * (1500 truesize == 2944, sizeof(struct ipq) == 200)
835	 *
836	 * We will commit 4MB at one time. Should we cross that limit
837	 * we will prune down to 3MB, making room for approx 8 big 64K
838	 * fragments 8x128k.
839	 */
840	net->ipv4.frags.high_thresh = 4 * 1024 * 1024;
841	net->ipv4.frags.low_thresh  = 3 * 1024 * 1024;
842	/*
843	 * Important NOTE! Fragment queue must be destroyed before MSL expires.
844	 * RFC 791 is wrong in proposing to prolong the timer on each
845	 * fragment arrival by the TTL.
846	 */
847	net->ipv4.frags.timeout = IP_FRAG_TIME;
848
849	inet_frags_init_net(&net->ipv4.frags);
850
851	return ip4_frags_ns_ctl_register(net);
852}
853
854static void __net_exit ipv4_frags_exit_net(struct net *net)
855{
856	ip4_frags_ns_ctl_unregister(net);
857	inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
858}
859
860static struct pernet_operations ip4_frags_ops = {
861	.init = ipv4_frags_init_net,
862	.exit = ipv4_frags_exit_net,
863};
864
865void __init ipfrag_init(void)
866{
867	ip4_frags_ctl_register();
868	register_pernet_subsys(&ip4_frags_ops);
869	ip4_frags.hashfn = ip4_hashfn;
870	ip4_frags.constructor = ip4_frag_init;
871	ip4_frags.destructor = ip4_frag_free;
872	ip4_frags.skb_free = NULL;
873	ip4_frags.qsize = sizeof(struct ipq);
874	ip4_frags.match = ip4_frag_match;
875	ip4_frags.frag_expire = ip_expire;
876	ip4_frags.secret_interval = 10 * 60 * HZ;
877	inet_frags_init(&ip4_frags);
878}
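
The fragment placement logic above (lines 354-357) unpacks iph->frag_off into three flag bits and a 13-bit offset stored in 8-byte units. Below is a minimal standalone sketch of that decoding, an editor's illustration rather than kernel code; it assumes only the standard constants from <linux/ip.h> (IP_OFFSET 0x1FFF, IP_MF 0x2000, IP_DF 0x4000), which the listing itself relies on, e.g. in the IP_DF test at line 493.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>		/* ntohs(), htons() */

#define IP_OFFSET	0x1FFF	/* fragment offset mask (8-byte units) */
#define IP_MF		0x2000	/* "more fragments" flag */
#define IP_DF		0x4000	/* "don't fragment" flag */

/* Decode iph->frag_off the same way ip_frag_queue() does. */
static void decode_frag_off(uint16_t frag_off)
{
	int offset = ntohs(frag_off);
	int flags = offset & ~IP_OFFSET;

	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */

	printf("offset=%d bytes  MF=%d  DF=%d\n",
	       offset, !!(flags & IP_MF), !!(flags & IP_DF));
}

int main(void)
{
	decode_frag_off(htons(IP_MF | 0));	/* first fragment */
	decode_frag_off(htons(IP_MF | 185));	/* middle fragment, 1480 bytes in */
	decode_frag_off(htons(370));		/* final fragment (MF clear), 2960 bytes in */
	return 0;
}
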
v3.5.6 (net/ipv4/ip_fragment.c)
  1/*
  2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
  3 *		operating system.  INET is implemented using the  BSD Socket
  4 *		interface as the means of communication with the user level.
  5 *
  6 *		The IP fragmentation functionality.
  7 *
  8 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
  9 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
 10 *
 11 * Fixes:
 12 *		Alan Cox	:	Split from ip.c , see ip_input.c for history.
 13 *		David S. Miller :	Begin massive cleanup...
 14 *		Andi Kleen	:	Add sysctls.
 15 *		xxxx		:	Overlapfrag bug.
 16 *		Ultima          :       ip_expire() kernel panic.
 17 *		Bill Hawes	:	Frag accounting and evictor fixes.
 18 *		John McDonald	:	0 length frag bug.
 19 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 20 *		Patrick McHardy :	LRU queue of frag heads for evictor.
 21 */
 22
 23#define pr_fmt(fmt) "IPv4: " fmt
 24
 25#include <linux/compiler.h>
 26#include <linux/module.h>
 27#include <linux/types.h>
 28#include <linux/mm.h>
 29#include <linux/jiffies.h>
 30#include <linux/skbuff.h>
 31#include <linux/list.h>
 32#include <linux/ip.h>
 33#include <linux/icmp.h>
 34#include <linux/netdevice.h>
 35#include <linux/jhash.h>
 36#include <linux/random.h>
 37#include <linux/slab.h>
 38#include <net/route.h>
 39#include <net/dst.h>
 40#include <net/sock.h>
 41#include <net/ip.h>
 42#include <net/icmp.h>
 43#include <net/checksum.h>
 44#include <net/inetpeer.h>
 45#include <net/inet_frag.h>
 46#include <linux/tcp.h>
 47#include <linux/udp.h>
 48#include <linux/inet.h>
 49#include <linux/netfilter_ipv4.h>
 50#include <net/inet_ecn.h>
 51
 52/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 53 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 54 * as well. Or notify me, at least. --ANK
 55 */
 56
 57static int sysctl_ipfrag_max_dist __read_mostly = 64;
 58
 59struct ipfrag_skb_cb
 60{
 61	struct inet_skb_parm	h;
 62	int			offset;
 63};
 64
 65#define FRAG_CB(skb)	((struct ipfrag_skb_cb *)((skb)->cb))
 66
 67/* Describe an entry in the "incomplete datagrams" queue. */
 68struct ipq {
 69	struct inet_frag_queue q;
 70
 71	u32		user;
 72	__be32		saddr;
 73	__be32		daddr;
 74	__be16		id;
 75	u8		protocol;
 76	u8		ecn; /* RFC3168 support */
 77	int             iif;
 78	unsigned int    rid;
 79	struct inet_peer *peer;
 80};
 81
 82/* RFC 3168 support :
 83 * We want to check ECN values of all fragments, to detect invalid combinations.
 84 * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
 85 */
 86#define	IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
 87#define	IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
 88#define	IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
 89#define	IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */
 90
 91static inline u8 ip4_frag_ecn(u8 tos)
 92{
 93	return 1 << (tos & INET_ECN_MASK);
 94}
 95
 96/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 97 * Value : 0xff if frame should be dropped.
 98 *         0 or INET_ECN_CE value, to be ORed into the final iph->tos field
 99 */
100static const u8 ip4_frag_ecn_table[16] = {
101	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
102	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
103	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
104	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,
105
106	/* invalid combinations : drop frame */
107	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
108	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
109	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
110	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
111	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
112	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
113	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
114};
115
116static struct inet_frags ip4_frags;
117
118int ip_frag_nqueues(struct net *net)
119{
120	return net->ipv4.frags.nqueues;
121}
122
123int ip_frag_mem(struct net *net)
124{
125	return atomic_read(&net->ipv4.frags.mem);
126}
127
128static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
129			 struct net_device *dev);
130
131struct ip4_create_arg {
132	struct iphdr *iph;
133	u32 user;
134};
135
136static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
137{
138	return jhash_3words((__force u32)id << 16 | prot,
139			    (__force u32)saddr, (__force u32)daddr,
140			    ip4_frags.rnd) & (INETFRAGS_HASHSZ - 1);
141}
142
143static unsigned int ip4_hashfn(struct inet_frag_queue *q)
144{
145	struct ipq *ipq;
146
147	ipq = container_of(q, struct ipq, q);
148	return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
149}
150
151static bool ip4_frag_match(struct inet_frag_queue *q, void *a)
152{
153	struct ipq *qp;
154	struct ip4_create_arg *arg = a;
155
156	qp = container_of(q, struct ipq, q);
157	return	qp->id == arg->iph->id &&
158		qp->saddr == arg->iph->saddr &&
159		qp->daddr == arg->iph->daddr &&
160		qp->protocol == arg->iph->protocol &&
161		qp->user == arg->user;
162}
163
164/* Memory Tracking Functions. */
165static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb)
166{
167	atomic_sub(skb->truesize, &nf->mem);
168	kfree_skb(skb);
169}
170
171static void ip4_frag_init(struct inet_frag_queue *q, void *a)
172{
173	struct ipq *qp = container_of(q, struct ipq, q);
174	struct ip4_create_arg *arg = a;
175
176	qp->protocol = arg->iph->protocol;
177	qp->id = arg->iph->id;
178	qp->ecn = ip4_frag_ecn(arg->iph->tos);
179	qp->saddr = arg->iph->saddr;
180	qp->daddr = arg->iph->daddr;
181	qp->user = arg->user;
182	qp->peer = sysctl_ipfrag_max_dist ?
183		inet_getpeer_v4(arg->iph->saddr, 1) : NULL;
184}
185
186static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
187{
188	struct ipq *qp;
189
190	qp = container_of(q, struct ipq, q);
191	if (qp->peer)
192		inet_putpeer(qp->peer);
193}
194
195
196/* Destruction primitives. */
197
198static __inline__ void ipq_put(struct ipq *ipq)
199{
200	inet_frag_put(&ipq->q, &ip4_frags);
201}
202
203/* Kill ipq entry. It is not destroyed immediately,
204 * because caller (and someone more) holds reference count.
205 */
206static void ipq_kill(struct ipq *ipq)
207{
208	inet_frag_kill(&ipq->q, &ip4_frags);
209}
210
211/* Memory limiting on fragments.  Evictor trashes the oldest
212 * fragment queue until we are back under the threshold.
213 */
214static void ip_evictor(struct net *net)
215{
216	int evicted;
217
218	evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags);
219	if (evicted)
220		IP_ADD_STATS_BH(net, IPSTATS_MIB_REASMFAILS, evicted);
221}
222
223/*
224 * Oops, a fragment queue timed out.  Kill it and send an ICMP reply.
225 */
226static void ip_expire(unsigned long arg)
227{
228	struct ipq *qp;
229	struct net *net;
230
231	qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
232	net = container_of(qp->q.net, struct net, ipv4.frags);
233
234	spin_lock(&qp->q.lock);
235
236	if (qp->q.last_in & INET_FRAG_COMPLETE)
237		goto out;
238
239	ipq_kill(qp);
240
241	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);
242	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
243
244	if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
245		struct sk_buff *head = qp->q.fragments;
246		const struct iphdr *iph;
247		int err;
248
249		rcu_read_lock();
250		head->dev = dev_get_by_index_rcu(net, qp->iif);
251		if (!head->dev)
252			goto out_rcu_unlock;
253
254		/* skb dst is stale, drop it, and perform route lookup again */
255		skb_dst_drop(head);
256		iph = ip_hdr(head);
257		err = ip_route_input_noref(head, iph->daddr, iph->saddr,
258					   iph->tos, head->dev);
259		if (err)
260			goto out_rcu_unlock;
261
262		/*
263		 * Only an end host needs to send an ICMP
264		 * "Fragment Reassembly Timeout" message, per RFC792.
265		 */
266		if (qp->user == IP_DEFRAG_AF_PACKET ||
267		    (qp->user == IP_DEFRAG_CONNTRACK_IN &&
268		     skb_rtable(head)->rt_type != RTN_LOCAL))
269			goto out_rcu_unlock;
270
271
272		/* Send an ICMP "Fragment Reassembly Timeout" message. */
273		icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
274out_rcu_unlock:
275		rcu_read_unlock();
276	}
277out:
278	spin_unlock(&qp->q.lock);
279	ipq_put(qp);
280}
281
282/* Find the correct entry in the "incomplete datagrams" queue for
283 * this IP datagram, and create a new one if nothing is found.
284 */
285static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
286{
287	struct inet_frag_queue *q;
288	struct ip4_create_arg arg;
289	unsigned int hash;
290
291	arg.iph = iph;
292	arg.user = user;
293
294	read_lock(&ip4_frags.lock);
295	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
296
297	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
298	if (q == NULL)
299		goto out_nomem;
300
301	return container_of(q, struct ipq, q);
302
303out_nomem:
304	LIMIT_NETDEBUG(KERN_ERR pr_fmt("ip_frag_create: no memory left !\n"));
305	return NULL;
306}
307
308/* Is the fragment too far ahead to be part of ipq? */
309static inline int ip_frag_too_far(struct ipq *qp)
310{
311	struct inet_peer *peer = qp->peer;
312	unsigned int max = sysctl_ipfrag_max_dist;
313	unsigned int start, end;
314
315	int rc;
316
317	if (!peer || !max)
318		return 0;
319
320	start = qp->rid;
321	end = atomic_inc_return(&peer->rid);
322	qp->rid = end;
323
324	rc = qp->q.fragments && (end - start) > max;
325
326	if (rc) {
327		struct net *net;
328
329		net = container_of(qp->q.net, struct net, ipv4.frags);
330		IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
331	}
332
333	return rc;
334}
335
336static int ip_frag_reinit(struct ipq *qp)
337{
338	struct sk_buff *fp;
339
340	if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
341		atomic_inc(&qp->q.refcnt);
342		return -ETIMEDOUT;
343	}
344
345	fp = qp->q.fragments;
346	do {
347		struct sk_buff *xp = fp->next;
348		frag_kfree_skb(qp->q.net, fp);
349		fp = xp;
350	} while (fp);
351
352	qp->q.last_in = 0;
353	qp->q.len = 0;
354	qp->q.meat = 0;
355	qp->q.fragments = NULL;
356	qp->q.fragments_tail = NULL;
357	qp->iif = 0;
358	qp->ecn = 0;
359
360	return 0;
361}
362
363/* Add new segment to existing queue. */
364static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
365{
366	struct sk_buff *prev, *next;
367	struct net_device *dev;
368	int flags, offset;
369	int ihl, end;
370	int err = -ENOENT;
371	u8 ecn;
372
373	if (qp->q.last_in & INET_FRAG_COMPLETE)
374		goto err;
375
376	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
377	    unlikely(ip_frag_too_far(qp)) &&
378	    unlikely(err = ip_frag_reinit(qp))) {
379		ipq_kill(qp);
380		goto err;
381	}
382
383	ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
384	offset = ntohs(ip_hdr(skb)->frag_off);
385	flags = offset & ~IP_OFFSET;
386	offset &= IP_OFFSET;
387	offset <<= 3;		/* offset is in 8-byte chunks */
388	ihl = ip_hdrlen(skb);
389
390	/* Determine the position of this fragment. */
391	end = offset + skb->len - ihl;
392	err = -EINVAL;
393
394	/* Is this the final fragment? */
395	if ((flags & IP_MF) == 0) {
396		/* If we already have some bits beyond end
397		 * or have different end, the segment is corrupted.
398		 */
399		if (end < qp->q.len ||
400		    ((qp->q.last_in & INET_FRAG_LAST_IN) && end != qp->q.len))
401			goto err;
402		qp->q.last_in |= INET_FRAG_LAST_IN;
403		qp->q.len = end;
404	} else {
405		if (end&7) {
406			end &= ~7;
407			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
408				skb->ip_summed = CHECKSUM_NONE;
409		}
410		if (end > qp->q.len) {
411			/* Some bits beyond end -> corruption. */
412			if (qp->q.last_in & INET_FRAG_LAST_IN)
413				goto err;
414			qp->q.len = end;
415		}
416	}
417	if (end == offset)
418		goto err;
419
420	err = -ENOMEM;
421	if (pskb_pull(skb, ihl) == NULL)
422		goto err;
423
424	err = pskb_trim_rcsum(skb, end - offset);
425	if (err)
426		goto err;
427
428	/* Find out which fragments are in front and at the back of us
429	 * in the chain of fragments so far.  We must know where to put
430	 * this fragment, right?
431	 */
432	prev = qp->q.fragments_tail;
433	if (!prev || FRAG_CB(prev)->offset < offset) {
434		next = NULL;
435		goto found;
436	}
437	prev = NULL;
438	for (next = qp->q.fragments; next != NULL; next = next->next) {
439		if (FRAG_CB(next)->offset >= offset)
440			break;	/* bingo! */
441		prev = next;
442	}
443
444found:
445	/* We found where to put this one.  Check for overlap with
446	 * preceding fragment, and, if needed, align things so that
447	 * any overlaps are eliminated.
448	 */
449	if (prev) {
450		int i = (FRAG_CB(prev)->offset + prev->len) - offset;
451
452		if (i > 0) {
453			offset += i;
454			err = -EINVAL;
455			if (end <= offset)
456				goto err;
457			err = -ENOMEM;
458			if (!pskb_pull(skb, i))
459				goto err;
460			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
461				skb->ip_summed = CHECKSUM_NONE;
462		}
463	}
464
465	err = -ENOMEM;
466
467	while (next && FRAG_CB(next)->offset < end) {
468		int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */
469
470		if (i < next->len) {
471			/* Eat head of the next overlapped fragment
472			 * and leave the loop. The next ones cannot overlap.
473			 */
474			if (!pskb_pull(next, i))
475				goto err;
476			FRAG_CB(next)->offset += i;
477			qp->q.meat -= i;
478			if (next->ip_summed != CHECKSUM_UNNECESSARY)
479				next->ip_summed = CHECKSUM_NONE;
480			break;
481		} else {
482			struct sk_buff *free_it = next;
483
484			/* Old fragment is completely overridden with
485			 * the new one, drop it.
486			 */
487			next = next->next;
488
489			if (prev)
490				prev->next = next;
491			else
492				qp->q.fragments = next;
493
494			qp->q.meat -= free_it->len;
495			frag_kfree_skb(qp->q.net, free_it);
496		}
497	}
498
499	FRAG_CB(skb)->offset = offset;
500
501	/* Insert this fragment in the chain of fragments. */
502	skb->next = next;
503	if (!next)
504		qp->q.fragments_tail = skb;
505	if (prev)
506		prev->next = skb;
507	else
508		qp->q.fragments = skb;
509
510	dev = skb->dev;
511	if (dev) {
512		qp->iif = dev->ifindex;
513		skb->dev = NULL;
514	}
515	qp->q.stamp = skb->tstamp;
516	qp->q.meat += skb->len;
517	qp->ecn |= ecn;
518	atomic_add(skb->truesize, &qp->q.net->mem);
519	if (offset == 0)
520		qp->q.last_in |= INET_FRAG_FIRST_IN;
521
522	if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
523	    qp->q.meat == qp->q.len)
524		return ip_frag_reasm(qp, prev, dev);
525
526	write_lock(&ip4_frags.lock);
527	list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list);
528	write_unlock(&ip4_frags.lock);
529	return -EINPROGRESS;
530
531err:
532	kfree_skb(skb);
533	return err;
534}
535
536
537/* Build a new IP datagram from all its fragments. */
538
539static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
540			 struct net_device *dev)
541{
542	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
543	struct iphdr *iph;
544	struct sk_buff *fp, *head = qp->q.fragments;
545	int len;
546	int ihlen;
547	int err;
548	int sum_truesize;
549	u8 ecn;
550
551	ipq_kill(qp);
552
553	ecn = ip4_frag_ecn_table[qp->ecn];
554	if (unlikely(ecn == 0xff)) {
555		err = -EINVAL;
556		goto out_fail;
557	}
558	/* Make the one we just received the head. */
559	if (prev) {
560		head = prev->next;
561		fp = skb_clone(head, GFP_ATOMIC);
562		if (!fp)
563			goto out_nomem;
564
565		fp->next = head->next;
566		if (!fp->next)
567			qp->q.fragments_tail = fp;
568		prev->next = fp;
569
570		skb_morph(head, qp->q.fragments);
571		head->next = qp->q.fragments->next;
572
573		consume_skb(qp->q.fragments);
574		qp->q.fragments = head;
575	}
576
577	WARN_ON(head == NULL);
578	WARN_ON(FRAG_CB(head)->offset != 0);
579
580	/* Allocate a new buffer for the datagram. */
581	ihlen = ip_hdrlen(head);
582	len = ihlen + qp->q.len;
583
584	err = -E2BIG;
585	if (len > 65535)
586		goto out_oversize;
587
588	/* Head of list must not be cloned. */
589	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
590		goto out_nomem;
591
592	/* If the first fragment is fragmented itself, we split
593	 * it into two chunks: the first with data and paged part
594	 * and the second, holding only fragments. */
595	if (skb_has_frag_list(head)) {
596		struct sk_buff *clone;
597		int i, plen = 0;
598
599		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
600			goto out_nomem;
601		clone->next = head->next;
602		head->next = clone;
603		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
604		skb_frag_list_init(head);
605		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
606			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
607		clone->len = clone->data_len = head->data_len - plen;
608		head->data_len -= clone->len;
609		head->len -= clone->len;
610		clone->csum = 0;
611		clone->ip_summed = head->ip_summed;
612		atomic_add(clone->truesize, &qp->q.net->mem);
613	}
614
615	skb_push(head, head->data - skb_network_header(head));
616
617	sum_truesize = head->truesize;
618	for (fp = head->next; fp;) {
619		bool headstolen;
620		int delta;
621		struct sk_buff *next = fp->next;
622
623		sum_truesize += fp->truesize;
624		if (head->ip_summed != fp->ip_summed)
625			head->ip_summed = CHECKSUM_NONE;
626		else if (head->ip_summed == CHECKSUM_COMPLETE)
627			head->csum = csum_add(head->csum, fp->csum);
628
629		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
630			kfree_skb_partial(fp, headstolen);
631		} else {
632			if (!skb_shinfo(head)->frag_list)
633				skb_shinfo(head)->frag_list = fp;
634			head->data_len += fp->len;
635			head->len += fp->len;
636			head->truesize += fp->truesize;
637		}
638		fp = next;
639	}
640	atomic_sub(sum_truesize, &qp->q.net->mem);
641
642	head->next = NULL;
643	head->dev = dev;
644	head->tstamp = qp->q.stamp;
645
646	iph = ip_hdr(head);
647	iph->frag_off = 0;
648	iph->tot_len = htons(len);
649	iph->tos |= ecn;
650	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
651	qp->q.fragments = NULL;
652	qp->q.fragments_tail = NULL;
653	return 0;
654
655out_nomem:
656	LIMIT_NETDEBUG(KERN_ERR pr_fmt("queue_glue: no memory for gluing queue %p\n"),
657		       qp);
658	err = -ENOMEM;
659	goto out_fail;
660out_oversize:
661	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
662out_fail:
663	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
664	return err;
665}
666
667/* Process an incoming IP datagram fragment. */
668int ip_defrag(struct sk_buff *skb, u32 user)
669{
670	struct ipq *qp;
671	struct net *net;
672
673	net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev);
674	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
675
676	/* Start by cleaning up the memory. */
677	if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh)
678		ip_evictor(net);
679
680	/* Lookup (or create) queue header */
681	if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) {
682		int ret;
683
684		spin_lock(&qp->q.lock);
685
686		ret = ip_frag_queue(qp, skb);
687
688		spin_unlock(&qp->q.lock);
689		ipq_put(qp);
690		return ret;
691	}
692
693	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
694	kfree_skb(skb);
695	return -ENOMEM;
696}
697EXPORT_SYMBOL(ip_defrag);
698
699struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
700{
701	const struct iphdr *iph;
702	u32 len;
703
704	if (skb->protocol != htons(ETH_P_IP))
705		return skb;
706
707	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
708		return skb;
709
710	iph = ip_hdr(skb);
711	if (iph->ihl < 5 || iph->version != 4)
712		return skb;
713	if (!pskb_may_pull(skb, iph->ihl*4))
714		return skb;
715	iph = ip_hdr(skb);
716	len = ntohs(iph->tot_len);
717	if (skb->len < len || len < (iph->ihl * 4))
718		return skb;
719
720	if (ip_is_fragment(ip_hdr(skb))) {
721		skb = skb_share_check(skb, GFP_ATOMIC);
722		if (skb) {
723			if (pskb_trim_rcsum(skb, len))
724				return skb;
725			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
726			if (ip_defrag(skb, user))
727				return NULL;
728			skb->rxhash = 0;
729		}
730	}
731	return skb;
732}
733EXPORT_SYMBOL(ip_check_defrag);
734
735#ifdef CONFIG_SYSCTL
736static int zero;
737
738static struct ctl_table ip4_frags_ns_ctl_table[] = {
739	{
740		.procname	= "ipfrag_high_thresh",
741		.data		= &init_net.ipv4.frags.high_thresh,
742		.maxlen		= sizeof(int),
743		.mode		= 0644,
744		.proc_handler	= proc_dointvec
745	},
746	{
747		.procname	= "ipfrag_low_thresh",
748		.data		= &init_net.ipv4.frags.low_thresh,
749		.maxlen		= sizeof(int),
750		.mode		= 0644,
751		.proc_handler	= proc_dointvec
752	},
753	{
754		.procname	= "ipfrag_time",
755		.data		= &init_net.ipv4.frags.timeout,
756		.maxlen		= sizeof(int),
757		.mode		= 0644,
758		.proc_handler	= proc_dointvec_jiffies,
759	},
760	{ }
761};
762
763static struct ctl_table ip4_frags_ctl_table[] = {
764	{
765		.procname	= "ipfrag_secret_interval",
766		.data		= &ip4_frags.secret_interval,
767		.maxlen		= sizeof(int),
768		.mode		= 0644,
769		.proc_handler	= proc_dointvec_jiffies,
770	},
771	{
772		.procname	= "ipfrag_max_dist",
773		.data		= &sysctl_ipfrag_max_dist,
774		.maxlen		= sizeof(int),
775		.mode		= 0644,
776		.proc_handler	= proc_dointvec_minmax,
777		.extra1		= &zero
778	},
779	{ }
780};
781
782static int __net_init ip4_frags_ns_ctl_register(struct net *net)
783{
784	struct ctl_table *table;
785	struct ctl_table_header *hdr;
786
787	table = ip4_frags_ns_ctl_table;
788	if (!net_eq(net, &init_net)) {
789		table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
790		if (table == NULL)
791			goto err_alloc;
792
793		table[0].data = &net->ipv4.frags.high_thresh;
794		table[1].data = &net->ipv4.frags.low_thresh;
795		table[2].data = &net->ipv4.frags.timeout;
796	}
797
798	hdr = register_net_sysctl(net, "net/ipv4", table);
799	if (hdr == NULL)
800		goto err_reg;
801
802	net->ipv4.frags_hdr = hdr;
803	return 0;
804
805err_reg:
806	if (!net_eq(net, &init_net))
807		kfree(table);
808err_alloc:
809	return -ENOMEM;
810}
811
812static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
813{
814	struct ctl_table *table;
815
816	table = net->ipv4.frags_hdr->ctl_table_arg;
817	unregister_net_sysctl_table(net->ipv4.frags_hdr);
818	kfree(table);
819}
820
821static void ip4_frags_ctl_register(void)
822{
823	register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
824}
825#else
826static inline int ip4_frags_ns_ctl_register(struct net *net)
827{
828	return 0;
829}
830
831static inline void ip4_frags_ns_ctl_unregister(struct net *net)
832{
833}
834
835static inline void ip4_frags_ctl_register(void)
836{
837}
838#endif
839
840static int __net_init ipv4_frags_init_net(struct net *net)
841{
842	/*
843	 * Fragment cache limits. We will commit 256K at one time. Should we
844	 * cross that limit we will prune down to 192K. This should cope with
845	 * even the most extreme cases without allowing an attacker to
846	 * measurably harm machine performance.
847	 */
848	net->ipv4.frags.high_thresh = 256 * 1024;
849	net->ipv4.frags.low_thresh = 192 * 1024;
850	/*
851	 * Important NOTE! Fragment queue must be destroyed before MSL expires.
852	 * RFC 791 is wrong in proposing to prolong the timer on each
853	 * fragment arrival by the TTL.
854	 */
855	net->ipv4.frags.timeout = IP_FRAG_TIME;
856
857	inet_frags_init_net(&net->ipv4.frags);
858
859	return ip4_frags_ns_ctl_register(net);
860}
861
862static void __net_exit ipv4_frags_exit_net(struct net *net)
863{
864	ip4_frags_ns_ctl_unregister(net);
865	inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
866}
867
868static struct pernet_operations ip4_frags_ops = {
869	.init = ipv4_frags_init_net,
870	.exit = ipv4_frags_exit_net,
871};
872
873void __init ipfrag_init(void)
874{
875	ip4_frags_ctl_register();
876	register_pernet_subsys(&ip4_frags_ops);
877	ip4_frags.hashfn = ip4_hashfn;
878	ip4_frags.constructor = ip4_frag_init;
879	ip4_frags.destructor = ip4_frag_free;
880	ip4_frags.skb_free = NULL;
881	ip4_frags.qsize = sizeof(struct ipq);
882	ip4_frags.match = ip4_frag_match;
883	ip4_frags.frag_expire = ip_expire;
884	ip4_frags.secret_interval = 10 * 60 * HZ;
885	inet_frags_init(&ip4_frags);
886}
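
As a closing illustration of the RFC 3168 handling in the v3.5.6 listing (ip4_frag_ecn() at lines 91-94, ip4_frag_ecn_table at lines 100-114): each fragment's ECN codepoint is mapped to one bit, ip_frag_queue() ORs those bits into qp->ecn, and ip_frag_reasm() indexes the 16-entry table, where 0xff means the reassembled datagram must be dropped. The sketch below is an editor's standalone rendering of that logic, not kernel code; the INET_ECN_* codepoints (Not-ECT=0, ECT(1)=1, ECT(0)=2, CE=3, mask 3) follow <net/inet_ecn.h>.

#include <stdint.h>
#include <stdio.h>

#define INET_ECN_CE	3	/* congestion experienced */
#define INET_ECN_MASK	3	/* low two bits of TOS carry the ECN codepoint */

#define IPFRAG_ECN_NOT_ECT	0x01
#define IPFRAG_ECN_ECT_1	0x02
#define IPFRAG_ECN_ECT_0	0x04
#define IPFRAG_ECN_CE		0x08

/* One bit per ECN codepoint seen, as in ip4_frag_ecn(). */
static uint8_t ip4_frag_ecn(uint8_t tos)
{
	return 1 << (tos & INET_ECN_MASK);
}

/* Same table as the listing: 0xff means drop the reassembled frame. */
static const uint8_t ecn_table[16] = {
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE]			= 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0]		= 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1]		= 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};

int main(void)
{
	/* Two fragments: ECT(0) (tos & 3 == 2) and CE (tos & 3 == 3). */
	uint8_t acc = ip4_frag_ecn(0x02) | ip4_frag_ecn(0x03);

	printf("acc=%#x -> %#x (CE survives reassembly)\n", acc, ecn_table[acc]);

	/* Not-ECT mixed with CE is an invalid combination: drop. */
	acc = ip4_frag_ecn(0x00) | ip4_frag_ecn(0x03);
	printf("acc=%#x -> %#x (drop)\n", acc, ecn_table[acc]);
	return 0;
}

A 0xff lookup corresponds exactly to the err = -EINVAL path at lines 553-557 of the listing above.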