v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
  4 *		operating system.  INET is implemented using the  BSD Socket
  5 *		interface as the means of communication with the user level.
  6 *
  7 *		The IP fragmentation functionality.
  8 *
  9 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 10 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
 11 *
 12 * Fixes:
 13 *		Alan Cox	:	Split from ip.c , see ip_input.c for history.
 14 *		David S. Miller :	Begin massive cleanup...
 15 *		Andi Kleen	:	Add sysctls.
 16 *		xxxx		:	Overlapfrag bug.
 17 *		Ultima          :       ip_expire() kernel panic.
 18 *		Bill Hawes	:	Frag accounting and evictor fixes.
 19 *		John McDonald	:	0 length frag bug.
 20 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 21 *		Patrick McHardy :	LRU queue of frag heads for evictor.
 22 */
 23
 24#define pr_fmt(fmt) "IPv4: " fmt
 25
 26#include <linux/compiler.h>
 27#include <linux/module.h>
 28#include <linux/types.h>
 29#include <linux/mm.h>
 30#include <linux/jiffies.h>
 31#include <linux/skbuff.h>
 32#include <linux/list.h>
 33#include <linux/ip.h>
 34#include <linux/icmp.h>
 35#include <linux/netdevice.h>
 36#include <linux/jhash.h>
 37#include <linux/random.h>
 38#include <linux/slab.h>
 39#include <net/route.h>
 40#include <net/dst.h>
 41#include <net/sock.h>
 42#include <net/ip.h>
 43#include <net/icmp.h>
 44#include <net/checksum.h>
 45#include <net/inetpeer.h>
 46#include <net/inet_frag.h>
 47#include <linux/tcp.h>
 48#include <linux/udp.h>
 49#include <linux/inet.h>
 50#include <linux/netfilter_ipv4.h>
 51#include <net/inet_ecn.h>
 52#include <net/l3mdev.h>
 53
 54/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 55 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 56 * as well. Or notify me, at least. --ANK
 57 */
 58static const char ip_frag_cache_name[] = "ip4-frags";
 59
 60/* Describe an entry in the "incomplete datagrams" queue. */
 61struct ipq {
 62	struct inet_frag_queue q;
 63
 64	u8		ecn; /* RFC3168 support */
 65	u16		max_df_size; /* largest frag with DF set seen */
 66	int             iif;
 67	unsigned int    rid;
 68	struct inet_peer *peer;
 69};
 70
 71static u8 ip4_frag_ecn(u8 tos)
 72{
 73	return 1 << (tos & INET_ECN_MASK);
 74}
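
A quick illustration of the mapping (editor's sketch; the INET_ECN_*
codepoints 0..3 come from <net/inet_ecn.h>): ip4_frag_ecn() turns the two
ECN bits of the TOS byte into a one-hot mask, so qp->ecn can simply OR
together the state of every fragment seen, and ip_frag_reasm() can later
fold the accumulated mask through ip_frag_ecn_table[] (shared inet_frag
code in v4.17; reproduced inline as ip4_frag_ecn_table[] in the v3.5.6
listing further down):

	ip4_frag_ecn(INET_ECN_NOT_ECT) == 0x01	/* IPFRAG_ECN_NOT_ECT */
	ip4_frag_ecn(INET_ECN_ECT_1)   == 0x02	/* IPFRAG_ECN_ECT_1 */
	ip4_frag_ecn(INET_ECN_ECT_0)   == 0x04	/* IPFRAG_ECN_ECT_0 */
	ip4_frag_ecn(INET_ECN_CE)      == 0x08	/* IPFRAG_ECN_CE */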
 75
 76static struct inet_frags ip4_frags;
 77
 78static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 79			 struct net_device *dev);
 80
 81
 82static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
 83{
 84	struct ipq *qp = container_of(q, struct ipq, q);
 85	struct netns_ipv4 *ipv4 = container_of(q->net, struct netns_ipv4,
 86					       frags);
 87	struct net *net = container_of(ipv4, struct net, ipv4);
 88
 89	const struct frag_v4_compare_key *key = a;
 90
 91	q->key.v4 = *key;
 92	qp->ecn = 0;
 93	qp->peer = q->net->max_dist ?
 94		inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif, 1) :
 95		NULL;
 96}
 97
 98static void ip4_frag_free(struct inet_frag_queue *q)
 99{
100	struct ipq *qp;
101
102	qp = container_of(q, struct ipq, q);
103	if (qp->peer)
104		inet_putpeer(qp->peer);
105}
106
107
108/* Destruction primitives. */
109
110static void ipq_put(struct ipq *ipq)
111{
112	inet_frag_put(&ipq->q);
113}
114
115/* Kill ipq entry. It is not destroyed immediately,
116 *		because the caller (and possibly others) still holds a reference.
117 */
118static void ipq_kill(struct ipq *ipq)
119{
120	inet_frag_kill(&ipq->q);
121}
122
123static bool frag_expire_skip_icmp(u32 user)
124{
125	return user == IP_DEFRAG_AF_PACKET ||
126	       ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
127					 __IP_DEFRAG_CONNTRACK_IN_END) ||
128	       ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_BRIDGE_IN,
129					 __IP_DEFRAG_CONNTRACK_BRIDGE_IN);
130}
131
132/*
133 * Oops, a fragment queue timed out.  Kill it and send an ICMP reply.
134 */
135static void ip_expire(struct timer_list *t)
136{
137	struct inet_frag_queue *frag = from_timer(frag, t, timer);
138	const struct iphdr *iph;
139	struct sk_buff *head;
140	struct net *net;
141	struct ipq *qp;
142	int err;
143
144	qp = container_of(frag, struct ipq, q);
145	net = container_of(qp->q.net, struct net, ipv4.frags);
146
147	rcu_read_lock();
148	spin_lock(&qp->q.lock);
149
150	if (qp->q.flags & INET_FRAG_COMPLETE)
151		goto out;
152
153	ipq_kill(qp);
154	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
155
156	head = qp->q.fragments;
157
158	__IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);
159
160	if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !head)
161		goto out;
162
163	head->dev = dev_get_by_index_rcu(net, qp->iif);
164	if (!head->dev)
165		goto out;
166
167
168	/* skb has no dst, perform route lookup again */
169	iph = ip_hdr(head);
170	err = ip_route_input_noref(head, iph->daddr, iph->saddr,
171					   iph->tos, head->dev);
172	if (err)
173		goto out;
174
175	/* Only an end host needs to send an ICMP
176	 * "Fragment Reassembly Timeout" message, per RFC792.
177	 */
178	if (frag_expire_skip_icmp(qp->q.key.v4.user) &&
179	    (skb_rtable(head)->rt_type != RTN_LOCAL))
180		goto out;
181
182	skb_get(head);
183	spin_unlock(&qp->q.lock);
184	icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
185	kfree_skb(head);
186	goto out_rcu_unlock;
187
188out:
189	spin_unlock(&qp->q.lock);
190out_rcu_unlock:
191	rcu_read_unlock();
192	ipq_put(qp);
193}
194
195/* Find the correct entry in the "incomplete datagrams" queue for
196 * this IP datagram, and create a new one if nothing is found.
197 */
198static struct ipq *ip_find(struct net *net, struct iphdr *iph,
199			   u32 user, int vif)
200{
201	struct frag_v4_compare_key key = {
202		.saddr = iph->saddr,
203		.daddr = iph->daddr,
204		.user = user,
205		.vif = vif,
206		.id = iph->id,
207		.protocol = iph->protocol,
208	};
209	struct inet_frag_queue *q;
210
211	q = inet_frag_find(&net->ipv4.frags, &key);
212	if (!q)
213		return NULL;
214
215	return container_of(q, struct ipq, q);
216}
217
218/* Is the fragment too far ahead to be part of ipq? */
219static int ip_frag_too_far(struct ipq *qp)
220{
221	struct inet_peer *peer = qp->peer;
222	unsigned int max = qp->q.net->max_dist;
223	unsigned int start, end;
224
225	int rc;
226
227	if (!peer || !max)
228		return 0;
229
230	start = qp->rid;
231	end = atomic_inc_return(&peer->rid);
232	qp->rid = end;
233
234	rc = qp->q.fragments && (end - start) > max;
235
236	if (rc) {
237		struct net *net;
238
239		net = container_of(qp->q.net, struct net, ipv4.frags);
240		__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
241	}
242
243	return rc;
244}
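
A worked example of the distance check (editor's sketch, using the default
max_dist of 64 set in ipv4_frags_init_net() below): peer->rid counts every
fragment seen from this source address, while qp->rid remembers the count
as of this queue's previous fragment.

	start = qp->rid;			/* say, 100 */
	end   = atomic_inc_return(&peer->rid);	/* say, 180: 80 fragments
						 * from this peer arrived
						 * in between */
	/* (end - start) == 80 > max (64): the queue is judged stale, and
	 * ip_frag_queue() flushes it via ip_frag_reinit() before taking
	 * the new fragment */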
245
246static int ip_frag_reinit(struct ipq *qp)
247{
248	struct sk_buff *fp;
249	unsigned int sum_truesize = 0;
250
251	if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
252		refcount_inc(&qp->q.refcnt);
253		return -ETIMEDOUT;
254	}
255
256	fp = qp->q.fragments;
257	do {
258		struct sk_buff *xp = fp->next;
259
260		sum_truesize += fp->truesize;
261		kfree_skb(fp);
262		fp = xp;
263	} while (fp);
264	sub_frag_mem_limit(qp->q.net, sum_truesize);
265
266	qp->q.flags = 0;
267	qp->q.len = 0;
268	qp->q.meat = 0;
269	qp->q.fragments = NULL;
270	qp->q.fragments_tail = NULL;
271	qp->iif = 0;
272	qp->ecn = 0;
273
274	return 0;
275}
276
277/* Add new segment to existing queue. */
278static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
279{
280	struct sk_buff *prev, *next;
281	struct net_device *dev;
282	unsigned int fragsize;
283	int flags, offset;
284	int ihl, end;
285	int err = -ENOENT;
286	u8 ecn;
287
288	if (qp->q.flags & INET_FRAG_COMPLETE)
289		goto err;
290
291	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
292	    unlikely(ip_frag_too_far(qp)) &&
293	    unlikely(err = ip_frag_reinit(qp))) {
294		ipq_kill(qp);
295		goto err;
296	}
297
298	ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
299	offset = ntohs(ip_hdr(skb)->frag_off);
300	flags = offset & ~IP_OFFSET;
301	offset &= IP_OFFSET;
302	offset <<= 3;		/* offset is in 8-byte chunks */
303	ihl = ip_hdrlen(skb);
304
305	/* Determine the position of this fragment. */
306	end = offset + skb->len - skb_network_offset(skb) - ihl;
307	err = -EINVAL;
308
309	/* Is this the final fragment? */
310	if ((flags & IP_MF) == 0) {
311		/* If we already have some bits beyond end
312		 * or have different end, the segment is corrupted.
313		 */
314		if (end < qp->q.len ||
315		    ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
316			goto err;
317		qp->q.flags |= INET_FRAG_LAST_IN;
318		qp->q.len = end;
319	} else {
320		if (end&7) {
321			end &= ~7;
322			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
323				skb->ip_summed = CHECKSUM_NONE;
324		}
325		if (end > qp->q.len) {
326			/* Some bits beyond end -> corruption. */
327			if (qp->q.flags & INET_FRAG_LAST_IN)
328				goto err;
329			qp->q.len = end;
330		}
331	}
332	if (end == offset)
333		goto err;
334
335	err = -ENOMEM;
336	if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
337		goto err;
338
339	err = pskb_trim_rcsum(skb, end - offset);
340	if (err)
341		goto err;
342
343	/* Find out which fragments are in front and at the back of us
344	 * in the chain of fragments so far.  We must know where to put
345	 * this fragment, right?
346	 */
347	prev = qp->q.fragments_tail;
348	if (!prev || prev->ip_defrag_offset < offset) {
349		next = NULL;
350		goto found;
351	}
352	prev = NULL;
353	for (next = qp->q.fragments; next != NULL; next = next->next) {
354		if (next->ip_defrag_offset >= offset)
355			break;	/* bingo! */
356		prev = next;
357	}
358
359found:
360	/* We found where to put this one.  Check for overlap with
361	 * preceding fragment, and, if needed, align things so that
362	 * any overlaps are eliminated.
363	 */
364	if (prev) {
365		int i = (prev->ip_defrag_offset + prev->len) - offset;
366
367		if (i > 0) {
368			offset += i;
369			err = -EINVAL;
370			if (end <= offset)
371				goto err;
372			err = -ENOMEM;
373			if (!pskb_pull(skb, i))
374				goto err;
375			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
376				skb->ip_summed = CHECKSUM_NONE;
377		}
378	}
379
380	err = -ENOMEM;
381
382	while (next && next->ip_defrag_offset < end) {
383		int i = end - next->ip_defrag_offset; /* overlap is 'i' bytes */
384
385		if (i < next->len) {
386			/* Eat head of the next overlapped fragment
387			 * and leave the loop. The next ones cannot overlap.
388			 */
389			if (!pskb_pull(next, i))
390				goto err;
391			next->ip_defrag_offset += i;
392			qp->q.meat -= i;
393			if (next->ip_summed != CHECKSUM_UNNECESSARY)
394				next->ip_summed = CHECKSUM_NONE;
395			break;
396		} else {
397			struct sk_buff *free_it = next;
398
399			/* Old fragment is completely overridden by
400			 * the new one; drop it.
401			 */
402			next = next->next;
403
404			if (prev)
405				prev->next = next;
406			else
407				qp->q.fragments = next;
408
409			qp->q.meat -= free_it->len;
410			sub_frag_mem_limit(qp->q.net, free_it->truesize);
411			kfree_skb(free_it);
412		}
413	}
414
415	/* Note : skb->ip_defrag_offset and skb->dev share the same location */
416	dev = skb->dev;
417	if (dev)
418		qp->iif = dev->ifindex;
419	/* Make sure the compiler won't play silly aliasing games */
420	barrier();
421	skb->ip_defrag_offset = offset;
422
423	/* Insert this fragment in the chain of fragments. */
424	skb->next = next;
425	if (!next)
426		qp->q.fragments_tail = skb;
427	if (prev)
428		prev->next = skb;
429	else
430		qp->q.fragments = skb;
431
432	qp->q.stamp = skb->tstamp;
433	qp->q.meat += skb->len;
434	qp->ecn |= ecn;
435	add_frag_mem_limit(qp->q.net, skb->truesize);
436	if (offset == 0)
437		qp->q.flags |= INET_FRAG_FIRST_IN;
438
439	fragsize = skb->len + ihl;
440
441	if (fragsize > qp->q.max_size)
442		qp->q.max_size = fragsize;
443
444	if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
445	    fragsize > qp->max_df_size)
446		qp->max_df_size = fragsize;
447
448	if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
449	    qp->q.meat == qp->q.len) {
450		unsigned long orefdst = skb->_skb_refdst;
451
452		skb->_skb_refdst = 0UL;
453		err = ip_frag_reasm(qp, prev, dev);
454		skb->_skb_refdst = orefdst;
455		return err;
456	}
457
458	skb_dst_drop(skb);
459	return -EINPROGRESS;
460
461err:
462	kfree_skb(skb);
463	return err;
464}
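
The frag_off decoding at the top of ip_frag_queue() unpacks the flags
(IP_MF, IP_DF) and a 13-bit offset counted in 8-byte units from one 16-bit
header field. A self-contained user-space sketch of the same arithmetic
(the mask values match <net/ip.h>; the program itself is illustrative):

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>

	#define IP_OFFSET 0x1FFF	/* 13-bit offset, in 8-byte units */
	#define IP_MF	  0x2000	/* More Fragments */
	#define IP_DF	  0x4000	/* Don't Fragment */

	int main(void)
	{
		uint16_t frag_off = htons(0x2005);  /* MF set, offset 5 */
		int offset = ntohs(frag_off);
		int flags = offset & ~IP_OFFSET;

		offset = (offset & IP_OFFSET) << 3; /* 8-byte units -> bytes */
		printf("MF=%d DF=%d offset=%d bytes\n",
		       !!(flags & IP_MF), !!(flags & IP_DF), offset);
		/* prints: MF=1 DF=0 offset=40 bytes */
		return 0;
	}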
465
466
467/* Build a new IP datagram from all its fragments. */
468
469static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
470			 struct net_device *dev)
471{
472	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
473	struct iphdr *iph;
474	struct sk_buff *fp, *head = qp->q.fragments;
475	int len;
476	int ihlen;
477	int err;
478	u8 ecn;
479
480	ipq_kill(qp);
481
482	ecn = ip_frag_ecn_table[qp->ecn];
483	if (unlikely(ecn == 0xff)) {
484		err = -EINVAL;
485		goto out_fail;
486	}
487	/* Make the one we just received the head. */
488	if (prev) {
489		head = prev->next;
490		fp = skb_clone(head, GFP_ATOMIC);
491		if (!fp)
492			goto out_nomem;
493
494		fp->next = head->next;
495		if (!fp->next)
496			qp->q.fragments_tail = fp;
497		prev->next = fp;
498
499		skb_morph(head, qp->q.fragments);
500		head->next = qp->q.fragments->next;
501
502		consume_skb(qp->q.fragments);
503		qp->q.fragments = head;
504	}
505
506	WARN_ON(!head);
507	WARN_ON(head->ip_defrag_offset != 0);
508
509	/* Allocate a new buffer for the datagram. */
510	ihlen = ip_hdrlen(head);
511	len = ihlen + qp->q.len;
512
513	err = -E2BIG;
514	if (len > 65535)
515		goto out_oversize;
516
517	/* Head of list must not be cloned. */
518	if (skb_unclone(head, GFP_ATOMIC))
519		goto out_nomem;
520
521	/* If the first fragment is fragmented itself, we split
522	 * it into two chunks: the first with the data and paged part,
523	 * and the second holding only fragments. */
524	if (skb_has_frag_list(head)) {
525		struct sk_buff *clone;
526		int i, plen = 0;
527
528		clone = alloc_skb(0, GFP_ATOMIC);
529		if (!clone)
530			goto out_nomem;
531		clone->next = head->next;
532		head->next = clone;
533		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
534		skb_frag_list_init(head);
535		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
536			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
537		clone->len = clone->data_len = head->data_len - plen;
538		head->data_len -= clone->len;
539		head->len -= clone->len;
540		clone->csum = 0;
541		clone->ip_summed = head->ip_summed;
542		add_frag_mem_limit(qp->q.net, clone->truesize);
543	}
544
545	skb_shinfo(head)->frag_list = head->next;
546	skb_push(head, head->data - skb_network_header(head));
547
548	for (fp=head->next; fp; fp = fp->next) {
549		head->data_len += fp->len;
550		head->len += fp->len;
551		if (head->ip_summed != fp->ip_summed)
552			head->ip_summed = CHECKSUM_NONE;
553		else if (head->ip_summed == CHECKSUM_COMPLETE)
554			head->csum = csum_add(head->csum, fp->csum);
555		head->truesize += fp->truesize;
556	}
557	sub_frag_mem_limit(qp->q.net, head->truesize);
558
559	head->next = NULL;
560	head->dev = dev;
561	head->tstamp = qp->q.stamp;
562	IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
563
564	iph = ip_hdr(head);
565	iph->tot_len = htons(len);
566	iph->tos |= ecn;
567
568	/* When we set IP_DF on a refragmented skb we must also force a
569	 * call to ip_fragment to avoid forwarding a DF-skb of size s while
570	 * original sender only sent fragments of size f (where f < s).
571	 *
572	 * We only set DF/IPSKB_FRAG_PMTU if such DF fragment was the largest
573	 * frag seen to avoid sending tiny DF-fragments in case skb was built
574	 * from one very small df-fragment and one large non-df frag.
575	 */
576	if (qp->max_df_size == qp->q.max_size) {
577		IPCB(head)->flags |= IPSKB_FRAG_PMTU;
578		iph->frag_off = htons(IP_DF);
579	} else {
580		iph->frag_off = 0;
581	}
582
583	ip_send_check(iph);
584
585	__IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
586	qp->q.fragments = NULL;
587	qp->q.fragments_tail = NULL;
588	return 0;
589
590out_nomem:
591	net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp);
592	err = -ENOMEM;
593	goto out_fail;
594out_oversize:
595	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr);
596out_fail:
597	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
598	return err;
599}
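
Two worked entries from the ECN fold above (editor's note; the table
contents are those of ip4_frag_ecn_table[] in the v3.5.6 listing below):

	/* CE on one fragment, ECT(0) on the others:
	 *	qp->ecn == IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0	(0x0c)
	 *	ip_frag_ecn_table[0x0c] == INET_ECN_CE
	 *	-> the reassembled datagram is marked CE
	 * Not-ECT mixed with CE is invalid per RFC 3168 5.3:
	 *	qp->ecn == IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE	(0x09)
	 *	ip_frag_ecn_table[0x09] == 0xff
	 *	-> reassembly fails with -EINVAL
	 */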
600
601/* Process an incoming IP datagram fragment. */
602int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
603{
604	struct net_device *dev = skb->dev ? : skb_dst(skb)->dev;
605	int vif = l3mdev_master_ifindex_rcu(dev);
606	struct ipq *qp;
607
608	__IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS);
609	skb_orphan(skb);
610
611	/* Lookup (or create) queue header */
612	qp = ip_find(net, ip_hdr(skb), user, vif);
613	if (qp) {
614		int ret;
615
616		spin_lock(&qp->q.lock);
617
618		ret = ip_frag_queue(qp, skb);
619
620		spin_unlock(&qp->q.lock);
621		ipq_put(qp);
622		return ret;
623	}
624
625	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
626	kfree_skb(skb);
627	return -ENOMEM;
628}
629EXPORT_SYMBOL(ip_defrag);
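
A minimal sketch of the calling convention (the surrounding function is
hypothetical; in-tree users include ip_local_deliver() and the netfilter
defrag hook). ip_defrag() always consumes the skb: 0 means it now holds
the complete datagram, -EINPROGRESS means more fragments are still
expected, and any other negative errno means it was dropped and freed:

	static int example_deliver(struct net *net, struct sk_buff *skb)
	{
		if (ip_is_fragment(ip_hdr(skb))) {
			int err = ip_defrag(net, skb, IP_DEFRAG_LOCAL_DELIVER);

			if (err)
				return err;	/* queued or dropped */
		}
		/* skb now holds a whole datagram */
		return 0;
	}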
630
631struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
632{
633	struct iphdr iph;
634	int netoff;
635	u32 len;
636
637	if (skb->protocol != htons(ETH_P_IP))
638		return skb;
639
640	netoff = skb_network_offset(skb);
641
642	if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
643		return skb;
644
645	if (iph.ihl < 5 || iph.version != 4)
646		return skb;
647
648	len = ntohs(iph.tot_len);
649	if (skb->len < netoff + len || len < (iph.ihl * 4))
650		return skb;
651
652	if (ip_is_fragment(&iph)) {
653		skb = skb_share_check(skb, GFP_ATOMIC);
654		if (skb) {
655			if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
656				return skb;
657			if (pskb_trim_rcsum(skb, netoff + len))
658				return skb;
659			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
660			if (ip_defrag(net, skb, user))
661				return NULL;
662			skb_clear_hash(skb);
663		}
664	}
665	return skb;
666}
667EXPORT_SYMBOL(ip_check_defrag);
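
ip_check_defrag() is the tap-friendly variant: it copes with shared skbs
by cloning them, feeds true fragments to ip_defrag(), and returns NULL
while reassembly is still in progress. A short sketch modeled on the
AF_PACKET fanout defrag mode (the surrounding code is hypothetical):

	skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
	if (!skb)
		return;	/* fragment queued; nothing to process yet */
	/* skb is a non-fragment or a freshly reassembled datagram */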
668
669#ifdef CONFIG_SYSCTL
670static int dist_min;
671
672static struct ctl_table ip4_frags_ns_ctl_table[] = {
673	{
674		.procname	= "ipfrag_high_thresh",
675		.data		= &init_net.ipv4.frags.high_thresh,
676		.maxlen		= sizeof(unsigned long),
677		.mode		= 0644,
678		.proc_handler	= proc_doulongvec_minmax,
679		.extra1		= &init_net.ipv4.frags.low_thresh
680	},
681	{
682		.procname	= "ipfrag_low_thresh",
683		.data		= &init_net.ipv4.frags.low_thresh,
684		.maxlen		= sizeof(unsigned long),
685		.mode		= 0644,
686		.proc_handler	= proc_doulongvec_minmax,
687		.extra2		= &init_net.ipv4.frags.high_thresh
688	},
689	{
690		.procname	= "ipfrag_time",
691		.data		= &init_net.ipv4.frags.timeout,
692		.maxlen		= sizeof(int),
693		.mode		= 0644,
694		.proc_handler	= proc_dointvec_jiffies,
695	},
696	{
697		.procname	= "ipfrag_max_dist",
698		.data		= &init_net.ipv4.frags.max_dist,
699		.maxlen		= sizeof(int),
700		.mode		= 0644,
701		.proc_handler	= proc_dointvec_minmax,
702		.extra1		= &dist_min,
703	},
704	{ }
705};
706
707/* secret interval has been deprecated */
708static int ip4_frags_secret_interval_unused;
709static struct ctl_table ip4_frags_ctl_table[] = {
710	{
711		.procname	= "ipfrag_secret_interval",
712		.data		= &ip4_frags_secret_interval_unused,
713		.maxlen		= sizeof(int),
714		.mode		= 0644,
715		.proc_handler	= proc_dointvec_jiffies,
716	},
717	{ }
718};
719
720static int __net_init ip4_frags_ns_ctl_register(struct net *net)
721{
722	struct ctl_table *table;
723	struct ctl_table_header *hdr;
724
725	table = ip4_frags_ns_ctl_table;
726	if (!net_eq(net, &init_net)) {
727		table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
728		if (!table)
729			goto err_alloc;
730
731		table[0].data = &net->ipv4.frags.high_thresh;
732		table[0].extra1 = &net->ipv4.frags.low_thresh;
733		table[0].extra2 = &init_net.ipv4.frags.high_thresh;
734		table[1].data = &net->ipv4.frags.low_thresh;
735		table[1].extra2 = &net->ipv4.frags.high_thresh;
736		table[2].data = &net->ipv4.frags.timeout;
737		table[3].data = &net->ipv4.frags.max_dist;
738	}
739
740	hdr = register_net_sysctl(net, "net/ipv4", table);
741	if (!hdr)
742		goto err_reg;
743
744	net->ipv4.frags_hdr = hdr;
745	return 0;
746
747err_reg:
748	if (!net_eq(net, &init_net))
749		kfree(table);
750err_alloc:
751	return -ENOMEM;
752}
753
754static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
755{
756	struct ctl_table *table;
757
758	table = net->ipv4.frags_hdr->ctl_table_arg;
759	unregister_net_sysctl_table(net->ipv4.frags_hdr);
760	kfree(table);
761}
762
763static void __init ip4_frags_ctl_register(void)
764{
765	register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
766}
767#else
768static int ip4_frags_ns_ctl_register(struct net *net)
769{
770	return 0;
771}
772
773static void ip4_frags_ns_ctl_unregister(struct net *net)
774{
775}
776
777static void __init ip4_frags_ctl_register(void)
778{
779}
780#endif
781
782static int __net_init ipv4_frags_init_net(struct net *net)
783{
784	int res;
785
786	/* Fragment cache limits.
787	 *
788	 * The fragment memory accounting code (tries to) account for
789	 * the real memory usage by measuring both the size of the frag
790	 * queue struct (inet_frag_queue (ipv4:ipq/ipv6:frag_queue))
791	 * and the SKB's truesize.
792	 *
793	 * A 64K fragment consumes 129736 bytes (44*2944)+200
794	 * (1500 truesize == 2944, sizeof(struct ipq) == 200)
795	 *
796	 * We will commit 4MB at one time. Should we cross that limit
797	 * we will prune down to 3MB, making room for approx 8 big 64K
798	 * fragments (8 x ~128K each).
799	 */
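800	/* Worked numbers for the 1500-MTU case assumed above: a 64K
801	 * datagram arrives as ~44 fragments, and 44 * 2944 + 200 =
802	 * 129,736 bytes, i.e. roughly 128K per datagram, so the 1MB
803	 * band between the 4MB high and 3MB low thresholds holds about
804	 * 8 of them (8 * 128K == 1MB).
805	 */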
800	net->ipv4.frags.high_thresh = 4 * 1024 * 1024;
801	net->ipv4.frags.low_thresh  = 3 * 1024 * 1024;
802	/*
803	 * Important NOTE! Fragment queue must be destroyed before MSL expires.
804	 * RFC 791 is wrong in proposing to prolong the timer by the TTL
805	 * on each fragment arrival.
806	 */
807	net->ipv4.frags.timeout = IP_FRAG_TIME;
808
809	net->ipv4.frags.max_dist = 64;
810	net->ipv4.frags.f = &ip4_frags;
811
812	res = inet_frags_init_net(&net->ipv4.frags);
813	if (res < 0)
814		return res;
815	res = ip4_frags_ns_ctl_register(net);
816	if (res < 0)
817		inet_frags_exit_net(&net->ipv4.frags);
818	return res;
819}
820
821static void __net_exit ipv4_frags_exit_net(struct net *net)
822{
823	ip4_frags_ns_ctl_unregister(net);
824	inet_frags_exit_net(&net->ipv4.frags);
825}
826
827static struct pernet_operations ip4_frags_ops = {
828	.init = ipv4_frags_init_net,
829	.exit = ipv4_frags_exit_net,
830};
831
832
833static u32 ip4_key_hashfn(const void *data, u32 len, u32 seed)
834{
835	return jhash2(data,
836		      sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
837}
838
839static u32 ip4_obj_hashfn(const void *data, u32 len, u32 seed)
840{
841	const struct inet_frag_queue *fq = data;
842
843	return jhash2((const u32 *)&fq->key.v4,
844		      sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
845}
846
847static int ip4_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
848{
849	const struct frag_v4_compare_key *key = arg->key;
850	const struct inet_frag_queue *fq = ptr;
851
852	return !!memcmp(&fq->key, key, sizeof(*key));
853}
854
855static const struct rhashtable_params ip4_rhash_params = {
856	.head_offset		= offsetof(struct inet_frag_queue, node),
857	.key_offset		= offsetof(struct inet_frag_queue, key),
858	.key_len		= sizeof(struct frag_v4_compare_key),
859	.hashfn			= ip4_key_hashfn,
860	.obj_hashfn		= ip4_obj_hashfn,
861	.obj_cmpfn		= ip4_obj_cmpfn,
862	.automatic_shrinking	= true,
863};
864
865void __init ipfrag_init(void)
866{
867	ip4_frags.constructor = ip4_frag_init;
868	ip4_frags.destructor = ip4_frag_free;
869	ip4_frags.qsize = sizeof(struct ipq);
870	ip4_frags.frag_expire = ip_expire;
871	ip4_frags.frags_cache_name = ip_frag_cache_name;
872	ip4_frags.rhash_params = ip4_rhash_params;
873	if (inet_frags_init(&ip4_frags))
874		panic("IP: failed to allocate ip4_frags cache\n");
875	ip4_frags_ctl_register();
876	register_pernet_subsys(&ip4_frags_ops);
877}
v3.5.6
 
  1/*
  2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
  3 *		operating system.  INET is implemented using the  BSD Socket
  4 *		interface as the means of communication with the user level.
  5 *
  6 *		The IP fragmentation functionality.
  7 *
  8 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
  9 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
 10 *
 11 * Fixes:
 12 *		Alan Cox	:	Split from ip.c , see ip_input.c for history.
 13 *		David S. Miller :	Begin massive cleanup...
 14 *		Andi Kleen	:	Add sysctls.
 15 *		xxxx		:	Overlapfrag bug.
 16 *		Ultima          :       ip_expire() kernel panic.
 17 *		Bill Hawes	:	Frag accounting and evictor fixes.
 18 *		John McDonald	:	0 length frag bug.
 19 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 20 *		Patrick McHardy :	LRU queue of frag heads for evictor.
 21 */
 22
 23#define pr_fmt(fmt) "IPv4: " fmt
 24
 25#include <linux/compiler.h>
 26#include <linux/module.h>
 27#include <linux/types.h>
 28#include <linux/mm.h>
 29#include <linux/jiffies.h>
 30#include <linux/skbuff.h>
 31#include <linux/list.h>
 32#include <linux/ip.h>
 33#include <linux/icmp.h>
 34#include <linux/netdevice.h>
 35#include <linux/jhash.h>
 36#include <linux/random.h>
 37#include <linux/slab.h>
 38#include <net/route.h>
 39#include <net/dst.h>
 40#include <net/sock.h>
 41#include <net/ip.h>
 42#include <net/icmp.h>
 43#include <net/checksum.h>
 44#include <net/inetpeer.h>
 45#include <net/inet_frag.h>
 46#include <linux/tcp.h>
 47#include <linux/udp.h>
 48#include <linux/inet.h>
 49#include <linux/netfilter_ipv4.h>
 50#include <net/inet_ecn.h>
 51
 52/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 53 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 54 * as well. Or notify me, at least. --ANK
 55 */
 56
 57static int sysctl_ipfrag_max_dist __read_mostly = 64;
 58
 59struct ipfrag_skb_cb
 60{
 61	struct inet_skb_parm	h;
 62	int			offset;
 63};
 64
 65#define FRAG_CB(skb)	((struct ipfrag_skb_cb *)((skb)->cb))
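
FRAG_CB() overlays struct ipfrag_skb_cb on the skb control buffer
(skb->cb), so each queued fragment carries its byte offset without any
extra allocation; the queueing code below writes and reads it in place:

	FRAG_CB(skb)->offset = offset;		/* when inserting */
	if (FRAG_CB(next)->offset >= offset)	/* when walking the list */
		break;

(In the v4.17 listing above this per-fragment offset has moved to
skb->ip_defrag_offset, which shares storage with skb->dev.)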
 66
 67/* Describe an entry in the "incomplete datagrams" queue. */
 68struct ipq {
 69	struct inet_frag_queue q;
 70
 71	u32		user;
 72	__be32		saddr;
 73	__be32		daddr;
 74	__be16		id;
 75	u8		protocol;
 76	u8		ecn; /* RFC3168 support */
 77	int             iif;
 78	unsigned int    rid;
 79	struct inet_peer *peer;
 80};
 81
 82/* RFC 3168 support :
 83 * We want to check ECN values of all fragments, to detect invalid combinations.
 84 * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
 85 */
 86#define	IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
 87#define	IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
 88#define	IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
 89#define	IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */
 90
 91static inline u8 ip4_frag_ecn(u8 tos)
 92{
 93	return 1 << (tos & INET_ECN_MASK);
 94}
 95
 96/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 97 * Value : 0xff if frame should be dropped.
 98 *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
 99 */
100static const u8 ip4_frag_ecn_table[16] = {
101	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
102	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
103	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
104	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,
105
106	/* invalid combinations : drop frame */
107	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
108	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
109	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
110	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
111	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
112	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
113	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
114};
115
116static struct inet_frags ip4_frags;
117
118int ip_frag_nqueues(struct net *net)
119{
120	return net->ipv4.frags.nqueues;
121}
122
123int ip_frag_mem(struct net *net)
124{
125	return atomic_read(&net->ipv4.frags.mem);
126}
127
128static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
129			 struct net_device *dev);
130
131struct ip4_create_arg {
132	struct iphdr *iph;
133	u32 user;
134};
135
136static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
137{
138	return jhash_3words((__force u32)id << 16 | prot,
139			    (__force u32)saddr, (__force u32)daddr,
140			    ip4_frags.rnd) & (INETFRAGS_HASHSZ - 1);
141}
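
For instance, a raw id field of 0x1234 with protocol IPPROTO_UDP (17)
packs into the single word 0x12340011 before being jhashed together with
saddr, daddr and the random seed ip4_frags.rnd; masking with
(INETFRAGS_HASHSZ - 1) works because the hash table size is a power of
two.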
142
143static unsigned int ip4_hashfn(struct inet_frag_queue *q)
144{
145	struct ipq *ipq;
146
147	ipq = container_of(q, struct ipq, q);
148	return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
149}
150
151static bool ip4_frag_match(struct inet_frag_queue *q, void *a)
152{
153	struct ipq *qp;
154	struct ip4_create_arg *arg = a;
155
156	qp = container_of(q, struct ipq, q);
157	return	qp->id == arg->iph->id &&
158		qp->saddr == arg->iph->saddr &&
159		qp->daddr == arg->iph->daddr &&
160		qp->protocol == arg->iph->protocol &&
161		qp->user == arg->user;
162}
163
164/* Memory Tracking Functions. */
165static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb)
166{
167	atomic_sub(skb->truesize, &nf->mem);
168	kfree_skb(skb);
169}
170
171static void ip4_frag_init(struct inet_frag_queue *q, void *a)
172{
173	struct ipq *qp = container_of(q, struct ipq, q);
174	struct ip4_create_arg *arg = a;
175
176	qp->protocol = arg->iph->protocol;
177	qp->id = arg->iph->id;
178	qp->ecn = ip4_frag_ecn(arg->iph->tos);
179	qp->saddr = arg->iph->saddr;
180	qp->daddr = arg->iph->daddr;
181	qp->user = arg->user;
182	qp->peer = sysctl_ipfrag_max_dist ?
183		inet_getpeer_v4(arg->iph->saddr, 1) : NULL;
184}
185
186static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
187{
188	struct ipq *qp;
189
190	qp = container_of(q, struct ipq, q);
191	if (qp->peer)
192		inet_putpeer(qp->peer);
193}
194
195
196/* Destruction primitives. */
197
198static __inline__ void ipq_put(struct ipq *ipq)
199{
200	inet_frag_put(&ipq->q, &ip4_frags);
201}
202
203/* Kill ipq entry. It is not destroyed immediately,
204 * because the caller (and possibly others) still holds a reference.
205 */
206static void ipq_kill(struct ipq *ipq)
207{
208	inet_frag_kill(&ipq->q, &ip4_frags);
209}
210
211/* Memory limiting on fragments.  Evictor trashes the oldest
212 * fragment queue until we are back under the threshold.
213 */
214static void ip_evictor(struct net *net)
215{
216	int evicted;
217
218	evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags);
219	if (evicted)
220		IP_ADD_STATS_BH(net, IPSTATS_MIB_REASMFAILS, evicted);
221}
222
223/*
224 * Oops, a fragment queue timed out.  Kill it and send an ICMP reply.
225 */
226static void ip_expire(unsigned long arg)
227{
228	struct ipq *qp;
229	struct net *net;
230
231	qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
232	net = container_of(qp->q.net, struct net, ipv4.frags);
233
234	spin_lock(&qp->q.lock);
235
236	if (qp->q.last_in & INET_FRAG_COMPLETE)
237		goto out;
238
239	ipq_kill(qp);
240
241	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);
242	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
243
244	if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
245		struct sk_buff *head = qp->q.fragments;
246		const struct iphdr *iph;
247		int err;
248
249		rcu_read_lock();
250		head->dev = dev_get_by_index_rcu(net, qp->iif);
251		if (!head->dev)
252			goto out_rcu_unlock;
253
254		/* skb dst is stale, drop it, and perform route lookup again */
255		skb_dst_drop(head);
256		iph = ip_hdr(head);
257		err = ip_route_input_noref(head, iph->daddr, iph->saddr,
258					   iph->tos, head->dev);
259		if (err)
260			goto out_rcu_unlock;
261
262		/*
263		 * Only an end host needs to send an ICMP
264		 * "Fragment Reassembly Timeout" message, per RFC792.
265		 */
266		if (qp->user == IP_DEFRAG_AF_PACKET ||
267		    (qp->user == IP_DEFRAG_CONNTRACK_IN &&
268		     skb_rtable(head)->rt_type != RTN_LOCAL))
269			goto out_rcu_unlock;
270
271
272		/* Send an ICMP "Fragment Reassembly Timeout" message. */
273		icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
274out_rcu_unlock:
275		rcu_read_unlock();
276	}
277out:
278	spin_unlock(&qp->q.lock);
279	ipq_put(qp);
280}
281
282/* Find the correct entry in the "incomplete datagrams" queue for
283 * this IP datagram, and create new one, if nothing is found.
284 */
285static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
286{
287	struct inet_frag_queue *q;
288	struct ip4_create_arg arg;
289	unsigned int hash;
290
291	arg.iph = iph;
292	arg.user = user;
293
294	read_lock(&ip4_frags.lock);
295	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
296
297	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
298	if (q == NULL)
299		goto out_nomem;
300
301	return container_of(q, struct ipq, q);
302
303out_nomem:
304	LIMIT_NETDEBUG(KERN_ERR pr_fmt("ip_frag_create: no memory left !\n"));
305	return NULL;
306}
307
308/* Is the fragment too far ahead to be part of ipq? */
309static inline int ip_frag_too_far(struct ipq *qp)
310{
311	struct inet_peer *peer = qp->peer;
312	unsigned int max = sysctl_ipfrag_max_dist;
313	unsigned int start, end;
314
315	int rc;
316
317	if (!peer || !max)
318		return 0;
319
320	start = qp->rid;
321	end = atomic_inc_return(&peer->rid);
322	qp->rid = end;
323
324	rc = qp->q.fragments && (end - start) > max;
325
326	if (rc) {
327		struct net *net;
328
329		net = container_of(qp->q.net, struct net, ipv4.frags);
330		IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
331	}
332
333	return rc;
334}
335
336static int ip_frag_reinit(struct ipq *qp)
337{
338	struct sk_buff *fp;
339
340	if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
341		atomic_inc(&qp->q.refcnt);
342		return -ETIMEDOUT;
343	}
344
345	fp = qp->q.fragments;
346	do {
347		struct sk_buff *xp = fp->next;
348		frag_kfree_skb(qp->q.net, fp);
349		fp = xp;
350	} while (fp);
351
352	qp->q.last_in = 0;
353	qp->q.len = 0;
354	qp->q.meat = 0;
355	qp->q.fragments = NULL;
356	qp->q.fragments_tail = NULL;
357	qp->iif = 0;
358	qp->ecn = 0;
359
360	return 0;
361}
362
363/* Add new segment to existing queue. */
364static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
365{
366	struct sk_buff *prev, *next;
367	struct net_device *dev;
368	int flags, offset;
369	int ihl, end;
370	int err = -ENOENT;
371	u8 ecn;
372
373	if (qp->q.last_in & INET_FRAG_COMPLETE)
374		goto err;
375
376	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
377	    unlikely(ip_frag_too_far(qp)) &&
378	    unlikely(err = ip_frag_reinit(qp))) {
379		ipq_kill(qp);
380		goto err;
381	}
382
383	ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
384	offset = ntohs(ip_hdr(skb)->frag_off);
385	flags = offset & ~IP_OFFSET;
386	offset &= IP_OFFSET;
387	offset <<= 3;		/* offset is in 8-byte chunks */
388	ihl = ip_hdrlen(skb);
389
390	/* Determine the position of this fragment. */
391	end = offset + skb->len - ihl;
392	err = -EINVAL;
393
394	/* Is this the final fragment? */
395	if ((flags & IP_MF) == 0) {
396		/* If we already have some bits beyond end
397		 * or have different end, the segment is corrupted.
398		 */
399		if (end < qp->q.len ||
400		    ((qp->q.last_in & INET_FRAG_LAST_IN) && end != qp->q.len))
401			goto err;
402		qp->q.last_in |= INET_FRAG_LAST_IN;
403		qp->q.len = end;
404	} else {
405		if (end&7) {
406			end &= ~7;
407			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
408				skb->ip_summed = CHECKSUM_NONE;
409		}
410		if (end > qp->q.len) {
411			/* Some bits beyond end -> corruption. */
412			if (qp->q.last_in & INET_FRAG_LAST_IN)
413				goto err;
414			qp->q.len = end;
415		}
416	}
417	if (end == offset)
418		goto err;
419
420	err = -ENOMEM;
421	if (pskb_pull(skb, ihl) == NULL)
422		goto err;
423
424	err = pskb_trim_rcsum(skb, end - offset);
425	if (err)
426		goto err;
427
428	/* Find out which fragments are in front and at the back of us
429	 * in the chain of fragments so far.  We must know where to put
430	 * this fragment, right?
431	 */
432	prev = qp->q.fragments_tail;
433	if (!prev || FRAG_CB(prev)->offset < offset) {
434		next = NULL;
435		goto found;
436	}
437	prev = NULL;
438	for (next = qp->q.fragments; next != NULL; next = next->next) {
439		if (FRAG_CB(next)->offset >= offset)
440			break;	/* bingo! */
441		prev = next;
442	}
443
444found:
445	/* We found where to put this one.  Check for overlap with
446	 * preceding fragment, and, if needed, align things so that
447	 * any overlaps are eliminated.
448	 */
449	if (prev) {
450		int i = (FRAG_CB(prev)->offset + prev->len) - offset;
451
452		if (i > 0) {
453			offset += i;
454			err = -EINVAL;
455			if (end <= offset)
456				goto err;
457			err = -ENOMEM;
458			if (!pskb_pull(skb, i))
459				goto err;
460			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
461				skb->ip_summed = CHECKSUM_NONE;
462		}
463	}
464
465	err = -ENOMEM;
466
467	while (next && FRAG_CB(next)->offset < end) {
468		int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */
469
470		if (i < next->len) {
471			/* Eat head of the next overlapped fragment
472			 * and leave the loop. The next ones cannot overlap.
473			 */
474			if (!pskb_pull(next, i))
475				goto err;
476			FRAG_CB(next)->offset += i;
477			qp->q.meat -= i;
478			if (next->ip_summed != CHECKSUM_UNNECESSARY)
479				next->ip_summed = CHECKSUM_NONE;
480			break;
481		} else {
482			struct sk_buff *free_it = next;
483
484			/* Old fragment is completely overridden by
485			 * the new one; drop it.
486			 */
487			next = next->next;
488
489			if (prev)
490				prev->next = next;
491			else
492				qp->q.fragments = next;
493
494			qp->q.meat -= free_it->len;
495			frag_kfree_skb(qp->q.net, free_it);
496		}
497	}
498
499	FRAG_CB(skb)->offset = offset;
500
501	/* Insert this fragment in the chain of fragments. */
502	skb->next = next;
503	if (!next)
504		qp->q.fragments_tail = skb;
505	if (prev)
506		prev->next = skb;
507	else
508		qp->q.fragments = skb;
509
510	dev = skb->dev;
511	if (dev) {
512		qp->iif = dev->ifindex;
513		skb->dev = NULL;
514	}
515	qp->q.stamp = skb->tstamp;
516	qp->q.meat += skb->len;
517	qp->ecn |= ecn;
518	atomic_add(skb->truesize, &qp->q.net->mem);
519	if (offset == 0)
520		qp->q.last_in |= INET_FRAG_FIRST_IN;
521
522	if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
523	    qp->q.meat == qp->q.len)
524		return ip_frag_reasm(qp, prev, dev);
525
526	write_lock(&ip4_frags.lock);
527	list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list);
528	write_unlock(&ip4_frags.lock);
529	return -EINPROGRESS;
530
531err:
532	kfree_skb(skb);
533	return err;
534}
535
536
537/* Build a new IP datagram from all its fragments. */
538
539static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
540			 struct net_device *dev)
541{
542	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
543	struct iphdr *iph;
544	struct sk_buff *fp, *head = qp->q.fragments;
545	int len;
546	int ihlen;
547	int err;
548	int sum_truesize;
549	u8 ecn;
550
551	ipq_kill(qp);
552
553	ecn = ip4_frag_ecn_table[qp->ecn];
554	if (unlikely(ecn == 0xff)) {
555		err = -EINVAL;
556		goto out_fail;
557	}
558	/* Make the one we just received the head. */
559	if (prev) {
560		head = prev->next;
561		fp = skb_clone(head, GFP_ATOMIC);
562		if (!fp)
563			goto out_nomem;
564
565		fp->next = head->next;
566		if (!fp->next)
567			qp->q.fragments_tail = fp;
568		prev->next = fp;
569
570		skb_morph(head, qp->q.fragments);
571		head->next = qp->q.fragments->next;
572
573		consume_skb(qp->q.fragments);
574		qp->q.fragments = head;
575	}
576
577	WARN_ON(head == NULL);
578	WARN_ON(FRAG_CB(head)->offset != 0);
579
580	/* Allocate a new buffer for the datagram. */
581	ihlen = ip_hdrlen(head);
582	len = ihlen + qp->q.len;
583
584	err = -E2BIG;
585	if (len > 65535)
586		goto out_oversize;
587
588	/* Head of list must not be cloned. */
589	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
590		goto out_nomem;
591
592	/* If the first fragment is fragmented itself, we split
593	 * it into two chunks: the first with the data and paged part,
594	 * and the second holding only fragments. */
595	if (skb_has_frag_list(head)) {
596		struct sk_buff *clone;
597		int i, plen = 0;
598
599		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
600			goto out_nomem;
601		clone->next = head->next;
602		head->next = clone;
603		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
604		skb_frag_list_init(head);
605		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
606			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
607		clone->len = clone->data_len = head->data_len - plen;
608		head->data_len -= clone->len;
609		head->len -= clone->len;
610		clone->csum = 0;
611		clone->ip_summed = head->ip_summed;
612		atomic_add(clone->truesize, &qp->q.net->mem);
613	}
614
615	skb_push(head, head->data - skb_network_header(head));
616
617	sum_truesize = head->truesize;
618	for (fp = head->next; fp;) {
619		bool headstolen;
620		int delta;
621		struct sk_buff *next = fp->next;
622
623		sum_truesize += fp->truesize;
624		if (head->ip_summed != fp->ip_summed)
625			head->ip_summed = CHECKSUM_NONE;
626		else if (head->ip_summed == CHECKSUM_COMPLETE)
627			head->csum = csum_add(head->csum, fp->csum);
628
629		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
630			kfree_skb_partial(fp, headstolen);
631		} else {
632			if (!skb_shinfo(head)->frag_list)
633				skb_shinfo(head)->frag_list = fp;
634			head->data_len += fp->len;
635			head->len += fp->len;
636			head->truesize += fp->truesize;
637		}
638		fp = next;
639	}
640	atomic_sub(sum_truesize, &qp->q.net->mem);
641
642	head->next = NULL;
643	head->dev = dev;
644	head->tstamp = qp->q.stamp;
645
646	iph = ip_hdr(head);
647	iph->frag_off = 0;
648	iph->tot_len = htons(len);
649	iph->tos |= ecn;
650	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
651	qp->q.fragments = NULL;
652	qp->q.fragments_tail = NULL;
653	return 0;
654
655out_nomem:
656	LIMIT_NETDEBUG(KERN_ERR pr_fmt("queue_glue: no memory for gluing queue %p\n"),
657		       qp);
658	err = -ENOMEM;
659	goto out_fail;
660out_oversize:
661	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
662out_fail:
663	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
664	return err;
665}
666
667/* Process an incoming IP datagram fragment. */
668int ip_defrag(struct sk_buff *skb, u32 user)
669{
670	struct ipq *qp;
671	struct net *net;
672
673	net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev);
674	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
675
676	/* Start by cleaning up the memory. */
677	if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh)
678		ip_evictor(net);
679
680	/* Lookup (or create) queue header */
681	if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) {
682		int ret;
683
684		spin_lock(&qp->q.lock);
685
686		ret = ip_frag_queue(qp, skb);
687
688		spin_unlock(&qp->q.lock);
689		ipq_put(qp);
690		return ret;
691	}
692
693	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
694	kfree_skb(skb);
695	return -ENOMEM;
696}
697EXPORT_SYMBOL(ip_defrag);
698
699struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
700{
701	const struct iphdr *iph;
702	u32 len;
703
704	if (skb->protocol != htons(ETH_P_IP))
705		return skb;
706
707	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
708		return skb;
709
710	iph = ip_hdr(skb);
711	if (iph->ihl < 5 || iph->version != 4)
712		return skb;
713	if (!pskb_may_pull(skb, iph->ihl*4))
714		return skb;
715	iph = ip_hdr(skb);
716	len = ntohs(iph->tot_len);
717	if (skb->len < len || len < (iph->ihl * 4))
718		return skb;
719
720	if (ip_is_fragment(ip_hdr(skb))) {
721		skb = skb_share_check(skb, GFP_ATOMIC);
722		if (skb) {
723			if (pskb_trim_rcsum(skb, len))
724				return skb;
725			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
726			if (ip_defrag(skb, user))
727				return NULL;
728			skb->rxhash = 0;
729		}
730	}
731	return skb;
732}
733EXPORT_SYMBOL(ip_check_defrag);
734
735#ifdef CONFIG_SYSCTL
736static int zero;
737
738static struct ctl_table ip4_frags_ns_ctl_table[] = {
739	{
740		.procname	= "ipfrag_high_thresh",
741		.data		= &init_net.ipv4.frags.high_thresh,
742		.maxlen		= sizeof(int),
743		.mode		= 0644,
744		.proc_handler	= proc_dointvec
745	},
746	{
747		.procname	= "ipfrag_low_thresh",
748		.data		= &init_net.ipv4.frags.low_thresh,
749		.maxlen		= sizeof(int),
750		.mode		= 0644,
751		.proc_handler	= proc_dointvec
752	},
753	{
754		.procname	= "ipfrag_time",
755		.data		= &init_net.ipv4.frags.timeout,
756		.maxlen		= sizeof(int),
757		.mode		= 0644,
758		.proc_handler	= proc_dointvec_jiffies,
759	},
760	{ }
761};
762
763static struct ctl_table ip4_frags_ctl_table[] = {
764	{
765		.procname	= "ipfrag_secret_interval",
766		.data		= &ip4_frags.secret_interval,
767		.maxlen		= sizeof(int),
768		.mode		= 0644,
769		.proc_handler	= proc_dointvec_jiffies,
770	},
771	{
772		.procname	= "ipfrag_max_dist",
773		.data		= &sysctl_ipfrag_max_dist,
774		.maxlen		= sizeof(int),
775		.mode		= 0644,
776		.proc_handler	= proc_dointvec_minmax,
777		.extra1		= &zero
778	},
779	{ }
780};
781
782static int __net_init ip4_frags_ns_ctl_register(struct net *net)
783{
784	struct ctl_table *table;
785	struct ctl_table_header *hdr;
786
787	table = ip4_frags_ns_ctl_table;
788	if (!net_eq(net, &init_net)) {
789		table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
790		if (table == NULL)
791			goto err_alloc;
792
793		table[0].data = &net->ipv4.frags.high_thresh;
794		table[1].data = &net->ipv4.frags.low_thresh;
795		table[2].data = &net->ipv4.frags.timeout;
796	}
797
798	hdr = register_net_sysctl(net, "net/ipv4", table);
799	if (hdr == NULL)
800		goto err_reg;
801
802	net->ipv4.frags_hdr = hdr;
803	return 0;
804
805err_reg:
806	if (!net_eq(net, &init_net))
807		kfree(table);
808err_alloc:
809	return -ENOMEM;
810}
811
812static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
813{
814	struct ctl_table *table;
815
816	table = net->ipv4.frags_hdr->ctl_table_arg;
817	unregister_net_sysctl_table(net->ipv4.frags_hdr);
818	kfree(table);
819}
820
821static void ip4_frags_ctl_register(void)
822{
823	register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
824}
825#else
826static inline int ip4_frags_ns_ctl_register(struct net *net)
827{
828	return 0;
829}
830
831static inline void ip4_frags_ns_ctl_unregister(struct net *net)
832{
833}
834
835static inline void ip4_frags_ctl_register(void)
836{
837}
838#endif
839
840static int __net_init ipv4_frags_init_net(struct net *net)
841{
842	/*
843	 * Fragment cache limits. We will commit 256K at one time. Should we
844	 * cross that limit we will prune down to 192K. This should cope with
845	 * even the most extreme cases without allowing an attacker to
846	 * measurably harm machine performance.
847	 */
848	net->ipv4.frags.high_thresh = 256 * 1024;
849	net->ipv4.frags.low_thresh = 192 * 1024;
850	/*
851	 * Important NOTE! Fragment queue must be destroyed before MSL expires.
852	 * RFC 791 is wrong in proposing to prolong the timer by the TTL
853	 * on each fragment arrival.
854	 */
855	net->ipv4.frags.timeout = IP_FRAG_TIME;
856
857	inet_frags_init_net(&net->ipv4.frags);
858
859	return ip4_frags_ns_ctl_register(net);
860}
861
862static void __net_exit ipv4_frags_exit_net(struct net *net)
863{
864	ip4_frags_ns_ctl_unregister(net);
865	inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
866}
867
868static struct pernet_operations ip4_frags_ops = {
869	.init = ipv4_frags_init_net,
870	.exit = ipv4_frags_exit_net,
871};
872
873void __init ipfrag_init(void)
874{
875	ip4_frags_ctl_register();
876	register_pernet_subsys(&ip4_frags_ops);
877	ip4_frags.hashfn = ip4_hashfn;
878	ip4_frags.constructor = ip4_frag_init;
879	ip4_frags.destructor = ip4_frag_free;
880	ip4_frags.skb_free = NULL;
881	ip4_frags.qsize = sizeof(struct ipq);
882	ip4_frags.match = ip4_frag_match;
883	ip4_frags.frag_expire = ip_expire;
884	ip4_frags.secret_interval = 10 * 60 * HZ;
885	inet_frags_init(&ip4_frags);
886}