/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP fragmentation functionality.
 *
 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Alan Cox	:	Split from ip.c, see ip_input.c for history.
 *		David S. Miller :	Begin massive cleanup...
 *		Andi Kleen	:	Add sysctls.
 *		xxxx		:	Overlapfrag bug.
 *		Ultima		:	ip_expire() kernel panic.
 *		Bill Hawes	:	Frag accounting and evictor fixes.
 *		John McDonald	:	0 length frag bug.
 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 *		Patrick McHardy :	LRU queue of frag heads for evictor.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/route.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>
#include <net/inet_ecn.h>

/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 * as well. Or notify me, at least. --ANK
 */

static int sysctl_ipfrag_max_dist __read_mostly = 64;

struct ipfrag_skb_cb
{
	struct inet_skb_parm	h;
	int			offset;
};

#define FRAG_CB(skb)	((struct ipfrag_skb_cb *)((skb)->cb))

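/* While an skb sits on a reassembly queue, its control block (skb->cb) is
 * reused to remember where this fragment starts in the original datagram;
 * for example, the byte offset of a queued fragment is read back as:
 *
 *	int off = FRAG_CB(skb)->offset;
 */
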
/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
	struct inet_frag_queue q;

	u32		user;
	__be32		saddr;
	__be32		daddr;
	__be16		id;
	u8		protocol;
	u8		ecn; /* RFC3168 support */
	int		iif;
	unsigned int	rid;
	struct inet_peer *peer;
};

static inline u8 ip4_frag_ecn(u8 tos)
{
	return 1 << (tos & INET_ECN_MASK);
}
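
/* ip4_frag_ecn() turns the two ECN bits of the TOS byte (INET_ECN_MASK
 * is 3) into a one-hot flag: Not-ECT -> 0x1, ECT(1) -> 0x2, ECT(0) -> 0x4,
 * CE -> 0x8.  The flags of all fragments are OR-ed into qp->ecn, and
 * ip_frag_reasm() later folds the accumulated set back into a single ECN
 * codepoint via ip_frag_ecn_table[], where 0xff marks a mix the code
 * treats as invalid (e.g. Not-ECT together with CE) and the datagram is
 * dropped.
 */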

static struct inet_frags ip4_frags;

int ip_frag_nqueues(struct net *net)
{
	return net->ipv4.frags.nqueues;
}

int ip_frag_mem(struct net *net)
{
	return sum_frag_mem_limit(&net->ipv4.frags);
}

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev);

struct ip4_create_arg {
	struct iphdr *iph;
	u32 user;
};

static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
{
	net_get_random_once(&ip4_frags.rnd, sizeof(ip4_frags.rnd));
	return jhash_3words((__force u32)id << 16 | prot,
			    (__force u32)saddr, (__force u32)daddr,
			    ip4_frags.rnd) & (INETFRAGS_HASHSZ - 1);
}
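
/* The bucket index mixes (id, protocol) packed into one 32-bit word with
 * both addresses under ip4_frags.rnd, a random seed initialized once at
 * first use, so remote senders cannot predict which chain a crafted
 * datagram lands on.  The hash only narrows the search; ip4_frag_match()
 * below still compares every field (plus the defrag user) exactly.
 */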

static unsigned int ip4_hashfn(struct inet_frag_queue *q)
{
	struct ipq *ipq;

	ipq = container_of(q, struct ipq, q);
	return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
}

static bool ip4_frag_match(struct inet_frag_queue *q, void *a)
{
	struct ipq *qp;
	struct ip4_create_arg *arg = a;

	qp = container_of(q, struct ipq, q);
	return	qp->id == arg->iph->id &&
		qp->saddr == arg->iph->saddr &&
		qp->daddr == arg->iph->daddr &&
		qp->protocol == arg->iph->protocol &&
		qp->user == arg->user;
}

static void ip4_frag_init(struct inet_frag_queue *q, void *a)
{
	struct ipq *qp = container_of(q, struct ipq, q);
	struct netns_ipv4 *ipv4 = container_of(q->net, struct netns_ipv4,
					       frags);
	struct net *net = container_of(ipv4, struct net, ipv4);

	struct ip4_create_arg *arg = a;

	qp->protocol = arg->iph->protocol;
	qp->id = arg->iph->id;
	qp->ecn = ip4_frag_ecn(arg->iph->tos);
	qp->saddr = arg->iph->saddr;
	qp->daddr = arg->iph->daddr;
	qp->user = arg->user;
	qp->peer = sysctl_ipfrag_max_dist ?
		inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, 1) : NULL;
}

static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
{
	struct ipq *qp;

	qp = container_of(q, struct ipq, q);
	if (qp->peer)
		inet_putpeer(qp->peer);
}


/* Destruction primitives. */

static __inline__ void ipq_put(struct ipq *ipq)
{
	inet_frag_put(&ipq->q, &ip4_frags);
}

/* Kill ipq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static void ipq_kill(struct ipq *ipq)
{
	inet_frag_kill(&ipq->q, &ip4_frags);
}

/* Memory limiting on fragments. Evictor trashes the oldest
 * fragment queue until we are back under the threshold.
 */
static void ip_evictor(struct net *net)
{
	int evicted;

	evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags, false);
	if (evicted)
		IP_ADD_STATS_BH(net, IPSTATS_MIB_REASMFAILS, evicted);
}

/*
 * Oops, a fragment queue timed out. Kill it and send an ICMP reply.
 */
static void ip_expire(unsigned long arg)
{
	struct ipq *qp;
	struct net *net;

	qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
	net = container_of(qp->q.net, struct net, ipv4.frags);

	spin_lock(&qp->q.lock);

	if (qp->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	ipq_kill(qp);

	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);

	if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
		struct sk_buff *head = qp->q.fragments;
		const struct iphdr *iph;
		int err;

		rcu_read_lock();
		head->dev = dev_get_by_index_rcu(net, qp->iif);
		if (!head->dev)
			goto out_rcu_unlock;

		/* skb has no dst, perform route lookup again */
		iph = ip_hdr(head);
		err = ip_route_input_noref(head, iph->daddr, iph->saddr,
					   iph->tos, head->dev);
		if (err)
			goto out_rcu_unlock;

		/*
		 * Only an end host needs to send an ICMP
		 * "Fragment Reassembly Timeout" message, per RFC792.
		 */
		if (qp->user == IP_DEFRAG_AF_PACKET ||
		    ((qp->user >= IP_DEFRAG_CONNTRACK_IN) &&
		     (qp->user <= __IP_DEFRAG_CONNTRACK_IN_END) &&
		     (skb_rtable(head)->rt_type != RTN_LOCAL)))
			goto out_rcu_unlock;

		/* Send an ICMP "Fragment Reassembly Timeout" message. */
		icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
out_rcu_unlock:
		rcu_read_unlock();
	}
out:
	spin_unlock(&qp->q.lock);
	ipq_put(qp);
}

/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create a new one if nothing is found.
 */
static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
{
	struct inet_frag_queue *q;
	struct ip4_create_arg arg;
	unsigned int hash;

	arg.iph = iph;
	arg.user = user;

	read_lock(&ip4_frags.lock);
	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);

	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	return container_of(q, struct ipq, q);
}

/* Is the fragment too far ahead to be part of ipq? */
static inline int ip_frag_too_far(struct ipq *qp)
{
	struct inet_peer *peer = qp->peer;
	unsigned int max = sysctl_ipfrag_max_dist;
	unsigned int start, end;

	int rc;

	if (!peer || !max)
		return 0;

	start = qp->rid;
	end = atomic_inc_return(&peer->rid);
	qp->rid = end;

	rc = qp->q.fragments && (end - start) > max;

	if (rc) {
		struct net *net;

		net = container_of(qp->q.net, struct net, ipv4.frags);
		IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	}

	return rc;
}
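
/* Every fragment from a given peer draws a fresh sequence number from
 * peer->rid; qp->rid remembers the number drawn by this queue's previous
 * fragment.  With the default sysctl_ipfrag_max_dist of 64: if more than
 * 64 fragments belonging to *other* datagrams from the same source arrive
 * between two fragments of this one, the sender interleaves so many
 * datagrams that reassembly is unlikely to succeed (or the IP id space is
 * being abused), so the caller restarts the queue via ip_frag_reinit().
 */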

static int ip_frag_reinit(struct ipq *qp)
{
	struct sk_buff *fp;
	unsigned int sum_truesize = 0;

	if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
		atomic_inc(&qp->q.refcnt);
		return -ETIMEDOUT;
	}

	fp = qp->q.fragments;
	do {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		kfree_skb(fp);
		fp = xp;
	} while (fp);
	sub_frag_mem_limit(&qp->q, sum_truesize);

	qp->q.last_in = 0;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	qp->iif = 0;
	qp->ecn = 0;

	return 0;
}

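/* Fragment geometry, as decoded at the top of ip_frag_queue() below:
 * iph->frag_off packs three flag bits and a 13-bit offset counted in
 * 8-byte units.  A header value of 0x2003, for instance, means IP_MF is
 * set and this fragment's payload starts 3 * 8 == 24 bytes into the
 * original datagram:
 *
 *	offset = ntohs(ip_hdr(skb)->frag_off);	-> 0x2003
 *	flags  = offset & ~IP_OFFSET;		-> 0x2000 (IP_MF)
 *	offset = (offset & IP_OFFSET) << 3;	-> 24
 */
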
/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int flags, offset;
	int ihl, end;
	int err = -ENOENT;
	u8 ecn;

	if (qp->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
	    unlikely(ip_frag_too_far(qp)) &&
	    unlikely(err = ip_frag_reinit(qp))) {
		ipq_kill(qp);
		goto err;
	}

	ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
	offset = ntohs(ip_hdr(skb)->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
	ihl = ip_hdrlen(skb);

	/* Determine the position of this fragment. */
	end = offset + skb->len - ihl;
	err = -EINVAL;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < qp->q.len ||
		    ((qp->q.last_in & INET_FRAG_LAST_IN) && end != qp->q.len))
			goto err;
		qp->q.last_in |= INET_FRAG_LAST_IN;
		qp->q.len = end;
	} else {
		if (end & 7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->q.len) {
			/* Some bits beyond end -> corruption. */
			if (qp->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			qp->q.len = end;
		}
	}
	if (end == offset)
		goto err;

	err = -ENOMEM;
	if (pskb_pull(skb, ihl) == NULL)
		goto err;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = qp->q.fragments_tail;
	if (!prev || FRAG_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = qp->q.fragments; next != NULL; next = next->next) {
		if (FRAG_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* We found where to put this one.  Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (FRAG_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			err = -EINVAL;
			if (end <= offset)
				goto err;
			err = -ENOMEM;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	err = -ENOMEM;

	while (next && FRAG_CB(next)->offset < end) {
		int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG_CB(next)->offset += i;
			qp->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* Old fragment is completely overridden with
			 * new one.  Drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				qp->q.fragments = next;

			qp->q.meat -= free_it->len;
			sub_frag_mem_limit(&qp->q, free_it->truesize);
			kfree_skb(free_it);
		}
	}

	FRAG_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		qp->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		qp->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		qp->iif = dev->ifindex;
		skb->dev = NULL;
	}
	qp->q.stamp = skb->tstamp;
	qp->q.meat += skb->len;
	qp->ecn |= ecn;
	add_frag_mem_limit(&qp->q, skb->truesize);
	if (offset == 0)
		qp->q.last_in |= INET_FRAG_FIRST_IN;

	if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
	    skb->len + ihl > qp->q.max_size)
		qp->q.max_size = skb->len + ihl;

	if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    qp->q.meat == qp->q.len) {
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		err = ip_frag_reasm(qp, prev, dev);
		skb->_skb_refdst = orefdst;
		return err;
	}

	skb_dst_drop(skb);
	inet_frag_lru_move(&qp->q);
	return -EINPROGRESS;

err:
	kfree_skb(skb);
	return err;
}
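
/* Return value convention for ip_frag_queue(): -EINPROGRESS means the
 * fragment was queued and the datagram is still incomplete; 0 means this
 * fragment completed the datagram, and the caller's skb has been morphed
 * by ip_frag_reasm() into the fully reassembled packet; any other
 * negative value is an error, and the skb has already been freed.
 */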


/* Build a new IP datagram from all its fragments. */

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->q.fragments;
	int len;
	int ihlen;
	int err;
	int sum_truesize;
	u8 ecn;

	ipq_kill(qp);

	ecn = ip_frag_ecn_table[qp->ecn];
	if (unlikely(ecn == 0xff)) {
		err = -EINVAL;
		goto out_fail;
	}
	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);
		if (!fp)
			goto out_nomem;

		fp->next = head->next;
		if (!fp->next)
			qp->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, qp->q.fragments);
		head->next = qp->q.fragments->next;

		consume_skb(qp->q.fragments);
		qp->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG_CB(head)->offset != 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_nomem;

	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (clone == NULL)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(&qp->q, clone->truesize);
	}

	skb_push(head, head->data - skb_network_header(head));

	sum_truesize = head->truesize;
	for (fp = head->next; fp;) {
		bool headstolen;
		int delta;
		struct sk_buff *next = fp->next;

		sum_truesize += fp->truesize;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);

		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
			kfree_skb_partial(fp, headstolen);
		} else {
			if (!skb_shinfo(head)->frag_list)
				skb_shinfo(head)->frag_list = fp;
			head->data_len += fp->len;
			head->len += fp->len;
			head->truesize += fp->truesize;
		}
		fp = next;
	}
	sub_frag_mem_limit(&qp->q, sum_truesize);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;
	IPCB(head)->frag_max_size = qp->q.max_size;

	iph = ip_hdr(head);
	/* max_size != 0 implies at least one fragment had IP_DF set */
	iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0;
	iph->tot_len = htons(len);
	iph->tos |= ecn;
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	return 0;

out_nomem:
	LIMIT_NETDEBUG(KERN_ERR pr_fmt("queue_glue: no memory for gluing queue %p\n"),
		       qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
out_fail:
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	return err;
}

/* Process an incoming IP datagram fragment. */
int ip_defrag(struct sk_buff *skb, u32 user)
{
	struct ipq *qp;
	struct net *net;

	net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev);
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);

	/* Start by cleaning up the memory. */
	ip_evictor(net);

	/* Lookup (or create) queue header */
	qp = ip_find(net, ip_hdr(skb), user);
	if (qp != NULL) {
		int ret;

		spin_lock(&qp->q.lock);

		ret = ip_frag_queue(qp, skb);

		spin_unlock(&qp->q.lock);
		ipq_put(qp);
		return ret;
	}

	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -ENOMEM;
}
EXPORT_SYMBOL(ip_defrag);

struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
{
	struct iphdr iph;
	u32 len;

	if (skb->protocol != htons(ETH_P_IP))
		return skb;

	if (skb_copy_bits(skb, 0, &iph, sizeof(iph)) < 0)
		return skb;

	if (iph.ihl < 5 || iph.version != 4)
		return skb;

	len = ntohs(iph.tot_len);
	if (skb->len < len || len < (iph.ihl * 4))
		return skb;

	if (ip_is_fragment(&iph)) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (skb) {
			if (!pskb_may_pull(skb, iph.ihl * 4))
				return skb;
			if (pskb_trim_rcsum(skb, len))
				return skb;
			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
			if (ip_defrag(skb, user))
				return NULL;
			skb_clear_hash(skb);
		}
	}
	return skb;
}
EXPORT_SYMBOL(ip_check_defrag);

#ifdef CONFIG_SYSCTL
static int zero;

static struct ctl_table ip4_frags_ns_ctl_table[] = {
	{
		.procname	= "ipfrag_high_thresh",
		.data		= &init_net.ipv4.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ipfrag_low_thresh",
		.data		= &init_net.ipv4.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ipfrag_time",
		.data		= &init_net.ipv4.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static struct ctl_table ip4_frags_ctl_table[] = {
	{
		.procname	= "ipfrag_secret_interval",
		.data		= &ip4_frags.secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ipfrag_max_dist",
		.data		= &sysctl_ipfrag_max_dist,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero
	},
	{ }
};
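
/* Both tables surface under /proc/sys/net/ipv4/: the first is duplicated
 * per network namespace (ipfrag_high_thresh, ipfrag_low_thresh,
 * ipfrag_time), the second is global (ipfrag_secret_interval,
 * ipfrag_max_dist).  E.g., doubling the memory budget for the init netns:
 *
 *	echo 8388608 > /proc/sys/net/ipv4/ipfrag_high_thresh
 */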

static int __net_init ip4_frags_ns_ctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip4_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &net->ipv4.frags.high_thresh;
		table[1].data = &net->ipv4.frags.low_thresh;
		table[2].data = &net->ipv4.frags.timeout;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	hdr = register_net_sysctl(net, "net/ipv4", table);
	if (hdr == NULL)
		goto err_reg;

	net->ipv4.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv4.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.frags_hdr);
	kfree(table);
}

static void ip4_frags_ctl_register(void)
{
	register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
}
#else
static inline int ip4_frags_ns_ctl_register(struct net *net)
{
	return 0;
}

static inline void ip4_frags_ns_ctl_unregister(struct net *net)
{
}

static inline void ip4_frags_ctl_register(void)
{
}
#endif

static int __net_init ipv4_frags_init_net(struct net *net)
{
	/* Fragment cache limits.
	 *
	 * The fragment memory accounting code (tries to) account for
	 * the real memory usage, by measuring both the size of the frag
	 * queue struct (inet_frag_queue (ipv4:ipq/ipv6:frag_queue))
	 * and the SKB's truesize.
	 *
	 * A 64K fragment consumes 129736 bytes ((44 * 2944) + 200)
	 * (1500 truesize == 2944, sizeof(struct ipq) == 200)
	 *
	 * We will commit 4MB at one time.  Should we cross that limit
	 * we will prune down to 3MB, making room for approx 8 big 64K
	 * fragments (8 x 128k).
	 */
	net->ipv4.frags.high_thresh = 4 * 1024 * 1024;
	net->ipv4.frags.low_thresh  = 3 * 1024 * 1024;
	/*
	 * Important NOTE! Fragment queue must be destroyed before MSL expires.
	 * RFC 791 is wrong in proposing to prolong the timer by each
	 * arriving fragment's TTL.
	 */
	net->ipv4.frags.timeout = IP_FRAG_TIME;

	inet_frags_init_net(&net->ipv4.frags);

	return ip4_frags_ns_ctl_register(net);
}

static void __net_exit ipv4_frags_exit_net(struct net *net)
{
	ip4_frags_ns_ctl_unregister(net);
	inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
}

static struct pernet_operations ip4_frags_ops = {
	.init = ipv4_frags_init_net,
	.exit = ipv4_frags_exit_net,
};

void __init ipfrag_init(void)
{
	ip4_frags_ctl_register();
	register_pernet_subsys(&ip4_frags_ops);
	ip4_frags.hashfn = ip4_hashfn;
	ip4_frags.constructor = ip4_frag_init;
	ip4_frags.destructor = ip4_frag_free;
	ip4_frags.skb_free = NULL;
	ip4_frags.qsize = sizeof(struct ipq);
	ip4_frags.match = ip4_frag_match;
	ip4_frags.frag_expire = ip_expire;
	ip4_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&ip4_frags);
}
// SPDX-License-Identifier: GPL-2.0
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP fragmentation functionality.
 *
 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Alan Cox	:	Split from ip.c, see ip_input.c for history.
 *		David S. Miller :	Begin massive cleanup...
 *		Andi Kleen	:	Add sysctls.
 *		xxxx		:	Overlapfrag bug.
 *		Ultima		:	ip_expire() kernel panic.
 *		Bill Hawes	:	Frag accounting and evictor fixes.
 *		John McDonald	:	0 length frag bug.
 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 *		Patrick McHardy :	LRU queue of frag heads for evictor.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/route.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>
#include <net/inet_ecn.h>
#include <net/l3mdev.h>

/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 * as well. Or notify me, at least. --ANK
 */
static const char ip_frag_cache_name[] = "ip4-frags";

/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
	struct inet_frag_queue q;

	u8		ecn;		/* RFC3168 support */
	u16		max_df_size;	/* largest frag with DF set seen */
	int		iif;
	unsigned int	rid;
	struct inet_peer *peer;
};

static u8 ip4_frag_ecn(u8 tos)
{
	return 1 << (tos & INET_ECN_MASK);
}

static struct inet_frags ip4_frags;

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev);

static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
{
	struct ipq *qp = container_of(q, struct ipq, q);
	struct netns_ipv4 *ipv4 = container_of(q->net, struct netns_ipv4,
					       frags);
	struct net *net = container_of(ipv4, struct net, ipv4);

	const struct frag_v4_compare_key *key = a;

	q->key.v4 = *key;
	qp->ecn = 0;
	qp->peer = q->net->max_dist ?
		inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif, 1) :
		NULL;
}

static void ip4_frag_free(struct inet_frag_queue *q)
{
	struct ipq *qp;

	qp = container_of(q, struct ipq, q);
	if (qp->peer)
		inet_putpeer(qp->peer);
}


/* Destruction primitives. */

static void ipq_put(struct ipq *ipq)
{
	inet_frag_put(&ipq->q);
}

/* Kill ipq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static void ipq_kill(struct ipq *ipq)
{
	inet_frag_kill(&ipq->q);
}

static bool frag_expire_skip_icmp(u32 user)
{
	return user == IP_DEFRAG_AF_PACKET ||
	       ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
					 __IP_DEFRAG_CONNTRACK_IN_END) ||
	       ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_BRIDGE_IN,
					 __IP_DEFRAG_CONNTRACK_BRIDGE_IN);
}
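
/* AF_PACKET taps and conntrack defragmenting on the input or bridge path
 * are not necessarily the final destination of the datagram, so a timeout
 * on their behalf must not emit ICMP; ip_expire() additionally requires
 * the route to be RTN_LOCAL before sending the "Fragment Reassembly
 * Timeout" message, as only an end host may do so per RFC 792.
 */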

/*
 * Oops, a fragment queue timed out. Kill it and send an ICMP reply.
 */
static void ip_expire(struct timer_list *t)
{
	struct inet_frag_queue *frag = from_timer(frag, t, timer);
	const struct iphdr *iph;
	struct sk_buff *head;
	struct net *net;
	struct ipq *qp;
	int err;

	qp = container_of(frag, struct ipq, q);
	net = container_of(qp->q.net, struct net, ipv4.frags);

	rcu_read_lock();
	spin_lock(&qp->q.lock);

	if (qp->q.flags & INET_FRAG_COMPLETE)
		goto out;

	ipq_kill(qp);
	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);

	head = qp->q.fragments;

	__IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);

	if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !head)
		goto out;

	head->dev = dev_get_by_index_rcu(net, qp->iif);
	if (!head->dev)
		goto out;

	/* skb has no dst, perform route lookup again */
	iph = ip_hdr(head);
	err = ip_route_input_noref(head, iph->daddr, iph->saddr,
				   iph->tos, head->dev);
	if (err)
		goto out;

	/* Only an end host needs to send an ICMP
	 * "Fragment Reassembly Timeout" message, per RFC792.
	 */
	if (frag_expire_skip_icmp(qp->q.key.v4.user) &&
	    (skb_rtable(head)->rt_type != RTN_LOCAL))
		goto out;

	skb_get(head);
	spin_unlock(&qp->q.lock);
	icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
	kfree_skb(head);
	goto out_rcu_unlock;

out:
	spin_unlock(&qp->q.lock);
out_rcu_unlock:
	rcu_read_unlock();
	ipq_put(qp);
}

/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create a new one if nothing is found.
 */
static struct ipq *ip_find(struct net *net, struct iphdr *iph,
			   u32 user, int vif)
{
	struct frag_v4_compare_key key = {
		.saddr = iph->saddr,
		.daddr = iph->daddr,
		.user = user,
		.vif = vif,
		.id = iph->id,
		.protocol = iph->protocol,
	};
	struct inet_frag_queue *q;

	q = inet_frag_find(&net->ipv4.frags, &key);
	if (!q)
		return NULL;

	return container_of(q, struct ipq, q);
}
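
/* In this rhashtable-based scheme the full key is looked up directly:
 * besides the classic (saddr, daddr, id, protocol) reassembly tuple, it
 * carries the defrag user and the L3 master device index (vif), so
 * fragments of identical-looking datagrams that arrive in different VRFs,
 * or are defragmented for different purposes, never share a queue.
 */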

/* Is the fragment too far ahead to be part of ipq? */
static int ip_frag_too_far(struct ipq *qp)
{
	struct inet_peer *peer = qp->peer;
	unsigned int max = qp->q.net->max_dist;
	unsigned int start, end;

	int rc;

	if (!peer || !max)
		return 0;

	start = qp->rid;
	end = atomic_inc_return(&peer->rid);
	qp->rid = end;

	rc = qp->q.fragments && (end - start) > max;

	if (rc) {
		struct net *net;

		net = container_of(qp->q.net, struct net, ipv4.frags);
		__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	}

	return rc;
}

static int ip_frag_reinit(struct ipq *qp)
{
	struct sk_buff *fp;
	unsigned int sum_truesize = 0;

	if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
		refcount_inc(&qp->q.refcnt);
		return -ETIMEDOUT;
	}

	fp = qp->q.fragments;
	do {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		kfree_skb(fp);
		fp = xp;
	} while (fp);
	sub_frag_mem_limit(qp->q.net, sum_truesize);

	qp->q.flags = 0;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	qp->iif = 0;
	qp->ecn = 0;

	return 0;
}

/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	unsigned int fragsize;
	int flags, offset;
	int ihl, end;
	int err = -ENOENT;
	u8 ecn;

	if (qp->q.flags & INET_FRAG_COMPLETE)
		goto err;

	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
	    unlikely(ip_frag_too_far(qp)) &&
	    unlikely(err = ip_frag_reinit(qp))) {
		ipq_kill(qp);
		goto err;
	}

	ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
	offset = ntohs(ip_hdr(skb)->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
	ihl = ip_hdrlen(skb);

	/* Determine the position of this fragment. */
	end = offset + skb->len - skb_network_offset(skb) - ihl;
	err = -EINVAL;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < qp->q.len ||
		    ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
			goto err;
		qp->q.flags |= INET_FRAG_LAST_IN;
		qp->q.len = end;
	} else {
		if (end & 7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->q.len) {
			/* Some bits beyond end -> corruption. */
			if (qp->q.flags & INET_FRAG_LAST_IN)
				goto err;
			qp->q.len = end;
		}
	}
	if (end == offset)
		goto err;

	err = -ENOMEM;
	if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
		goto err;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = qp->q.fragments_tail;
	if (!prev || prev->ip_defrag_offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = qp->q.fragments; next != NULL; next = next->next) {
		if (next->ip_defrag_offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* We found where to put this one.  Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (prev->ip_defrag_offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			err = -EINVAL;
			if (end <= offset)
				goto err;
			err = -ENOMEM;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	err = -ENOMEM;

	while (next && next->ip_defrag_offset < end) {
		int i = end - next->ip_defrag_offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			next->ip_defrag_offset += i;
			qp->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* Old fragment is completely overridden with
			 * new one.  Drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				qp->q.fragments = next;

			qp->q.meat -= free_it->len;
			sub_frag_mem_limit(qp->q.net, free_it->truesize);
			kfree_skb(free_it);
		}
	}

	/* Note : skb->ip_defrag_offset and skb->dev share the same location. */
	dev = skb->dev;
	if (dev)
		qp->iif = dev->ifindex;
	/* Make sure the compiler won't play aliasing games between the
	 * skb->dev read above and the skb->ip_defrag_offset store below.
	 */
	barrier();
	skb->ip_defrag_offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		qp->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		qp->q.fragments = skb;

	qp->q.stamp = skb->tstamp;
	qp->q.meat += skb->len;
	qp->ecn |= ecn;
	add_frag_mem_limit(qp->q.net, skb->truesize);
	if (offset == 0)
		qp->q.flags |= INET_FRAG_FIRST_IN;

	fragsize = skb->len + ihl;

	if (fragsize > qp->q.max_size)
		qp->q.max_size = fragsize;

	if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
	    fragsize > qp->max_df_size)
		qp->max_df_size = fragsize;

	if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    qp->q.meat == qp->q.len) {
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		err = ip_frag_reasm(qp, prev, dev);
		skb->_skb_refdst = orefdst;
		return err;
	}

	skb_dst_drop(skb);
	return -EINPROGRESS;

err:
	kfree_skb(skb);
	return err;
}


/* Build a new IP datagram from all its fragments. */

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->q.fragments;
	int len;
	int ihlen;
	int err;
	u8 ecn;

	ipq_kill(qp);

	ecn = ip_frag_ecn_table[qp->ecn];
	if (unlikely(ecn == 0xff)) {
		err = -EINVAL;
		goto out_fail;
	}
	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);
		if (!fp)
			goto out_nomem;

		fp->next = head->next;
		if (!fp->next)
			qp->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, qp->q.fragments);
		head->next = qp->q.fragments->next;

		consume_skb(qp->q.fragments);
		qp->q.fragments = head;
	}

	WARN_ON(!head);
	WARN_ON(head->ip_defrag_offset != 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_nomem;

	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(qp->q.net, clone->truesize);
	}

	skb_shinfo(head)->frag_list = head->next;
	skb_push(head, head->data - skb_network_header(head));

	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
	}
	sub_frag_mem_limit(qp->q.net, head->truesize);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;
	IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);

	iph = ip_hdr(head);
	iph->tot_len = htons(len);
	iph->tos |= ecn;

	/* When we set IP_DF on a refragmented skb we must also force a
	 * call to ip_fragment to avoid forwarding a DF-skb of size s while
	 * original sender only sent fragments of size f (where f < s).
	 *
	 * We only set DF/IPSKB_FRAG_PMTU if such DF fragment was the largest
	 * frag seen to avoid sending tiny DF-fragments in case skb was built
	 * from one very small df-fragment and one large non-df frag.
	 */
	if (qp->max_df_size == qp->q.max_size) {
		IPCB(head)->flags |= IPSKB_FRAG_PMTU;
		iph->frag_off = htons(IP_DF);
	} else {
		iph->frag_off = 0;
	}

	ip_send_check(iph);

	__IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	return 0;

out_nomem:
	net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr);
out_fail:
	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	return err;
}

/* Process an incoming IP datagram fragment. */
int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	struct net_device *dev = skb->dev ? : skb_dst(skb)->dev;
	int vif = l3mdev_master_ifindex_rcu(dev);
	struct ipq *qp;

	__IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS);
	skb_orphan(skb);

	/* Lookup (or create) queue header */
	qp = ip_find(net, ip_hdr(skb), user, vif);
	if (qp) {
		int ret;

		spin_lock(&qp->q.lock);

		ret = ip_frag_queue(qp, skb);

		spin_unlock(&qp->q.lock);
		ipq_put(qp);
		return ret;
	}

	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -ENOMEM;
}
EXPORT_SYMBOL(ip_defrag);

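/* ip_check_defrag(), below, is the non-owning variant used by readers
 * such as packet taps: it copies the header out of a possibly shared skb,
 * takes a private copy via skb_share_check() before trimming, and returns
 * NULL while reassembly is still in progress (the skb was consumed by
 * ip_defrag()).
 */
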
struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	struct iphdr iph;
	int netoff;
	u32 len;

	if (skb->protocol != htons(ETH_P_IP))
		return skb;

	netoff = skb_network_offset(skb);

	if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
		return skb;

	if (iph.ihl < 5 || iph.version != 4)
		return skb;

	len = ntohs(iph.tot_len);
	if (skb->len < netoff + len || len < (iph.ihl * 4))
		return skb;

	if (ip_is_fragment(&iph)) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (skb) {
			if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
				return skb;
			if (pskb_trim_rcsum(skb, netoff + len))
				return skb;
			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
			if (ip_defrag(net, skb, user))
				return NULL;
			skb_clear_hash(skb);
		}
	}
	return skb;
}
EXPORT_SYMBOL(ip_check_defrag);

#ifdef CONFIG_SYSCTL
static int dist_min;

static struct ctl_table ip4_frags_ns_ctl_table[] = {
	{
		.procname	= "ipfrag_high_thresh",
		.data		= &init_net.ipv4.frags.high_thresh,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= &init_net.ipv4.frags.low_thresh
	},
	{
		.procname	= "ipfrag_low_thresh",
		.data		= &init_net.ipv4.frags.low_thresh,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra2		= &init_net.ipv4.frags.high_thresh
	},
	{
		.procname	= "ipfrag_time",
		.data		= &init_net.ipv4.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ipfrag_max_dist",
		.data		= &init_net.ipv4.frags.max_dist,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &dist_min,
	},
	{ }
};
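
/* The two thresholds reference each other through extra1/extra2, so
 * proc_doulongvec_minmax() rejects any write that would leave
 * low_thresh above high_thresh; ip4_frags_ns_ctl_register() rewires
 * these pointers to the per-namespace copies for non-init netns.
 */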

/* secret interval has been deprecated */
static int ip4_frags_secret_interval_unused;
static struct ctl_table ip4_frags_ctl_table[] = {
	{
		.procname	= "ipfrag_secret_interval",
		.data		= &ip4_frags_secret_interval_unused,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static int __net_init ip4_frags_ns_ctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip4_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
		if (!table)
			goto err_alloc;

		table[0].data = &net->ipv4.frags.high_thresh;
		table[0].extra1 = &net->ipv4.frags.low_thresh;
		table[0].extra2 = &init_net.ipv4.frags.high_thresh;
		table[1].data = &net->ipv4.frags.low_thresh;
		table[1].extra2 = &net->ipv4.frags.high_thresh;
		table[2].data = &net->ipv4.frags.timeout;
		table[3].data = &net->ipv4.frags.max_dist;
	}

	hdr = register_net_sysctl(net, "net/ipv4", table);
	if (!hdr)
		goto err_reg;

	net->ipv4.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv4.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.frags_hdr);
	kfree(table);
}

static void __init ip4_frags_ctl_register(void)
{
	register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
}
#else
static int ip4_frags_ns_ctl_register(struct net *net)
{
	return 0;
}

static void ip4_frags_ns_ctl_unregister(struct net *net)
{
}

static void __init ip4_frags_ctl_register(void)
{
}
#endif

static int __net_init ipv4_frags_init_net(struct net *net)
{
	int res;

	/* Fragment cache limits.
	 *
	 * The fragment memory accounting code (tries to) account for
	 * the real memory usage, by measuring both the size of the frag
	 * queue struct (inet_frag_queue (ipv4:ipq/ipv6:frag_queue))
	 * and the SKB's truesize.
	 *
	 * A 64K fragment consumes 129736 bytes ((44 * 2944) + 200)
	 * (1500 truesize == 2944, sizeof(struct ipq) == 200)
	 *
	 * We will commit 4MB at one time.  Should we cross that limit
	 * we will prune down to 3MB, making room for approx 8 big 64K
	 * fragments (8 x 128k).
	 */
	net->ipv4.frags.high_thresh = 4 * 1024 * 1024;
	net->ipv4.frags.low_thresh  = 3 * 1024 * 1024;
	/*
	 * Important NOTE! Fragment queue must be destroyed before MSL expires.
	 * RFC 791 is wrong in proposing to prolong the timer by each
	 * arriving fragment's TTL.
	 */
	net->ipv4.frags.timeout = IP_FRAG_TIME;

	net->ipv4.frags.max_dist = 64;
	net->ipv4.frags.f = &ip4_frags;

	res = inet_frags_init_net(&net->ipv4.frags);
	if (res < 0)
		return res;
	res = ip4_frags_ns_ctl_register(net);
	if (res < 0)
		inet_frags_exit_net(&net->ipv4.frags);
	return res;
}

static void __net_exit ipv4_frags_exit_net(struct net *net)
{
	ip4_frags_ns_ctl_unregister(net);
	inet_frags_exit_net(&net->ipv4.frags);
}

static struct pernet_operations ip4_frags_ops = {
	.init = ipv4_frags_init_net,
	.exit = ipv4_frags_exit_net,
};

static u32 ip4_key_hashfn(const void *data, u32 len, u32 seed)
{
	return jhash2(data,
		      sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
}

static u32 ip4_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct inet_frag_queue *fq = data;

	return jhash2((const u32 *)&fq->key.v4,
		      sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
}

static int ip4_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
{
	const struct frag_v4_compare_key *key = arg->key;
	const struct inet_frag_queue *fq = ptr;

	return !!memcmp(&fq->key, key, sizeof(*key));
}

static const struct rhashtable_params ip4_rhash_params = {
	.head_offset		= offsetof(struct inet_frag_queue, node),
	.key_offset		= offsetof(struct inet_frag_queue, key),
	.key_len		= sizeof(struct frag_v4_compare_key),
	.hashfn			= ip4_key_hashfn,
	.obj_hashfn		= ip4_obj_hashfn,
	.obj_cmpfn		= ip4_obj_cmpfn,
	.automatic_shrinking	= true,
};
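
/* The rhashtable hashes the frag_v4_compare_key word-by-word with jhash2()
 * under a per-table random seed, and ip4_obj_cmpfn() then memcmp()s the
 * whole key, so a hash collision only costs one extra compare.  Together
 * with automatic_shrinking, this replaces the old fixed-size chain table
 * and its periodic ipfrag_secret_interval rehashing.
 */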

void __init ipfrag_init(void)
{
	ip4_frags.constructor = ip4_frag_init;
	ip4_frags.destructor = ip4_frag_free;
	ip4_frags.qsize = sizeof(struct ipq);
	ip4_frags.frag_expire = ip_expire;
	ip4_frags.frags_cache_name = ip_frag_cache_name;
	ip4_frags.rhash_params = ip4_rhash_params;
	if (inet_frags_init(&ip4_frags))
		panic("IP: failed to allocate ip4_frags cache\n");
	ip4_frags_ctl_register();
	register_pernet_subsys(&ip4_frags_ops);
}