// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * inet fragments management
 *
 * Authors:	Pavel Emelyanov <xemul@openvz.org>
 *		Started as consolidation of ipv4/ip_fragment.c,
 *		ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/rhashtable.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "../core/sock_destructor.h"

/* Use skb->cb to track consecutive/adjacent fragments coming at
 * the end of the queue. Nodes in the rb-tree queue will
 * contain "runs" of one or more adjacent fragments.
 *
 * Invariants:
 * - next_frag is NULL at the tail of a "run";
 * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
 */
struct ipfrag_skb_cb {
        union {
                struct inet_skb_parm h4;
                struct inet6_skb_parm h6;
        };
        struct sk_buff *next_frag;
        int frag_run_len;
        int ip_defrag_offset;
};

#define FRAG_CB(skb)	((struct ipfrag_skb_cb *)((skb)->cb))
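
/* Illustrative layout (offsets are examples, not from this file): with
 * 1480-byte fragments arriving at offsets 0, 1480 and 4440, the tree holds
 * two rb-tree nodes:
 *
 *   run A: skb(0..1479) -> skb(1480..2959)   linked via next_frag,
 *                                             frag_run_len == 2960 on the head
 *   run B: skb(4440..5919)                    a hole precedes this run
 */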

static void fragcb_clear(struct sk_buff *skb)
{
        RB_CLEAR_NODE(&skb->rbnode);
        FRAG_CB(skb)->next_frag = NULL;
        FRAG_CB(skb)->frag_run_len = skb->len;
}

/* Append skb to the last "run". */
static void fragrun_append_to_last(struct inet_frag_queue *q,
                                   struct sk_buff *skb)
{
        fragcb_clear(skb);

        FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
        FRAG_CB(q->fragments_tail)->next_frag = skb;
        q->fragments_tail = skb;
}

/* Create a new "run" with the skb. */
static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
{
        BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
        fragcb_clear(skb);

        if (q->last_run_head)
                rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
                             &q->last_run_head->rbnode.rb_right);
        else
                rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
        rb_insert_color(&skb->rbnode, &q->rb_fragments);

        q->fragments_tail = skb;
        q->last_run_head = skb;
}

/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements.
 * Value: 0xff if the frame should be dropped.
 *        0 or an INET_ECN_CE value, to be ORed into the final iph->tos field.
 */
const u8 ip_frag_ecn_table[16] = {
        /* at least one fragment had CE, and others ECT_0 or ECT_1 */
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = INET_ECN_CE,
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = INET_ECN_CE,
        [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = INET_ECN_CE,

        /* invalid combinations : drop frame */
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
        [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);

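/* Register a fragment queue flavour (ipv4, ipv6, nf conntrack ipv6):
 * create the slab cache for its queue objects and take the initial
 * reference on @f, dropped again in inet_frags_fini().
 */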
int inet_frags_init(struct inet_frags *f)
{
        f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
                                            NULL);
        if (!f->frags_cachep)
                return -ENOMEM;

        refcount_set(&f->refcnt, 1);
        init_completion(&f->completion);
        return 0;
}
EXPORT_SYMBOL(inet_frags_init);

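/* Unregister: drop the initial reference and wait until every fqdir that
 * still pins @f (see fqdir_init()/fqdir_free_fn()) has released it, so the
 * slab cache can be destroyed safely.
 */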
void inet_frags_fini(struct inet_frags *f)
{
        if (refcount_dec_and_test(&f->refcnt))
                complete(&f->completion);

        wait_for_completion(&f->completion);

        kmem_cache_destroy(f->frags_cachep);
        f->frags_cachep = NULL;
}
EXPORT_SYMBOL(inet_frags_fini);

/* called from rhashtable_free_and_destroy() at netns_frags dismantle */
static void inet_frags_free_cb(void *ptr, void *arg)
{
        struct inet_frag_queue *fq = ptr;
        int count;

        count = del_timer_sync(&fq->timer) ? 1 : 0;

        spin_lock_bh(&fq->lock);
        fq->flags |= INET_FRAG_DROP;
        if (!(fq->flags & INET_FRAG_COMPLETE)) {
                fq->flags |= INET_FRAG_COMPLETE;
                count++;
        } else if (fq->flags & INET_FRAG_HASH_DEAD) {
                count++;
        }
        spin_unlock_bh(&fq->lock);

        if (refcount_sub_and_test(count, &fq->refcnt))
                inet_frag_destroy(fq);
}

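/* fqdirs queued here by fqdir_work_fn() wait for one more RCU grace period
 * before the final kfree() in fqdir_free_fn(); batching them behind a
 * single rcu_barrier() amortizes its cost across many netns teardowns.
 */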
static LLIST_HEAD(fqdir_free_list);

static void fqdir_free_fn(struct work_struct *work)
{
        struct llist_node *kill_list;
        struct fqdir *fqdir, *tmp;
        struct inet_frags *f;

        /* Atomically snapshot the list of fqdirs to free */
        kill_list = llist_del_all(&fqdir_free_list);

        /* We need to make sure all ongoing call_rcu(..., inet_frag_destroy_rcu)
         * have completed, since they need to dereference fqdir.
         * Would it not be nice to have kfree_rcu_barrier() ? :)
         */
        rcu_barrier();

        llist_for_each_entry_safe(fqdir, tmp, kill_list, free_list) {
                f = fqdir->f;
                if (refcount_dec_and_test(&f->refcnt))
                        complete(&f->completion);

                kfree(fqdir);
        }
}

static DECLARE_DELAYED_WORK(fqdir_free_work, fqdir_free_fn);

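/* Tear down one fqdir's hash table: inet_frags_free_cb() drops every
 * remaining queue, then the fqdir is handed to the batched free path above.
 */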
static void fqdir_work_fn(struct work_struct *work)
{
        struct fqdir *fqdir = container_of(work, struct fqdir, destroy_work);

        rhashtable_free_and_destroy(&fqdir->rhashtable, inet_frags_free_cb, NULL);

        if (llist_add(&fqdir->free_list, &fqdir_free_list))
                queue_delayed_work(system_wq, &fqdir_free_work, HZ);
}

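/* Allocate the per-netns, per-protocol fragment directory and its hash
 * table, and pin the inet_frags flavour until this fqdir is freed.
 */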
int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net)
{
        struct fqdir *fqdir = kzalloc(sizeof(*fqdir), GFP_KERNEL);
        int res;

        if (!fqdir)
                return -ENOMEM;
        fqdir->f = f;
        fqdir->net = net;
        res = rhashtable_init(&fqdir->rhashtable, &fqdir->f->rhash_params);
        if (res < 0) {
                kfree(fqdir);
                return res;
        }
        refcount_inc(&f->refcnt);
        *fqdirp = fqdir;
        return 0;
}
EXPORT_SYMBOL(fqdir_init);

static struct workqueue_struct *inet_frag_wq;

static int __init inet_frag_wq_init(void)
{
        inet_frag_wq = create_workqueue("inet_frag_wq");
        if (!inet_frag_wq)
                panic("Could not create inet frag workq");
        return 0;
}

pure_initcall(inet_frag_wq_init);

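/* Called at netns dismantle; the hash table teardown is deferred to the
 * dedicated inet_frag_wq workqueue, since it may sleep and can be slow.
 */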
void fqdir_exit(struct fqdir *fqdir)
{
        INIT_WORK(&fqdir->destroy_work, fqdir_work_fn);
        queue_work(inet_frag_wq, &fqdir->destroy_work);
}
EXPORT_SYMBOL(fqdir_exit);

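/* Mark a queue as complete and unlink it: drop the timer's reference and,
 * if the hash table is still alive, the table's reference as well.
 * Usually called with the queue lock held.
 */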
void inet_frag_kill(struct inet_frag_queue *fq)
{
        if (del_timer(&fq->timer))
                refcount_dec(&fq->refcnt);

        if (!(fq->flags & INET_FRAG_COMPLETE)) {
                struct fqdir *fqdir = fq->fqdir;

                fq->flags |= INET_FRAG_COMPLETE;
                rcu_read_lock();
                /* The RCU read lock provides a memory barrier
                 * guaranteeing that if fqdir->dead is false then
                 * the hash table destruction will not start until
                 * after we unlock. Paired with fqdir_pre_exit().
                 */
                if (!READ_ONCE(fqdir->dead)) {
                        rhashtable_remove_fast(&fqdir->rhashtable, &fq->node,
                                               fqdir->f->rhash_params);
                        refcount_dec(&fq->refcnt);
                } else {
                        fq->flags |= INET_FRAG_HASH_DEAD;
                }
                rcu_read_unlock();
        }
}
EXPORT_SYMBOL(inet_frag_kill);

static void inet_frag_destroy_rcu(struct rcu_head *head)
{
        struct inet_frag_queue *q = container_of(head, struct inet_frag_queue,
                                                 rcu);
        struct inet_frags *f = q->fqdir->f;

        if (f->destructor)
                f->destructor(q);
        kmem_cache_free(f->frags_cachep, q);
}

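/* Free every skb in the rb-tree of runs, following next_frag within each
 * run, and return the total truesize released so the caller can adjust
 * the fqdir memory accounting.
 */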
unsigned int inet_frag_rbtree_purge(struct rb_root *root,
                                    enum skb_drop_reason reason)
{
        struct rb_node *p = rb_first(root);
        unsigned int sum = 0;

        while (p) {
                struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);

                p = rb_next(p);
                rb_erase(&skb->rbnode, root);
                while (skb) {
                        struct sk_buff *next = FRAG_CB(skb)->next_frag;

                        sum += skb->truesize;
                        kfree_skb_reason(skb, reason);
                        skb = next;
                }
        }
        return sum;
}
EXPORT_SYMBOL(inet_frag_rbtree_purge);

void inet_frag_destroy(struct inet_frag_queue *q)
{
        unsigned int sum, sum_truesize = 0;
        enum skb_drop_reason reason;
        struct inet_frags *f;
        struct fqdir *fqdir;

        WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
        reason = (q->flags & INET_FRAG_DROP) ?
                        SKB_DROP_REASON_FRAG_REASM_TIMEOUT :
                        SKB_CONSUMED;
        WARN_ON(del_timer(&q->timer) != 0);

        /* Release all fragment data. */
        fqdir = q->fqdir;
        f = fqdir->f;
        sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments, reason);
        sum = sum_truesize + f->qsize;

        call_rcu(&q->rcu, inet_frag_destroy_rcu);

        sub_frag_mem_limit(fqdir, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);

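/* A new queue starts with three references: one for the expiry timer,
 * one for the hash table, and one returned to the caller of
 * inet_frag_find().
 */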
static struct inet_frag_queue *inet_frag_alloc(struct fqdir *fqdir,
                                               struct inet_frags *f,
                                               void *arg)
{
        struct inet_frag_queue *q;

        q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
        if (!q)
                return NULL;

        q->fqdir = fqdir;
        f->constructor(q, arg);
        add_frag_mem_limit(fqdir, f->qsize);

        timer_setup(&q->timer, f->frag_expire, 0);
        spin_lock_init(&q->lock);
        refcount_set(&q->refcnt, 3);

        return q;
}

static struct inet_frag_queue *inet_frag_create(struct fqdir *fqdir,
                                                void *arg,
                                                struct inet_frag_queue **prev)
{
        struct inet_frags *f = fqdir->f;
        struct inet_frag_queue *q;

        q = inet_frag_alloc(fqdir, f, arg);
        if (!q) {
                *prev = ERR_PTR(-ENOMEM);
                return NULL;
        }
        mod_timer(&q->timer, jiffies + fqdir->timeout);

        *prev = rhashtable_lookup_get_insert_key(&fqdir->rhashtable, &q->key,
                                                 &q->node, f->rhash_params);
        if (*prev) {
                q->flags |= INET_FRAG_COMPLETE;
                inet_frag_kill(q);
                inet_frag_destroy(q);
                return NULL;
        }
        return q;
}

/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key)
{
        /* This pairs with WRITE_ONCE() in fqdir_pre_exit(). */
        long high_thresh = READ_ONCE(fqdir->high_thresh);
        struct inet_frag_queue *fq = NULL, *prev;

        if (!high_thresh || frag_mem_limit(fqdir) > high_thresh)
                return NULL;

        rcu_read_lock();

        prev = rhashtable_lookup(&fqdir->rhashtable, key, fqdir->f->rhash_params);
        if (!prev)
                fq = inet_frag_create(fqdir, key, &prev);
        if (!IS_ERR_OR_NULL(prev)) {
                fq = prev;
                if (!refcount_inc_not_zero(&fq->refcnt))
                        fq = NULL;
        }
        rcu_read_unlock();
        return fq;
}
EXPORT_SYMBOL(inet_frag_find);
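
/* Caller sketch (cf. ip_find() in net/ipv4/ip_fragment.c): look the queue
 * up by a protocol-specific key, and drop the reference taken here with
 * inet_frag_put() when done:
 *
 *	q = inet_frag_find(net->ipv4.fqdir, &key);
 *	if (q) {
 *		// lock q->lock, insert fragment, attempt reassembly
 *		inet_frag_put(q);
 *	}
 */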

int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
                           int offset, int end)
{
        struct sk_buff *last = q->fragments_tail;

        /* RFC5722, Section 4, amended by Errata ID : 3089
         * When reassembling an IPv6 datagram, if
         * one or more of its constituent fragments is determined to be an
         * overlapping fragment, the entire datagram (and any constituent
         * fragments) MUST be silently discarded.
         *
         * Duplicates, however, should be ignored (i.e. skb dropped, but the
         * queue/fragments kept for later reassembly).
         */
        if (!last)
                fragrun_create(q, skb);  /* First fragment. */
        else if (FRAG_CB(last)->ip_defrag_offset + last->len < end) {
                /* This is the common case: skb goes to the end. */
                /* Detect and discard overlaps. */
                if (offset < FRAG_CB(last)->ip_defrag_offset + last->len)
                        return IPFRAG_OVERLAP;
                if (offset == FRAG_CB(last)->ip_defrag_offset + last->len)
                        fragrun_append_to_last(q, skb);
                else
                        fragrun_create(q, skb);
        } else {
                /* Binary search. Note that skb can become the first fragment,
                 * but not the last (covered above).
                 */
                struct rb_node **rbn, *parent;

                rbn = &q->rb_fragments.rb_node;
                do {
                        struct sk_buff *curr;
                        int curr_run_end;

                        parent = *rbn;
                        curr = rb_to_skb(parent);
                        curr_run_end = FRAG_CB(curr)->ip_defrag_offset +
                                       FRAG_CB(curr)->frag_run_len;
                        if (end <= FRAG_CB(curr)->ip_defrag_offset)
                                rbn = &parent->rb_left;
                        else if (offset >= curr_run_end)
                                rbn = &parent->rb_right;
                        else if (offset >= FRAG_CB(curr)->ip_defrag_offset &&
                                 end <= curr_run_end)
                                return IPFRAG_DUP;
                        else
                                return IPFRAG_OVERLAP;
                } while (*rbn);
                /* Here we have parent properly set, and rbn pointing to
                 * one of its NULL left/right children. Insert skb.
                 */
                fragcb_clear(skb);
                rb_link_node(&skb->rbnode, parent, rbn);
                rb_insert_color(&skb->rbnode, &q->rb_fragments);
        }

        FRAG_CB(skb)->ip_defrag_offset = offset;

        return IPFRAG_OK;
}
EXPORT_SYMBOL(inet_frag_queue_insert);

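/* First half of reassembly: make @skb the head of the queue (morphing it
 * into the first fragment if needed, so the caller's skb is the one handed
 * back to the stack), unclone it, and split off a clone holding the paged
 * frag_list if the head is itself fragmented. Returns the location where
 * the remaining fragments should be chained, or NULL on allocation failure.
 */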
void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
                              struct sk_buff *parent)
{
        struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
        void (*destructor)(struct sk_buff *);
        unsigned int orig_truesize = 0;
        struct sk_buff **nextp = NULL;
        struct sock *sk = skb->sk;
        int delta;

        if (sk && is_skb_wmem(skb)) {
                /* TX: skb->sk might have been passed as argument to
                 * dst->output and must remain valid until tx completes.
                 *
                 * Move sk to reassembled skb and fix up wmem accounting.
                 */
                orig_truesize = skb->truesize;
                destructor = skb->destructor;
        }

        if (head != skb) {
                fp = skb_clone(skb, GFP_ATOMIC);
                if (!fp) {
                        head = skb;
                        goto out_restore_sk;
                }
                FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
                if (RB_EMPTY_NODE(&skb->rbnode))
                        FRAG_CB(parent)->next_frag = fp;
                else
                        rb_replace_node(&skb->rbnode, &fp->rbnode,
                                        &q->rb_fragments);
                if (q->fragments_tail == skb)
                        q->fragments_tail = fp;

                if (orig_truesize) {
                        /* prevent skb_morph from releasing sk */
                        skb->sk = NULL;
                        skb->destructor = NULL;
                }
                skb_morph(skb, head);
                FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
                rb_replace_node(&head->rbnode, &skb->rbnode,
                                &q->rb_fragments);
                consume_skb(head);
                head = skb;
        }
        WARN_ON(FRAG_CB(head)->ip_defrag_offset != 0);

        delta = -head->truesize;

        /* Head of list must not be cloned. */
        if (skb_unclone(head, GFP_ATOMIC))
                goto out_restore_sk;

        delta += head->truesize;
        if (delta)
                add_frag_mem_limit(q->fqdir, delta);

        /* If the first fragment is fragmented itself, we split
         * it to two chunks: the first with data and paged part
         * and the second, holding only fragments.
         */
        if (skb_has_frag_list(head)) {
                struct sk_buff *clone;
                int i, plen = 0;

                clone = alloc_skb(0, GFP_ATOMIC);
                if (!clone)
                        goto out_restore_sk;
                skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
                skb_frag_list_init(head);
                for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
                        plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
                clone->data_len = head->data_len - plen;
                clone->len = clone->data_len;
                head->truesize += clone->truesize;
                clone->csum = 0;
                clone->ip_summed = head->ip_summed;
                add_frag_mem_limit(q->fqdir, clone->truesize);
                skb_shinfo(head)->frag_list = clone;
                nextp = &clone->next;
        } else {
                nextp = &skb_shinfo(head)->frag_list;
        }

out_restore_sk:
        if (orig_truesize) {
                int ts_delta = head->truesize - orig_truesize;

                /* if this reassembled skb is fragmented later,
                 * fraglist skbs will get skb->sk assigned from head->sk,
                 * and each frag skb will be released via sock_wfree.
                 *
                 * Update sk_wmem_alloc.
                 */
                head->sk = sk;
                head->destructor = destructor;
                refcount_add(ts_delta, &sk->sk_wmem_alloc);
        }

        return nextp;
}
EXPORT_SYMBOL(inet_frag_reasm_prepare);

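/* Second half of reassembly: walk the runs in offset order, chaining every
 * remaining fragment onto @head's frag_list (or coalescing it into @head
 * when possible) and fixing up length, checksum and memory accounting.
 */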
void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
                            void *reasm_data, bool try_coalesce)
{
        struct sock *sk = is_skb_wmem(head) ? head->sk : NULL;
        const unsigned int head_truesize = head->truesize;
        struct sk_buff **nextp = reasm_data;
        struct rb_node *rbn;
        struct sk_buff *fp;
        int sum_truesize;

        skb_push(head, head->data - skb_network_header(head));

        /* Traverse the tree in order, to build frag_list. */
        fp = FRAG_CB(head)->next_frag;
        rbn = rb_next(&head->rbnode);
        rb_erase(&head->rbnode, &q->rb_fragments);

        sum_truesize = head->truesize;
        while (rbn || fp) {
                /* fp points to the next sk_buff in the current run;
                 * rbn points to the next run.
                 */
                /* Go through the current run. */
                while (fp) {
                        struct sk_buff *next_frag = FRAG_CB(fp)->next_frag;
                        bool stolen;
                        int delta;

                        sum_truesize += fp->truesize;
                        if (head->ip_summed != fp->ip_summed)
                                head->ip_summed = CHECKSUM_NONE;
                        else if (head->ip_summed == CHECKSUM_COMPLETE)
                                head->csum = csum_add(head->csum, fp->csum);

                        if (try_coalesce && skb_try_coalesce(head, fp, &stolen,
                                                             &delta)) {
                                kfree_skb_partial(fp, stolen);
                        } else {
                                fp->prev = NULL;
                                memset(&fp->rbnode, 0, sizeof(fp->rbnode));
                                fp->sk = NULL;

                                head->data_len += fp->len;
                                head->len += fp->len;
                                head->truesize += fp->truesize;

                                *nextp = fp;
                                nextp = &fp->next;
                        }

                        fp = next_frag;
                }
                /* Move to the next run. */
                if (rbn) {
                        struct rb_node *rbnext = rb_next(rbn);

                        fp = rb_to_skb(rbn);
                        rb_erase(rbn, &q->rb_fragments);
                        rbn = rbnext;
                }
        }
        sub_frag_mem_limit(q->fqdir, sum_truesize);

        *nextp = NULL;
        skb_mark_not_on_list(head);
        head->prev = NULL;
        head->tstamp = q->stamp;
        head->tstamp_type = q->tstamp_type;

        if (sk)
                refcount_add(sum_truesize - head_truesize, &sk->sk_wmem_alloc);
}
EXPORT_SYMBOL(inet_frag_reasm_finish);

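/* Detach and return the first fragment of the queue, promoting the second
 * skb of its run (if any) to run head, and release its truesize from the
 * fqdir accounting.
 */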
struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
{
        struct sk_buff *head, *skb;

        head = skb_rb_first(&q->rb_fragments);
        if (!head)
                return NULL;
        skb = FRAG_CB(head)->next_frag;
        if (skb)
                rb_replace_node(&head->rbnode, &skb->rbnode,
                                &q->rb_fragments);
        else
                rb_erase(&head->rbnode, &q->rb_fragments);
        memset(&head->rbnode, 0, sizeof(head->rbnode));
        barrier();

        if (head == q->fragments_tail)
                q->fragments_tail = NULL;

        sub_frag_mem_limit(q->fqdir, head->truesize);

        return head;
}
EXPORT_SYMBOL(inet_frag_pull_head);