/*
 *		inet fragments management
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>

/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 * Value : 0xff if frame should be dropped.
 *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);
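
/*
 * Editorial sketch (not part of the original file): roughly how a reassembly
 * path is expected to consume ip_frag_ecn_table.  Each fragment contributes
 * one IPFRAG_ECN_* bit derived from its ECN codepoint; the bits are OR-ed
 * into a per-queue accumulator, and the table maps that OR value either to a
 * CE mark for the rebuilt header or to 0xff, meaning the whole datagram must
 * be dropped.  The helpers below are hypothetical, not part of this file.
 */
#if 0
static u8 frag_ecn_bit(u8 tos)
{
	/* One of the IPFRAG_ECN_* bits, indexed by the two ECN bits of tos. */
	return 1 << (tos & INET_ECN_MASK);
}

static int frag_apply_ecn(u8 accumulated_ecn, struct iphdr *iph)
{
	u8 ecn = ip_frag_ecn_table[accumulated_ecn];

	if (unlikely(ecn == 0xff))
		return -EINVAL;		/* invalid mix of codepoints: drop */
	iph->tos |= ecn;		/* 0, or INET_ECN_CE */
	return 0;
}
#endif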

static void inet_frag_secret_rebuild(unsigned long dummy)
{
	struct inet_frags *f = (struct inet_frags *)dummy;
	unsigned long now = jiffies;
	int i;

	/* Per bucket lock NOT needed here, due to write lock protection */
	write_lock(&f->lock);

	get_random_bytes(&f->rnd, sizeof(u32));
	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb;
		struct inet_frag_queue *q;
		struct hlist_node *n;

		hb = &f->hash[i];
		hlist_for_each_entry_safe(q, n, &hb->chain, list) {
			unsigned int hval = f->hashfn(q);

			if (hval != i) {
				struct inet_frag_bucket *hb_dest;

				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hb_dest = &f->hash[hval];
				hlist_add_head(&q->list, &hb_dest->chain);
			}
		}
	}
	write_unlock(&f->lock);

	mod_timer(&f->secret_timer, now + f->secret_interval);
}

void inet_frags_init(struct inet_frags *f)
{
	int i;

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb = &f->hash[i];

		spin_lock_init(&hb->chain_lock);
		INIT_HLIST_HEAD(&hb->chain);
	}
	rwlock_init(&f->lock);

	setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
			(unsigned long)f);
	f->secret_timer.expires = jiffies + f->secret_interval;
	add_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_init);

void inet_frags_init_net(struct netns_frags *nf)
{
	nf->nqueues = 0;
	init_frag_mem_limit(nf);
	INIT_LIST_HEAD(&nf->lru_list);
	spin_lock_init(&nf->lru_lock);
}
EXPORT_SYMBOL(inet_frags_init_net);
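
/*
 * Editorial sketch (not part of the original file): how a protocol was
 * expected to hook into this old hash-table based API.  At module init it
 * fills a struct inet_frags with its callbacks and calls inet_frags_init();
 * per namespace it sets the thresholds and calls inet_frags_init_net().
 * All names prefixed with "myproto_" are hypothetical stand-ins for e.g.
 * the ipv4 reassembly code.
 */
#if 0
static struct inet_frags myproto_frags;

static int __net_init myproto_frags_init_net(struct net *net)
{
	struct netns_frags *nf = &net->myproto.frags;	/* hypothetical field */

	nf->high_thresh = 4 * 1024 * 1024;
	nf->low_thresh = 3 * 1024 * 1024;
	nf->timeout = 30 * HZ;
	inet_frags_init_net(nf);
	return 0;
}

static void __init myproto_frag_init(void)
{
	myproto_frags.hashfn = myproto_hashfn;		/* hash a queue */
	myproto_frags.match = myproto_frag_match;	/* compare queue vs key */
	myproto_frags.constructor = myproto_frag_init_q;
	myproto_frags.destructor = myproto_frag_free;
	myproto_frags.skb_free = NULL;
	myproto_frags.qsize = sizeof(struct myproto_frag_queue);
	myproto_frags.frag_expire = myproto_frag_expire;
	myproto_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&myproto_frags);
}
#endif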

void inet_frags_fini(struct inet_frags *f)
{
	del_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_fini);

void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	nf->low_thresh = 0;

	local_bh_disable();
	inet_frag_evictor(nf, f, true);
	local_bh_enable();

	percpu_counter_destroy(&nf->mem);
}
EXPORT_SYMBOL(inet_frags_exit_net);

static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	struct inet_frag_bucket *hb;
	unsigned int hash;

	read_lock(&f->lock);
	hash = f->hashfn(fq);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_del(&fq->list);
	spin_unlock(&hb->chain_lock);

	read_unlock(&f->lock);
	inet_frag_lru_del(fq);
}

void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->last_in & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);
		fq->last_in |= INET_FRAG_COMPLETE;
	}
}
EXPORT_SYMBOL(inet_frag_kill);

static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
		struct sk_buff *skb)
{
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}

void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
					int *work)
{
	struct sk_buff *fp;
	struct netns_frags *nf;
	unsigned int sum, sum_truesize = 0;

	WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		frag_kfree_skb(nf, f, fp);
		fp = xp;
	}
	sum = sum_truesize + f->qsize;
	if (work)
		*work -= sum;
	sub_frag_mem_limit(q, sum);

	if (f->destructor)
		f->destructor(q);
	kfree(q);

}
EXPORT_SYMBOL(inet_frag_destroy);

int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
{
	struct inet_frag_queue *q;
	int work, evicted = 0;

	if (!force) {
		if (frag_mem_limit(nf) <= nf->high_thresh)
			return 0;
	}

	work = frag_mem_limit(nf) - nf->low_thresh;
	while (work > 0 || force) {
		spin_lock(&nf->lru_lock);

		if (list_empty(&nf->lru_list)) {
			spin_unlock(&nf->lru_lock);
			break;
		}

		q = list_first_entry(&nf->lru_list,
				struct inet_frag_queue, lru_list);
		atomic_inc(&q->refcnt);
		/* Remove q from list to avoid several CPUs grabbing it */
		list_del_init(&q->lru_list);

		spin_unlock(&nf->lru_lock);

		spin_lock(&q->lock);
		if (!(q->last_in & INET_FRAG_COMPLETE))
			inet_frag_kill(q, f);
		spin_unlock(&q->lock);

		if (atomic_dec_and_test(&q->refcnt))
			inet_frag_destroy(q, f, &work);
		evicted++;
	}

	return evicted;
}
EXPORT_SYMBOL(inet_frag_evictor);
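
/*
 * Editorial sketch (not part of the original file): callers of this era would
 * run the evictor when memory accounting crossed the high threshold, e.g.
 * before queueing a new fragment, and fold the count into their MIB
 * statistics.  The names below are hypothetical.
 */
#if 0
static void myproto_evict(struct net *net)
{
	int evicted;

	evicted = inet_frag_evictor(&net->myproto.frags, &myproto_frags, false);
	if (evicted)
		MYPROTO_ADD_STATS(net, MYPROTO_MIB_REASMFAILS, evicted);
}
#endif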

static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
		struct inet_frag_queue *qp_in, struct inet_frags *f,
		void *arg)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *qp;
	unsigned int hash;

	read_lock(&f->lock); /* Protects against hash rebuild */
	/*
	 * While we were without the lock, another CPU could have updated
	 * the rnd seed, so we need to re-calculate the hash
	 * chain. Fortunately the qp_in can be used to get one.
	 */
	hash = f->hashfn(qp_in);
	hb = &f->hash[hash];
	spin_lock(&hb->chain_lock);

#ifdef CONFIG_SMP
	/* With SMP race we have to recheck hash table, because
	 * such entry could have been created on another cpu while we
	 * released the hash bucket lock.
	 */
	hlist_for_each_entry(qp, &hb->chain, list) {
		if (qp->net == nf && f->match(qp, arg)) {
			atomic_inc(&qp->refcnt);
			spin_unlock(&hb->chain_lock);
			read_unlock(&f->lock);
			qp_in->last_in |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		atomic_inc(&qp->refcnt);

	atomic_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &hb->chain);
	inet_frag_lru_add(nf, qp);
	spin_unlock(&hb->chain_lock);
	read_unlock(&f->lock);

	return qp;
}

static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = kzalloc(f->qsize, GFP_ATOMIC);
	if (q == NULL)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	add_frag_mem_limit(q, f->qsize);

	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
	atomic_set(&q->refcnt, 1);
	INIT_LIST_HEAD(&q->lru_list);

	return q;
}

static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (q == NULL)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}

struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *q;
	int depth = 0;

	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_for_each_entry(q, &hb->chain, list) {
		if (q->net == nf && f->match(q, key)) {
			atomic_inc(&q->refcnt);
			spin_unlock(&hb->chain_lock);
			read_unlock(&f->lock);
			return q;
		}
		depth++;
	}
	spin_unlock(&hb->chain_lock);
	read_unlock(&f->lock);

	if (depth <= INETFRAGS_MAXDEPTH)
		return inet_frag_create(nf, f, key);
	else
		return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);
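
/*
 * Editorial sketch (not part of the original file): the old lookup contract.
 * The caller takes read_lock(&f->lock), computes the bucket hash under that
 * lock (the secret-rebuild timer may change f->rnd at any time), and
 * inet_frag_find() drops the lock on every path, as the __releases()
 * annotation above documents.  Names prefixed with "myproto_" are
 * hypothetical.
 */
#if 0
static struct myproto_frag_queue *myproto_find(struct net *net, void *key)
{
	struct inet_frag_queue *q;
	unsigned int hash;

	read_lock(&myproto_frags.lock);
	hash = myproto_hash_key(key);	/* must mix in myproto_frags.rnd */

	q = inet_frag_find(&net->myproto.frags, &myproto_frags, key, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, "myproto: ");
		return NULL;
	}
	return container_of(q, struct myproto_frag_queue, q);
}
#endif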

void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix)
{
	static const char msg[] = "inet_frag_find: Fragment hash bucket"
		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
		". Dropping fragment.\n";

	if (PTR_ERR(q) == -ENOBUFS)
		LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *		inet fragments management
 *
 *		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/rhashtable.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/ipv6.h>

/* Use skb->cb to track consecutive/adjacent fragments coming at
 * the end of the queue. Nodes in the rb-tree queue will
 * contain "runs" of one or more adjacent fragments.
 *
 * Invariants:
 * - next_frag is NULL at the tail of a "run";
 * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
 */
struct ipfrag_skb_cb {
	union {
		struct inet_skb_parm	h4;
		struct inet6_skb_parm	h6;
	};
	struct sk_buff		*next_frag;
	int			frag_run_len;
};

#define FRAG_CB(skb)		((struct ipfrag_skb_cb *)((skb)->cb))

static void fragcb_clear(struct sk_buff *skb)
{
	RB_CLEAR_NODE(&skb->rbnode);
	FRAG_CB(skb)->next_frag = NULL;
	FRAG_CB(skb)->frag_run_len = skb->len;
}

/* Append skb to the last "run". */
static void fragrun_append_to_last(struct inet_frag_queue *q,
				   struct sk_buff *skb)
{
	fragcb_clear(skb);

	FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
	FRAG_CB(q->fragments_tail)->next_frag = skb;
	q->fragments_tail = skb;
}

/* Create a new "run" with the skb. */
static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
	fragcb_clear(skb);

	if (q->last_run_head)
		rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
			     &q->last_run_head->rbnode.rb_right);
	else
		rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
	rb_insert_color(&skb->rbnode, &q->rb_fragments);

	q->fragments_tail = skb;
	q->last_run_head = skb;
}

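/*
 * Editorial sketch (not part of the original file): with the two helpers
 * above, fragments are kept as "runs" - the rb-tree orders run heads by
 * offset, and fragments inside a run are chained through
 * FRAG_CB(skb)->next_frag.  A full in-order walk therefore looks like this
 * (illustrative only):
 */
#if 0
static void walk_all_fragments(struct inet_frag_queue *q)
{
	struct rb_node *rbn;

	for (rbn = rb_first(&q->rb_fragments); rbn; rbn = rb_next(rbn)) {
		/* rb_to_skb() maps an rb_node back to its run-head sk_buff */
		struct sk_buff *skb = rb_to_skb(rbn);

		for (; skb; skb = FRAG_CB(skb)->next_frag) {
			/* visit each adjacent fragment of this run */
		}
	}
}
#endif
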
/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 * Value : 0xff if frame should be dropped.
 *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);

int inet_frags_init(struct inet_frags *f)
{
	f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
					    NULL);
	if (!f->frags_cachep)
		return -ENOMEM;

	refcount_set(&f->refcnt, 1);
	init_completion(&f->completion);
	return 0;
}
EXPORT_SYMBOL(inet_frags_init);
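
/*
 * Editorial sketch (not part of the original file): in the rhashtable-based
 * API a protocol fills its struct inet_frags with callbacks, a kmem cache
 * name and rhashtable parameters keyed on inet_frag_queue::key, then calls
 * inet_frags_init() once at boot.  Everything prefixed with "myproto_" is a
 * hypothetical stand-in for e.g. the ipv4 reassembly code.
 */
#if 0
static const struct rhashtable_params myproto_rhash_params = {
	.head_offset		= offsetof(struct inet_frag_queue, node),
	.key_offset		= offsetof(struct inet_frag_queue, key),
	.key_len		= sizeof(struct myproto_compare_key),
	.automatic_shrinking	= true,
};

static struct inet_frags myproto_frags;

static int __init myproto_frag_init(void)
{
	myproto_frags.constructor = myproto_frag_init_q;
	myproto_frags.destructor = myproto_frag_free;
	myproto_frags.qsize = sizeof(struct myproto_frag_queue);
	myproto_frags.frag_expire = myproto_frag_expire;
	myproto_frags.frags_cache_name = "myproto_frag_cache";
	myproto_frags.rhash_params = myproto_rhash_params;
	return inet_frags_init(&myproto_frags);
}
#endif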

void inet_frags_fini(struct inet_frags *f)
{
	if (refcount_dec_and_test(&f->refcnt))
		complete(&f->completion);

	wait_for_completion(&f->completion);

	kmem_cache_destroy(f->frags_cachep);
	f->frags_cachep = NULL;
}
EXPORT_SYMBOL(inet_frags_fini);

/* called from rhashtable_free_and_destroy() at netns_frags dismantle */
static void inet_frags_free_cb(void *ptr, void *arg)
{
	struct inet_frag_queue *fq = ptr;
	int count;

	count = del_timer_sync(&fq->timer) ? 1 : 0;

	spin_lock_bh(&fq->lock);
	fq->flags |= INET_FRAG_DROP;
	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		fq->flags |= INET_FRAG_COMPLETE;
		count++;
	} else if (fq->flags & INET_FRAG_HASH_DEAD) {
		count++;
	}
	spin_unlock_bh(&fq->lock);

	if (refcount_sub_and_test(count, &fq->refcnt))
		inet_frag_destroy(fq);
}

static LLIST_HEAD(fqdir_free_list);

static void fqdir_free_fn(struct work_struct *work)
{
	struct llist_node *kill_list;
	struct fqdir *fqdir, *tmp;
	struct inet_frags *f;

	/* Atomically snapshot the list of fqdirs to free */
	kill_list = llist_del_all(&fqdir_free_list);

	/* We need to make sure all ongoing call_rcu(..., inet_frag_destroy_rcu)
	 * have completed, since they need to dereference fqdir.
	 * Would it not be nice to have kfree_rcu_barrier() ? :)
	 */
	rcu_barrier();

	llist_for_each_entry_safe(fqdir, tmp, kill_list, free_list) {
		f = fqdir->f;
		if (refcount_dec_and_test(&f->refcnt))
			complete(&f->completion);

		kfree(fqdir);
	}
}

static DECLARE_WORK(fqdir_free_work, fqdir_free_fn);

static void fqdir_work_fn(struct work_struct *work)
{
	struct fqdir *fqdir = container_of(work, struct fqdir, destroy_work);

	rhashtable_free_and_destroy(&fqdir->rhashtable, inet_frags_free_cb, NULL);

	if (llist_add(&fqdir->free_list, &fqdir_free_list))
		queue_work(system_wq, &fqdir_free_work);
}

int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net)
{
	struct fqdir *fqdir = kzalloc(sizeof(*fqdir), GFP_KERNEL);
	int res;

	if (!fqdir)
		return -ENOMEM;
	fqdir->f = f;
	fqdir->net = net;
	res = rhashtable_init(&fqdir->rhashtable, &fqdir->f->rhash_params);
	if (res < 0) {
		kfree(fqdir);
		return res;
	}
	refcount_inc(&f->refcnt);
	*fqdirp = fqdir;
	return 0;
}
EXPORT_SYMBOL(fqdir_init);
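
/*
 * Editorial sketch (not part of the original file): expected per-namespace
 * usage.  Each netns allocates its own fqdir at init time, tunes the
 * thresholds and timeout on it, calls fqdir_pre_exit() in its pre_exit hook
 * and fqdir_exit() in its exit hook.  The "myproto_" names and the
 * net->myproto.fqdir field are hypothetical.
 */
#if 0
static int __net_init myproto_frags_init_net(struct net *net)
{
	int res = fqdir_init(&net->myproto.fqdir, &myproto_frags, net);

	if (res < 0)
		return res;
	net->myproto.fqdir->high_thresh = 4 * 1024 * 1024;
	net->myproto.fqdir->low_thresh = 3 * 1024 * 1024;
	net->myproto.fqdir->timeout = 30 * HZ;
	return 0;
}

static void __net_exit myproto_frags_pre_exit_net(struct net *net)
{
	fqdir_pre_exit(net->myproto.fqdir);	/* stop accepting new frags */
}

static void __net_exit myproto_frags_exit_net(struct net *net)
{
	fqdir_exit(net->myproto.fqdir);		/* async teardown, see above */
}
#endif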

static struct workqueue_struct *inet_frag_wq;

static int __init inet_frag_wq_init(void)
{
	inet_frag_wq = create_workqueue("inet_frag_wq");
	if (!inet_frag_wq)
		panic("Could not create inet frag workq");
	return 0;
}

pure_initcall(inet_frag_wq_init);

void fqdir_exit(struct fqdir *fqdir)
{
	INIT_WORK(&fqdir->destroy_work, fqdir_work_fn);
	queue_work(inet_frag_wq, &fqdir->destroy_work);
}
EXPORT_SYMBOL(fqdir_exit);

void inet_frag_kill(struct inet_frag_queue *fq)
{
	if (del_timer(&fq->timer))
		refcount_dec(&fq->refcnt);

	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		struct fqdir *fqdir = fq->fqdir;

		fq->flags |= INET_FRAG_COMPLETE;
		rcu_read_lock();
		/* The RCU read lock provides a memory barrier
		 * guaranteeing that if fqdir->dead is false then
		 * the hash table destruction will not start until
		 * after we unlock. Paired with fqdir_pre_exit().
		 */
		if (!READ_ONCE(fqdir->dead)) {
			rhashtable_remove_fast(&fqdir->rhashtable, &fq->node,
					       fqdir->f->rhash_params);
			refcount_dec(&fq->refcnt);
		} else {
			fq->flags |= INET_FRAG_HASH_DEAD;
		}
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(inet_frag_kill);

static void inet_frag_destroy_rcu(struct rcu_head *head)
{
	struct inet_frag_queue *q = container_of(head, struct inet_frag_queue,
						 rcu);
	struct inet_frags *f = q->fqdir->f;

	if (f->destructor)
		f->destructor(q);
	kmem_cache_free(f->frags_cachep, q);
}

unsigned int inet_frag_rbtree_purge(struct rb_root *root,
				    enum skb_drop_reason reason)
{
	struct rb_node *p = rb_first(root);
	unsigned int sum = 0;

	while (p) {
		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);

		p = rb_next(p);
		rb_erase(&skb->rbnode, root);
		while (skb) {
			struct sk_buff *next = FRAG_CB(skb)->next_frag;

			sum += skb->truesize;
			kfree_skb_reason(skb, reason);
			skb = next;
		}
	}
	return sum;
}
EXPORT_SYMBOL(inet_frag_rbtree_purge);

void inet_frag_destroy(struct inet_frag_queue *q)
{
	unsigned int sum, sum_truesize = 0;
	enum skb_drop_reason reason;
	struct inet_frags *f;
	struct fqdir *fqdir;

	WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
	reason = (q->flags & INET_FRAG_DROP) ?
			SKB_DROP_REASON_FRAG_REASM_TIMEOUT :
			SKB_CONSUMED;
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fqdir = q->fqdir;
	f = fqdir->f;
	sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments, reason);
	sum = sum_truesize + f->qsize;

	call_rcu(&q->rcu, inet_frag_destroy_rcu);

	sub_frag_mem_limit(fqdir, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);

static struct inet_frag_queue *inet_frag_alloc(struct fqdir *fqdir,
					       struct inet_frags *f,
					       void *arg)
{
	struct inet_frag_queue *q;

	q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
	if (!q)
		return NULL;

	q->fqdir = fqdir;
	f->constructor(q, arg);
	add_frag_mem_limit(fqdir, f->qsize);

	timer_setup(&q->timer, f->frag_expire, 0);
	spin_lock_init(&q->lock);
	/* One reference for the hash table, one for the timer, and one
	 * returned to the caller via inet_frag_find().
	 */
	refcount_set(&q->refcnt, 3);

	return q;
}

static struct inet_frag_queue *inet_frag_create(struct fqdir *fqdir,
						void *arg,
						struct inet_frag_queue **prev)
{
	struct inet_frags *f = fqdir->f;
	struct inet_frag_queue *q;

	q = inet_frag_alloc(fqdir, f, arg);
	if (!q) {
		*prev = ERR_PTR(-ENOMEM);
		return NULL;
	}
	mod_timer(&q->timer, jiffies + fqdir->timeout);

	*prev = rhashtable_lookup_get_insert_key(&fqdir->rhashtable, &q->key,
						 &q->node, f->rhash_params);
	if (*prev) {
		q->flags |= INET_FRAG_COMPLETE;
		inet_frag_kill(q);
		inet_frag_destroy(q);
		return NULL;
	}
	return q;
}

/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key)
{
	/* This pairs with WRITE_ONCE() in fqdir_pre_exit(). */
	long high_thresh = READ_ONCE(fqdir->high_thresh);
	struct inet_frag_queue *fq = NULL, *prev;

	if (!high_thresh || frag_mem_limit(fqdir) > high_thresh)
		return NULL;

	rcu_read_lock();

	prev = rhashtable_lookup(&fqdir->rhashtable, key, fqdir->f->rhash_params);
	if (!prev)
		fq = inet_frag_create(fqdir, key, &prev);
	if (!IS_ERR_OR_NULL(prev)) {
		fq = prev;
		if (!refcount_inc_not_zero(&fq->refcnt))
			fq = NULL;
	}
	rcu_read_unlock();
	return fq;
}
EXPORT_SYMBOL(inet_frag_find);
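
/*
 * Editorial sketch (not part of the original file): a typical caller builds
 * the protocol's compare key (laid out so it matches inet_frag_queue::key per
 * its rhash_params), calls inet_frag_find(), and gets back a queue with an
 * elevated refcount that it later drops with inet_frag_put().  The "myproto_"
 * names are hypothetical.
 */
#if 0
static struct myproto_frag_queue *myproto_find(struct net *net, void *hdr)
{
	struct myproto_compare_key key;
	struct inet_frag_queue *q;

	myproto_fill_key(&key, hdr);	/* saddr/daddr/id/..., hypothetical */

	q = inet_frag_find(net->myproto.fqdir, &key);
	if (!q)
		return NULL;		/* over limit, OOM, or dying queue */

	return container_of(q, struct myproto_frag_queue, q);
}
#endif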

int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
			   int offset, int end)
{
	struct sk_buff *last = q->fragments_tail;

	/* RFC5722, Section 4, amended by Errata ID : 3089
	 *	When reassembling an IPv6 datagram, if
	 *	one or more of its constituent fragments is determined to be an
	 *	overlapping fragment, the entire datagram (and any constituent
	 *	fragments) MUST be silently discarded.
	 *
	 * Duplicates, however, should be ignored (i.e. skb dropped, but the
	 * queue/fragments kept for later reassembly).
	 */
	if (!last)
		fragrun_create(q, skb);  /* First fragment. */
	else if (last->ip_defrag_offset + last->len < end) {
		/* This is the common case: skb goes to the end. */
		/* Detect and discard overlaps. */
		if (offset < last->ip_defrag_offset + last->len)
			return IPFRAG_OVERLAP;
		if (offset == last->ip_defrag_offset + last->len)
			fragrun_append_to_last(q, skb);
		else
			fragrun_create(q, skb);
	} else {
		/* Binary search. Note that skb can become the first fragment,
		 * but not the last (covered above).
		 */
		struct rb_node **rbn, *parent;

		rbn = &q->rb_fragments.rb_node;
		do {
			struct sk_buff *curr;
			int curr_run_end;

			parent = *rbn;
			curr = rb_to_skb(parent);
			curr_run_end = curr->ip_defrag_offset +
					FRAG_CB(curr)->frag_run_len;
			if (end <= curr->ip_defrag_offset)
				rbn = &parent->rb_left;
			else if (offset >= curr_run_end)
				rbn = &parent->rb_right;
			else if (offset >= curr->ip_defrag_offset &&
				 end <= curr_run_end)
				return IPFRAG_DUP;
			else
				return IPFRAG_OVERLAP;
		} while (*rbn);
		/* Here we have parent properly set, and rbn pointing to
		 * one of its NULL left/right children. Insert skb.
		 */
		fragcb_clear(skb);
		rb_link_node(&skb->rbnode, parent, rbn);
		rb_insert_color(&skb->rbnode, &q->rb_fragments);
	}

	skb->ip_defrag_offset = offset;

	return IPFRAG_OK;
}
EXPORT_SYMBOL(inet_frag_queue_insert);
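
/*
 * Editorial sketch (not part of the original file): how a caller is expected
 * to act on the three return codes.  IPFRAG_OK keeps going, IPFRAG_DUP
 * silently drops only the duplicate skb, and IPFRAG_OVERLAP kills the whole
 * queue per the RFC 5722 rule quoted above.  Locking, statistics and the
 * surrounding reassembly steps are omitted; names are hypothetical.
 */
#if 0
static int myproto_frag_queue(struct myproto_frag_queue *fq,
			      struct sk_buff *skb, int offset, int end)
{
	int err = inet_frag_queue_insert(&fq->q, skb, offset, end);

	if (err == IPFRAG_DUP) {
		kfree_skb(skb);		/* duplicate: drop just this skb */
		return -EINVAL;
	}
	if (err == IPFRAG_OVERLAP) {
		inet_frag_kill(&fq->q);	/* overlap: discard the datagram */
		kfree_skb(skb);
		return -EINVAL;
	}
	/* IPFRAG_OK: account the fragment, check whether reassembly is done. */
	return 0;
}
#endif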

void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
			      struct sk_buff *parent)
{
	struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
	struct sk_buff **nextp;
	int delta;

	if (head != skb) {
		fp = skb_clone(skb, GFP_ATOMIC);
		if (!fp)
			return NULL;
		FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
		if (RB_EMPTY_NODE(&skb->rbnode))
			FRAG_CB(parent)->next_frag = fp;
		else
			rb_replace_node(&skb->rbnode, &fp->rbnode,
					&q->rb_fragments);
		if (q->fragments_tail == skb)
			q->fragments_tail = fp;
		skb_morph(skb, head);
		FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
		rb_replace_node(&head->rbnode, &skb->rbnode,
				&q->rb_fragments);
		consume_skb(head);
		head = skb;
	}
	WARN_ON(head->ip_defrag_offset != 0);

	delta = -head->truesize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		return NULL;

	delta += head->truesize;
	if (delta)
		add_frag_mem_limit(q->fqdir, delta);

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			return NULL;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->data_len = head->data_len - plen;
		clone->len = clone->data_len;
		head->truesize += clone->truesize;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(q->fqdir, clone->truesize);
		skb_shinfo(head)->frag_list = clone;
		nextp = &clone->next;
	} else {
		nextp = &skb_shinfo(head)->frag_list;
	}

	return nextp;
}
EXPORT_SYMBOL(inet_frag_reasm_prepare);

void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
			    void *reasm_data, bool try_coalesce)
{
	struct sk_buff **nextp = reasm_data;
	struct rb_node *rbn;
	struct sk_buff *fp;
	int sum_truesize;

	skb_push(head, head->data - skb_network_header(head));

	/* Traverse the tree in order, to build frag_list. */
	fp = FRAG_CB(head)->next_frag;
	rbn = rb_next(&head->rbnode);
	rb_erase(&head->rbnode, &q->rb_fragments);

	sum_truesize = head->truesize;
	while (rbn || fp) {
		/* fp points to the next sk_buff in the current run;
		 * rbn points to the next run.
		 */
		/* Go through the current run. */
		while (fp) {
			struct sk_buff *next_frag = FRAG_CB(fp)->next_frag;
			bool stolen;
			int delta;

			sum_truesize += fp->truesize;
			if (head->ip_summed != fp->ip_summed)
				head->ip_summed = CHECKSUM_NONE;
			else if (head->ip_summed == CHECKSUM_COMPLETE)
				head->csum = csum_add(head->csum, fp->csum);

			if (try_coalesce && skb_try_coalesce(head, fp, &stolen,
							     &delta)) {
				kfree_skb_partial(fp, stolen);
			} else {
				fp->prev = NULL;
				memset(&fp->rbnode, 0, sizeof(fp->rbnode));
				fp->sk = NULL;

				head->data_len += fp->len;
				head->len += fp->len;
				head->truesize += fp->truesize;

				*nextp = fp;
				nextp = &fp->next;
			}

			fp = next_frag;
		}
		/* Move to the next run. */
		if (rbn) {
			struct rb_node *rbnext = rb_next(rbn);

			fp = rb_to_skb(rbn);
			rb_erase(rbn, &q->rb_fragments);
			rbn = rbnext;
		}
	}
	sub_frag_mem_limit(q->fqdir, sum_truesize);

	*nextp = NULL;
	skb_mark_not_on_list(head);
	head->prev = NULL;
	head->tstamp = q->stamp;
	head->mono_delivery_time = q->mono_delivery_time;
}
EXPORT_SYMBOL(inet_frag_reasm_finish);
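
/*
 * Editorial sketch (not part of the original file): the intended reassembly
 * sequence once the last hole is filled.  The caller records the run tail
 * that preceded the final fragment, prepares the head with
 * inet_frag_reasm_prepare(), rewrites its protocol header, links all
 * remaining fragments with inet_frag_reasm_finish(), and resets the queue.
 * Error handling and header rewriting are omitted; names are hypothetical.
 */
#if 0
static int myproto_reasm(struct myproto_frag_queue *fq, struct sk_buff *skb,
			 struct sk_buff *prev_tail)
{
	void *reasm_data;

	inet_frag_kill(&fq->q);			/* stop timer, unhash */

	reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
	if (!reasm_data)
		return -ENOMEM;

	/* ... rebuild the protocol header of skb here ... */

	inet_frag_reasm_finish(&fq->q, skb, reasm_data, true);

	fq->q.rb_fragments = RB_ROOT;
	fq->q.fragments_tail = NULL;
	fq->q.last_run_head = NULL;
	return 0;
}
#endif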

struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
{
	struct sk_buff *head, *skb;

	head = skb_rb_first(&q->rb_fragments);
	if (!head)
		return NULL;
	skb = FRAG_CB(head)->next_frag;
	if (skb)
		rb_replace_node(&head->rbnode, &skb->rbnode,
				&q->rb_fragments);
	else
		rb_erase(&head->rbnode, &q->rb_fragments);
	memset(&head->rbnode, 0, sizeof(head->rbnode));
	barrier();

	if (head == q->fragments_tail)
		q->fragments_tail = NULL;

	sub_frag_mem_limit(q->fqdir, head->truesize);

	return head;
}
EXPORT_SYMBOL(inet_frag_pull_head);
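
/*
 * Editorial sketch (not part of the original file): a typical use of
 * inet_frag_pull_head() is the expiry path, which wants to report the first
 * fragment back to the sender.  Because sk_buff::rbnode shares space with
 * other fields, the head must be detached from the rb-tree before those
 * fields are touched.  Names and the notification step are hypothetical.
 */
#if 0
static void myproto_frag_expire(struct timer_list *t)
{
	struct inet_frag_queue *q = from_timer(q, t, timer);
	struct sk_buff *head;

	spin_lock(&q->lock);
	inet_frag_kill(q);
	head = inet_frag_pull_head(q);
	spin_unlock(&q->lock);

	if (head) {
		/* ... notify the sender using "head" ... */
		kfree_skb(head);
	}
	inet_frag_put(q);	/* drop the timer's reference */
}
#endif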