v3.15
 
/*
 * inet fragments management
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * 		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly, and ipv6 nf conntrack reassembly
 */
 13
 14#include <linux/list.h>
 15#include <linux/spinlock.h>
 16#include <linux/module.h>
 17#include <linux/timer.h>
 18#include <linux/mm.h>
 19#include <linux/random.h>
 20#include <linux/skbuff.h>
 21#include <linux/rtnetlink.h>
 22#include <linux/slab.h>
 
 23
 24#include <net/sock.h>
 25#include <net/inet_frag.h>
 26#include <net/inet_ecn.h>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 27
/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 * Value : 0xff if frame should be dropped.
 *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);
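
/* Usage sketch (illustrative, not part of this file): a reassembler ORs
 * together one IPFRAG_ECN_* bit per fragment, then indexes the table once
 * at reassembly time. Modeled on ipv4/ip_fragment.c, where "struct ipq"
 * carries an "ecn" accumulator; treat the names as an example.
 */
static u8 example_frag_ecn(u8 tos)
{
	return 1 << (tos & INET_ECN_MASK);	/* NOT_ECT/ECT_1/ECT_0/CE bit */
}

static int example_finish_ecn(struct ipq *qp, struct iphdr *iph)
{
	u8 ecn = ip_frag_ecn_table[qp->ecn];	/* qp->ecn = OR of all frags */

	if (unlikely(ecn == 0xff))
		return -EINVAL;			/* invalid mix: drop datagram */
	iph->tos |= ecn;			/* 0 or INET_ECN_CE */
	return 0;
}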

static void inet_frag_secret_rebuild(unsigned long dummy)
{
	struct inet_frags *f = (struct inet_frags *)dummy;
	unsigned long now = jiffies;
	int i;

	/* Per bucket lock NOT needed here, due to write lock protection */
	write_lock(&f->lock);

	get_random_bytes(&f->rnd, sizeof(u32));
	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb;
		struct inet_frag_queue *q;
		struct hlist_node *n;

		hb = &f->hash[i];
		hlist_for_each_entry_safe(q, n, &hb->chain, list) {
			unsigned int hval = f->hashfn(q);

			if (hval != i) {
				struct inet_frag_bucket *hb_dest;

				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hb_dest = &f->hash[hval];
				hlist_add_head(&q->list, &hb_dest->chain);
			}
		}
	}
	write_unlock(&f->lock);

	mod_timer(&f->secret_timer, now + f->secret_interval);
}

void inet_frags_init(struct inet_frags *f)
{
	int i;

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb = &f->hash[i];

		spin_lock_init(&hb->chain_lock);
		INIT_HLIST_HEAD(&hb->chain);
	}
	rwlock_init(&f->lock);

	setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
			(unsigned long)f);
	f->secret_timer.expires = jiffies + f->secret_interval;
	add_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_init);
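
/* Registration sketch (illustrative, not part of this file): how a protocol
 * wires up its callbacks before calling inet_frags_init(). The field values
 * mirror what ipv4/ip_fragment.c does in this era; treat them as an example,
 * not a specification.
 */
static void __init example_ipfrag_init(void)
{
	ip4_frags.hashfn = ip4_hashfn;		/* bucket selection */
	ip4_frags.constructor = ip4_frag_init;	/* init protocol part of queue */
	ip4_frags.destructor = ip4_frag_free;
	ip4_frags.skb_free = NULL;
	ip4_frags.qsize = sizeof(struct ipq);
	ip4_frags.match = ip4_frag_match;	/* key comparison on lookup */
	ip4_frags.frag_expire = ip_expire;	/* per-queue timer handler */
	ip4_frags.secret_interval = 10 * 60 * HZ; /* periodic hash reseed */
	inet_frags_init(&ip4_frags);
}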

void inet_frags_init_net(struct netns_frags *nf)
{
	nf->nqueues = 0;
	init_frag_mem_limit(nf);
	INIT_LIST_HEAD(&nf->lru_list);
	spin_lock_init(&nf->lru_lock);
}
EXPORT_SYMBOL(inet_frags_init_net);

void inet_frags_fini(struct inet_frags *f)
{
	del_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_fini);

void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	nf->low_thresh = 0;

	local_bh_disable();
	inet_frag_evictor(nf, f, true);
	local_bh_enable();

	percpu_counter_destroy(&nf->mem);
}
EXPORT_SYMBOL(inet_frags_exit_net);

static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	struct inet_frag_bucket *hb;
	unsigned int hash;

	read_lock(&f->lock);
	hash = f->hashfn(fq);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_del(&fq->list);
	spin_unlock(&hb->chain_lock);

	read_unlock(&f->lock);
	inet_frag_lru_del(fq);
}

void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->last_in & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);
		fq->last_in |= INET_FRAG_COMPLETE;
	}
}
EXPORT_SYMBOL(inet_frag_kill);

static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
		struct sk_buff *skb)
{
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}

void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
					int *work)
{
	struct sk_buff *fp;
	struct netns_frags *nf;
	unsigned int sum, sum_truesize = 0;

	WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		frag_kfree_skb(nf, f, fp);
		fp = xp;
	}
	sum = sum_truesize + f->qsize;
	if (work)
		*work -= sum;
	sub_frag_mem_limit(q, sum);

	if (f->destructor)
		f->destructor(q);
	kfree(q);
}
EXPORT_SYMBOL(inet_frag_destroy);

int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
{
	struct inet_frag_queue *q;
	int work, evicted = 0;

	if (!force) {
		if (frag_mem_limit(nf) <= nf->high_thresh)
			return 0;
	}

	work = frag_mem_limit(nf) - nf->low_thresh;
	while (work > 0 || force) {
		spin_lock(&nf->lru_lock);

		if (list_empty(&nf->lru_list)) {
			spin_unlock(&nf->lru_lock);
			break;
		}

		q = list_first_entry(&nf->lru_list,
				struct inet_frag_queue, lru_list);
		atomic_inc(&q->refcnt);
		/* Remove q from list to avoid several CPUs grabbing it */
		list_del_init(&q->lru_list);

		spin_unlock(&nf->lru_lock);

		spin_lock(&q->lock);
		if (!(q->last_in & INET_FRAG_COMPLETE))
			inet_frag_kill(q, f);
		spin_unlock(&q->lock);

		if (atomic_dec_and_test(&q->refcnt))
			inet_frag_destroy(q, f, &work);
		evicted++;
	}

	return evicted;
}
EXPORT_SYMBOL(inet_frag_evictor);
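
/* Caller sketch (illustrative, not part of this file): ipv4 triggers the
 * evictor from the receive path once memory crosses the high threshold;
 * "force" is only used at netns teardown (see inet_frags_exit_net() above).
 * Modeled on ip_evictor() in ipv4/ip_fragment.c of this era.
 */
static void example_ip_evictor(struct net *net)
{
	int evicted;

	evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags, false);
	if (evicted)
		IP_ADD_STATS_BH(net, IPSTATS_MIB_REASMFAILS, evicted);
}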

static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
		struct inet_frag_queue *qp_in, struct inet_frags *f,
		void *arg)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *qp;
	unsigned int hash;

	read_lock(&f->lock); /* Protects against hash rebuild */
	/*
	 * While we stayed w/o the lock another CPU could update
	 * the rnd seed, so we need to re-calculate the hash
	 * chain. Fortunately the qp_in can be used to get one.
	 */
	hash = f->hashfn(qp_in);
	hb = &f->hash[hash];
	spin_lock(&hb->chain_lock);

#ifdef CONFIG_SMP
	/* With SMP race we have to recheck hash table, because
	 * such entry could have been created on another cpu while we
	 * released the hash bucket lock.
	 */
	hlist_for_each_entry(qp, &hb->chain, list) {
		if (qp->net == nf && f->match(qp, arg)) {
			atomic_inc(&qp->refcnt);
			spin_unlock(&hb->chain_lock);
			read_unlock(&f->lock);
			qp_in->last_in |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		atomic_inc(&qp->refcnt);

	atomic_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &hb->chain);
	inet_frag_lru_add(nf, qp);
	spin_unlock(&hb->chain_lock);
	read_unlock(&f->lock);

	return qp;
}

static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = kzalloc(f->qsize, GFP_ATOMIC);
	if (q == NULL)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	add_frag_mem_limit(q, f->qsize);

	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
	atomic_set(&q->refcnt, 1);
	INIT_LIST_HEAD(&q->lru_list);

	return q;
}

static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (q == NULL)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}

struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *q;
	int depth = 0;

	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_for_each_entry(q, &hb->chain, list) {
		if (q->net == nf && f->match(q, key)) {
			atomic_inc(&q->refcnt);
			spin_unlock(&hb->chain_lock);
			read_unlock(&f->lock);
			return q;
		}
		depth++;
	}
	spin_unlock(&hb->chain_lock);
	read_unlock(&f->lock);

	if (depth <= INETFRAGS_MAXDEPTH)
		return inet_frag_create(nf, f, key);
	else
		return ERR_PTR(-ENOBUFS);
}
EXPORT_SYMBOL(inet_frag_find);

void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix)
{
	static const char msg[] = "inet_frag_find: Fragment hash bucket"
		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
		". Dropping fragment.\n";

	if (PTR_ERR(q) == -ENOBUFS)
		LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
}
EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
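
/* Lookup sketch (illustrative, not part of this file), modeled on ipv4's
 * ip_find(): the caller takes f->lock for reading, computes the hash under
 * that lock (the secret timer may reseed it), and inet_frag_find() drops
 * the lock on all paths.
 */
static struct ipq *example_ip_find(struct net *net, struct iphdr *iph, u32 user)
{
	struct inet_frag_queue *q;
	struct ip4_create_arg arg;
	unsigned int hash;

	arg.iph = iph;
	arg.user = user;

	read_lock(&ip4_frags.lock);
	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);

	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	return container_of(q, struct ipq, q);
}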
v6.13.7
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * inet fragments management
 *
 * 		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly, and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/rhashtable.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "../core/sock_destructor.h"

/* Use skb->cb to track consecutive/adjacent fragments coming at
 * the end of the queue. Nodes in the rb-tree queue will
 * contain "runs" of one or more adjacent fragments.
 *
 * Invariants:
 * - next_frag is NULL at the tail of a "run";
 * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
 */
struct ipfrag_skb_cb {
	union {
		struct inet_skb_parm	h4;
		struct inet6_skb_parm	h6;
	};
	struct sk_buff		*next_frag;
	int			frag_run_len;
	int			ip_defrag_offset;
};

#define FRAG_CB(skb)		((struct ipfrag_skb_cb *)((skb)->cb))

static void fragcb_clear(struct sk_buff *skb)
{
	RB_CLEAR_NODE(&skb->rbnode);
	FRAG_CB(skb)->next_frag = NULL;
	FRAG_CB(skb)->frag_run_len = skb->len;
}

/* Append skb to the last "run". */
static void fragrun_append_to_last(struct inet_frag_queue *q,
				   struct sk_buff *skb)
{
	fragcb_clear(skb);

	FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
	FRAG_CB(q->fragments_tail)->next_frag = skb;
	q->fragments_tail = skb;
}

/* Create a new "run" with the skb. */
static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
	fragcb_clear(skb);

	if (q->last_run_head)
		rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
			     &q->last_run_head->rbnode.rb_right);
	else
		rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
	rb_insert_color(&skb->rbnode, &q->rb_fragments);

	q->fragments_tail = skb;
	q->last_run_head = skb;
}
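
/* Worked example (illustrative): three in-order 1000-byte fragments at
 * offsets 0, 1000, 2000, followed by one at offset 5000, yield two runs;
 * only the run heads live in the rb-tree:
 *
 *	rb_fragments:  [run A: 0..2999]            [run B: 5000..5999]
 *	run A head:    frag_run_len = 3000, next_frag -> 1000 -> 2000 -> NULL
 *	run B head:    frag_run_len = 1000, next_frag = NULL
 *	fragments_tail = last_run_head = the offset-5000 skb
 */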

/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 * Value : 0xff if frame should be dropped.
 *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);

int inet_frags_init(struct inet_frags *f)
{
	f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
					    NULL);
	if (!f->frags_cachep)
		return -ENOMEM;

	refcount_set(&f->refcnt, 1);
	init_completion(&f->completion);
	return 0;
}
EXPORT_SYMBOL(inet_frags_init);
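
/* Registration sketch (illustrative, not part of this file): in this era a
 * protocol supplies rhashtable parameters and a slab name before calling
 * inet_frags_init(). Mirrors ipfrag_init() in ipv4/ip_fragment.c.
 */
static int __init example_ipfrag_init(void)
{
	ip4_frags.constructor = ip4_frag_init;
	ip4_frags.destructor = ip4_frag_free;
	ip4_frags.qsize = sizeof(struct ipq);
	ip4_frags.frag_expire = ip_expire;
	ip4_frags.frags_cache_name = ip_frag_cache_name;
	ip4_frags.rhash_params = ip4_rhash_params;
	if (inet_frags_init(&ip4_frags))
		panic("IP: failed to allocate ip4_frags cache\n");
	return 0;
}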

void inet_frags_fini(struct inet_frags *f)
{
	if (refcount_dec_and_test(&f->refcnt))
		complete(&f->completion);

	wait_for_completion(&f->completion);

	kmem_cache_destroy(f->frags_cachep);
	f->frags_cachep = NULL;
}
EXPORT_SYMBOL(inet_frags_fini);

/* called from rhashtable_free_and_destroy() at netns_frags dismantle */
static void inet_frags_free_cb(void *ptr, void *arg)
{
	struct inet_frag_queue *fq = ptr;
	int count;

	count = del_timer_sync(&fq->timer) ? 1 : 0;

	spin_lock_bh(&fq->lock);
	fq->flags |= INET_FRAG_DROP;
	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		fq->flags |= INET_FRAG_COMPLETE;
		count++;
	} else if (fq->flags & INET_FRAG_HASH_DEAD) {
		count++;
	}
	spin_unlock_bh(&fq->lock);

	if (refcount_sub_and_test(count, &fq->refcnt))
		inet_frag_destroy(fq);
}

static LLIST_HEAD(fqdir_free_list);

static void fqdir_free_fn(struct work_struct *work)
{
	struct llist_node *kill_list;
	struct fqdir *fqdir, *tmp;
	struct inet_frags *f;

	/* Atomically snapshot the list of fqdirs to free */
	kill_list = llist_del_all(&fqdir_free_list);

	/* We need to make sure all ongoing call_rcu(..., inet_frag_destroy_rcu)
	 * have completed, since they need to dereference fqdir.
	 * Would it not be nice to have kfree_rcu_barrier() ? :)
	 */
	rcu_barrier();

	llist_for_each_entry_safe(fqdir, tmp, kill_list, free_list) {
		f = fqdir->f;
		if (refcount_dec_and_test(&f->refcnt))
			complete(&f->completion);

		kfree(fqdir);
	}
}

static DECLARE_DELAYED_WORK(fqdir_free_work, fqdir_free_fn);

static void fqdir_work_fn(struct work_struct *work)
{
	struct fqdir *fqdir = container_of(work, struct fqdir, destroy_work);

	rhashtable_free_and_destroy(&fqdir->rhashtable, inet_frags_free_cb, NULL);

	if (llist_add(&fqdir->free_list, &fqdir_free_list))
		queue_delayed_work(system_wq, &fqdir_free_work, HZ);
}

int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net)
{
	struct fqdir *fqdir = kzalloc(sizeof(*fqdir), GFP_KERNEL);
	int res;

	if (!fqdir)
		return -ENOMEM;
	fqdir->f = f;
	fqdir->net = net;
	res = rhashtable_init(&fqdir->rhashtable, &fqdir->f->rhash_params);
	if (res < 0) {
		kfree(fqdir);
		return res;
	}
	refcount_inc(&f->refcnt);
	*fqdirp = fqdir;
	return 0;
}
EXPORT_SYMBOL(fqdir_init);
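
/* Per-netns setup sketch (illustrative, not part of this file), modeled on
 * ipv4_frags_init_net(): each namespace gets its own fqdir with its own
 * memory thresholds and reassembly timeout. Error unwinding and sysctl
 * registration are omitted here.
 */
static int __net_init example_frags_init_net(struct net *net)
{
	int res = fqdir_init(&net->ipv4.fqdir, &ip4_frags, net);

	if (res < 0)
		return res;
	net->ipv4.fqdir->high_thresh = 4 * 1024 * 1024;	/* bytes */
	net->ipv4.fqdir->low_thresh = 3 * 1024 * 1024;
	net->ipv4.fqdir->timeout = IP_FRAG_TIME;
	return 0;
}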

static struct workqueue_struct *inet_frag_wq;

static int __init inet_frag_wq_init(void)
{
	inet_frag_wq = create_workqueue("inet_frag_wq");
	if (!inet_frag_wq)
		panic("Could not create inet frag workq");
	return 0;
}

pure_initcall(inet_frag_wq_init);

void fqdir_exit(struct fqdir *fqdir)
{
	INIT_WORK(&fqdir->destroy_work, fqdir_work_fn);
	queue_work(inet_frag_wq, &fqdir->destroy_work);
}
EXPORT_SYMBOL(fqdir_exit);

void inet_frag_kill(struct inet_frag_queue *fq)
{
	if (del_timer(&fq->timer))
		refcount_dec(&fq->refcnt);

	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		struct fqdir *fqdir = fq->fqdir;

		fq->flags |= INET_FRAG_COMPLETE;
		rcu_read_lock();
		/* The RCU read lock provides a memory barrier
		 * guaranteeing that if fqdir->dead is false then
		 * the hash table destruction will not start until
		 * after we unlock.  Paired with fqdir_pre_exit().
		 */
		if (!READ_ONCE(fqdir->dead)) {
			rhashtable_remove_fast(&fqdir->rhashtable, &fq->node,
					       fqdir->f->rhash_params);
			refcount_dec(&fq->refcnt);
		} else {
			fq->flags |= INET_FRAG_HASH_DEAD;
		}
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(inet_frag_kill);

static void inet_frag_destroy_rcu(struct rcu_head *head)
{
	struct inet_frag_queue *q = container_of(head, struct inet_frag_queue,
						 rcu);
	struct inet_frags *f = q->fqdir->f;

	if (f->destructor)
		f->destructor(q);
	kmem_cache_free(f->frags_cachep, q);
}

unsigned int inet_frag_rbtree_purge(struct rb_root *root,
				    enum skb_drop_reason reason)
{
	struct rb_node *p = rb_first(root);
	unsigned int sum = 0;

	while (p) {
		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);

		p = rb_next(p);
		rb_erase(&skb->rbnode, root);
		while (skb) {
			struct sk_buff *next = FRAG_CB(skb)->next_frag;

			sum += skb->truesize;
			kfree_skb_reason(skb, reason);
			skb = next;
		}
	}
	return sum;
}
EXPORT_SYMBOL(inet_frag_rbtree_purge);

void inet_frag_destroy(struct inet_frag_queue *q)
{
	unsigned int sum, sum_truesize = 0;
	enum skb_drop_reason reason;
	struct inet_frags *f;
	struct fqdir *fqdir;

	WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
	reason = (q->flags & INET_FRAG_DROP) ?
			SKB_DROP_REASON_FRAG_REASM_TIMEOUT :
			SKB_CONSUMED;
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fqdir = q->fqdir;
	f = fqdir->f;
	sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments, reason);
	sum = sum_truesize + f->qsize;

	call_rcu(&q->rcu, inet_frag_destroy_rcu);

	sub_frag_mem_limit(fqdir, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);

static struct inet_frag_queue *inet_frag_alloc(struct fqdir *fqdir,
					       struct inet_frags *f,
					       void *arg)
{
	struct inet_frag_queue *q;

	q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
	if (!q)
		return NULL;

	q->fqdir = fqdir;
	f->constructor(q, arg);
	add_frag_mem_limit(fqdir, f->qsize);

	timer_setup(&q->timer, f->frag_expire, 0);
	spin_lock_init(&q->lock);
	refcount_set(&q->refcnt, 3);

	return q;
}

static struct inet_frag_queue *inet_frag_create(struct fqdir *fqdir,
						void *arg,
						struct inet_frag_queue **prev)
{
	struct inet_frags *f = fqdir->f;
	struct inet_frag_queue *q;

	q = inet_frag_alloc(fqdir, f, arg);
	if (!q) {
		*prev = ERR_PTR(-ENOMEM);
		return NULL;
	}
	mod_timer(&q->timer, jiffies + fqdir->timeout);

	*prev = rhashtable_lookup_get_insert_key(&fqdir->rhashtable, &q->key,
						 &q->node, f->rhash_params);
	if (*prev) {
		q->flags |= INET_FRAG_COMPLETE;
		inet_frag_kill(q);
		inet_frag_destroy(q);
		return NULL;
	}
	return q;
}

/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key)
{
	/* This pairs with WRITE_ONCE() in fqdir_pre_exit(). */
	long high_thresh = READ_ONCE(fqdir->high_thresh);
	struct inet_frag_queue *fq = NULL, *prev;

	if (!high_thresh || frag_mem_limit(fqdir) > high_thresh)
		return NULL;

	rcu_read_lock();

	prev = rhashtable_lookup(&fqdir->rhashtable, key, fqdir->f->rhash_params);
	if (!prev)
		fq = inet_frag_create(fqdir, key, &prev);
	if (!IS_ERR_OR_NULL(prev)) {
		fq = prev;
		if (!refcount_inc_not_zero(&fq->refcnt))
			fq = NULL;
	}
	rcu_read_unlock();
	return fq;
}
EXPORT_SYMBOL(inet_frag_find);
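
/* Lookup sketch (illustrative, not part of this file), modeled on ipv4's
 * ip_find() in this era: no hash or lock is exposed anymore; the caller
 * builds a compare key and the rhashtable hashes it internally.
 */
static struct ipq *example_ip_find(struct net *net, struct iphdr *iph,
				   u32 user, int vif)
{
	struct frag_v4_compare_key key = {
		.saddr = iph->saddr,
		.daddr = iph->daddr,
		.user = user,
		.vif = vif,
		.id = iph->id,
		.protocol = iph->protocol,
	};
	struct inet_frag_queue *q;

	q = inet_frag_find(net->ipv4.fqdir, &key);
	if (!q)
		return NULL;
	return container_of(q, struct ipq, q);
}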

int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
			   int offset, int end)
{
	struct sk_buff *last = q->fragments_tail;

	/* RFC5722, Section 4, amended by Errata ID : 3089
	 *                          When reassembling an IPv6 datagram, if
	 *   one or more of its constituent fragments is determined to be an
	 *   overlapping fragment, the entire datagram (and any constituent
	 *   fragments) MUST be silently discarded.
	 *
	 * Duplicates, however, should be ignored (i.e. skb dropped, but the
	 * queue/fragments kept for later reassembly).
	 */
	if (!last)
		fragrun_create(q, skb);  /* First fragment. */
	else if (FRAG_CB(last)->ip_defrag_offset + last->len < end) {
		/* This is the common case: skb goes to the end. */
		/* Detect and discard overlaps. */
		if (offset < FRAG_CB(last)->ip_defrag_offset + last->len)
			return IPFRAG_OVERLAP;
		if (offset == FRAG_CB(last)->ip_defrag_offset + last->len)
			fragrun_append_to_last(q, skb);
		else
			fragrun_create(q, skb);
	} else {
		/* Binary search. Note that skb can become the first fragment,
		 * but not the last (covered above).
		 */
		struct rb_node **rbn, *parent;

		rbn = &q->rb_fragments.rb_node;
		do {
			struct sk_buff *curr;
			int curr_run_end;

			parent = *rbn;
			curr = rb_to_skb(parent);
			curr_run_end = FRAG_CB(curr)->ip_defrag_offset +
					FRAG_CB(curr)->frag_run_len;
			if (end <= FRAG_CB(curr)->ip_defrag_offset)
				rbn = &parent->rb_left;
			else if (offset >= curr_run_end)
				rbn = &parent->rb_right;
			else if (offset >= FRAG_CB(curr)->ip_defrag_offset &&
				 end <= curr_run_end)
				return IPFRAG_DUP;
			else
				return IPFRAG_OVERLAP;
		} while (*rbn);
		/* Here we have parent properly set, and rbn pointing to
		 * one of its NULL left/right children. Insert skb.
		 */
		fragcb_clear(skb);
		rb_link_node(&skb->rbnode, parent, rbn);
		rb_insert_color(&skb->rbnode, &q->rb_fragments);
	}

	FRAG_CB(skb)->ip_defrag_offset = offset;

	return IPFRAG_OK;
}
EXPORT_SYMBOL(inet_frag_queue_insert);
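
/* Caller sketch (illustrative, not part of this file): handling the three
 * return codes, loosely following ip_frag_queue(). On IPFRAG_OVERLAP the
 * whole queue is dropped per RFC 5722; on IPFRAG_DUP only the new skb goes.
 */
static int example_queue_frag(struct ipq *qp, struct sk_buff *skb,
			      int offset, int end)
{
	int err = inet_frag_queue_insert(&qp->q, skb, offset, end);

	if (err == IPFRAG_DUP) {
		/* keep the queue, silently drop the duplicate skb */
		kfree_skb_reason(skb, SKB_DROP_REASON_DUP_FRAG);
		return -EINVAL;
	}
	if (err == IPFRAG_OVERLAP) {
		/* RFC 5722: discard the entire datagram */
		qp->q.flags |= INET_FRAG_DROP;
		inet_frag_kill(&qp->q);
		kfree_skb(skb);
		return -EINVAL;
	}
	return 0;	/* IPFRAG_OK: skb is now owned by the queue */
}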

void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
			      struct sk_buff *parent)
{
	struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
	void (*destructor)(struct sk_buff *);
	unsigned int orig_truesize = 0;
	struct sk_buff **nextp = NULL;
	struct sock *sk = skb->sk;
	int delta;

	if (sk && is_skb_wmem(skb)) {
		/* TX: skb->sk might have been passed as argument to
		 * dst->output and must remain valid until tx completes.
		 *
		 * Move sk to reassembled skb and fix up wmem accounting.
		 */
		orig_truesize = skb->truesize;
		destructor = skb->destructor;
	}

	if (head != skb) {
		fp = skb_clone(skb, GFP_ATOMIC);
		if (!fp) {
			head = skb;
			goto out_restore_sk;
		}
		FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
		if (RB_EMPTY_NODE(&skb->rbnode))
			FRAG_CB(parent)->next_frag = fp;
		else
			rb_replace_node(&skb->rbnode, &fp->rbnode,
					&q->rb_fragments);
		if (q->fragments_tail == skb)
			q->fragments_tail = fp;

		if (orig_truesize) {
			/* prevent skb_morph from releasing sk */
			skb->sk = NULL;
			skb->destructor = NULL;
		}
		skb_morph(skb, head);
		FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
		rb_replace_node(&head->rbnode, &skb->rbnode,
				&q->rb_fragments);
		consume_skb(head);
		head = skb;
	}
	WARN_ON(FRAG_CB(head)->ip_defrag_offset != 0);

	delta = -head->truesize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_restore_sk;

	delta += head->truesize;
	if (delta)
		add_frag_mem_limit(q->fqdir, delta);

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_restore_sk;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->data_len = head->data_len - plen;
		clone->len = clone->data_len;
		head->truesize += clone->truesize;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(q->fqdir, clone->truesize);
		skb_shinfo(head)->frag_list = clone;
		nextp = &clone->next;
	} else {
		nextp = &skb_shinfo(head)->frag_list;
	}

out_restore_sk:
	if (orig_truesize) {
		int ts_delta = head->truesize - orig_truesize;

		/* if this reassembled skb is fragmented later,
		 * fraglist skbs will get skb->sk assigned from head->sk,
		 * and each frag skb will be released via sock_wfree.
		 *
		 * Update sk_wmem_alloc.
		 */
		head->sk = sk;
		head->destructor = destructor;
		refcount_add(ts_delta, &sk->sk_wmem_alloc);
	}

	return nextp;
}
EXPORT_SYMBOL(inet_frag_reasm_prepare);

void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
			    void *reasm_data, bool try_coalesce)
{
	struct sock *sk = is_skb_wmem(head) ? head->sk : NULL;
	const unsigned int head_truesize = head->truesize;
	struct sk_buff **nextp = reasm_data;
	struct rb_node *rbn;
	struct sk_buff *fp;
	int sum_truesize;

	skb_push(head, head->data - skb_network_header(head));

	/* Traverse the tree in order, to build frag_list. */
	fp = FRAG_CB(head)->next_frag;
	rbn = rb_next(&head->rbnode);
	rb_erase(&head->rbnode, &q->rb_fragments);

	sum_truesize = head->truesize;
	while (rbn || fp) {
		/* fp points to the next sk_buff in the current run;
		 * rbn points to the next run.
		 */
		/* Go through the current run. */
		while (fp) {
			struct sk_buff *next_frag = FRAG_CB(fp)->next_frag;
			bool stolen;
			int delta;

			sum_truesize += fp->truesize;
			if (head->ip_summed != fp->ip_summed)
				head->ip_summed = CHECKSUM_NONE;
			else if (head->ip_summed == CHECKSUM_COMPLETE)
				head->csum = csum_add(head->csum, fp->csum);

			if (try_coalesce && skb_try_coalesce(head, fp, &stolen,
							     &delta)) {
				kfree_skb_partial(fp, stolen);
			} else {
				fp->prev = NULL;
				memset(&fp->rbnode, 0, sizeof(fp->rbnode));
				fp->sk = NULL;

				head->data_len += fp->len;
				head->len += fp->len;
				head->truesize += fp->truesize;

				*nextp = fp;
				nextp = &fp->next;
			}

			fp = next_frag;
		}
		/* Move to the next run. */
		if (rbn) {
			struct rb_node *rbnext = rb_next(rbn);

			fp = rb_to_skb(rbn);
			rb_erase(rbn, &q->rb_fragments);
			rbn = rbnext;
		}
	}
	sub_frag_mem_limit(q->fqdir, sum_truesize);

	*nextp = NULL;
	skb_mark_not_on_list(head);
	head->prev = NULL;
	head->tstamp = q->stamp;
	head->tstamp_type = q->tstamp_type;

	if (sk)
		refcount_add(sum_truesize - head_truesize, &sk->sk_wmem_alloc);
}
EXPORT_SYMBOL(inet_frag_reasm_finish);
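
/* Reassembly sketch (illustrative, not part of this file), loosely following
 * ip_frag_reasm(): prepare turns the rb-tree head into a suitable head skb
 * and returns where the frag_list should be chained; finish links the rest.
 * "prev_tail" is the fragment preceding the one that completed the datagram.
 */
static int example_reasm(struct ipq *qp, struct sk_buff *skb,
			 struct sk_buff *prev_tail, struct net_device *dev)
{
	void *reasm_data = inet_frag_reasm_prepare(&qp->q, skb, prev_tail);

	if (!reasm_data)
		return -ENOMEM;
	inet_frag_reasm_finish(&qp->q, skb, reasm_data, true);

	skb->dev = dev;
	/* protocol-specific header fix-ups (total length, frag flags) go here */
	return 0;
}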
628
629struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
 
630{
631	struct sk_buff *head, *skb;
632
633	head = skb_rb_first(&q->rb_fragments);
634	if (!head)
635		return NULL;
636	skb = FRAG_CB(head)->next_frag;
637	if (skb)
638		rb_replace_node(&head->rbnode, &skb->rbnode,
639				&q->rb_fragments);
640	else
641		rb_erase(&head->rbnode, &q->rb_fragments);
642	memset(&head->rbnode, 0, sizeof(head->rbnode));
643	barrier();
644
645	if (head == q->fragments_tail)
646		q->fragments_tail = NULL;
647
648	sub_frag_mem_limit(q->fqdir, head->truesize);
649
650	return head;
 
651}
652EXPORT_SYMBOL(inet_frag_pull_head);