v4.6
  1/*
  2 *		INETPEER - A storage for permanent information about peers
  3 *
  4 *  This source is covered by the GNU GPL, the same as all kernel sources.
  5 *
  6 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
  7 */
  8
  9#include <linux/module.h>
 10#include <linux/types.h>
 11#include <linux/slab.h>
 12#include <linux/interrupt.h>
 13#include <linux/spinlock.h>
 14#include <linux/random.h>
 15#include <linux/timer.h>
 16#include <linux/time.h>
 17#include <linux/kernel.h>
 18#include <linux/mm.h>
 19#include <linux/net.h>
 20#include <linux/workqueue.h>
 21#include <net/ip.h>
 22#include <net/inetpeer.h>
 23#include <net/secure_seq.h>
 24
 25/*
 26 *  Theory of operations.
 27 *  We keep one entry for each peer IP address.  Each node contains
 28 *  long-lived information about the peer which doesn't depend on routes.
 29 *
 30 *  Nodes are removed only when the reference counter goes to 0.
 31 *  When that happens, the node may be removed once a sufficient amount of
 32 *  time has passed since its last use.  A less-recently-used entry can
 33 *  also be removed if the pool is overloaded, i.e. if the total number of
 34 *  entries is greater than or equal to the threshold.
 35 *
 36 *  Node pool is organised as an AVL tree.
 37 *  Such an implementation has been chosen not just for fun.  It's a way to
 38 *  prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 39 *  number of long-lived nodes in a single hash slot would significantly delay
 40 *  lookups performed with BHs disabled.
 41 *
 42 *  Serialisation issues.
 43 *  1.  Nodes may appear in the tree only with the pool lock held.
 44 *  2.  Nodes may disappear from the tree only with the pool lock held
 45 *      AND reference count being 0.
 46 *  3.  Global variable peer_total is modified under the pool lock.
 47 *  4.  struct inet_peer fields modification:
 48 *		avl_left, avl_right, avl_parent, avl_height: pool lock
 49 *		refcnt: atomically against modifications on other CPU;
 50 *		   usually under some other lock to prevent node disappearing
 51 *		daddr: unchangeable
 52 */
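/* Editor's note (not part of the original file): the serialisation rules
 * above play out as a two-phase lookup.  Readers walk the tree locklessly
 * under RCU, and only writers take the pool seqlock.  A condensed sketch of
 * the pattern used by inet_getpeer() further down:
 *
 *	rcu_read_lock();
 *	p = lookup_rcu(daddr, base);		// may miss, never blocks
 *	rcu_read_unlock();
 *	if (!p) {
 *		write_seqlock_bh(&base->lock);	// writers exclude each other
 *		p = lookup(daddr, stack, base);	// authoritative locked walk
 *		...
 *		write_sequnlock_bh(&base->lock);
 *	}
 */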
 53
 54static struct kmem_cache *peer_cachep __read_mostly;
 55
 56static LIST_HEAD(gc_list);
 57static const int gc_delay = 60 * HZ;
 58static struct delayed_work gc_work;
 59static DEFINE_SPINLOCK(gc_lock);
 60
 61#define node_height(x) x->avl_height
 62
 63#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
 64#define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node)
 65static const struct inet_peer peer_fake_node = {
 66	.avl_left	= peer_avl_empty_rcu,
 67	.avl_right	= peer_avl_empty_rcu,
 68	.avl_height	= 0
 69};
 70
 71void inet_peer_base_init(struct inet_peer_base *bp)
 72{
 73	bp->root = peer_avl_empty_rcu;
 74	seqlock_init(&bp->lock);
 75	bp->total = 0;
 76}
 77EXPORT_SYMBOL_GPL(inet_peer_base_init);
 78
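/* Editor's sketch (not in the original): a base is normally embedded in some
 * larger per-protocol structure and initialised once at boot; the container
 * here is hypothetical.
 *
 *	static struct inet_peer_base example_base;
 *
 *	static int __init example_init(void)
 *	{
 *		inet_peer_base_init(&example_base);
 *		return 0;
 *	}
 */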
 79#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
 80
 81/* Exported for sysctl_net_ipv4.  */
 82int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
 83					 * aggressively at this stage */
 84int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
 85int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */
 86
 87static void inetpeer_gc_worker(struct work_struct *work)
 88{
 89	struct inet_peer *p, *n, *c;
 90	struct list_head list;
 91
 92	spin_lock_bh(&gc_lock);
 93	list_replace_init(&gc_list, &list);
 94	spin_unlock_bh(&gc_lock);
 95
 96	if (list_empty(&list))
 97		return;
 98
 99	list_for_each_entry_safe(p, n, &list, gc_list) {
100
101		if (need_resched())
102			cond_resched();
103
104		c = rcu_dereference_protected(p->avl_left, 1);
105		if (c != peer_avl_empty) {
106			list_add_tail(&c->gc_list, &list);
107			p->avl_left = peer_avl_empty_rcu;
108		}
109
110		c = rcu_dereference_protected(p->avl_right, 1);
111		if (c != peer_avl_empty) {
112			list_add_tail(&c->gc_list, &list);
113			p->avl_right = peer_avl_empty_rcu;
114		}
115
116		n = list_entry(p->gc_list.next, struct inet_peer, gc_list);
117
118		if (!atomic_read(&p->refcnt)) {
119			list_del(&p->gc_list);
120			kmem_cache_free(peer_cachep, p);
121		}
122	}
123
124	if (list_empty(&list))
125		return;
126
127	spin_lock_bh(&gc_lock);
128	list_splice(&list, &gc_list);
129	spin_unlock_bh(&gc_lock);
130
131	schedule_delayed_work(&gc_work, gc_delay);
132}
133
134/* Called from ip_output.c:ip_init  */
135void __init inet_initpeers(void)
136{
137	struct sysinfo si;
138
139	/* Use the straight interface to information about memory. */
140	si_meminfo(&si);
141	/* The values below were suggested by Alexey Kuznetsov
142	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
143	 * myself.  --SAW
144	 */
145	if (si.totalram <= (32768*1024)/PAGE_SIZE)
146		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
147	if (si.totalram <= (16384*1024)/PAGE_SIZE)
148		inet_peer_threshold >>= 1; /* about 512KB */
149	if (si.totalram <= (8192*1024)/PAGE_SIZE)
150		inet_peer_threshold >>= 2; /* about 128KB */
151
152	peer_cachep = kmem_cache_create("inet_peer_cache",
153			sizeof(struct inet_peer),
154			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
155			NULL);
156
157	INIT_DEFERRABLE_WORK(&gc_work, inetpeer_gc_worker);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
158}
159
160#define rcu_deref_locked(X, BASE)				\
161	rcu_dereference_protected(X, lockdep_is_held(&(BASE)->lock.lock))
162
163/*
164 * Called with local BH disabled and the pool lock held.
165 */
166#define lookup(_daddr, _stack, _base)				\
167({								\
168	struct inet_peer *u;					\
169	struct inet_peer __rcu **v;				\
170								\
171	stackptr = _stack;					\
172	*stackptr++ = &_base->root;				\
173	for (u = rcu_deref_locked(_base->root, _base);		\
174	     u != peer_avl_empty;) {				\
175		int cmp = inetpeer_addr_cmp(_daddr, &u->daddr);	\
176		if (cmp == 0)					\
177			break;					\
178		if (cmp == -1)					\
179			v = &u->avl_left;			\
180		else						\
181			v = &u->avl_right;			\
182		*stackptr++ = v;				\
183		u = rcu_deref_locked(*v, _base);		\
184	}							\
185	u;							\
186})
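/* Editor's note (not in the original): lookup() leaves stackptr pointing one
 * slot past the last edge followed, so stack[] records every __rcu pointer
 * slot from &base->root down to the match/miss point.  link_to_pool() and
 * peer_avl_rebalance() replay that trail instead of keeping parent pointers
 * in the nodes themselves. */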
187
188/*
189 * Called with rcu_read_lock().
190 * Because we hold no lock against a writer, it's quite possible we fall
191 * into an endless loop.
192 * But every pointer we follow is guaranteed to be valid thanks to RCU.
193 * We exit from this function if the number of links exceeds PEER_MAXDEPTH.
194 */
195static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
196				    struct inet_peer_base *base)
197{
198	struct inet_peer *u = rcu_dereference(base->root);
199	int count = 0;
200
201	while (u != peer_avl_empty) {
202		int cmp = inetpeer_addr_cmp(daddr, &u->daddr);
203		if (cmp == 0) {
204			/* Before taking a reference, check if this entry was
205			 * deleted (refcnt=-1)
206			 */
207			if (!atomic_add_unless(&u->refcnt, 1, -1))
208				u = NULL;
209			return u;
210		}
211		if (cmp == -1)
212			u = rcu_dereference(u->avl_left);
213		else
214			u = rcu_dereference(u->avl_right);
215		if (unlikely(++count == PEER_MAXDEPTH))
216			break;
217	}
218	return NULL;
219}
220
221/* Called with local BH disabled and the pool lock held. */
222#define lookup_rightempty(start, base)				\
223({								\
224	struct inet_peer *u;					\
225	struct inet_peer __rcu **v;				\
226	*stackptr++ = &start->avl_left;				\
227	v = &start->avl_left;					\
228	for (u = rcu_deref_locked(*v, base);			\
229	     u->avl_right != peer_avl_empty_rcu;) {		\
230		v = &u->avl_right;				\
231		*stackptr++ = v;				\
232		u = rcu_deref_locked(*v, base);			\
233	}							\
234	u;							\
235})
236
237/* Called with local BH disabled and the pool lock held.
238 * Variable names are the proof of operation correctness.
239 * Look into mm/map_avl.c for a more detailed description of the ideas.
240 */
241static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
242			       struct inet_peer __rcu ***stackend,
243			       struct inet_peer_base *base)
244{
245	struct inet_peer __rcu **nodep;
246	struct inet_peer *node, *l, *r;
247	int lh, rh;
248
249	while (stackend > stack) {
250		nodep = *--stackend;
251		node = rcu_deref_locked(*nodep, base);
252		l = rcu_deref_locked(node->avl_left, base);
253		r = rcu_deref_locked(node->avl_right, base);
254		lh = node_height(l);
255		rh = node_height(r);
256		if (lh > rh + 1) { /* l: RH+2 */
257			struct inet_peer *ll, *lr, *lrl, *lrr;
258			int lrh;
259			ll = rcu_deref_locked(l->avl_left, base);
260			lr = rcu_deref_locked(l->avl_right, base);
261			lrh = node_height(lr);
262			if (lrh <= node_height(ll)) {	/* ll: RH+1 */
263				RCU_INIT_POINTER(node->avl_left, lr);	/* lr: RH or RH+1 */
264				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
265				node->avl_height = lrh + 1; /* RH+1 or RH+2 */
266				RCU_INIT_POINTER(l->avl_left, ll);       /* ll: RH+1 */
267				RCU_INIT_POINTER(l->avl_right, node);	/* node: RH+1 or RH+2 */
268				l->avl_height = node->avl_height + 1;
269				RCU_INIT_POINTER(*nodep, l);
270			} else { /* ll: RH, lr: RH+1 */
271				lrl = rcu_deref_locked(lr->avl_left, base);/* lrl: RH or RH-1 */
272				lrr = rcu_deref_locked(lr->avl_right, base);/* lrr: RH or RH-1 */
273				RCU_INIT_POINTER(node->avl_left, lrr);	/* lrr: RH or RH-1 */
274				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
275				node->avl_height = rh + 1; /* node: RH+1 */
276				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH */
277				RCU_INIT_POINTER(l->avl_right, lrl);	/* lrl: RH or RH-1 */
278				l->avl_height = rh + 1;	/* l: RH+1 */
279				RCU_INIT_POINTER(lr->avl_left, l);	/* l: RH+1 */
280				RCU_INIT_POINTER(lr->avl_right, node);	/* node: RH+1 */
281				lr->avl_height = rh + 2;
282				RCU_INIT_POINTER(*nodep, lr);
283			}
284		} else if (rh > lh + 1) { /* r: LH+2 */
285			struct inet_peer *rr, *rl, *rlr, *rll;
286			int rlh;
287			rr = rcu_deref_locked(r->avl_right, base);
288			rl = rcu_deref_locked(r->avl_left, base);
289			rlh = node_height(rl);
290			if (rlh <= node_height(rr)) {	/* rr: LH+1 */
291				RCU_INIT_POINTER(node->avl_right, rl);	/* rl: LH or LH+1 */
292				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
293				node->avl_height = rlh + 1; /* LH+1 or LH+2 */
294				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH+1 */
295				RCU_INIT_POINTER(r->avl_left, node);	/* node: LH+1 or LH+2 */
296				r->avl_height = node->avl_height + 1;
297				RCU_INIT_POINTER(*nodep, r);
298			} else { /* rr: LH, rl: LH+1 */
299				rlr = rcu_deref_locked(rl->avl_right, base);/* rlr: LH or LH-1 */
300				rll = rcu_deref_locked(rl->avl_left, base);/* rll: LH or LH-1 */
301				RCU_INIT_POINTER(node->avl_right, rll);	/* rll: LH or LH-1 */
302				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
303				node->avl_height = lh + 1; /* node: LH+1 */
304				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH */
305				RCU_INIT_POINTER(r->avl_left, rlr);	/* rlr: LH or LH-1 */
306				r->avl_height = lh + 1;	/* r: LH+1 */
307				RCU_INIT_POINTER(rl->avl_right, r);	/* r: LH+1 */
308				RCU_INIT_POINTER(rl->avl_left, node);	/* node: LH+1 */
309				rl->avl_height = lh + 2;
310				RCU_INIT_POINTER(*nodep, rl);
311			}
312		} else {
313			node->avl_height = (lh > rh ? lh : rh) + 1;
314		}
315	}
316}
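/* Editor's note (not in the original): the left-heavy branches above are the
 * two classic AVL fixes.  The first is a single right rotation (ll at least
 * as tall as lr):
 *
 *	      node                  l
 *	      /  \                /   \
 *	     l    r     ==>     ll    node
 *	    / \                       /  \
 *	  ll   lr                   lr    r
 *
 * The second branch (lr taller than ll) is the double rotation that promotes
 * lr to the subtree root.  The right-heavy cases are exact mirrors. */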
317
318/* Called with local BH disabled and the pool lock held. */
319#define link_to_pool(n, base)					\
320do {								\
321	n->avl_height = 1;					\
322	n->avl_left = peer_avl_empty_rcu;			\
323	n->avl_right = peer_avl_empty_rcu;			\
324	/* lockless readers can catch us now */			\
325	rcu_assign_pointer(**--stackptr, n);			\
326	peer_avl_rebalance(stack, stackptr, base);		\
327} while (0)
328
329static void inetpeer_free_rcu(struct rcu_head *head)
330{
331	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
332}
333
334static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
335			     struct inet_peer __rcu **stack[PEER_MAXDEPTH])
336{
337	struct inet_peer __rcu ***stackptr, ***delp;
338
339	if (lookup(&p->daddr, stack, base) != p)
340		BUG();
341	delp = stackptr - 1; /* *delp[0] == p */
342	if (p->avl_left == peer_avl_empty_rcu) {
343		*delp[0] = p->avl_right;
344		--stackptr;
345	} else {
346		/* look for a node to insert instead of p */
347		struct inet_peer *t;
348		t = lookup_rightempty(p, base);
349		BUG_ON(rcu_deref_locked(*stackptr[-1], base) != t);
350		**--stackptr = t->avl_left;
351		/* t is removed, t->daddr > x->daddr for any
352		 * x in p->avl_left subtree.
353		 * Put t in the old place of p. */
354		RCU_INIT_POINTER(*delp[0], t);
355		t->avl_left = p->avl_left;
356		t->avl_right = p->avl_right;
357		t->avl_height = p->avl_height;
358		BUG_ON(delp[1] != &p->avl_left);
359		delp[1] = &t->avl_left; /* was &p->avl_left */
360	}
361	peer_avl_rebalance(stack, stackptr, base);
362	base->total--;
363	call_rcu(&p->rcu, inetpeer_free_rcu);
364}
365
366/* perform garbage collection on all items stacked during a lookup */
367static int inet_peer_gc(struct inet_peer_base *base,
368			struct inet_peer __rcu **stack[PEER_MAXDEPTH],
369			struct inet_peer __rcu ***stackptr)
370{
371	struct inet_peer *p, *gchead = NULL;
372	__u32 delta, ttl;
373	int cnt = 0;
374
375	if (base->total >= inet_peer_threshold)
376		ttl = 0; /* be aggressive */
377	else
378		ttl = inet_peer_maxttl
379				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
380					base->total / inet_peer_threshold * HZ;
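	/* Editor's note (not in the original): the expression above linearly
	 * scales the TTL from inet_peer_maxttl for an empty pool down towards
	 * inet_peer_minttl as the pool fills.  With the defaults (maxttl =
	 * 600*HZ, minttl = 120*HZ, threshold = 65664), a pool at half the
	 * threshold gets roughly 600*HZ - 480*32832/65664*HZ = 360*HZ,
	 * i.e. a 6 minute TTL. */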
381	stackptr--; /* last stack slot is peer_avl_empty */
382	while (stackptr > stack) {
383		stackptr--;
384		p = rcu_deref_locked(**stackptr, base);
385		if (atomic_read(&p->refcnt) == 0) {
386			smp_rmb();
387			delta = (__u32)jiffies - p->dtime;
388			if (delta >= ttl &&
389			    atomic_cmpxchg(&p->refcnt, 0, -1) == 0) {
390				p->gc_next = gchead;
391				gchead = p;
392			}
393		}
394	}
395	while ((p = gchead) != NULL) {
396		gchead = p->gc_next;
397		cnt++;
398		unlink_from_pool(p, base, stack);
399	}
400	return cnt;
401}
402
403struct inet_peer *inet_getpeer(struct inet_peer_base *base,
404			       const struct inetpeer_addr *daddr,
405			       int create)
406{
407	struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
408	struct inet_peer *p;
409	unsigned int sequence;
410	int invalidated, gccnt = 0;
411
412	/* Attempt a lockless lookup first.
413	 * Because of a concurrent writer, we might not find an existing entry.
414	 */
415	rcu_read_lock();
416	sequence = read_seqbegin(&base->lock);
417	p = lookup_rcu(daddr, base);
418	invalidated = read_seqretry(&base->lock, sequence);
419	rcu_read_unlock();
420
421	if (p)
422		return p;
423
424	/* If no writer did a change during our lookup, we can return early. */
425	if (!create && !invalidated)
426		return NULL;
427
428	/* retry an exact lookup, this time taking the lock first.
429	 * At least the nodes should be hot in our cache.
430	 */
431	write_seqlock_bh(&base->lock);
432relookup:
433	p = lookup(daddr, stack, base);
434	if (p != peer_avl_empty) {
435		atomic_inc(&p->refcnt);
436		write_sequnlock_bh(&base->lock);
437		return p;
438	}
439	if (!gccnt) {
440		gccnt = inet_peer_gc(base, stack, stackptr);
441		if (gccnt && create)
442			goto relookup;
443	}
444	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
445	if (p) {
446		p->daddr = *daddr;
447		atomic_set(&p->refcnt, 1);
448		atomic_set(&p->rid, 0);
449		p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
450		p->rate_tokens = 0;
451		/* 60*HZ is arbitrary, but chosen high enough that the first
452		 * calculation of tokens is at its maximum.
453		 */
454		p->rate_last = jiffies - 60*HZ;
455		INIT_LIST_HEAD(&p->gc_list);
456
457		/* Link the node. */
458		link_to_pool(p, base);
459		base->total++;
460	}
461	write_sequnlock_bh(&base->lock);
462
463	return p;
464}
465EXPORT_SYMBOL_GPL(inet_getpeer);
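/* Editor's sketch (not part of the original file): a minimal, hypothetical
 * caller that looks up or creates the peer entry for an IPv4 destination and
 * drops its reference when done.  inetpeer_set_addr_v4() is assumed to be
 * the helper that accompanies inetpeer_addr_cmp() in include/net/inetpeer.h
 * in this version; error handling is reduced to the NULL check.
 *
 *	static void example_touch_peer(struct inet_peer_base *base, __be32 ip)
 *	{
 *		struct inetpeer_addr daddr;
 *		struct inet_peer *peer;
 *
 *		inetpeer_set_addr_v4(&daddr, ip);
 *		peer = inet_getpeer(base, &daddr, 1);	// create if missing
 *		if (!peer)
 *			return;		// GFP_ATOMIC allocation failed
 *		// ... use peer->metrics, peer->rate_tokens, etc. ...
 *		inet_putpeer(peer);	// stamps dtime, drops the refcount
 *	}
 */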
466
467void inet_putpeer(struct inet_peer *p)
468{
469	p->dtime = (__u32)jiffies;
470	smp_mb__before_atomic();
471	atomic_dec(&p->refcnt);
472}
473EXPORT_SYMBOL_GPL(inet_putpeer);
474
475/*
476 *	Check transmit rate limitation for given message.
477 *	The rate information is held in the inet_peer entries now.
478 *	This function is generic and could be used for other purposes
479 *	too. It uses a Token bucket filter as suggested by Alexey Kuznetsov.
480 *
481 *	Note that the same inet_peer fields are modified by functions in
482 *	route.c too, but these work for packet destinations while xrlim_allow
483 *	works for icmp destinations. This means the rate limiting information
484 *	for one "ip object" is shared - and these ICMPs are twice limited:
485 *	by source and by destination.
486 *
487 *	RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
488 *			  SHOULD allow setting of rate limits
489 *
490 * 	Shared between ICMPv4 and ICMPv6.
491 */
492#define XRLIM_BURST_FACTOR 6
493bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
494{
495	unsigned long now, token;
496	bool rc = false;
497
498	if (!peer)
499		return true;
500
501	token = peer->rate_tokens;
502	now = jiffies;
503	token += now - peer->rate_last;
504	peer->rate_last = now;
505	if (token > XRLIM_BURST_FACTOR * timeout)
506		token = XRLIM_BURST_FACTOR * timeout;
507	if (token >= timeout) {
508		token -= timeout;
509		rc = true;
510	}
511	peer->rate_tokens = token;
512	return rc;
513}
514EXPORT_SYMBOL(inet_peer_xrlim_allow);
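/* Editor's sketch (not part of the original file): how an ICMP-style caller
 * consumes this filter.  "timeout" is the minimum spacing between messages
 * in jiffies; with XRLIM_BURST_FACTOR == 6, an idle peer accumulates enough
 * tokens to burst six messages before settling to one per interval.  The
 * 1*HZ policy below is hypothetical.
 *
 *	static bool example_allow_icmp(struct inet_peer *peer)
 *	{
 *		return inet_peer_xrlim_allow(peer, 1 * HZ);
 *	}
 */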
515
516static void inetpeer_inval_rcu(struct rcu_head *head)
517{
518	struct inet_peer *p = container_of(head, struct inet_peer, gc_rcu);
519
520	spin_lock_bh(&gc_lock);
521	list_add_tail(&p->gc_list, &gc_list);
522	spin_unlock_bh(&gc_lock);
523
524	schedule_delayed_work(&gc_work, gc_delay);
525}
526
527void inetpeer_invalidate_tree(struct inet_peer_base *base)
528{
529	struct inet_peer *root;
530
531	write_seqlock_bh(&base->lock);
532
533	root = rcu_deref_locked(base->root, base);
534	if (root != peer_avl_empty) {
535		base->root = peer_avl_empty_rcu;
536		base->total = 0;
537		call_rcu(&root->gc_rcu, inetpeer_inval_rcu);
538	}
539
540	write_sequnlock_bh(&base->lock);
541}
542EXPORT_SYMBOL(inetpeer_invalidate_tree);
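/* Editor's note (not in the original): invalidation never frees in place.
 * The detached root rides one RCU grace period (inetpeer_inval_rcu), lands
 * on gc_list, and inetpeer_gc_worker() then flattens the old tree, freeing
 * nodes whose reference counts have reached zero and rescheduling itself
 * while any remain in use. */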
v3.5.6
  1/*
  2 *		INETPEER - A storage for permanent information about peers
  3 *
  4 *  This source is covered by the GNU GPL, the same as all kernel sources.
  5 *
  6 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
  7 */
  8
  9#include <linux/module.h>
 10#include <linux/types.h>
 11#include <linux/slab.h>
 12#include <linux/interrupt.h>
 13#include <linux/spinlock.h>
 14#include <linux/random.h>
 15#include <linux/timer.h>
 16#include <linux/time.h>
 17#include <linux/kernel.h>
 18#include <linux/mm.h>
 19#include <linux/net.h>
 20#include <linux/workqueue.h>
 21#include <net/ip.h>
 22#include <net/inetpeer.h>
 23#include <net/secure_seq.h>
 24
 25/*
 26 *  Theory of operations.
 27 *  We keep one entry for each peer IP address.  Each node contains
 28 *  long-lived information about the peer which doesn't depend on routes.
 29 *  At the moment this information consists only of the ID field for the
 30 *  next outgoing IP packet.  This field is incremented with each packet, as
 31 *  encoded in the inet_getid() function (include/net/inetpeer.h).
 32 *  As of this writing, the identifier of IP packets is generated
 33 *  unpredictably by this code only for packets subject
 34 *  (actually or potentially) to defragmentation.  I.e. DF packets smaller
 35 *  than the PMTU use a constant ID and do not use this code (see
 36 *  ip_select_ident() in include/net/ip.h).
 37 *
 38 *  Route cache entries hold references to our nodes.
 39 *  New cache entries get references via lookup by destination IP address in
 40 *  the avl tree.  The reference is grabbed only when it's needed i.e. only
 41 *  when we try to output IP packet which needs an unpredictable ID (see
 42 *  __ip_select_ident() in net/ipv4/route.c).
 43 *  Nodes are removed only when the reference counter goes to 0.
 44 *  When that happens, the node may be removed once a sufficient amount of
 45 *  time has passed since its last use.  A less-recently-used entry can
 46 *  also be removed if the pool is overloaded, i.e. if the total number of
 47 *  entries is greater than or equal to the threshold.
 48 *
 49 *  Node pool is organised as an AVL tree.
 50 *  Such an implementation has been chosen not just for fun.  It's a way to
 51 *  prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 52 *  number of long-lived nodes in a single hash slot would significantly delay
 53 *  lookups performed with BHs disabled.
 54 *
 55 *  Serialisation issues.
 56 *  1.  Nodes may appear in the tree only with the pool lock held.
 57 *  2.  Nodes may disappear from the tree only with the pool lock held
 58 *      AND reference count being 0.
 59 *  3.  Global variable peer_total is modified under the pool lock.
 60 *  4.  struct inet_peer fields modification:
 61 *		avl_left, avl_right, avl_parent, avl_height: pool lock
 62 *		refcnt: atomically against modifications on other CPU;
 63 *		   usually under some other lock to prevent node disappearing
 64 *		daddr: unchangeable
 65 *		ip_id_count: atomic value (no lock needed)
 66 */
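/* Editor's note (not in the original): the ID field mentioned above is
 * ip_id_count; inet_getid() in include/net/inetpeer.h hands out per-peer IP
 * IDs by atomically advancing that counter, and ip_select_ident() only
 * consults it for packets that may actually be fragmented. */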
 67
 68static struct kmem_cache *peer_cachep __read_mostly;
 69
 70static LIST_HEAD(gc_list);
 71static const int gc_delay = 60 * HZ;
 72static struct delayed_work gc_work;
 73static DEFINE_SPINLOCK(gc_lock);
 74
 75#define node_height(x) x->avl_height
 76
 77#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
 78#define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node)
 79static const struct inet_peer peer_fake_node = {
 80	.avl_left	= peer_avl_empty_rcu,
 81	.avl_right	= peer_avl_empty_rcu,
 82	.avl_height	= 0
 83};
 84
 85struct inet_peer_base {
 86	struct inet_peer __rcu *root;
 87	seqlock_t	lock;
 88	int		total;
 89};
 90
 91static struct inet_peer_base v4_peers = {
 92	.root		= peer_avl_empty_rcu,
 93	.lock		= __SEQLOCK_UNLOCKED(v4_peers.lock),
 94	.total		= 0,
 95};
 96
 97static struct inet_peer_base v6_peers = {
 98	.root		= peer_avl_empty_rcu,
 99	.lock		= __SEQLOCK_UNLOCKED(v6_peers.lock),
100	.total		= 0,
101};
102
103#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
104
105/* Exported for sysctl_net_ipv4.  */
106int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
107					 * aggressively at this stage */
108int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
109int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */
110
111static void inetpeer_gc_worker(struct work_struct *work)
112{
113	struct inet_peer *p, *n;
114	LIST_HEAD(list);
115
116	spin_lock_bh(&gc_lock);
117	list_replace_init(&gc_list, &list);
118	spin_unlock_bh(&gc_lock);
119
120	if (list_empty(&list))
121		return;
122
123	list_for_each_entry_safe(p, n, &list, gc_list) {
124
125		if (need_resched())
126			cond_resched();
127
128		if (p->avl_left != peer_avl_empty) {
129			list_add_tail(&p->avl_left->gc_list, &list);
130			p->avl_left = peer_avl_empty;
131		}
132
133		if (p->avl_right != peer_avl_empty) {
134			list_add_tail(&p->avl_right->gc_list, &list);
135			p->avl_right = peer_avl_empty;
136		}
137
138		n = list_entry(p->gc_list.next, struct inet_peer, gc_list);
139
140		if (!atomic_read(&p->refcnt)) {
141			list_del(&p->gc_list);
142			kmem_cache_free(peer_cachep, p);
143		}
144	}
145
146	if (list_empty(&list))
147		return;
148
149	spin_lock_bh(&gc_lock);
150	list_splice(&list, &gc_list);
151	spin_unlock_bh(&gc_lock);
152
153	schedule_delayed_work(&gc_work, gc_delay);
154}
155
156/* Called from ip_output.c:ip_init  */
157void __init inet_initpeers(void)
158{
159	struct sysinfo si;
160
161	/* Use the straight interface to information about memory. */
162	si_meminfo(&si);
163	/* The values below were suggested by Alexey Kuznetsov
164	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
165	 * myself.  --SAW
166	 */
167	if (si.totalram <= (32768*1024)/PAGE_SIZE)
168		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
169	if (si.totalram <= (16384*1024)/PAGE_SIZE)
170		inet_peer_threshold >>= 1; /* about 512KB */
171	if (si.totalram <= (8192*1024)/PAGE_SIZE)
172		inet_peer_threshold >>= 2; /* about 128KB */
173
174	peer_cachep = kmem_cache_create("inet_peer_cache",
175			sizeof(struct inet_peer),
176			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
177			NULL);
178
179	INIT_DELAYED_WORK_DEFERRABLE(&gc_work, inetpeer_gc_worker);
180}
181
182static int addr_compare(const struct inetpeer_addr *a,
183			const struct inetpeer_addr *b)
184{
185	int i, n = (a->family == AF_INET ? 1 : 4);
186
187	for (i = 0; i < n; i++) {
188		if (a->addr.a6[i] == b->addr.a6[i])
189			continue;
190		if ((__force u32)a->addr.a6[i] < (__force u32)b->addr.a6[i])
191			return -1;
192		return 1;
193	}
194
195	return 0;
196}
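/* Editor's note (not in the original): for AF_INET only a6[0] is compared;
 * it aliases addr.a4 inside the inetpeer_addr union, so IPv4 keys resolve in
 * a single step while IPv6 keys compare all four 32-bit words. */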
197
198#define rcu_deref_locked(X, BASE)				\
199	rcu_dereference_protected(X, lockdep_is_held(&(BASE)->lock.lock))
200
201/*
202 * Called with local BH disabled and the pool lock held.
203 */
204#define lookup(_daddr, _stack, _base)				\
205({								\
206	struct inet_peer *u;					\
207	struct inet_peer __rcu **v;				\
208								\
209	stackptr = _stack;					\
210	*stackptr++ = &_base->root;				\
211	for (u = rcu_deref_locked(_base->root, _base);		\
212	     u != peer_avl_empty; ) {				\
213		int cmp = addr_compare(_daddr, &u->daddr);	\
214		if (cmp == 0)					\
215			break;					\
216		if (cmp == -1)					\
217			v = &u->avl_left;			\
218		else						\
219			v = &u->avl_right;			\
220		*stackptr++ = v;				\
221		u = rcu_deref_locked(*v, _base);		\
222	}							\
223	u;							\
224})
225
226/*
227 * Called with rcu_read_lock().
228 * Because we hold no lock against a writer, it's quite possible we fall
229 * into an endless loop.
230 * But every pointer we follow is guaranteed to be valid thanks to RCU.
231 * We exit from this function if the number of links exceeds PEER_MAXDEPTH.
232 */
233static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
234				    struct inet_peer_base *base)
235{
236	struct inet_peer *u = rcu_dereference(base->root);
237	int count = 0;
238
239	while (u != peer_avl_empty) {
240		int cmp = addr_compare(daddr, &u->daddr);
241		if (cmp == 0) {
242			/* Before taking a reference, check if this entry was
243			 * deleted (refcnt=-1)
244			 */
245			if (!atomic_add_unless(&u->refcnt, 1, -1))
246				u = NULL;
247			return u;
248		}
249		if (cmp == -1)
250			u = rcu_dereference(u->avl_left);
251		else
252			u = rcu_dereference(u->avl_right);
253		if (unlikely(++count == PEER_MAXDEPTH))
254			break;
255	}
256	return NULL;
257}
258
259/* Called with local BH disabled and the pool lock held. */
260#define lookup_rightempty(start, base)				\
261({								\
262	struct inet_peer *u;					\
263	struct inet_peer __rcu **v;				\
264	*stackptr++ = &start->avl_left;				\
265	v = &start->avl_left;					\
266	for (u = rcu_deref_locked(*v, base);			\
267	     u->avl_right != peer_avl_empty_rcu; ) {		\
268		v = &u->avl_right;				\
269		*stackptr++ = v;				\
270		u = rcu_deref_locked(*v, base);			\
271	}							\
272	u;							\
273})
274
275/* Called with local BH disabled and the pool lock held.
276 * Variable names are the proof of operation correctness.
277 * Look into mm/map_avl.c for a more detailed description of the ideas.
278 */
279static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
280			       struct inet_peer __rcu ***stackend,
281			       struct inet_peer_base *base)
282{
283	struct inet_peer __rcu **nodep;
284	struct inet_peer *node, *l, *r;
285	int lh, rh;
286
287	while (stackend > stack) {
288		nodep = *--stackend;
289		node = rcu_deref_locked(*nodep, base);
290		l = rcu_deref_locked(node->avl_left, base);
291		r = rcu_deref_locked(node->avl_right, base);
292		lh = node_height(l);
293		rh = node_height(r);
294		if (lh > rh + 1) { /* l: RH+2 */
295			struct inet_peer *ll, *lr, *lrl, *lrr;
296			int lrh;
297			ll = rcu_deref_locked(l->avl_left, base);
298			lr = rcu_deref_locked(l->avl_right, base);
299			lrh = node_height(lr);
300			if (lrh <= node_height(ll)) {	/* ll: RH+1 */
301				RCU_INIT_POINTER(node->avl_left, lr);	/* lr: RH or RH+1 */
302				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
303				node->avl_height = lrh + 1; /* RH+1 or RH+2 */
304				RCU_INIT_POINTER(l->avl_left, ll);       /* ll: RH+1 */
305				RCU_INIT_POINTER(l->avl_right, node);	/* node: RH+1 or RH+2 */
306				l->avl_height = node->avl_height + 1;
307				RCU_INIT_POINTER(*nodep, l);
308			} else { /* ll: RH, lr: RH+1 */
309				lrl = rcu_deref_locked(lr->avl_left, base);/* lrl: RH or RH-1 */
310				lrr = rcu_deref_locked(lr->avl_right, base);/* lrr: RH or RH-1 */
311				RCU_INIT_POINTER(node->avl_left, lrr);	/* lrr: RH or RH-1 */
312				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
313				node->avl_height = rh + 1; /* node: RH+1 */
314				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH */
315				RCU_INIT_POINTER(l->avl_right, lrl);	/* lrl: RH or RH-1 */
316				l->avl_height = rh + 1;	/* l: RH+1 */
317				RCU_INIT_POINTER(lr->avl_left, l);	/* l: RH+1 */
318				RCU_INIT_POINTER(lr->avl_right, node);	/* node: RH+1 */
319				lr->avl_height = rh + 2;
320				RCU_INIT_POINTER(*nodep, lr);
321			}
322		} else if (rh > lh + 1) { /* r: LH+2 */
323			struct inet_peer *rr, *rl, *rlr, *rll;
324			int rlh;
325			rr = rcu_deref_locked(r->avl_right, base);
326			rl = rcu_deref_locked(r->avl_left, base);
327			rlh = node_height(rl);
328			if (rlh <= node_height(rr)) {	/* rr: LH+1 */
329				RCU_INIT_POINTER(node->avl_right, rl);	/* rl: LH or LH+1 */
330				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
331				node->avl_height = rlh + 1; /* LH+1 or LH+2 */
332				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH+1 */
333				RCU_INIT_POINTER(r->avl_left, node);	/* node: LH+1 or LH+2 */
334				r->avl_height = node->avl_height + 1;
335				RCU_INIT_POINTER(*nodep, r);
336			} else { /* rr: LH, rl: LH+1 */
337				rlr = rcu_deref_locked(rl->avl_right, base);/* rlr: LH or LH-1 */
338				rll = rcu_deref_locked(rl->avl_left, base);/* rll: LH or LH-1 */
339				RCU_INIT_POINTER(node->avl_right, rll);	/* rll: LH or LH-1 */
340				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
341				node->avl_height = lh + 1; /* node: LH+1 */
342				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH */
343				RCU_INIT_POINTER(r->avl_left, rlr);	/* rlr: LH or LH-1 */
344				r->avl_height = lh + 1;	/* r: LH+1 */
345				RCU_INIT_POINTER(rl->avl_right, r);	/* r: LH+1 */
346				RCU_INIT_POINTER(rl->avl_left, node);	/* node: LH+1 */
347				rl->avl_height = lh + 2;
348				RCU_INIT_POINTER(*nodep, rl);
349			}
350		} else {
351			node->avl_height = (lh > rh ? lh : rh) + 1;
352		}
353	}
354}
355
356/* Called with local BH disabled and the pool lock held. */
357#define link_to_pool(n, base)					\
358do {								\
359	n->avl_height = 1;					\
360	n->avl_left = peer_avl_empty_rcu;			\
361	n->avl_right = peer_avl_empty_rcu;			\
362	/* lockless readers can catch us now */			\
363	rcu_assign_pointer(**--stackptr, n);			\
364	peer_avl_rebalance(stack, stackptr, base);		\
365} while (0)
366
367static void inetpeer_free_rcu(struct rcu_head *head)
368{
369	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
370}
371
372static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
373			     struct inet_peer __rcu **stack[PEER_MAXDEPTH])
374{
375	struct inet_peer __rcu ***stackptr, ***delp;
376
377	if (lookup(&p->daddr, stack, base) != p)
378		BUG();
379	delp = stackptr - 1; /* *delp[0] == p */
380	if (p->avl_left == peer_avl_empty_rcu) {
381		*delp[0] = p->avl_right;
382		--stackptr;
383	} else {
384		/* look for a node to insert instead of p */
385		struct inet_peer *t;
386		t = lookup_rightempty(p, base);
387		BUG_ON(rcu_deref_locked(*stackptr[-1], base) != t);
388		**--stackptr = t->avl_left;
389		/* t is removed, t->daddr > x->daddr for any
390		 * x in p->avl_left subtree.
391		 * Put t in the old place of p. */
392		RCU_INIT_POINTER(*delp[0], t);
393		t->avl_left = p->avl_left;
394		t->avl_right = p->avl_right;
395		t->avl_height = p->avl_height;
396		BUG_ON(delp[1] != &p->avl_left);
397		delp[1] = &t->avl_left; /* was &p->avl_left */
398	}
399	peer_avl_rebalance(stack, stackptr, base);
400	base->total--;
401	call_rcu(&p->rcu, inetpeer_free_rcu);
402}
403
404static struct inet_peer_base *family_to_base(int family)
405{
406	return family == AF_INET ? &v4_peers : &v6_peers;
407}
408
409/* perform garbage collection on all items stacked during a lookup */
410static int inet_peer_gc(struct inet_peer_base *base,
411			struct inet_peer __rcu **stack[PEER_MAXDEPTH],
412			struct inet_peer __rcu ***stackptr)
413{
414	struct inet_peer *p, *gchead = NULL;
415	__u32 delta, ttl;
416	int cnt = 0;
417
418	if (base->total >= inet_peer_threshold)
419		ttl = 0; /* be aggressive */
420	else
421		ttl = inet_peer_maxttl
422				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
423					base->total / inet_peer_threshold * HZ;
424	stackptr--; /* last stack slot is peer_avl_empty */
425	while (stackptr > stack) {
426		stackptr--;
427		p = rcu_deref_locked(**stackptr, base);
428		if (atomic_read(&p->refcnt) == 0) {
429			smp_rmb();
430			delta = (__u32)jiffies - p->dtime;
431			if (delta >= ttl &&
432			    atomic_cmpxchg(&p->refcnt, 0, -1) == 0) {
433				p->gc_next = gchead;
434				gchead = p;
435			}
436		}
437	}
438	while ((p = gchead) != NULL) {
439		gchead = p->gc_next;
440		cnt++;
441		unlink_from_pool(p, base, stack);
442	}
443	return cnt;
444}
445
446struct inet_peer *inet_getpeer(const struct inetpeer_addr *daddr, int create)
447{
448	struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
449	struct inet_peer_base *base = family_to_base(daddr->family);
450	struct inet_peer *p;
451	unsigned int sequence;
452	int invalidated, gccnt = 0;
453
454	/* Attempt a lockless lookup first.
455	 * Because of a concurrent writer, we might not find an existing entry.
456	 */
457	rcu_read_lock();
458	sequence = read_seqbegin(&base->lock);
459	p = lookup_rcu(daddr, base);
460	invalidated = read_seqretry(&base->lock, sequence);
461	rcu_read_unlock();
462
463	if (p)
464		return p;
465
466	/* If no writer did a change during our lookup, we can return early. */
467	if (!create && !invalidated)
468		return NULL;
469
470	/* retry an exact lookup, this time taking the lock first.
471	 * At least the nodes should be hot in our cache.
472	 */
473	write_seqlock_bh(&base->lock);
474relookup:
475	p = lookup(daddr, stack, base);
476	if (p != peer_avl_empty) {
477		atomic_inc(&p->refcnt);
478		write_sequnlock_bh(&base->lock);
479		return p;
480	}
481	if (!gccnt) {
482		gccnt = inet_peer_gc(base, stack, stackptr);
483		if (gccnt && create)
484			goto relookup;
485	}
486	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
487	if (p) {
488		p->daddr = *daddr;
489		atomic_set(&p->refcnt, 1);
490		atomic_set(&p->rid, 0);
491		atomic_set(&p->ip_id_count,
492				(daddr->family == AF_INET) ?
493					secure_ip_id(daddr->addr.a4) :
494					secure_ipv6_id(daddr->addr.a6));
495		p->tcp_ts_stamp = 0;
496		p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
497		p->rate_tokens = 0;
498		p->rate_last = 0;
499		p->pmtu_expires = 0;
500		p->pmtu_orig = 0;
501		memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
502		INIT_LIST_HEAD(&p->gc_list);
503
504		/* Link the node. */
505		link_to_pool(p, base);
506		base->total++;
507	}
508	write_sequnlock_bh(&base->lock);
509
510	return p;
511}
512EXPORT_SYMBOL_GPL(inet_getpeer);
513
514void inet_putpeer(struct inet_peer *p)
515{
516	p->dtime = (__u32)jiffies;
517	smp_mb__before_atomic_dec();
518	atomic_dec(&p->refcnt);
519}
520EXPORT_SYMBOL_GPL(inet_putpeer);
521
522/*
523 *	Check transmit rate limitation for given message.
524 *	The rate information is held in the inet_peer entries now.
525 *	This function is generic and could be used for other purposes
526 *	too. It uses a Token bucket filter as suggested by Alexey Kuznetsov.
527 *
528 *	Note that the same inet_peer fields are modified by functions in
529 *	route.c too, but these work for packet destinations while xrlim_allow
530 *	works for icmp destinations. This means the rate limiting information
531 *	for one "ip object" is shared - and these ICMPs are twice limited:
532 *	by source and by destination.
533 *
534 *	RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
535 *			  SHOULD allow setting of rate limits
536 *
537 * 	Shared between ICMPv4 and ICMPv6.
538 */
539#define XRLIM_BURST_FACTOR 6
540bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
541{
542	unsigned long now, token;
543	bool rc = false;
544
545	if (!peer)
546		return true;
547
548	token = peer->rate_tokens;
549	now = jiffies;
550	token += now - peer->rate_last;
551	peer->rate_last = now;
552	if (token > XRLIM_BURST_FACTOR * timeout)
553		token = XRLIM_BURST_FACTOR * timeout;
554	if (token >= timeout) {
555		token -= timeout;
556		rc = true;
557	}
558	peer->rate_tokens = token;
559	return rc;
560}
561EXPORT_SYMBOL(inet_peer_xrlim_allow);
562
563static void inetpeer_inval_rcu(struct rcu_head *head)
564{
565	struct inet_peer *p = container_of(head, struct inet_peer, gc_rcu);
566
567	spin_lock_bh(&gc_lock);
568	list_add_tail(&p->gc_list, &gc_list);
569	spin_unlock_bh(&gc_lock);
570
571	schedule_delayed_work(&gc_work, gc_delay);
572}
573
574void inetpeer_invalidate_tree(int family)
575{
576	struct inet_peer *old, *new, *prev;
577	struct inet_peer_base *base = family_to_base(family);
578
579	write_seqlock_bh(&base->lock);
580
581	old = base->root;
582	if (old == peer_avl_empty_rcu)
583		goto out;
584
585	new = peer_avl_empty_rcu;
586
587	prev = cmpxchg(&base->root, old, new);
588	if (prev == old) {
589		base->total = 0;
590		call_rcu(&prev->gc_rcu, inetpeer_inval_rcu);
591	}
592
593out:
594	write_sequnlock_bh(&base->lock);
595}
596EXPORT_SYMBOL(inetpeer_invalidate_tree);