  1/*
  2 *		INETPEER - A storage for permanent information about peers
  3 *
  4 *  This source is covered by the GNU GPL, the same as all kernel sources.
  5 *
  6 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
  7 */
  8
  9#include <linux/module.h>
 10#include <linux/types.h>
 11#include <linux/slab.h>
 12#include <linux/interrupt.h>
 13#include <linux/spinlock.h>
 14#include <linux/random.h>
 15#include <linux/timer.h>
 16#include <linux/time.h>
 17#include <linux/kernel.h>
 18#include <linux/mm.h>
 19#include <linux/net.h>
 20#include <linux/workqueue.h>
 21#include <net/ip.h>
 22#include <net/inetpeer.h>
 23#include <net/secure_seq.h>
 24
 25/*
 26 *  Theory of operations.
 27 *  We keep one entry for each peer IP address.  The nodes contain long-lived
 28 *  information about the peer which doesn't depend on routes.
 29 *  At this moment this information consists only of the ID field for the next
 30 *  outgoing IP packet.  This field is incremented with each packet as encoded
 31 *  in the inet_getid() function (include/net/inetpeer.h).
 32 *  At the time of writing these notes, the identifier of IP packets is generated
 33 *  to be unpredictable using this code only for packets subject
 34 *  (actually or potentially) to defragmentation.  I.e. DF packets smaller than
 35 *  the PMTU when local fragmentation is disabled use a constant ID and do
 36 *  not use this code (see ip_select_ident() in include/net/ip.h).
 37 *
 38 *  Route cache entries hold references to our nodes.
 39 *  New cache entries get references via lookup by destination IP address in
 40 *  the avl tree.  The reference is grabbed only when it's needed i.e. only
 41 *  when we try to output IP packet which needs an unpredictable ID (see
 42 *  __ip_select_ident() in net/ipv4/route.c).
 43 *  Nodes are removed only when their reference counter drops to 0.
 44 *  Once that happens, a node may be removed when a sufficient amount of
 45 *  time has passed since its last use.  Less-recently-used entries can
 46 *  also be removed if the pool is overloaded, i.e. if the total number of
 47 *  entries is greater than or equal to the threshold.
 48 *
 49 *  Node pool is organised as an AVL tree.
 50 *  Such an implementation has been chosen not just for fun.  It's a way to
 51 *  prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 52 *  number of long-lived nodes in a single hash slot would significantly delay
 53 *  lookups performed with BHs disabled.
 54 *
 55 *  Serialisation issues.
 56 *  1.  Nodes may appear in the tree only with the pool lock held.
 57 *  2.  Nodes may disappear from the tree only with the pool lock held
 58 *      AND reference count being 0.
 59 *  3.  Global variable peer_total is modified under the pool lock.
 60 *  4.  struct inet_peer fields modification:
 61 *		avl_left, avl_right, avl_parent, avl_height: pool lock
 62 *		refcnt: atomically against modifications on other CPU;
 63 *		   usually under some other lock to prevent node disappearing
 64 *		daddr: unchangeable
 65 *		ip_id_count: atomic value (no lock needed)
 66 */
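A minimal caller sketch of the refcount discipline described above (the wrapper below is invented for illustration; only inet_getpeer(), inet_putpeer() and the inetpeer_addr fields used elsewhere in this file are real):

/* Illustrative only: pin the pool entry for an IPv4 peer, use it, drop it. */
static void example_touch_peer(struct inet_peer_base *base, __be32 ip)
{
	struct inetpeer_addr daddr;
	struct inet_peer *peer;

	daddr.family = AF_INET;
	daddr.addr.a4 = ip;

	/* create=1: allocate a node if none exists; the returned
	 * reference keeps the node from being unlinked */
	peer = inet_getpeer(base, &daddr, 1);
	if (peer) {
		/* per-peer state (ip_id_count, metrics, rate tokens)
		 * lives behind this pointer */
		inet_putpeer(peer);	/* records dtime, drops the reference */
	}
}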
 67
 68static struct kmem_cache *peer_cachep __read_mostly;
 69
 70static LIST_HEAD(gc_list);
 71static const int gc_delay = 60 * HZ;
 72static struct delayed_work gc_work;
 73static DEFINE_SPINLOCK(gc_lock);
 74
 75#define node_height(x) x->avl_height
 76
 77#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
 78#define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node)
 79static const struct inet_peer peer_fake_node = {
 80	.avl_left	= peer_avl_empty_rcu,
 81	.avl_right	= peer_avl_empty_rcu,
 82	.avl_height	= 0
 83};
 84
 85void inet_peer_base_init(struct inet_peer_base *bp)
 86{
 87	bp->root = peer_avl_empty_rcu;
 88	seqlock_init(&bp->lock);
 89	bp->flush_seq = ~0U;
 90	bp->total = 0;
 91}
 92EXPORT_SYMBOL_GPL(inet_peer_base_init);
 93
 94static atomic_t v4_seq = ATOMIC_INIT(0);
 95static atomic_t v6_seq = ATOMIC_INIT(0);
 96
 97static atomic_t *inetpeer_seq_ptr(int family)
 98{
 99	return (family == AF_INET ? &v4_seq : &v6_seq);
100}
101
102static inline void flush_check(struct inet_peer_base *base, int family)
103{
104	atomic_t *fp = inetpeer_seq_ptr(family);
105
106	if (unlikely(base->flush_seq != atomic_read(fp))) {
107		inetpeer_invalidate_tree(base);
108		base->flush_seq = atomic_read(fp);
109	}
110}
111
112#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
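Why a depth of 40 suffices, as a back-of-the-envelope bound (arithmetic added for illustration, using the leaf-height-1 convention set in link_to_pool() below): the sparsest AVL tree of height h has at least F(h+2) - 1 nodes, and F(42) - 1 = 267914295, roughly 2^28, far more entries than inet_peer_threshold (65536 + 128) ever lets the pool hold.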
113
114/* Exported for sysctl_net_ipv4.  */
115int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
116					 * aggressively at this stage */
117int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
118int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */
119
120static void inetpeer_gc_worker(struct work_struct *work)
121{
122	struct inet_peer *p, *n, *c;
123	LIST_HEAD(list);
124
125	spin_lock_bh(&gc_lock);
126	list_replace_init(&gc_list, &list);
127	spin_unlock_bh(&gc_lock);
128
129	if (list_empty(&list))
130		return;
131
132	list_for_each_entry_safe(p, n, &list, gc_list) {
133
134		if (need_resched())
135			cond_resched();
136
137		c = rcu_dereference_protected(p->avl_left, 1);
138		if (c != peer_avl_empty) {
139			list_add_tail(&c->gc_list, &list);
140			p->avl_left = peer_avl_empty_rcu;
141		}
142
143		c = rcu_dereference_protected(p->avl_right, 1);
144		if (c != peer_avl_empty) {
145			list_add_tail(&c->gc_list, &list);
146			p->avl_right = peer_avl_empty_rcu;
147		}
148
149		n = list_entry(p->gc_list.next, struct inet_peer, gc_list);
150
151		if (!atomic_read(&p->refcnt)) {
152			list_del(&p->gc_list);
153			kmem_cache_free(peer_cachep, p);
154		}
155	}
156
157	if (list_empty(&list))
158		return;
159
160	spin_lock_bh(&gc_lock);
161	list_splice(&list, &gc_list);
162	spin_unlock_bh(&gc_lock);
163
164	schedule_delayed_work(&gc_work, gc_delay);
165}
166
167/* Called from ip_output.c:ip_init  */
168void __init inet_initpeers(void)
169{
170	struct sysinfo si;
171
172	/* Use the direct interface to memory information. */
173	si_meminfo(&si);
174	/* The values below were suggested by Alexey Kuznetsov
175	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
176	 * myself.  --SAW
177	 */
178	if (si.totalram <= (32768*1024)/PAGE_SIZE)
179		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
180	if (si.totalram <= (16384*1024)/PAGE_SIZE)
181		inet_peer_threshold >>= 1; /* about 512KB */
182	if (si.totalram <= (8192*1024)/PAGE_SIZE)
183		inet_peer_threshold >>= 2; /* about 128KB */
184
185	peer_cachep = kmem_cache_create("inet_peer_cache",
186			sizeof(struct inet_peer),
187			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
188			NULL);
189
190	INIT_DEFERRABLE_WORK(&gc_work, inetpeer_gc_worker);
191}
192
193static int addr_compare(const struct inetpeer_addr *a,
194			const struct inetpeer_addr *b)
195{
196	int i, n = (a->family == AF_INET ? 1 : 4);
197
198	for (i = 0; i < n; i++) {
199		if (a->addr.a6[i] == b->addr.a6[i])
200			continue;
201		if ((__force u32)a->addr.a6[i] < (__force u32)b->addr.a6[i])
202			return -1;
203		return 1;
204	}
205
206	return 0;
207}
208
209#define rcu_deref_locked(X, BASE)				\
210	rcu_dereference_protected(X, lockdep_is_held(&(BASE)->lock.lock))
211
212/*
213 * Called with local BH disabled and the pool lock held.
214 */
215#define lookup(_daddr, _stack, _base)				\
216({								\
217	struct inet_peer *u;					\
218	struct inet_peer __rcu **v;				\
219								\
220	stackptr = _stack;					\
221	*stackptr++ = &_base->root;				\
222	for (u = rcu_deref_locked(_base->root, _base);		\
223	     u != peer_avl_empty;) {				\
224		int cmp = addr_compare(_daddr, &u->daddr);	\
225		if (cmp == 0)					\
226			break;					\
227		if (cmp == -1)					\
228			v = &u->avl_left;			\
229		else						\
230			v = &u->avl_right;			\
231		*stackptr++ = v;				\
232		u = rcu_deref_locked(*v, _base);		\
233	}							\
234	u;							\
235})
236
237/*
238 * Called with rcu_read_lock()
239 * Because we hold no lock against a writer, it's quite possible we fall
240 * into an endless loop.
241 * But every pointer we follow is guaranteed to be valid thanks to RCU.
242 * We exit from this function if the number of links exceeds PEER_MAXDEPTH.
243 */
244static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
245				    struct inet_peer_base *base)
246{
247	struct inet_peer *u = rcu_dereference(base->root);
248	int count = 0;
249
250	while (u != peer_avl_empty) {
251		int cmp = addr_compare(daddr, &u->daddr);
252		if (cmp == 0) {
253			/* Before taking a reference, check if this entry was
254			 * deleted (refcnt=-1)
255			 */
256			if (!atomic_add_unless(&u->refcnt, 1, -1))
257				u = NULL;
258			return u;
259		}
260		if (cmp == -1)
261			u = rcu_dereference(u->avl_left);
262		else
263			u = rcu_dereference(u->avl_right);
264		if (unlikely(++count == PEER_MAXDEPTH))
265			break;
266	}
267	return NULL;
268}
269
270/* Called with local BH disabled and the pool lock held. */
271#define lookup_rightempty(start, base)				\
272({								\
273	struct inet_peer *u;					\
274	struct inet_peer __rcu **v;				\
275	*stackptr++ = &start->avl_left;				\
276	v = &start->avl_left;					\
277	for (u = rcu_deref_locked(*v, base);			\
278	     u->avl_right != peer_avl_empty_rcu;) {		\
279		v = &u->avl_right;				\
280		*stackptr++ = v;				\
281		u = rcu_deref_locked(*v, base);			\
282	}							\
283	u;							\
284})
285
286/* Called with local BH disabled and the pool lock held.
287 * Variable names are the proof of operation correctness.
288 * Look into mm/map_avl.c for a more detailed description of the ideas.
289 */
290static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
291			       struct inet_peer __rcu ***stackend,
292			       struct inet_peer_base *base)
293{
294	struct inet_peer __rcu **nodep;
295	struct inet_peer *node, *l, *r;
296	int lh, rh;
297
298	while (stackend > stack) {
299		nodep = *--stackend;
300		node = rcu_deref_locked(*nodep, base);
301		l = rcu_deref_locked(node->avl_left, base);
302		r = rcu_deref_locked(node->avl_right, base);
303		lh = node_height(l);
304		rh = node_height(r);
305		if (lh > rh + 1) { /* l: RH+2 */
306			struct inet_peer *ll, *lr, *lrl, *lrr;
307			int lrh;
308			ll = rcu_deref_locked(l->avl_left, base);
309			lr = rcu_deref_locked(l->avl_right, base);
310			lrh = node_height(lr);
311			if (lrh <= node_height(ll)) {	/* ll: RH+1 */
312				RCU_INIT_POINTER(node->avl_left, lr);	/* lr: RH or RH+1 */
313				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
314				node->avl_height = lrh + 1; /* RH+1 or RH+2 */
315				RCU_INIT_POINTER(l->avl_left, ll);       /* ll: RH+1 */
316				RCU_INIT_POINTER(l->avl_right, node);	/* node: RH+1 or RH+2 */
317				l->avl_height = node->avl_height + 1;
318				RCU_INIT_POINTER(*nodep, l);
319			} else { /* ll: RH, lr: RH+1 */
320				lrl = rcu_deref_locked(lr->avl_left, base);/* lrl: RH or RH-1 */
321				lrr = rcu_deref_locked(lr->avl_right, base);/* lrr: RH or RH-1 */
322				RCU_INIT_POINTER(node->avl_left, lrr);	/* lrr: RH or RH-1 */
323				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
324				node->avl_height = rh + 1; /* node: RH+1 */
325				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH */
326				RCU_INIT_POINTER(l->avl_right, lrl);	/* lrl: RH or RH-1 */
327				l->avl_height = rh + 1;	/* l: RH+1 */
328				RCU_INIT_POINTER(lr->avl_left, l);	/* l: RH+1 */
329				RCU_INIT_POINTER(lr->avl_right, node);	/* node: RH+1 */
330				lr->avl_height = rh + 2;
331				RCU_INIT_POINTER(*nodep, lr);
332			}
333		} else if (rh > lh + 1) { /* r: LH+2 */
334			struct inet_peer *rr, *rl, *rlr, *rll;
335			int rlh;
336			rr = rcu_deref_locked(r->avl_right, base);
337			rl = rcu_deref_locked(r->avl_left, base);
338			rlh = node_height(rl);
339			if (rlh <= node_height(rr)) {	/* rr: LH+1 */
340				RCU_INIT_POINTER(node->avl_right, rl);	/* rl: LH or LH+1 */
341				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
342				node->avl_height = rlh + 1; /* LH+1 or LH+2 */
343				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH+1 */
344				RCU_INIT_POINTER(r->avl_left, node);	/* node: LH+1 or LH+2 */
345				r->avl_height = node->avl_height + 1;
346				RCU_INIT_POINTER(*nodep, r);
347			} else { /* rr: LH, rl: LH+1 */
348				rlr = rcu_deref_locked(rl->avl_right, base);/* rlr: LH or LH-1 */
349				rll = rcu_deref_locked(rl->avl_left, base);/* rll: LH or LH-1 */
350				RCU_INIT_POINTER(node->avl_right, rll);	/* rll: LH or LH-1 */
351				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
352				node->avl_height = lh + 1; /* node: LH+1 */
353				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH */
354				RCU_INIT_POINTER(r->avl_left, rlr);	/* rlr: LH or LH-1 */
355				r->avl_height = lh + 1;	/* r: LH+1 */
356				RCU_INIT_POINTER(rl->avl_right, r);	/* r: LH+1 */
357				RCU_INIT_POINTER(rl->avl_left, node);	/* node: LH+1 */
358				rl->avl_height = lh + 2;
359				RCU_INIT_POINTER(*nodep, rl);
360			}
361		} else {
362			node->avl_height = (lh > rh ? lh : rh) + 1;
363		}
364	}
365}
366
367/* Called with local BH disabled and the pool lock held. */
368#define link_to_pool(n, base)					\
369do {								\
370	n->avl_height = 1;					\
371	n->avl_left = peer_avl_empty_rcu;			\
372	n->avl_right = peer_avl_empty_rcu;			\
373	/* lockless readers can catch us now */			\
374	rcu_assign_pointer(**--stackptr, n);			\
375	peer_avl_rebalance(stack, stackptr, base);		\
376} while (0)
377
378static void inetpeer_free_rcu(struct rcu_head *head)
379{
380	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
381}
382
383static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
384			     struct inet_peer __rcu **stack[PEER_MAXDEPTH])
385{
386	struct inet_peer __rcu ***stackptr, ***delp;
387
388	if (lookup(&p->daddr, stack, base) != p)
389		BUG();
390	delp = stackptr - 1; /* *delp[0] == p */
391	if (p->avl_left == peer_avl_empty_rcu) {
392		*delp[0] = p->avl_right;
393		--stackptr;
394	} else {
395		/* look for a node to insert instead of p */
396		struct inet_peer *t;
397		t = lookup_rightempty(p, base);
398		BUG_ON(rcu_deref_locked(*stackptr[-1], base) != t);
399		**--stackptr = t->avl_left;
400		/* t is removed, t->daddr > x->daddr for any
401		 * x in p->avl_left subtree.
402		 * Put t in the old place of p. */
403		RCU_INIT_POINTER(*delp[0], t);
404		t->avl_left = p->avl_left;
405		t->avl_right = p->avl_right;
406		t->avl_height = p->avl_height;
407		BUG_ON(delp[1] != &p->avl_left);
408		delp[1] = &t->avl_left; /* was &p->avl_left */
409	}
410	peer_avl_rebalance(stack, stackptr, base);
411	base->total--;
412	call_rcu(&p->rcu, inetpeer_free_rcu);
413}
414
415/* perform garbage collection on all items stacked during a lookup */
416static int inet_peer_gc(struct inet_peer_base *base,
417			struct inet_peer __rcu **stack[PEER_MAXDEPTH],
418			struct inet_peer __rcu ***stackptr)
419{
420	struct inet_peer *p, *gchead = NULL;
421	__u32 delta, ttl;
422	int cnt = 0;
423
424	if (base->total >= inet_peer_threshold)
425		ttl = 0; /* be aggressive */
426	else
427		ttl = inet_peer_maxttl
428				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
429					base->total / inet_peer_threshold * HZ;
430	stackptr--; /* last stack slot is peer_avl_empty */
431	while (stackptr > stack) {
432		stackptr--;
433		p = rcu_deref_locked(**stackptr, base);
434		if (atomic_read(&p->refcnt) == 0) {
435			smp_rmb();
436			delta = (__u32)jiffies - p->dtime;
437			if (delta >= ttl &&
438			    atomic_cmpxchg(&p->refcnt, 0, -1) == 0) {
439				p->gc_next = gchead;
440				gchead = p;
441			}
442		}
443	}
444	while ((p = gchead) != NULL) {
445		gchead = p->gc_next;
446		cnt++;
447		unlink_from_pool(p, base, stack);
448	}
449	return cnt;
450}
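To make the ttl interpolation above concrete (illustrative arithmetic with the default tunables defined earlier in this file): with inet_peer_maxttl = 600*HZ, inet_peer_minttl = 120*HZ and inet_peer_threshold = 65664, a pool that is exactly half full (base->total = 32832) gives

	ttl = 600*HZ - (600*HZ - 120*HZ) / HZ * 32832 / 65664 * HZ
	    = 600*HZ - 480 * 32832 / 65664 * HZ
	    = 600*HZ - 240*HZ = 360*HZ

so idle entries younger than about six minutes survive; the window shrinks linearly toward the two-minute inet_peer_minttl as the pool fills, and drops to zero once the threshold is reached.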
451
452struct inet_peer *inet_getpeer(struct inet_peer_base *base,
453			       const struct inetpeer_addr *daddr,
454			       int create)
455{
456	struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
457	struct inet_peer *p;
458	unsigned int sequence;
459	int invalidated, gccnt = 0;
460
461	flush_check(base, daddr->family);
462
463	/* Attempt a lockless lookup first.
464	 * Because of a concurrent writer, we might not find an existing entry.
465	 */
466	rcu_read_lock();
467	sequence = read_seqbegin(&base->lock);
468	p = lookup_rcu(daddr, base);
469	invalidated = read_seqretry(&base->lock, sequence);
470	rcu_read_unlock();
471
472	if (p)
473		return p;
474
475	/* If no writer made a change during our lookup, we can return early. */
476	if (!create && !invalidated)
477		return NULL;
478
479	/* Retry an exact lookup, this time taking the lock first.
480	 * At least the nodes should be hot in our cache.
481	 */
482	write_seqlock_bh(&base->lock);
483relookup:
484	p = lookup(daddr, stack, base);
485	if (p != peer_avl_empty) {
486		atomic_inc(&p->refcnt);
487		write_sequnlock_bh(&base->lock);
488		return p;
489	}
490	if (!gccnt) {
491		gccnt = inet_peer_gc(base, stack, stackptr);
492		if (gccnt && create)
493			goto relookup;
494	}
495	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
496	if (p) {
497		p->daddr = *daddr;
498		atomic_set(&p->refcnt, 1);
499		atomic_set(&p->rid, 0);
500		atomic_set(&p->ip_id_count,
501				(daddr->family == AF_INET) ?
502					secure_ip_id(daddr->addr.a4) :
503					secure_ipv6_id(daddr->addr.a6));
504		p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
505		p->rate_tokens = 0;
506		/* 60*HZ is arbitrary, but chosen high enough that the first
507		 * calculation of tokens is at its maximum.
508		 */
509		p->rate_last = jiffies - 60*HZ;
510		INIT_LIST_HEAD(&p->gc_list);
511
512		/* Link the node. */
513		link_to_pool(p, base);
514		base->total++;
515	}
516	write_sequnlock_bh(&base->lock);
517
518	return p;
519}
520EXPORT_SYMBOL_GPL(inet_getpeer);
521
522void inet_putpeer(struct inet_peer *p)
523{
524	p->dtime = (__u32)jiffies;
525	smp_mb__before_atomic_dec();
526	atomic_dec(&p->refcnt);
527}
528EXPORT_SYMBOL_GPL(inet_putpeer);
529
530/*
531 *	Check transmit rate limitation for a given message.
532 *	The rate information is held in the inet_peer entries now.
533 *	This function is generic and could be used for other purposes
534 *	too. It uses a token bucket filter as suggested by Alexey Kuznetsov.
535 *
536 *	Note that the same inet_peer fields are modified by functions in
537 *	route.c too, but these work for packet destinations while xrlim_allow
538 *	works for icmp destinations. This means the rate limiting information
539 *	for one "ip object" is shared - and these ICMPs are twice limited:
540 *	by source and by destination.
541 *
542 *	RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
543 *			  SHOULD allow setting of rate limits
544 *
545 * 	Shared between ICMPv4 and ICMPv6.
546 */
547#define XRLIM_BURST_FACTOR 6
548bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
549{
550	unsigned long now, token;
551	bool rc = false;
552
553	if (!peer)
554		return true;
555
556	token = peer->rate_tokens;
557	now = jiffies;
558	token += now - peer->rate_last;
559	peer->rate_last = now;
560	if (token > XRLIM_BURST_FACTOR * timeout)
561		token = XRLIM_BURST_FACTOR * timeout;
562	if (token >= timeout) {
563		token -= timeout;
564		rc = true;
565	}
566	peer->rate_tokens = token;
567	return rc;
568}
569EXPORT_SYMBOL(inet_peer_xrlim_allow);
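A hedged usage sketch (the wrapper is invented for illustration; only inet_peer_xrlim_allow() and XRLIM_BURST_FACTOR are from this file): an ICMP-style sender passes its per-destination rate limit, in jiffies, as the timeout.

/* Illustrative only: allow roughly one message per second per peer,
 * with a burst of up to XRLIM_BURST_FACTOR (6) after an idle period. */
static bool example_may_send_error(struct inet_peer *peer)
{
	return inet_peer_xrlim_allow(peer, 1 * HZ);
}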
570
571static void inetpeer_inval_rcu(struct rcu_head *head)
572{
573	struct inet_peer *p = container_of(head, struct inet_peer, gc_rcu);
574
575	spin_lock_bh(&gc_lock);
576	list_add_tail(&p->gc_list, &gc_list);
577	spin_unlock_bh(&gc_lock);
578
579	schedule_delayed_work(&gc_work, gc_delay);
580}
581
582void inetpeer_invalidate_tree(struct inet_peer_base *base)
583{
584	struct inet_peer *root;
585
586	write_seqlock_bh(&base->lock);
587
588	root = rcu_deref_locked(base->root, base);
589	if (root != peer_avl_empty) {
590		base->root = peer_avl_empty_rcu;
591		base->total = 0;
592		call_rcu(&root->gc_rcu, inetpeer_inval_rcu);
593	}
594
595	write_sequnlock_bh(&base->lock);
596}
597EXPORT_SYMBOL(inetpeer_invalidate_tree);
  1/*
  2 *		INETPEER - A storage for permanent information about peers
  3 *
  4 *  This source is covered by the GNU GPL, the same as all kernel sources.
  5 *
  6 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
  7 */
  8
  9#include <linux/cache.h>
 10#include <linux/module.h>
 11#include <linux/types.h>
 12#include <linux/slab.h>
 13#include <linux/interrupt.h>
 14#include <linux/spinlock.h>
 15#include <linux/random.h>
 16#include <linux/timer.h>
 17#include <linux/time.h>
 18#include <linux/kernel.h>
 19#include <linux/mm.h>
 20#include <linux/net.h>
 21#include <linux/workqueue.h>
 22#include <net/ip.h>
 23#include <net/inetpeer.h>
 24#include <net/secure_seq.h>
 25
 26/*
 27 *  Theory of operations.
 28 *  We keep one entry for each peer IP address.  The nodes contain long-lived
 29 *  information about the peer which doesn't depend on routes.
 30 *
 31 *  Nodes are removed only when their reference counter drops to 0.
 32 *  Once that happens, a node may be removed when a sufficient amount of
 33 *  time has passed since its last use.  Less-recently-used entries can
 34 *  also be removed if the pool is overloaded, i.e. if the total number of
 35 *  entries is greater than or equal to the threshold.
 36 *
 37 *  Node pool is organised as an RB tree.
 38 *  Such an implementation has been chosen not just for fun.  It's a way to
 39 *  prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 40 *  number of long-lived nodes in a single hash slot would significantly delay
 41 *  lookups performed with BHs disabled.
 42 *
 43 *  Serialisation issues.
 44 *  1.  Nodes may appear in the tree only with the pool lock held.
 45 *  2.  Nodes may disappear from the tree only with the pool lock held
 46 *      AND reference count being 0.
 47 *  3.  Global variable peer_total is modified under the pool lock.
 48 *  4.  struct inet_peer fields modification:
 49 *		rb_node: pool lock
 50 *		refcnt: atomically against modifications on other CPU;
 51 *		   usually under some other lock to prevent node disappearing
 52 *		daddr: unchangeable
 53 */
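A small lifecycle sketch for this rb-tree version (the wrapper is invented for illustration; only inet_getpeer() and inet_putpeer() are from this file). A freshly created node starts with a refcount of 2 (see refcount_set() in inet_getpeer() below), which reads naturally as one reference owned by the tree and one handed to the caller:

/* Illustrative only */
static void example_peer_roundtrip(struct inet_peer_base *base,
				   const struct inetpeer_addr *daddr)
{
	struct inet_peer *peer;

	peer = inet_getpeer(base, daddr, 1);	/* tree ref + caller ref */
	if (peer)
		inet_putpeer(peer);	/* drops the caller ref; the node stays
					 * in the tree until GC or
					 * inetpeer_invalidate_tree() releases
					 * the tree's reference */
}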
 54
 55static struct kmem_cache *peer_cachep __ro_after_init;
 56
 57void inet_peer_base_init(struct inet_peer_base *bp)
 58{
 59	bp->rb_root = RB_ROOT;
 60	seqlock_init(&bp->lock);
 61	bp->total = 0;
 62}
 63EXPORT_SYMBOL_GPL(inet_peer_base_init);
 64
 65#define PEER_MAX_GC 32
 66
 67/* Exported for sysctl_net_ipv4.  */
 68int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
 69					 * aggressively at this stage */
 70int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
 71int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */
 72
 73/* Called from ip_output.c:ip_init  */
 74void __init inet_initpeers(void)
 75{
 76	struct sysinfo si;
 77
 78	/* Use the straight interface to information about memory. */
 79	si_meminfo(&si);
 80	/* The values below were suggested by Alexey Kuznetsov
 81	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
 82	 * myself.  --SAW
 83	 */
 84	if (si.totalram <= (32768*1024)/PAGE_SIZE)
 85		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
 86	if (si.totalram <= (16384*1024)/PAGE_SIZE)
 87		inet_peer_threshold >>= 1; /* about 512KB */
 88	if (si.totalram <= (8192*1024)/PAGE_SIZE)
 89		inet_peer_threshold >>= 2; /* about 128KB */
 90
 91	peer_cachep = kmem_cache_create("inet_peer_cache",
 92			sizeof(struct inet_peer),
 93			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
 94			NULL);
 95}
 96
 97/* Called with rcu_read_lock() or base->lock held */
 98static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
 99				struct inet_peer_base *base,
100				unsigned int seq,
101				struct inet_peer *gc_stack[],
102				unsigned int *gc_cnt,
103				struct rb_node **parent_p,
104				struct rb_node ***pp_p)
105{
106	struct rb_node **pp, *parent, *next;
107	struct inet_peer *p;
108
109	pp = &base->rb_root.rb_node;
110	parent = NULL;
111	while (1) {
112		int cmp;
113
114		next = rcu_dereference_raw(*pp);
115		if (!next)
116			break;
117		parent = next;
118		p = rb_entry(parent, struct inet_peer, rb_node);
119		cmp = inetpeer_addr_cmp(daddr, &p->daddr);
120		if (cmp == 0) {
121			if (!refcount_inc_not_zero(&p->refcnt))
122				break;
123			return p;
124		}
125		if (gc_stack) {
126			if (*gc_cnt < PEER_MAX_GC)
127				gc_stack[(*gc_cnt)++] = p;
128		} else if (unlikely(read_seqretry(&base->lock, seq))) {
129			break;
130		}
131		if (cmp == -1)
132			pp = &next->rb_left;
133		else
134			pp = &next->rb_right;
135	}
136	*parent_p = parent;
137	*pp_p = pp;
138	return NULL;
139}
140
141static void inetpeer_free_rcu(struct rcu_head *head)
142{
143	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
144}
145
146/* perform garbage collection on all items stacked during a lookup */
147static void inet_peer_gc(struct inet_peer_base *base,
148			 struct inet_peer *gc_stack[],
149			 unsigned int gc_cnt)
150{
151	struct inet_peer *p;
152	__u32 delta, ttl;
153	int i;
154
155	if (base->total >= inet_peer_threshold)
156		ttl = 0; /* be aggressive */
157	else
158		ttl = inet_peer_maxttl
159				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
160					base->total / inet_peer_threshold * HZ;
161	for (i = 0; i < gc_cnt; i++) {
162		p = gc_stack[i];
163		delta = (__u32)jiffies - p->dtime;
164		if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
165			gc_stack[i] = NULL;
166	}
167	for (i = 0; i < gc_cnt; i++) {
168		p = gc_stack[i];
169		if (p) {
170			rb_erase(&p->rb_node, &base->rb_root);
171			base->total--;
172			call_rcu(&p->rcu, inetpeer_free_rcu);
173		}
174	}
175}
176
177struct inet_peer *inet_getpeer(struct inet_peer_base *base,
178			       const struct inetpeer_addr *daddr,
179			       int create)
180{
181	struct inet_peer *p, *gc_stack[PEER_MAX_GC];
182	struct rb_node **pp, *parent;
183	unsigned int gc_cnt, seq;
184	int invalidated;
185
186	/* Attempt a lockless lookup first.
187	 * Because of a concurrent writer, we might not find an existing entry.
188	 */
189	rcu_read_lock();
190	seq = read_seqbegin(&base->lock);
191	p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);
192	invalidated = read_seqretry(&base->lock, seq);
193	rcu_read_unlock();
194
195	if (p)
196		return p;
197
198	/* If no writer made a change during our lookup, we can return early. */
199	if (!create && !invalidated)
200		return NULL;
201
202	/* Retry an exact lookup, this time taking the lock first.
203	 * At least the nodes should be hot in our cache.
204	 */
205	parent = NULL;
206	write_seqlock_bh(&base->lock);
207
208	gc_cnt = 0;
209	p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
210	if (!p && create) {
211		p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
212		if (p) {
213			p->daddr = *daddr;
214			p->dtime = (__u32)jiffies;
215			refcount_set(&p->refcnt, 2);
216			atomic_set(&p->rid, 0);
217			p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
218			p->rate_tokens = 0;
219			p->n_redirects = 0;
220			/* 60*HZ is arbitrary, but chosen high enough that the first
221			 * calculation of tokens is at its maximum.
222			 */
223			p->rate_last = jiffies - 60*HZ;
224
225			rb_link_node(&p->rb_node, parent, pp);
226			rb_insert_color(&p->rb_node, &base->rb_root);
227			base->total++;
228		}
229	}
230	if (gc_cnt)
231		inet_peer_gc(base, gc_stack, gc_cnt);
232	write_sequnlock_bh(&base->lock);
233
234	return p;
235}
236EXPORT_SYMBOL_GPL(inet_getpeer);
237
238void inet_putpeer(struct inet_peer *p)
239{
240	p->dtime = (__u32)jiffies;
241
242	if (refcount_dec_and_test(&p->refcnt))
243		call_rcu(&p->rcu, inetpeer_free_rcu);
244}
245EXPORT_SYMBOL_GPL(inet_putpeer);
246
247/*
248 *	Check transmit rate limitation for a given message.
249 *	The rate information is held in the inet_peer entries now.
250 *	This function is generic and could be used for other purposes
251 *	too. It uses a token bucket filter as suggested by Alexey Kuznetsov.
252 *
253 *	Note that the same inet_peer fields are modified by functions in
254 *	route.c too, but these work for packet destinations while xrlim_allow
255 *	works for icmp destinations. This means the rate limiting information
256 *	for one "ip object" is shared - and these ICMPs are twice limited:
257 *	by source and by destination.
258 *
259 *	RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
260 *			  SHOULD allow setting of rate limits
261 *
262 * 	Shared between ICMPv4 and ICMPv6.
263 */
264#define XRLIM_BURST_FACTOR 6
265bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
266{
267	unsigned long now, token;
268	bool rc = false;
269
270	if (!peer)
271		return true;
272
273	token = peer->rate_tokens;
274	now = jiffies;
275	token += now - peer->rate_last;
276	peer->rate_last = now;
277	if (token > XRLIM_BURST_FACTOR * timeout)
278		token = XRLIM_BURST_FACTOR * timeout;
279	if (token >= timeout) {
280		token -= timeout;
281		rc = true;
282	}
283	peer->rate_tokens = token;
284	return rc;
285}
286EXPORT_SYMBOL(inet_peer_xrlim_allow);
287
288void inetpeer_invalidate_tree(struct inet_peer_base *base)
289{
290	struct rb_node *p = rb_first(&base->rb_root);
291
292	while (p) {
293		struct inet_peer *peer = rb_entry(p, struct inet_peer, rb_node);
294
295		p = rb_next(p);
296		rb_erase(&peer->rb_node, &base->rb_root);
297		inet_putpeer(peer);
298		cond_resched();
299	}
300
301	base->total = 0;
302}
303EXPORT_SYMBOL(inetpeer_invalidate_tree);