v6.13.7
  1/*
  2 *		INETPEER - A storage for permanent information about peers
  3 *
  4 *  This source is covered by the GNU GPL, the same as all kernel sources.
  5 *
  6 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
  7 */
  8
  9#include <linux/cache.h>
 10#include <linux/module.h>
 11#include <linux/types.h>
 12#include <linux/slab.h>
 13#include <linux/interrupt.h>
 14#include <linux/spinlock.h>
 15#include <linux/random.h>
 16#include <linux/timer.h>
 17#include <linux/time.h>
 18#include <linux/kernel.h>
 19#include <linux/mm.h>
 20#include <linux/net.h>
 21#include <linux/workqueue.h>
 22#include <net/ip.h>
 23#include <net/inetpeer.h>
 24#include <net/secure_seq.h>
 25
 26/*
 27 *  Theory of operations.
  28 *  We keep one entry for each peer IP address.  Each node contains long-lived
  29 *  information about the peer which doesn't depend on routes.
  30 *
  31 *  Nodes are removed only when their reference counter drops to 0.
  32 *  Once that happens, a node may be removed after a sufficient amount of
  33 *  time has passed since its last use.  A less-recently-used entry can
  34 *  also be removed if the pool is overloaded, i.e. if the total number of
  35 *  entries is greater than or equal to the threshold.
 36 *
 37 *  Node pool is organised as an RB tree.
 38 *  Such an implementation has been chosen not just for fun.  It's a way to
 39 *  prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 40 *  amount of long living nodes in a single hash slot would significantly delay
 41 *  lookups performed with disabled BHs.
 42 *
 43 *  Serialisation issues.
 44 *  1.  Nodes may appear in the tree only with the pool lock held.
 45 *  2.  Nodes may disappear from the tree only with the pool lock held
 46 *      AND reference count being 0.
 47 *  3.  Global variable peer_total is modified under the pool lock.
 48 *  4.  struct inet_peer fields modification:
 49 *		rb_node: pool lock
 50 *		refcnt: atomically against modifications on other CPU;
 51 *		   usually under some other lock to prevent node disappearing
 52 *		daddr: unchangeable
 53 */
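
As a rough userspace illustration of the removal rules spelled out above (not kernel code; the struct and function names below are invented for the example), an entry only becomes an eviction candidate once nothing references it, and it is actually reclaimed either because the pool is at or above the threshold or because it has been idle longer than the computed TTL:

#include <stdbool.h>

struct peer_entry {
	int refcnt;                      /* references currently held */
	unsigned long last_use;          /* timestamp of last use */
};

static bool may_evict(const struct peer_entry *e, unsigned long now,
		      unsigned long ttl, unsigned long total,
		      unsigned long threshold)
{
	if (e->refcnt != 0)              /* never while someone holds a reference */
		return false;
	if (total >= threshold)          /* pool overloaded: evict aggressively */
		return true;
	return now - e->last_use >= ttl; /* otherwise only once it has gone stale */
}
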
 54
 55static struct kmem_cache *peer_cachep __ro_after_init;
 56
 57void inet_peer_base_init(struct inet_peer_base *bp)
 58{
 59	bp->rb_root = RB_ROOT;
 60	seqlock_init(&bp->lock);
 61	bp->total = 0;
 62}
 63EXPORT_SYMBOL_GPL(inet_peer_base_init);
 64
 65#define PEER_MAX_GC 32
 66
 67/* Exported for sysctl_net_ipv4.  */
 68int inet_peer_threshold __read_mostly;	/* start to throw entries more
 69					 * aggressively at this stage */
 70int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
 71int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */
 72
 73/* Called from ip_output.c:ip_init  */
 74void __init inet_initpeers(void)
 75{
 76	u64 nr_entries;
 77
 78	 /* 1% of physical memory */
 79	nr_entries = div64_ul((u64)totalram_pages() << PAGE_SHIFT,
 80			      100 * L1_CACHE_ALIGN(sizeof(struct inet_peer)));
 81
 82	inet_peer_threshold = clamp_val(nr_entries, 4096, 65536 + 128);
 83
 84	peer_cachep = KMEM_CACHE(inet_peer, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 85}
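
To see what the 1%-of-RAM sizing above works out to, here is a hypothetical standalone sketch; the 192-byte object size stands in for L1_CACHE_ALIGN(sizeof(struct inet_peer)) and the RAM figures are arbitrary, so the printed thresholds are illustrative only:

#include <stdio.h>
#include <stdint.h>

static uint64_t clamp_u64(uint64_t v, uint64_t lo, uint64_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	const uint64_t obj_size = 192;   /* assumed aligned size of struct inet_peer */
	const uint64_t ram[] = { 512ULL << 20, 16ULL << 30 };

	for (int i = 0; i < 2; i++) {
		uint64_t nr = ram[i] / (100 * obj_size);   /* entries worth 1% of RAM */

		printf("%6llu MiB RAM -> inet_peer_threshold %llu\n",
		       (unsigned long long)(ram[i] >> 20),
		       (unsigned long long)clamp_u64(nr, 4096, 65536 + 128));
	}
	return 0;
}

With these assumptions a 512 MiB machine lands mid-range (27962 entries) while anything with a few GiB of RAM hits the 65664 ceiling.
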
 86
 87/* Called with rcu_read_lock() or base->lock held */
 88static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
 89				struct inet_peer_base *base,
 90				unsigned int seq,
 91				struct inet_peer *gc_stack[],
 92				unsigned int *gc_cnt,
 93				struct rb_node **parent_p,
 94				struct rb_node ***pp_p)
 95{
 96	struct rb_node **pp, *parent, *next;
 97	struct inet_peer *p;
 98	u32 now;
 99
100	pp = &base->rb_root.rb_node;
101	parent = NULL;
102	while (1) {
103		int cmp;
104
105		next = rcu_dereference_raw(*pp);
106		if (!next)
107			break;
108		parent = next;
109		p = rb_entry(parent, struct inet_peer, rb_node);
110		cmp = inetpeer_addr_cmp(daddr, &p->daddr);
111		if (cmp == 0) {
112			now = jiffies;
113			if (READ_ONCE(p->dtime) != now)
114				WRITE_ONCE(p->dtime, now);
115			return p;
116		}
117		if (gc_stack) {
118			if (*gc_cnt < PEER_MAX_GC)
119				gc_stack[(*gc_cnt)++] = p;
120		} else if (unlikely(read_seqretry(&base->lock, seq))) {
121			break;
122		}
123		if (cmp == -1)
124			pp = &next->rb_left;
125		else
126			pp = &next->rb_right;
127	}
128	*parent_p = parent;
129	*pp_p = pp;
130	return NULL;
131}
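
The same descent serves two callers: the lockless reader (gc_stack == NULL), which gives up via read_seqretry() if a writer raced with it, and the locked writer path, which records every node it crosses as a cheap garbage-collection candidate. A hypothetical userspace analog of that second behaviour, with made-up names, might look like:

#include <stddef.h>
#include <stdint.h>

#define MAX_GC 32                        /* mirrors PEER_MAX_GC */

struct node { struct node *left, *right; uint32_t key; };

static struct node *bst_lookup(struct node *root, uint32_t key,
			       struct node *gc_stack[], unsigned int *gc_cnt)
{
	for (struct node *n = root; n; n = key < n->key ? n->left : n->right) {
		if (key == n->key)
			return n;                       /* hit */
		if (gc_stack && *gc_cnt < MAX_GC)
			gc_stack[(*gc_cnt)++] = n;      /* remember as a GC candidate */
	}
	return NULL;                                    /* miss: caller may insert */
}
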
132
133/* perform garbage collect on all items stacked during a lookup */
134static void inet_peer_gc(struct inet_peer_base *base,
135			 struct inet_peer *gc_stack[],
136			 unsigned int gc_cnt)
137{
138	int peer_threshold, peer_maxttl, peer_minttl;
139	struct inet_peer *p;
140	__u32 delta, ttl;
141	int i;
142
143	peer_threshold = READ_ONCE(inet_peer_threshold);
144	peer_maxttl = READ_ONCE(inet_peer_maxttl);
145	peer_minttl = READ_ONCE(inet_peer_minttl);
146
147	if (base->total >= peer_threshold)
148		ttl = 0; /* be aggressive */
149	else
150		ttl = peer_maxttl - (peer_maxttl - peer_minttl) / HZ *
151			base->total / peer_threshold * HZ;
152	for (i = 0; i < gc_cnt; i++) {
153		p = gc_stack[i];
154
155		delta = (__u32)jiffies - READ_ONCE(p->dtime);
156
157		if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
158			gc_stack[i] = NULL;
159	}
160	for (i = 0; i < gc_cnt; i++) {
161		p = gc_stack[i];
162		if (p) {
163			rb_erase(&p->rb_node, &base->rb_root);
164			base->total--;
165			kfree_rcu(p, rcu);
166		}
167	}
168}
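
The ttl expression above interpolates linearly from inet_peer_maxttl for an empty pool down to inet_peer_minttl as the pool approaches the threshold. A small standalone sketch of the arithmetic, assuming HZ = 1000, a threshold of 65664 and the default sysctl values:

#include <stdio.h>

int main(void)
{
	const long hz = 1000, threshold = 65664;          /* assumptions for the example */
	const long maxttl = 10 * 60 * hz, minttl = 120 * hz;

	for (long total = 0; total < threshold; total += threshold / 4) {
		long ttl = maxttl - (maxttl - minttl) / hz * total / threshold * hz;

		printf("total=%5ld  ttl=%3ld s\n", total, ttl / hz);
	}
	return 0;
}

It prints 600 s, 480 s, 360 s and 240 s for a pool at 0, 25, 50 and 75 percent of the threshold; at or above the threshold the function above short-circuits to ttl = 0.
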
169
 170/* Must be called under RCU: no refcount change is done here. */
171struct inet_peer *inet_getpeer(struct inet_peer_base *base,
172			       const struct inetpeer_addr *daddr)
173{
174	struct inet_peer *p, *gc_stack[PEER_MAX_GC];
175	struct rb_node **pp, *parent;
176	unsigned int gc_cnt, seq;
177
178	/* Attempt a lockless lookup first.
179	 * Because of a concurrent writer, we might not find an existing entry.
180	 */
181	seq = read_seqbegin(&base->lock);
182	p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);
183
184	if (p)
185		return p;
186
 187	/* Retry an exact lookup, this time taking the lock first.
 188	 * At least the nodes should now be hot in our cache.
189	 */
190	parent = NULL;
191	write_seqlock_bh(&base->lock);
192
193	gc_cnt = 0;
194	p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
195	if (!p) {
196		p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
197		if (p) {
198			p->daddr = *daddr;
199			p->dtime = (__u32)jiffies;
200			refcount_set(&p->refcnt, 1);
201			atomic_set(&p->rid, 0);
202			p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
203			p->rate_tokens = 0;
204			p->n_redirects = 0;
 205			/* 60*HZ is arbitrary, but chosen high enough that the first
 206			 * calculation of tokens is at its maximum.
207			 */
208			p->rate_last = jiffies - 60*HZ;
209
210			rb_link_node(&p->rb_node, parent, pp);
211			rb_insert_color(&p->rb_node, &base->rb_root);
212			base->total++;
213		}
214	}
215	if (gc_cnt)
216		inet_peer_gc(base, gc_stack, gc_cnt);
217	write_sequnlock_bh(&base->lock);
218
219	return p;
220}
221EXPORT_SYMBOL_GPL(inet_getpeer);
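
inet_getpeer() follows a classic optimistic pattern: an unlocked lookup first (seqlock read plus RCU), then, only on a miss, the write lock, a re-check and the insert. A minimal userspace analog using a pthread rwlock and a plain linked list in place of the seqlock and RB tree (all names here are invented for the sketch):

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

struct peer { struct peer *next; uint32_t key; };
struct base { pthread_rwlock_t lock; struct peer *head; };

static struct peer *pool_find(struct base *b, uint32_t key)
{
	for (struct peer *p = b->head; p; p = p->next)
		if (p->key == key)
			return p;
	return NULL;
}

struct peer *getpeer(struct base *b, uint32_t key)
{
	struct peer *p;

	/* optimistic, shared-lock lookup (kernel: seqlock read + RCU walk) */
	pthread_rwlock_rdlock(&b->lock);
	p = pool_find(b, key);
	pthread_rwlock_unlock(&b->lock);
	if (p)
		return p;

	/* miss: take the exclusive lock, re-check, then insert */
	pthread_rwlock_wrlock(&b->lock);
	p = pool_find(b, key);          /* a concurrent writer may have won */
	if (!p) {
		p = calloc(1, sizeof(*p));
		if (p) {
			p->key = key;
			p->next = b->head;
			b->head = p;
		}
	}
	pthread_rwlock_unlock(&b->lock);
	return p;
}
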
222
223void inet_putpeer(struct inet_peer *p)
224{
225	if (refcount_dec_and_test(&p->refcnt))
226		kfree_rcu(p, rcu);
227}
228
229/*
230 *	Check transmit rate limitation for given message.
231 *	The rate information is held in the inet_peer entries now.
232 *	This function is generic and could be used for other purposes
233 *	too. It uses a Token bucket filter as suggested by Alexey Kuznetsov.
234 *
235 *	Note that the same inet_peer fields are modified by functions in
236 *	route.c too, but these work for packet destinations while xrlim_allow
237 *	works for icmp destinations. This means the rate limiting information
238 *	for one "ip object" is shared - and these ICMPs are twice limited:
239 *	by source and by destination.
240 *
241 *	RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
242 *			  SHOULD allow setting of rate limits
243 *
244 * 	Shared between ICMPv4 and ICMPv6.
245 */
246#define XRLIM_BURST_FACTOR 6
247bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
248{
249	unsigned long now, token;
250	bool rc = false;
251
252	if (!peer)
253		return true;
254
255	token = peer->rate_tokens;
256	now = jiffies;
257	token += now - peer->rate_last;
258	peer->rate_last = now;
259	if (token > XRLIM_BURST_FACTOR * timeout)
260		token = XRLIM_BURST_FACTOR * timeout;
261	if (token >= timeout) {
262		token -= timeout;
263		rc = true;
264	}
265	peer->rate_tokens = token;
266	return rc;
267}
268EXPORT_SYMBOL(inet_peer_xrlim_allow);
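
A standalone walk-through of the token bucket above: with a timeout of one second worth of jiffies (HZ = 1000 assumed) and XRLIM_BURST_FACTOR 6, an idle peer may send a burst of six messages and is then limited to roughly one per timeout. The struct and function names are local to this sketch:

#include <stdbool.h>
#include <stdio.h>

#define BURST 6                          /* mirrors XRLIM_BURST_FACTOR */

struct tb_peer { unsigned long rate_tokens, rate_last; };

static bool xrlim_allow(struct tb_peer *p, unsigned long now, unsigned long timeout)
{
	unsigned long token = p->rate_tokens + (now - p->rate_last);

	p->rate_last = now;
	if (token > BURST * timeout)
		token = BURST * timeout;     /* cap the burst */
	if (token >= timeout) {
		p->rate_tokens = token - timeout;
		return true;                 /* message allowed */
	}
	p->rate_tokens = token;
	return false;                        /* rate limited */
}

int main(void)
{
	struct tb_peer p = { 0, 0 };         /* like rate_last = jiffies - 60*HZ: full burst */

	for (int i = 1; i <= 8; i++)         /* eight back-to-back attempts at t = 60 s */
		printf("attempt %d: %s\n", i, xrlim_allow(&p, 60000, 1000) ? "sent" : "dropped");
	printf("one second later: %s\n", xrlim_allow(&p, 61000, 1000) ? "sent" : "dropped");
	return 0;
}

Run back to back, the first six attempts are sent, the next two are dropped, and one more is allowed a simulated second later.
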
269
270void inetpeer_invalidate_tree(struct inet_peer_base *base)
271{
272	struct rb_node *p = rb_first(&base->rb_root);
273
274	while (p) {
275		struct inet_peer *peer = rb_entry(p, struct inet_peer, rb_node);
276
277		p = rb_next(p);
278		rb_erase(&peer->rb_node, &base->rb_root);
279		inet_putpeer(peer);
280		cond_resched();
281	}
282
283	base->total = 0;
284}
285EXPORT_SYMBOL(inetpeer_invalidate_tree);
v4.6
  1/*
  2 *		INETPEER - A storage for permanent information about peers
  3 *
  4 *  This source is covered by the GNU GPL, the same as all kernel sources.
  5 *
  6 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
  7 */
  8
  9#include <linux/module.h>
 10#include <linux/types.h>
 11#include <linux/slab.h>
 12#include <linux/interrupt.h>
 13#include <linux/spinlock.h>
 14#include <linux/random.h>
 15#include <linux/timer.h>
 16#include <linux/time.h>
 17#include <linux/kernel.h>
 18#include <linux/mm.h>
 19#include <linux/net.h>
 20#include <linux/workqueue.h>
 21#include <net/ip.h>
 22#include <net/inetpeer.h>
 23#include <net/secure_seq.h>
 24
 25/*
 26 *  Theory of operations.
 27 *  We keep one entry for each peer IP address.  Each node contains long-lived
 28 *  information about the peer which doesn't depend on routes.
 29 *
 30 *  Nodes are removed only when their reference counter drops to 0.
 31 *  Once that happens, a node may be removed after a sufficient amount of
 32 *  time has passed since its last use.  A less-recently-used entry can
 33 *  also be removed if the pool is overloaded, i.e. if the total number of
 34 *  entries is greater than or equal to the threshold.
 35 *
 36 *  Node pool is organised as an AVL tree.
 37 *  Such an implementation has been chosen not just for fun.  It's a way to
 38 *  prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 39 *  amount of long living nodes in a single hash slot would significantly delay
 40 *  lookups performed with disabled BHs.
 41 *
 42 *  Serialisation issues.
 43 *  1.  Nodes may appear in the tree only with the pool lock held.
 44 *  2.  Nodes may disappear from the tree only with the pool lock held
 45 *      AND reference count being 0.
 46 *  3.  Global variable peer_total is modified under the pool lock.
 47 *  4.  struct inet_peer fields modification:
 48 *		avl_left, avl_right, avl_parent, avl_height: pool lock
 49 *		refcnt: atomically against modifications on other CPU;
 50 *		   usually under some other lock to prevent node disappearing
 51 *		daddr: unchangeable
 52 */
 53
 54static struct kmem_cache *peer_cachep __read_mostly;
 55
 56static LIST_HEAD(gc_list);
 57static const int gc_delay = 60 * HZ;
 58static struct delayed_work gc_work;
 59static DEFINE_SPINLOCK(gc_lock);
 60
 61#define node_height(x) x->avl_height
 62
 63#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
 64#define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node)
 65static const struct inet_peer peer_fake_node = {
 66	.avl_left	= peer_avl_empty_rcu,
 67	.avl_right	= peer_avl_empty_rcu,
 68	.avl_height	= 0
 69};
 70
 71void inet_peer_base_init(struct inet_peer_base *bp)
 72{
 73	bp->root = peer_avl_empty_rcu;
 74	seqlock_init(&bp->lock);
 75	bp->total = 0;
 76}
 77EXPORT_SYMBOL_GPL(inet_peer_base_init);
 78
 79#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
 80
 81/* Exported for sysctl_net_ipv4.  */
 82int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
 83					 * aggressively at this stage */
 84int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
 85int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */
 86
 87static void inetpeer_gc_worker(struct work_struct *work)
 88{
 89	struct inet_peer *p, *n, *c;
 90	struct list_head list;
 91
 92	spin_lock_bh(&gc_lock);
 93	list_replace_init(&gc_list, &list);
 94	spin_unlock_bh(&gc_lock);
 95
 96	if (list_empty(&list))
 97		return;
 98
 99	list_for_each_entry_safe(p, n, &list, gc_list) {
100
101		if (need_resched())
102			cond_resched();
103
104		c = rcu_dereference_protected(p->avl_left, 1);
105		if (c != peer_avl_empty) {
106			list_add_tail(&c->gc_list, &list);
107			p->avl_left = peer_avl_empty_rcu;
108		}
109
110		c = rcu_dereference_protected(p->avl_right, 1);
111		if (c != peer_avl_empty) {
112			list_add_tail(&c->gc_list, &list);
113			p->avl_right = peer_avl_empty_rcu;
114		}
115
116		n = list_entry(p->gc_list.next, struct inet_peer, gc_list);
117
118		if (!atomic_read(&p->refcnt)) {
119			list_del(&p->gc_list);
120			kmem_cache_free(peer_cachep, p);
121		}
122	}
123
124	if (list_empty(&list))
125		return;
126
127	spin_lock_bh(&gc_lock);
128	list_splice(&list, &gc_list);
129	spin_unlock_bh(&gc_lock);
130
131	schedule_delayed_work(&gc_work, gc_delay);
132}
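
Conceptually the worker tears the abandoned tree down iteratively: each pass pops nodes off a work list, pushes their children onto the same list, frees whatever is no longer referenced and defers the rest. A hypothetical userspace analog (a singly linked work list instead of list_head, invented names):

#include <stdlib.h>

struct tnode {
	struct tnode *left, *right;
	struct tnode *gc_next;           /* work / retry list linkage */
	int refcnt;
};

/* One GC pass over a detached tree; returns the nodes that must be retried. */
static struct tnode *gc_pass(struct tnode *root)
{
	struct tnode *work = root, *retry = NULL;

	if (root)
		root->gc_next = NULL;

	while (work) {
		struct tnode *n = work;

		work = n->gc_next;
		/* children become independent work items of their own */
		if (n->left)  { n->left->gc_next  = work; work = n->left;  n->left  = NULL; }
		if (n->right) { n->right->gc_next = work; work = n->right; n->right = NULL; }

		if (n->refcnt == 0) {
			free(n);                 /* nobody uses it any more */
		} else {
			n->gc_next = retry;      /* still referenced: keep for a later pass */
			retry = n;
		}
	}
	return retry;   /* the kernel splices this back onto gc_list and reschedules */
}
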
133
134/* Called from ip_output.c:ip_init  */
135void __init inet_initpeers(void)
136{
137	struct sysinfo si;
138
139	/* Use the straight interface to information about memory. */
140	si_meminfo(&si);
141	/* The values below were suggested by Alexey Kuznetsov
142	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
143	 * myself.  --SAW
144	 */
145	if (si.totalram <= (32768*1024)/PAGE_SIZE)
146		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
147	if (si.totalram <= (16384*1024)/PAGE_SIZE)
148		inet_peer_threshold >>= 1; /* about 512KB */
149	if (si.totalram <= (8192*1024)/PAGE_SIZE)
150		inet_peer_threshold >>= 2; /* about 128KB */
151
152	peer_cachep = kmem_cache_create("inet_peer_cache",
153			sizeof(struct inet_peer),
154			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
155			NULL);
156
157	INIT_DEFERRABLE_WORK(&gc_work, inetpeer_gc_worker);
158}
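
For a sense of scale: on a hypothetical 8 MB machine all three tests above match, so the default of 65536 + 128 = 65664 entries is shifted right by 1 + 1 + 2 = 4 bits, leaving 4104; with 24 MB only the first test matches, giving 32832.
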
159
160#define rcu_deref_locked(X, BASE)				\
161	rcu_dereference_protected(X, lockdep_is_held(&(BASE)->lock.lock))
162
163/*
164 * Called with local BH disabled and the pool lock held.
165 */
166#define lookup(_daddr, _stack, _base)				\
167({								\
168	struct inet_peer *u;					\
169	struct inet_peer __rcu **v;				\
170								\
171	stackptr = _stack;					\
172	*stackptr++ = &_base->root;				\
173	for (u = rcu_deref_locked(_base->root, _base);		\
174	     u != peer_avl_empty;) {				\
175		int cmp = inetpeer_addr_cmp(_daddr, &u->daddr);	\
176		if (cmp == 0)					\
177			break;					\
178		if (cmp == -1)					\
179			v = &u->avl_left;			\
180		else						\
181			v = &u->avl_right;			\
182		*stackptr++ = v;				\
183		u = rcu_deref_locked(*v, _base);		\
184	}							\
185	u;							\
186})
187
188/*
189 * Called with rcu_read_lock()
 190 * Because we hold no lock against a writer, it's quite possible we fall
 191 * into an endless loop.
192 * But every pointer we follow is guaranteed to be valid thanks to RCU.
193 * We exit from this function if number of links exceeds PEER_MAXDEPTH
194 */
195static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
196				    struct inet_peer_base *base)
197{
198	struct inet_peer *u = rcu_dereference(base->root);
199	int count = 0;
200
201	while (u != peer_avl_empty) {
202		int cmp = inetpeer_addr_cmp(daddr, &u->daddr);
203		if (cmp == 0) {
204			/* Before taking a reference, check if this entry was
205			 * deleted (refcnt=-1)
206			 */
207			if (!atomic_add_unless(&u->refcnt, 1, -1))
208				u = NULL;
209			return u;
210		}
211		if (cmp == -1)
212			u = rcu_dereference(u->avl_left);
213		else
214			u = rcu_dereference(u->avl_right);
215		if (unlikely(++count == PEER_MAXDEPTH))
216			break;
217	}
218	return NULL;
219}
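
The atomic_add_unless(&u->refcnt, 1, -1) call above takes a reference only if the entry has not already been marked deleted (the GC path uses refcnt == -1 as a tombstone). The same "increment unless" idiom can be sketched with C11 atomics; this is an illustrative analog, not the kernel helper:

#include <stdatomic.h>
#include <stdbool.h>

/* Take a reference unless the entry is already tombstoned (refcnt == -1). */
static bool ref_get_unless_deleted(atomic_int *refcnt)
{
	int old = atomic_load(refcnt);

	while (old != -1) {
		/* on failure the CAS reloads 'old' with the current value */
		if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
			return true;             /* reference taken */
	}
	return false;                            /* entry is being deleted */
}
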
220
221/* Called with local BH disabled and the pool lock held. */
222#define lookup_rightempty(start, base)				\
223({								\
224	struct inet_peer *u;					\
225	struct inet_peer __rcu **v;				\
226	*stackptr++ = &start->avl_left;				\
227	v = &start->avl_left;					\
228	for (u = rcu_deref_locked(*v, base);			\
229	     u->avl_right != peer_avl_empty_rcu;) {		\
230		v = &u->avl_right;				\
231		*stackptr++ = v;				\
232		u = rcu_deref_locked(*v, base);			\
233	}							\
234	u;							\
235})
236
237/* Called with local BH disabled and the pool lock held.
238 * Variable names are the proof of operation correctness.
 239 * Look into mm/map_avl.c for a more detailed description of the ideas.
240 */
241static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
242			       struct inet_peer __rcu ***stackend,
243			       struct inet_peer_base *base)
244{
245	struct inet_peer __rcu **nodep;
246	struct inet_peer *node, *l, *r;
247	int lh, rh;
248
249	while (stackend > stack) {
250		nodep = *--stackend;
251		node = rcu_deref_locked(*nodep, base);
252		l = rcu_deref_locked(node->avl_left, base);
253		r = rcu_deref_locked(node->avl_right, base);
254		lh = node_height(l);
255		rh = node_height(r);
256		if (lh > rh + 1) { /* l: RH+2 */
257			struct inet_peer *ll, *lr, *lrl, *lrr;
258			int lrh;
259			ll = rcu_deref_locked(l->avl_left, base);
260			lr = rcu_deref_locked(l->avl_right, base);
261			lrh = node_height(lr);
262			if (lrh <= node_height(ll)) {	/* ll: RH+1 */
263				RCU_INIT_POINTER(node->avl_left, lr);	/* lr: RH or RH+1 */
264				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
265				node->avl_height = lrh + 1; /* RH+1 or RH+2 */
266				RCU_INIT_POINTER(l->avl_left, ll);       /* ll: RH+1 */
267				RCU_INIT_POINTER(l->avl_right, node);	/* node: RH+1 or RH+2 */
268				l->avl_height = node->avl_height + 1;
269				RCU_INIT_POINTER(*nodep, l);
270			} else { /* ll: RH, lr: RH+1 */
271				lrl = rcu_deref_locked(lr->avl_left, base);/* lrl: RH or RH-1 */
272				lrr = rcu_deref_locked(lr->avl_right, base);/* lrr: RH or RH-1 */
273				RCU_INIT_POINTER(node->avl_left, lrr);	/* lrr: RH or RH-1 */
274				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
275				node->avl_height = rh + 1; /* node: RH+1 */
276				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH */
277				RCU_INIT_POINTER(l->avl_right, lrl);	/* lrl: RH or RH-1 */
278				l->avl_height = rh + 1;	/* l: RH+1 */
279				RCU_INIT_POINTER(lr->avl_left, l);	/* l: RH+1 */
280				RCU_INIT_POINTER(lr->avl_right, node);	/* node: RH+1 */
281				lr->avl_height = rh + 2;
282				RCU_INIT_POINTER(*nodep, lr);
283			}
284		} else if (rh > lh + 1) { /* r: LH+2 */
285			struct inet_peer *rr, *rl, *rlr, *rll;
286			int rlh;
287			rr = rcu_deref_locked(r->avl_right, base);
288			rl = rcu_deref_locked(r->avl_left, base);
289			rlh = node_height(rl);
290			if (rlh <= node_height(rr)) {	/* rr: LH+1 */
291				RCU_INIT_POINTER(node->avl_right, rl);	/* rl: LH or LH+1 */
292				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
293				node->avl_height = rlh + 1; /* LH+1 or LH+2 */
294				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH+1 */
295				RCU_INIT_POINTER(r->avl_left, node);	/* node: LH+1 or LH+2 */
296				r->avl_height = node->avl_height + 1;
297				RCU_INIT_POINTER(*nodep, r);
298			} else { /* rr: RH, rl: RH+1 */
299				rlr = rcu_deref_locked(rl->avl_right, base);/* rlr: LH or LH-1 */
300				rll = rcu_deref_locked(rl->avl_left, base);/* rll: LH or LH-1 */
301				RCU_INIT_POINTER(node->avl_right, rll);	/* rll: LH or LH-1 */
302				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
303				node->avl_height = lh + 1; /* node: LH+1 */
304				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH */
305				RCU_INIT_POINTER(r->avl_left, rlr);	/* rlr: LH or LH-1 */
306				r->avl_height = lh + 1;	/* r: LH+1 */
307				RCU_INIT_POINTER(rl->avl_right, r);	/* r: LH+1 */
308				RCU_INIT_POINTER(rl->avl_left, node);	/* node: LH+1 */
309				rl->avl_height = lh + 2;
310				RCU_INIT_POINTER(*nodep, rl);
311			}
312		} else {
313			node->avl_height = (lh > rh ? lh : rh) + 1;
314		}
315	}
316}
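
For reference, the simplest of the four cases handled above (a subtree right-heavy by two whose right child is not left-heavy) is a single left rotation. A compact userspace sketch of just that case, using NULL instead of the kernel's height-0 sentinel and plain assignments instead of RCU_INIT_POINTER:

struct avl_node { struct avl_node *left, *right; int height; };

static int height(struct avl_node *n)
{
	return n ? n->height : 0;        /* the kernel uses a height-0 sentinel instead */
}

static int max_h(int a, int b) { return a > b ? a : b; }

/* node is right-heavy by two and its right child is not left-heavy */
static struct avl_node *rotate_left(struct avl_node *node)
{
	struct avl_node *r = node->right;

	node->right = r->left;           /* r's left subtree moves under node */
	r->left = node;                  /* node becomes r's left child */
	node->height = max_h(height(node->left), height(node->right)) + 1;
	r->height = max_h(height(r->left), height(r->right)) + 1;
	return r;                        /* new subtree root, stored back via *nodep */
}
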
317
318/* Called with local BH disabled and the pool lock held. */
319#define link_to_pool(n, base)					\
320do {								\
321	n->avl_height = 1;					\
322	n->avl_left = peer_avl_empty_rcu;			\
323	n->avl_right = peer_avl_empty_rcu;			\
324	/* lockless readers can catch us now */			\
325	rcu_assign_pointer(**--stackptr, n);			\
326	peer_avl_rebalance(stack, stackptr, base);		\
327} while (0)
328
329static void inetpeer_free_rcu(struct rcu_head *head)
330{
331	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
332}
333
334static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
335			     struct inet_peer __rcu **stack[PEER_MAXDEPTH])
336{
337	struct inet_peer __rcu ***stackptr, ***delp;
338
339	if (lookup(&p->daddr, stack, base) != p)
340		BUG();
341	delp = stackptr - 1; /* *delp[0] == p */
342	if (p->avl_left == peer_avl_empty_rcu) {
343		*delp[0] = p->avl_right;
344		--stackptr;
345	} else {
346		/* look for a node to insert instead of p */
347		struct inet_peer *t;
348		t = lookup_rightempty(p, base);
349		BUG_ON(rcu_deref_locked(*stackptr[-1], base) != t);
350		**--stackptr = t->avl_left;
351		/* t is removed, t->daddr > x->daddr for any
352		 * x in p->avl_left subtree.
353		 * Put t in the old place of p. */
354		RCU_INIT_POINTER(*delp[0], t);
355		t->avl_left = p->avl_left;
356		t->avl_right = p->avl_right;
357		t->avl_height = p->avl_height;
358		BUG_ON(delp[1] != &p->avl_left);
359		delp[1] = &t->avl_left; /* was &p->avl_left */
360	}
361	peer_avl_rebalance(stack, stackptr, base);
362	base->total--;
363	call_rcu(&p->rcu, inetpeer_free_rcu);
364}
365
366/* perform garbage collect on all items stacked during a lookup */
367static int inet_peer_gc(struct inet_peer_base *base,
368			struct inet_peer __rcu **stack[PEER_MAXDEPTH],
369			struct inet_peer __rcu ***stackptr)
370{
371	struct inet_peer *p, *gchead = NULL;
372	__u32 delta, ttl;
373	int cnt = 0;
374
375	if (base->total >= inet_peer_threshold)
376		ttl = 0; /* be aggressive */
377	else
378		ttl = inet_peer_maxttl
379				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
380					base->total / inet_peer_threshold * HZ;
381	stackptr--; /* last stack slot is peer_avl_empty */
382	while (stackptr > stack) {
383		stackptr--;
384		p = rcu_deref_locked(**stackptr, base);
385		if (atomic_read(&p->refcnt) == 0) {
386			smp_rmb();
387			delta = (__u32)jiffies - p->dtime;
388			if (delta >= ttl &&
389			    atomic_cmpxchg(&p->refcnt, 0, -1) == 0) {
390				p->gc_next = gchead;
391				gchead = p;
392			}
393		}
394	}
395	while ((p = gchead) != NULL) {
396		gchead = p->gc_next;
397		cnt++;
398		unlink_from_pool(p, base, stack);
399	}
400	return cnt;
401}
402
403struct inet_peer *inet_getpeer(struct inet_peer_base *base,
404			       const struct inetpeer_addr *daddr,
405			       int create)
406{
407	struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
408	struct inet_peer *p;
409	unsigned int sequence;
410	int invalidated, gccnt = 0;
411
412	/* Attempt a lockless lookup first.
413	 * Because of a concurrent writer, we might not find an existing entry.
414	 */
415	rcu_read_lock();
416	sequence = read_seqbegin(&base->lock);
417	p = lookup_rcu(daddr, base);
418	invalidated = read_seqretry(&base->lock, sequence);
419	rcu_read_unlock();
420
421	if (p)
422		return p;
423
424	/* If no writer did a change during our lookup, we can return early. */
425	if (!create && !invalidated)
426		return NULL;
427
 428	/* Retry an exact lookup, this time taking the lock first.
 429	 * At least the nodes should now be hot in our cache.
430	 */
431	write_seqlock_bh(&base->lock);
432relookup:
433	p = lookup(daddr, stack, base);
434	if (p != peer_avl_empty) {
435		atomic_inc(&p->refcnt);
436		write_sequnlock_bh(&base->lock);
437		return p;
438	}
439	if (!gccnt) {
440		gccnt = inet_peer_gc(base, stack, stackptr);
441		if (gccnt && create)
442			goto relookup;
443	}
444	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
445	if (p) {
446		p->daddr = *daddr;
447		atomic_set(&p->refcnt, 1);
448		atomic_set(&p->rid, 0);
449		p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
450		p->rate_tokens = 0;
 451		/* 60*HZ is arbitrary, but chosen high enough that the first
 452		 * calculation of tokens is at its maximum.
453		 */
454		p->rate_last = jiffies - 60*HZ;
455		INIT_LIST_HEAD(&p->gc_list);
456
457		/* Link the node. */
458		link_to_pool(p, base);
459		base->total++;
460	}
461	write_sequnlock_bh(&base->lock);
462
463	return p;
464}
465EXPORT_SYMBOL_GPL(inet_getpeer);
466
467void inet_putpeer(struct inet_peer *p)
468{
469	p->dtime = (__u32)jiffies;
470	smp_mb__before_atomic();
471	atomic_dec(&p->refcnt);
472}
473EXPORT_SYMBOL_GPL(inet_putpeer);
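
The barrier in inet_putpeer() makes sure the dtime store is visible before the reference count drops, so the GC side, which checks refcnt == 0, issues smp_rmb() and only then reads dtime, never computes an age from a stale timestamp. A loose C11 release/acquire analog of that pairing (an approximation; the kernel primitives here are full/read barriers):

#include <stdatomic.h>

struct rc_peer {
	_Atomic unsigned int dtime;      /* last-use timestamp */
	atomic_int refcnt;
};

static void putpeer(struct rc_peer *p, unsigned int now)
{
	atomic_store_explicit(&p->dtime, now, memory_order_relaxed);
	/* release: the dtime store is ordered before the published decrement */
	atomic_fetch_sub_explicit(&p->refcnt, 1, memory_order_release);
}

static int gc_may_reap(struct rc_peer *p, unsigned int now, unsigned int ttl)
{
	/* acquire pairs with the release above: once refcnt reads 0, dtime is current */
	if (atomic_load_explicit(&p->refcnt, memory_order_acquire) != 0)
		return 0;
	return now - atomic_load_explicit(&p->dtime, memory_order_relaxed) >= ttl;
}
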
474
475/*
476 *	Check transmit rate limitation for given message.
477 *	The rate information is held in the inet_peer entries now.
478 *	This function is generic and could be used for other purposes
479 *	too. It uses a Token bucket filter as suggested by Alexey Kuznetsov.
480 *
481 *	Note that the same inet_peer fields are modified by functions in
482 *	route.c too, but these work for packet destinations while xrlim_allow
483 *	works for icmp destinations. This means the rate limiting information
484 *	for one "ip object" is shared - and these ICMPs are twice limited:
485 *	by source and by destination.
486 *
487 *	RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
488 *			  SHOULD allow setting of rate limits
489 *
490 * 	Shared between ICMPv4 and ICMPv6.
491 */
492#define XRLIM_BURST_FACTOR 6
493bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
494{
495	unsigned long now, token;
496	bool rc = false;
497
498	if (!peer)
499		return true;
500
501	token = peer->rate_tokens;
502	now = jiffies;
503	token += now - peer->rate_last;
504	peer->rate_last = now;
505	if (token > XRLIM_BURST_FACTOR * timeout)
506		token = XRLIM_BURST_FACTOR * timeout;
507	if (token >= timeout) {
508		token -= timeout;
509		rc = true;
510	}
511	peer->rate_tokens = token;
512	return rc;
513}
514EXPORT_SYMBOL(inet_peer_xrlim_allow);
515
516static void inetpeer_inval_rcu(struct rcu_head *head)
517{
518	struct inet_peer *p = container_of(head, struct inet_peer, gc_rcu);
519
520	spin_lock_bh(&gc_lock);
521	list_add_tail(&p->gc_list, &gc_list);
522	spin_unlock_bh(&gc_lock);
523
524	schedule_delayed_work(&gc_work, gc_delay);
525}
526
527void inetpeer_invalidate_tree(struct inet_peer_base *base)
528{
529	struct inet_peer *root;
530
531	write_seqlock_bh(&base->lock);
532
533	root = rcu_deref_locked(base->root, base);
534	if (root != peer_avl_empty) {
535		base->root = peer_avl_empty_rcu;
536		base->total = 0;
537		call_rcu(&root->gc_rcu, inetpeer_inval_rcu);
538	}
539
540	write_sequnlock_bh(&base->lock);
541}
542EXPORT_SYMBOL(inetpeer_invalidate_tree);