v6.13.7
/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/cache.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>

/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  Each node contains
 *  long-lived information about the peer which doesn't depend on routes.
 *
 *  Nodes are removed only when their reference count goes to 0.
 *  When that happens, the node may be removed once a sufficient amount of
 *  time has passed since its last use.  A less-recently-used entry can
 *  also be removed if the pool is overloaded, i.e. if the total number of
 *  entries is greater than or equal to the threshold.
 *
 *  Node pool is organised as an RB tree.
 *  Such an implementation has been chosen not just for fun.  It's a way to
 *  prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 *  number of long-lived nodes in a single hash slot would significantly delay
 *  lookups performed with disabled BHs.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool lock held.
 *  2.  Nodes may disappear from the tree only with the pool lock held
 *      AND reference count being 0.
 *  3.  Global variable peer_total is modified under the pool lock.
 *  4.  struct inet_peer fields modification:
 *		rb_node: pool lock
 *		refcnt: atomically against modifications on other CPUs;
 *		   usually under some other lock to prevent the node disappearing
 *		daddr: unchangeable
 */
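
/*
 * Sketch of the insertion discipline implied by rules 1.-3. above
 * (this mirrors what inet_getpeer() below actually does):
 *
 *	write_seqlock_bh(&base->lock);
 *	...on a lookup miss, link the new node under the pool lock...
 *	rb_link_node(&p->rb_node, parent, pp);
 *	rb_insert_color(&p->rb_node, &base->rb_root);
 *	base->total++;
 *	write_sequnlock_bh(&base->lock);
 */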

static struct kmem_cache *peer_cachep __ro_after_init;

void inet_peer_base_init(struct inet_peer_base *bp)
{
	bp->rb_root = RB_ROOT;
	seqlock_init(&bp->lock);
	bp->total = 0;
}
EXPORT_SYMBOL_GPL(inet_peer_base_init);

#define PEER_MAX_GC 32

/* Exported for sysctl_net_ipv4.  */
int inet_peer_threshold __read_mostly;	/* start to throw entries more
					 * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */

/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
	u64 nr_entries;

	/* 1% of physical memory */
	nr_entries = div64_ul((u64)totalram_pages() << PAGE_SHIFT,
			      100 * L1_CACHE_ALIGN(sizeof(struct inet_peer)));

	inet_peer_threshold = clamp_val(nr_entries, 4096, 65536 + 128);

	peer_cachep = KMEM_CACHE(inet_peer, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}
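
/*
 * Worked example for the sizing above (illustrative figures; the real
 * struct size is arch dependent): with 4 GiB of RAM and a cache-aligned
 * struct inet_peer of 192 bytes, nr_entries = 2^32 / (100 * 192), about
 * 223k entries, which clamp_val() caps at the 65536 + 128 maximum.
 */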

/* Called with rcu_read_lock() or base->lock held */
static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
				struct inet_peer_base *base,
				unsigned int seq,
				struct inet_peer *gc_stack[],
				unsigned int *gc_cnt,
				struct rb_node **parent_p,
				struct rb_node ***pp_p)
{
	struct rb_node **pp, *parent, *next;
	struct inet_peer *p;
	u32 now;

	pp = &base->rb_root.rb_node;
	parent = NULL;
	while (1) {
		int cmp;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;
		p = rb_entry(parent, struct inet_peer, rb_node);
		cmp = inetpeer_addr_cmp(daddr, &p->daddr);
		if (cmp == 0) {
			now = jiffies;
			if (READ_ONCE(p->dtime) != now)
				WRITE_ONCE(p->dtime, now);
			return p;
		}
		if (gc_stack) {
			if (*gc_cnt < PEER_MAX_GC)
				gc_stack[(*gc_cnt)++] = p;
		} else if (unlikely(read_seqretry(&base->lock, seq))) {
			break;
		}
		if (cmp == -1)
			pp = &next->rb_left;
		else
			pp = &next->rb_right;
	}
	*parent_p = parent;
	*pp_p = pp;
	return NULL;
}

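/*
 * The two call patterns for lookup() (sketches; both appear verbatim in
 * inet_getpeer() below).  Lockless readers pass a NULL gc_stack plus a
 * seqcount snapshot so the walk aborts if a writer interferes; the
 * locked path passes a real gc_stack so visited nodes can be garbage
 * collected afterwards:
 *
 *	seq = read_seqbegin(&base->lock);
 *	p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);
 *
 *	write_seqlock_bh(&base->lock);
 *	p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
 */
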
/* perform garbage collect on all items stacked during a lookup */
static void inet_peer_gc(struct inet_peer_base *base,
			 struct inet_peer *gc_stack[],
			 unsigned int gc_cnt)
{
	int peer_threshold, peer_maxttl, peer_minttl;
	struct inet_peer *p;
	__u32 delta, ttl;
	int i;

	peer_threshold = READ_ONCE(inet_peer_threshold);
	peer_maxttl = READ_ONCE(inet_peer_maxttl);
	peer_minttl = READ_ONCE(inet_peer_minttl);

	if (base->total >= peer_threshold)
		ttl = 0; /* be aggressive */
	else
		ttl = peer_maxttl - (peer_maxttl - peer_minttl) / HZ *
			base->total / peer_threshold * HZ;
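	/*
	 * The TTL scales linearly from peer_maxttl for an empty pool down
	 * to peer_minttl as the pool fills.  Illustrative numbers with the
	 * defaults above: maxttl = 600 s, minttl = 120 s, so when
	 * base->total == peer_threshold / 2 the effective TTL is
	 * 600 - (600 - 120) / 2 = 360 seconds.
	 */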
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];

		delta = (__u32)jiffies - READ_ONCE(p->dtime);

		if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
			gc_stack[i] = NULL;
	}
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];
		if (p) {
			rb_erase(&p->rb_node, &base->rb_root);
			base->total--;
			kfree_rcu(p, rcu);
		}
	}
}

/* Must be called under RCU: no refcount change is done here. */
struct inet_peer *inet_getpeer(struct inet_peer_base *base,
			       const struct inetpeer_addr *daddr)
{
	struct inet_peer *p, *gc_stack[PEER_MAX_GC];
	struct rb_node **pp, *parent;
	unsigned int gc_cnt, seq;

	/* Attempt a lockless lookup first.
	 * Because of a concurrent writer, we might not find an existing entry.
	 */
	seq = read_seqbegin(&base->lock);
	p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);

	if (p)
		return p;

	/* retry an exact lookup, taking the lock first.
	 * At least, nodes should be hot in our cache.
	 */
	parent = NULL;
	write_seqlock_bh(&base->lock);

	gc_cnt = 0;
	p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
	if (!p) {
		p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
		if (p) {
			p->daddr = *daddr;
			p->dtime = (__u32)jiffies;
			refcount_set(&p->refcnt, 1);
			atomic_set(&p->rid, 0);
			p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
			p->rate_tokens = 0;
			p->n_redirects = 0;
			/* 60*HZ is arbitrary, but chosen high enough so that
			 * the first calculation of tokens is at its maximum.
			 */
			p->rate_last = jiffies - 60*HZ;

			rb_link_node(&p->rb_node, parent, pp);
			rb_insert_color(&p->rb_node, &base->rb_root);
			base->total++;
		}
	}
	if (gc_cnt)
		inet_peer_gc(base, gc_stack, gc_cnt);
	write_sequnlock_bh(&base->lock);

	return p;
}
EXPORT_SYMBOL_GPL(inet_getpeer);
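
/*
 * Typical caller pattern (a sketch, since callers live elsewhere, e.g.
 * the ICMP rate limiting code): the caller supplies the RCU section and
 * must not use the peer outside of it, as no reference is taken:
 *
 *	rcu_read_lock();
 *	peer = inet_getpeer(base, &daddr);
 *	if (peer)
 *		allow = inet_peer_xrlim_allow(peer, timeout);
 *	rcu_read_unlock();
 */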

void inet_putpeer(struct inet_peer *p)
{
	if (refcount_dec_and_test(&p->refcnt))
		kfree_rcu(p, rcu);
}

/*
 *	Check transmit rate limitation for given message.
 *	The rate information is held in the inet_peer entries now.
 *	This function is generic and could be used for other purposes
 *	too. It uses a token bucket filter as suggested by Alexey Kuznetsov.
 *
 *	Note that the same inet_peer fields are modified by functions in
 *	route.c too, but these work for packet destinations while xrlim_allow
 *	works for icmp destinations. This means the rate limiting information
 *	for one "ip object" is shared - and these ICMPs are twice limited:
 *	by source and by destination.
 *
 *	RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
 *			  SHOULD allow setting of rate limits
 *
 *	Shared between ICMPv4 and ICMPv6.
 */
#define XRLIM_BURST_FACTOR 6
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
	unsigned long now, token;
	bool rc = false;

	if (!peer)
		return true;

	token = peer->rate_tokens;
	now = jiffies;
	token += now - peer->rate_last;
	peer->rate_last = now;
	if (token > XRLIM_BURST_FACTOR * timeout)
		token = XRLIM_BURST_FACTOR * timeout;
	if (token >= timeout) {
		token -= timeout;
		rc = true;
	}
	peer->rate_tokens = token;
	return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);
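
/*
 * Worked example (illustrative): with timeout = HZ, i.e. one message per
 * second, a long-idle peer has its bucket capped at 6 * HZ tokens, so a
 * burst of up to XRLIM_BURST_FACTOR messages is allowed before the rate
 * settles at one per second, each allowed message spending HZ tokens.
 */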

void inetpeer_invalidate_tree(struct inet_peer_base *base)
{
	struct rb_node *p = rb_first(&base->rb_root);

	while (p) {
		struct inet_peer *peer = rb_entry(p, struct inet_peer, rb_node);

		p = rb_next(p);
		rb_erase(&peer->rb_node, &base->rb_root);
		inet_putpeer(peer);
		cond_resched();
	}

	base->total = 0;
}
EXPORT_SYMBOL(inetpeer_invalidate_tree);
v3.5.6
/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>

/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  Each node contains
 *  long-lived information about the peer which doesn't depend on routes.
 *  At the moment this information consists only of the ID field for the
 *  next outgoing IP packet.  This field is incremented with each packet
 *  as encoded in the inet_getid() function (include/net/inetpeer.h).
 *  At the time of writing these notes, IP packet identifiers are generated
 *  to be unpredictable using this code only for packets subjected
 *  (actually or potentially) to defragmentation.  I.e. DF packets smaller
 *  than the PMTU use a constant ID and do not use this code (see
 *  ip_select_ident() in include/net/ip.h).
 *
 *  Route cache entries hold references to our nodes.
 *  New cache entries get references via lookup by destination IP address in
 *  the AVL tree.  The reference is grabbed only when it's needed, i.e. only
 *  when we try to output an IP packet which needs an unpredictable ID (see
 *  __ip_select_ident() in net/ipv4/route.c).
 *  Nodes are removed only when their reference count goes to 0.
 *  When that happens, the node may be removed once a sufficient amount of
 *  time has passed since its last use.  A less-recently-used entry can
 *  also be removed if the pool is overloaded, i.e. if the total number of
 *  entries is greater than or equal to the threshold.
 *
 *  Node pool is organised as an AVL tree.
 *  Such an implementation has been chosen not just for fun.  It's a way to
 *  prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 *  number of long-lived nodes in a single hash slot would significantly delay
 *  lookups performed with disabled BHs.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool lock held.
 *  2.  Nodes may disappear from the tree only with the pool lock held
 *      AND reference count being 0.
 *  3.  Global variable peer_total is modified under the pool lock.
 *  4.  struct inet_peer fields modification:
 *		avl_left, avl_right, avl_parent, avl_height: pool lock
 *		refcnt: atomically against modifications on other CPUs;
 *		   usually under some other lock to prevent the node disappearing
 *		daddr: unchangeable
 *		ip_id_count: atomic value (no lock needed)
 */

static struct kmem_cache *peer_cachep __read_mostly;

static LIST_HEAD(gc_list);
static const int gc_delay = 60 * HZ;
static struct delayed_work gc_work;
static DEFINE_SPINLOCK(gc_lock);

#define node_height(x) x->avl_height

#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
#define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node)
static const struct inet_peer peer_fake_node = {
	.avl_left	= peer_avl_empty_rcu,
	.avl_right	= peer_avl_empty_rcu,
	.avl_height	= 0
};

struct inet_peer_base {
	struct inet_peer __rcu *root;
	seqlock_t	lock;
	int		total;
};

static struct inet_peer_base v4_peers = {
	.root		= peer_avl_empty_rcu,
	.lock		= __SEQLOCK_UNLOCKED(v4_peers.lock),
	.total		= 0,
};

static struct inet_peer_base v6_peers = {
	.root		= peer_avl_empty_rcu,
	.lock		= __SEQLOCK_UNLOCKED(v6_peers.lock),
	.total		= 0,
};

#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */

/* Exported for sysctl_net_ipv4.  */
int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
					 * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */

static void inetpeer_gc_worker(struct work_struct *work)
{
	struct inet_peer *p, *n;
	LIST_HEAD(list);

	spin_lock_bh(&gc_lock);
	list_replace_init(&gc_list, &list);
	spin_unlock_bh(&gc_lock);

	if (list_empty(&list))
		return;

	list_for_each_entry_safe(p, n, &list, gc_list) {

		if (need_resched())
			cond_resched();

		if (p->avl_left != peer_avl_empty) {
			list_add_tail(&p->avl_left->gc_list, &list);
			p->avl_left = peer_avl_empty;
		}

		if (p->avl_right != peer_avl_empty) {
			list_add_tail(&p->avl_right->gc_list, &list);
			p->avl_right = peer_avl_empty;
		}

		n = list_entry(p->gc_list.next, struct inet_peer, gc_list);

		if (!atomic_read(&p->refcnt)) {
			list_del(&p->gc_list);
			kmem_cache_free(peer_cachep, p);
		}
	}

	if (list_empty(&list))
		return;

	spin_lock_bh(&gc_lock);
	list_splice(&list, &gc_list);
	spin_unlock_bh(&gc_lock);

	schedule_delayed_work(&gc_work, gc_delay);
}

/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
	struct sysinfo si;

	/* Use the straight interface to information about memory. */
	si_meminfo(&si);
	/* The values below were suggested by Alexey Kuznetsov
	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
	 * myself.  --SAW
	 */
	if (si.totalram <= (32768*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
	if (si.totalram <= (16384*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* about 512KB */
	if (si.totalram <= (8192*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 2; /* about 128KB */

	peer_cachep = kmem_cache_create("inet_peer_cache",
			sizeof(struct inet_peer),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
			NULL);

	INIT_DELAYED_WORK_DEFERRABLE(&gc_work, inetpeer_gc_worker);
}

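/*
 * Illustrative effect of the checks above: on a machine with 8 MB of
 * RAM or less, all three tests fire, so the default threshold of
 * 65536 + 128 is divided by 2, by 2 again, and then by 4, i.e. reduced
 * sixteen-fold to roughly 4k entries.
 */
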
static int addr_compare(const struct inetpeer_addr *a,
			const struct inetpeer_addr *b)
{
	int i, n = (a->family == AF_INET ? 1 : 4);

	for (i = 0; i < n; i++) {
		if (a->addr.a6[i] == b->addr.a6[i])
			continue;
		if ((__force u32)a->addr.a6[i] < (__force u32)b->addr.a6[i])
			return -1;
		return 1;
	}

	return 0;
}

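/*
 * Note that addr_compare() only has to induce a consistent total order
 * for tree placement, not a human-meaningful one: an IPv4 address sits
 * in addr.a6[0] of the union, so a single 32-bit word is compared,
 * while IPv6 walks all four words most-significant group first.  The
 * (__force u32) casts simply compare the big-endian words as host
 * integers, which is sufficient for a search tree.
 */
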
#define rcu_deref_locked(X, BASE)				\
	rcu_dereference_protected(X, lockdep_is_held(&(BASE)->lock.lock))

/*
 * Called with local BH disabled and the pool lock held.
 */
#define lookup(_daddr, _stack, _base)				\
({								\
	struct inet_peer *u;					\
	struct inet_peer __rcu **v;				\
								\
	stackptr = _stack;					\
	*stackptr++ = &_base->root;				\
	for (u = rcu_deref_locked(_base->root, _base);		\
	     u != peer_avl_empty; ) {				\
		int cmp = addr_compare(_daddr, &u->daddr);	\
		if (cmp == 0)					\
			break;					\
		if (cmp == -1)					\
			v = &u->avl_left;			\
		else						\
			v = &u->avl_right;			\
		*stackptr++ = v;				\
		u = rcu_deref_locked(*v, _base);		\
	}							\
	u;							\
})

/*
 * Called with rcu_read_lock().
 * Because we hold no lock against a writer, it's quite possible that we
 * fall into an endless loop.
 * But every pointer we follow is guaranteed to be valid thanks to RCU.
 * We exit from this function if the number of links followed exceeds
 * PEER_MAXDEPTH.
 */
static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
				    struct inet_peer_base *base)
{
	struct inet_peer *u = rcu_dereference(base->root);
	int count = 0;

	while (u != peer_avl_empty) {
		int cmp = addr_compare(daddr, &u->daddr);
		if (cmp == 0) {
			/* Before taking a reference, check if this entry was
			 * deleted (refcnt=-1)
			 */
			if (!atomic_add_unless(&u->refcnt, 1, -1))
				u = NULL;
			return u;
		}
		if (cmp == -1)
			u = rcu_dereference(u->avl_left);
		else
			u = rcu_dereference(u->avl_right);
		if (unlikely(++count == PEER_MAXDEPTH))
			break;
	}
	return NULL;
}

/* Called with local BH disabled and the pool lock held. */
#define lookup_rightempty(start, base)				\
({								\
	struct inet_peer *u;					\
	struct inet_peer __rcu **v;				\
	*stackptr++ = &start->avl_left;				\
	v = &start->avl_left;					\
	for (u = rcu_deref_locked(*v, base);			\
	     u->avl_right != peer_avl_empty_rcu; ) {		\
		v = &u->avl_right;				\
		*stackptr++ = v;				\
		u = rcu_deref_locked(*v, base);			\
	}							\
	u;							\
})

/* Called with local BH disabled and the pool lock held.
 * Variable names are the proof of operation correctness.
 * Look into mm/map_avl.c for a more detailed description of the ideas.
 */
static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
			       struct inet_peer __rcu ***stackend,
			       struct inet_peer_base *base)
{
	struct inet_peer __rcu **nodep;
	struct inet_peer *node, *l, *r;
	int lh, rh;

	while (stackend > stack) {
		nodep = *--stackend;
		node = rcu_deref_locked(*nodep, base);
		l = rcu_deref_locked(node->avl_left, base);
		r = rcu_deref_locked(node->avl_right, base);
		lh = node_height(l);
		rh = node_height(r);
		if (lh > rh + 1) { /* l: RH+2 */
			struct inet_peer *ll, *lr, *lrl, *lrr;
			int lrh;
			ll = rcu_deref_locked(l->avl_left, base);
			lr = rcu_deref_locked(l->avl_right, base);
			lrh = node_height(lr);
			if (lrh <= node_height(ll)) {	/* ll: RH+1 */
				RCU_INIT_POINTER(node->avl_left, lr);	/* lr: RH or RH+1 */
				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
				node->avl_height = lrh + 1; /* RH+1 or RH+2 */
				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH+1 */
				RCU_INIT_POINTER(l->avl_right, node);	/* node: RH+1 or RH+2 */
				l->avl_height = node->avl_height + 1;
				RCU_INIT_POINTER(*nodep, l);
			} else { /* ll: RH, lr: RH+1 */
				lrl = rcu_deref_locked(lr->avl_left, base);	/* lrl: RH or RH-1 */
				lrr = rcu_deref_locked(lr->avl_right, base);	/* lrr: RH or RH-1 */
				RCU_INIT_POINTER(node->avl_left, lrr);	/* lrr: RH or RH-1 */
				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
				node->avl_height = rh + 1; /* node: RH+1 */
				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH */
				RCU_INIT_POINTER(l->avl_right, lrl);	/* lrl: RH or RH-1 */
				l->avl_height = rh + 1;	/* l: RH+1 */
				RCU_INIT_POINTER(lr->avl_left, l);	/* l: RH+1 */
				RCU_INIT_POINTER(lr->avl_right, node);	/* node: RH+1 */
				lr->avl_height = rh + 2;
				RCU_INIT_POINTER(*nodep, lr);
			}
		} else if (rh > lh + 1) { /* r: LH+2 */
			struct inet_peer *rr, *rl, *rlr, *rll;
			int rlh;
			rr = rcu_deref_locked(r->avl_right, base);
			rl = rcu_deref_locked(r->avl_left, base);
			rlh = node_height(rl);
			if (rlh <= node_height(rr)) {	/* rr: LH+1 */
				RCU_INIT_POINTER(node->avl_right, rl);	/* rl: LH or LH+1 */
				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
				node->avl_height = rlh + 1; /* LH+1 or LH+2 */
				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH+1 */
				RCU_INIT_POINTER(r->avl_left, node);	/* node: LH+1 or LH+2 */
				r->avl_height = node->avl_height + 1;
				RCU_INIT_POINTER(*nodep, r);
			} else { /* rr: LH, rl: LH+1 */
				rlr = rcu_deref_locked(rl->avl_right, base);	/* rlr: LH or LH-1 */
				rll = rcu_deref_locked(rl->avl_left, base);	/* rll: LH or LH-1 */
				RCU_INIT_POINTER(node->avl_right, rll);	/* rll: LH or LH-1 */
				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
				node->avl_height = lh + 1; /* node: LH+1 */
				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH */
				RCU_INIT_POINTER(r->avl_left, rlr);	/* rlr: LH or LH-1 */
				r->avl_height = lh + 1;	/* r: LH+1 */
				RCU_INIT_POINTER(rl->avl_right, r);	/* r: LH+1 */
				RCU_INIT_POINTER(rl->avl_left, node);	/* node: LH+1 */
				rl->avl_height = lh + 2;
				RCU_INIT_POINTER(*nodep, rl);
			}
		} else {
			node->avl_height = (lh > rh ? lh : rh) + 1;
		}
	}
}

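/*
 * Shape of the single-rotation case above (left subtree higher by two,
 * node_height(ll) >= node_height(lr)); the double-rotation case instead
 * promotes lr to the subtree root:
 *
 *	      node                    l
 *	     /    \                 /   \
 *	    l      r      ==>     ll    node
 *	   / \                         /    \
 *	  ll  lr                      lr     r
 */
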
/* Called with local BH disabled and the pool lock held. */
#define link_to_pool(n, base)					\
do {								\
	n->avl_height = 1;					\
	n->avl_left = peer_avl_empty_rcu;			\
	n->avl_right = peer_avl_empty_rcu;			\
	/* lockless readers can catch us now */			\
	rcu_assign_pointer(**--stackptr, n);			\
	peer_avl_rebalance(stack, stackptr, base);		\
} while (0)

static void inetpeer_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
}

static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
			     struct inet_peer __rcu **stack[PEER_MAXDEPTH])
{
	struct inet_peer __rcu ***stackptr, ***delp;

	if (lookup(&p->daddr, stack, base) != p)
		BUG();
	delp = stackptr - 1; /* *delp[0] == p */
	if (p->avl_left == peer_avl_empty_rcu) {
		*delp[0] = p->avl_right;
		--stackptr;
	} else {
		/* look for a node to insert instead of p */
		struct inet_peer *t;
		t = lookup_rightempty(p, base);
		BUG_ON(rcu_deref_locked(*stackptr[-1], base) != t);
		**--stackptr = t->avl_left;
		/* t is removed, t->daddr > x->daddr for any
		 * x in p->avl_left subtree.
		 * Put t in the old place of p. */
		RCU_INIT_POINTER(*delp[0], t);
		t->avl_left = p->avl_left;
		t->avl_right = p->avl_right;
		t->avl_height = p->avl_height;
		BUG_ON(delp[1] != &p->avl_left);
		delp[1] = &t->avl_left; /* was &p->avl_left */
	}
	peer_avl_rebalance(stack, stackptr, base);
	base->total--;
	call_rcu(&p->rcu, inetpeer_free_rcu);
}

static struct inet_peer_base *family_to_base(int family)
{
	return family == AF_INET ? &v4_peers : &v6_peers;
}

/* perform garbage collect on all items stacked during a lookup */
static int inet_peer_gc(struct inet_peer_base *base,
			struct inet_peer __rcu **stack[PEER_MAXDEPTH],
			struct inet_peer __rcu ***stackptr)
{
	struct inet_peer *p, *gchead = NULL;
	__u32 delta, ttl;
	int cnt = 0;

	if (base->total >= inet_peer_threshold)
		ttl = 0; /* be aggressive */
	else
		ttl = inet_peer_maxttl
				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
					base->total / inet_peer_threshold * HZ;
	stackptr--; /* last stack slot is peer_avl_empty */
	while (stackptr > stack) {
		stackptr--;
		p = rcu_deref_locked(**stackptr, base);
		if (atomic_read(&p->refcnt) == 0) {
			smp_rmb();
			delta = (__u32)jiffies - p->dtime;
			if (delta >= ttl &&
			    atomic_cmpxchg(&p->refcnt, 0, -1) == 0) {
				p->gc_next = gchead;
				gchead = p;
			}
		}
	}
	while ((p = gchead) != NULL) {
		gchead = p->gc_next;
		cnt++;
		unlink_from_pool(p, base, stack);
	}
	return cnt;
}

struct inet_peer *inet_getpeer(const struct inetpeer_addr *daddr, int create)
{
	struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
	struct inet_peer_base *base = family_to_base(daddr->family);
	struct inet_peer *p;
	unsigned int sequence;
	int invalidated, gccnt = 0;

	/* Attempt a lockless lookup first.
	 * Because of a concurrent writer, we might not find an existing entry.
	 */
	rcu_read_lock();
	sequence = read_seqbegin(&base->lock);
	p = lookup_rcu(daddr, base);
	invalidated = read_seqretry(&base->lock, sequence);
	rcu_read_unlock();

	if (p)
		return p;

	/* If no writer did a change during our lookup, we can return early. */
	if (!create && !invalidated)
		return NULL;

	/* retry an exact lookup, taking the lock first.
	 * At least, nodes should be hot in our cache.
	 */
	write_seqlock_bh(&base->lock);
relookup:
	p = lookup(daddr, stack, base);
	if (p != peer_avl_empty) {
		atomic_inc(&p->refcnt);
		write_sequnlock_bh(&base->lock);
		return p;
	}
	if (!gccnt) {
		gccnt = inet_peer_gc(base, stack, stackptr);
		if (gccnt && create)
			goto relookup;
	}
	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
	if (p) {
		p->daddr = *daddr;
		atomic_set(&p->refcnt, 1);
		atomic_set(&p->rid, 0);
		atomic_set(&p->ip_id_count,
				(daddr->family == AF_INET) ?
					secure_ip_id(daddr->addr.a4) :
					secure_ipv6_id(daddr->addr.a6));
		p->tcp_ts_stamp = 0;
		p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
		p->rate_tokens = 0;
		p->rate_last = 0;
		p->pmtu_expires = 0;
		p->pmtu_orig = 0;
		memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
		INIT_LIST_HEAD(&p->gc_list);

		/* Link the node. */
		link_to_pool(p, base);
		base->total++;
	}
	write_sequnlock_bh(&base->lock);

	return p;
}
EXPORT_SYMBOL_GPL(inet_getpeer);

void inet_putpeer(struct inet_peer *p)
{
	p->dtime = (__u32)jiffies;
	smp_mb__before_atomic_dec();
	atomic_dec(&p->refcnt);
}
EXPORT_SYMBOL_GPL(inet_putpeer);

/*
 *	Check transmit rate limitation for given message.
 *	The rate information is held in the inet_peer entries now.
 *	This function is generic and could be used for other purposes
 *	too. It uses a token bucket filter as suggested by Alexey Kuznetsov.
 *
 *	Note that the same inet_peer fields are modified by functions in
 *	route.c too, but these work for packet destinations while xrlim_allow
 *	works for icmp destinations. This means the rate limiting information
 *	for one "ip object" is shared - and these ICMPs are twice limited:
 *	by source and by destination.
 *
 *	RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
 *			  SHOULD allow setting of rate limits
 *
 *	Shared between ICMPv4 and ICMPv6.
 */
#define XRLIM_BURST_FACTOR 6
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
	unsigned long now, token;
	bool rc = false;

	if (!peer)
		return true;

	token = peer->rate_tokens;
	now = jiffies;
	token += now - peer->rate_last;
	peer->rate_last = now;
	if (token > XRLIM_BURST_FACTOR * timeout)
		token = XRLIM_BURST_FACTOR * timeout;
	if (token >= timeout) {
		token -= timeout;
		rc = true;
	}
	peer->rate_tokens = token;
	return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);

static void inetpeer_inval_rcu(struct rcu_head *head)
{
	struct inet_peer *p = container_of(head, struct inet_peer, gc_rcu);

	spin_lock_bh(&gc_lock);
	list_add_tail(&p->gc_list, &gc_list);
	spin_unlock_bh(&gc_lock);

	schedule_delayed_work(&gc_work, gc_delay);
}

void inetpeer_invalidate_tree(int family)
{
	struct inet_peer *old, *new, *prev;
	struct inet_peer_base *base = family_to_base(family);

	write_seqlock_bh(&base->lock);

	old = base->root;
	if (old == peer_avl_empty_rcu)
		goto out;

	new = peer_avl_empty_rcu;

	prev = cmpxchg(&base->root, old, new);
	if (prev == old) {
		base->total = 0;
		call_rcu(&prev->gc_rcu, inetpeer_inval_rcu);
	}

out:
	write_sequnlock_bh(&base->lock);
}
EXPORT_SYMBOL(inetpeer_invalidate_tree);