net/ipv4/inetpeer.c (Linux v6.13.7)
  1/*
  2 *		INETPEER - A storage for permanent information about peers
  3 *
  4 *  This source is covered by the GNU GPL, the same as all kernel sources.
  5 *
  6 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
  7 */
  8
  9#include <linux/cache.h>
 10#include <linux/module.h>
 11#include <linux/types.h>
 12#include <linux/slab.h>
 13#include <linux/interrupt.h>
 14#include <linux/spinlock.h>
 15#include <linux/random.h>
 16#include <linux/timer.h>
 17#include <linux/time.h>
 18#include <linux/kernel.h>
 19#include <linux/mm.h>
 20#include <linux/net.h>
 21#include <linux/workqueue.h>
 22#include <net/ip.h>
 23#include <net/inetpeer.h>
 24#include <net/secure_seq.h>
 25
 26/*
 27 *  Theory of operations.
  28 *  We keep one entry for each peer IP address.  The nodes contain long-lived
  29 *  information about the peer which doesn't depend on routes.
 30 *
  31 *  Nodes are removed only when the reference counter goes to 0.
  32 *  When that happens, the node may be removed once a sufficient amount of
  33 *  time has passed since its last use.  A less-recently-used entry can
  34 *  also be removed if the pool is overloaded, i.e. if the total number of
  35 *  entries is greater than or equal to the threshold.
 36 *
  37 *  The node pool is organised as an RB tree.
  38 *  Such an implementation has been chosen not just for fun.  It's a way to
  39 *  prevent easy and efficient DoS attacks by creating hash collisions.  A huge
  40 *  number of long-lived nodes in a single hash slot would significantly delay
  41 *  lookups performed with BHs disabled.
 42 *
 43 *  Serialisation issues.
 44 *  1.  Nodes may appear in the tree only with the pool lock held.
 45 *  2.  Nodes may disappear from the tree only with the pool lock held
 46 *      AND reference count being 0.
 47 *  3.  Global variable peer_total is modified under the pool lock.
 48 *  4.  struct inet_peer fields modification:
 49 *		rb_node: pool lock
 50 *		refcnt: atomically against modifications on other CPU;
 51 *		   usually under some other lock to prevent node disappearing
 52 *		daddr: unchangeable
 53 */
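To make the serialisation rules above concrete: readers walk the tree under RCU (or within a seqlock read section) and must not keep the pointer past that section, while writers insert and erase only under base->lock. A minimal reader sketch under those assumptions follows; the helper is hypothetical and not part of inetpeer.c, and it only uses inet_getpeer() as defined later in this file.

/* Hypothetical example: peek at a peer's current rate_tokens.  In this
 * kernel version inet_getpeer() takes no reference, so the returned
 * pointer is only valid inside the RCU read-side critical section.
 */
static u32 example_peek_rate_tokens(struct inet_peer_base *base,
				    const struct inetpeer_addr *daddr)
{
	struct inet_peer *peer;
	u32 tokens = 0;

	rcu_read_lock();
	peer = inet_getpeer(base, daddr);
	if (peer)
		tokens = peer->rate_tokens;
	rcu_read_unlock();

	return tokens;
}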
 54
 55static struct kmem_cache *peer_cachep __ro_after_init;
 56
 57void inet_peer_base_init(struct inet_peer_base *bp)
 58{
 59	bp->rb_root = RB_ROOT;
 60	seqlock_init(&bp->lock);
 61	bp->total = 0;
 62}
 63EXPORT_SYMBOL_GPL(inet_peer_base_init);
 64
 65#define PEER_MAX_GC 32
 66
 67/* Exported for sysctl_net_ipv4.  */
 68int inet_peer_threshold __read_mostly;	/* start to throw entries more
 69					 * aggressively at this stage */
 70int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
 71int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */
 72
 73/* Called from ip_output.c:ip_init  */
 74void __init inet_initpeers(void)
 75{
 76	u64 nr_entries;
 77
 78	 /* 1% of physical memory */
 79	nr_entries = div64_ul((u64)totalram_pages() << PAGE_SHIFT,
 80			      100 * L1_CACHE_ALIGN(sizeof(struct inet_peer)));
 81
 82	inet_peer_threshold = clamp_val(nr_entries, 4096, 65536 + 128);
 83
 84	peer_cachep = KMEM_CACHE(inet_peer, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 85}
 86
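As a worked example of the sizing above (the numbers are illustrative): on a machine with 4 GiB of RAM, and assuming L1_CACHE_ALIGN(sizeof(struct inet_peer)) rounds to 192 bytes, nr_entries = (4 << 30) / (100 * 192) ≈ 223,696, which clamp_val() caps at the upper bound of 65,664 (65536 + 128). On a 512 MiB machine the same computation gives ≈ 27,962, which falls inside the 4096..65664 range and is used as-is.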
 87/* Called with rcu_read_lock() or base->lock held */
 88static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
 89				struct inet_peer_base *base,
 90				unsigned int seq,
 91				struct inet_peer *gc_stack[],
 92				unsigned int *gc_cnt,
 93				struct rb_node **parent_p,
 94				struct rb_node ***pp_p)
 95{
 96	struct rb_node **pp, *parent, *next;
 97	struct inet_peer *p;
 98	u32 now;
 99
100	pp = &base->rb_root.rb_node;
101	parent = NULL;
102	while (1) {
103		int cmp;
104
105		next = rcu_dereference_raw(*pp);
106		if (!next)
107			break;
108		parent = next;
109		p = rb_entry(parent, struct inet_peer, rb_node);
110		cmp = inetpeer_addr_cmp(daddr, &p->daddr);
111		if (cmp == 0) {
112			now = jiffies;
113			if (READ_ONCE(p->dtime) != now)
114				WRITE_ONCE(p->dtime, now);
115			return p;
116		}
117		if (gc_stack) {
118			if (*gc_cnt < PEER_MAX_GC)
119				gc_stack[(*gc_cnt)++] = p;
120		} else if (unlikely(read_seqretry(&base->lock, seq))) {
121			break;
122		}
123		if (cmp == -1)
124			pp = &next->rb_left;
125		else
126			pp = &next->rb_right;
127	}
128	*parent_p = parent;
129	*pp_p = pp;
130	return NULL;
131}
132
133/* perform garbage collection on all items stacked during a lookup */
134static void inet_peer_gc(struct inet_peer_base *base,
135			 struct inet_peer *gc_stack[],
136			 unsigned int gc_cnt)
137{
138	int peer_threshold, peer_maxttl, peer_minttl;
139	struct inet_peer *p;
140	__u32 delta, ttl;
141	int i;
142
143	peer_threshold = READ_ONCE(inet_peer_threshold);
144	peer_maxttl = READ_ONCE(inet_peer_maxttl);
145	peer_minttl = READ_ONCE(inet_peer_minttl);
146
147	if (base->total >= peer_threshold)
148		ttl = 0; /* be aggressive */
149	else
150		ttl = peer_maxttl - (peer_maxttl - peer_minttl) / HZ *
151			base->total / peer_threshold * HZ;
152	for (i = 0; i < gc_cnt; i++) {
153		p = gc_stack[i];
154
155		delta = (__u32)jiffies - READ_ONCE(p->dtime);
156
157		if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
158			gc_stack[i] = NULL;
159	}
160	for (i = 0; i < gc_cnt; i++) {
161		p = gc_stack[i];
162		if (p) {
163			rb_erase(&p->rb_node, &base->rb_root);
164			base->total--;
165			kfree_rcu(p, rcu);
166		}
167	}
168}
169
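The ttl computation in inet_peer_gc() above linearly interpolates between the two sysctls: an empty pool gets roughly inet_peer_maxttl, a nearly full pool approaches inet_peer_minttl, and at or beyond the threshold ttl is forced to 0. Worked example with the defaults (maxttl 600 s, minttl 120 s) and a pool half way to the threshold: ttl = 600*HZ - ((480*HZ)/HZ * total) / threshold * HZ = 600*HZ - 240*HZ = 360*HZ, i.e. entries idle for less than six minutes survive this GC pass.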
170/* Must be called under RCU: no refcount change is done here. */
171struct inet_peer *inet_getpeer(struct inet_peer_base *base,
172			       const struct inetpeer_addr *daddr)
173{
174	struct inet_peer *p, *gc_stack[PEER_MAX_GC];
175	struct rb_node **pp, *parent;
176	unsigned int gc_cnt, seq;
177
178	/* Attempt a lockless lookup first.
179	 * Because of a concurrent writer, we might not find an existing entry.
180	 */
181	seq = read_seqbegin(&base->lock);
182	p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);
183
184	if (p)
185		return p;
186
187	/* retry an exact lookup, this time taking the lock first.
188	 * At least the nodes should now be hot in our cache.
189	 */
190	parent = NULL;
191	write_seqlock_bh(&base->lock);
192
193	gc_cnt = 0;
194	p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
195	if (!p) {
196		p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
197		if (p) {
198			p->daddr = *daddr;
199			p->dtime = (__u32)jiffies;
200			refcount_set(&p->refcnt, 1);
201			atomic_set(&p->rid, 0);
202			p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
203			p->rate_tokens = 0;
204			p->n_redirects = 0;
205			/* 60*HZ is arbitrary, but chosen high enough that the first
206			 * calculation of tokens is at its maximum.
207			 */
208			p->rate_last = jiffies - 60*HZ;
209
210			rb_link_node(&p->rb_node, parent, pp);
211			rb_insert_color(&p->rb_node, &base->rb_root);
212			base->total++;
213		}
214	}
215	if (gc_cnt)
216		inet_peer_gc(base, gc_stack, gc_cnt);
217	write_sequnlock_bh(&base->lock);
218
219	return p;
220}
221EXPORT_SYMBOL_GPL(inet_getpeer);
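Callers key the tree with an inetpeer_addr. A hypothetical IPv4 wrapper is sketched below; it assumes the inetpeer_addr layout from include/net/inetpeer.h (an a4.addr/a4.vif pair plus a family field), which is not shown in this file, and the in-tree inet_getpeer_v4() helper plays a similar role.

/* Hypothetical wrapper: look up (or create) the peer for an IPv4 address.
 * Must be called under rcu_read_lock(), like inet_getpeer() itself.
 */
static struct inet_peer *example_getpeer_v4(struct inet_peer_base *base,
					    __be32 v4daddr, int vif)
{
	struct inetpeer_addr daddr;

	daddr.a4.addr = v4daddr;
	daddr.a4.vif = vif;
	daddr.family = AF_INET;

	return inet_getpeer(base, &daddr);
}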
222
223void inet_putpeer(struct inet_peer *p)
224{
225	if (refcount_dec_and_test(&p->refcnt))
226		kfree_rcu(p, rcu);
227}
228
229/*
230 *	Check transmit rate limitation for given message.
231 *	The rate information is held in the inet_peer entries now.
232 *	This function is generic and could be used for other purposes
233 *	too. It uses a Token bucket filter as suggested by Alexey Kuznetsov.
234 *
235 *	Note that the same inet_peer fields are modified by functions in
236 *	route.c too, but these work for packet destinations while xrlim_allow
237 *	works for icmp destinations. This means the rate limiting information
238 *	for one "ip object" is shared - and these ICMPs are twice limited:
239 *	by source and by destination.
240 *
241 *	RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
242 *			  SHOULD allow setting of rate limits
243 *
244 * 	Shared between ICMPv4 and ICMPv6.
245 */
246#define XRLIM_BURST_FACTOR 6
247bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
248{
249	unsigned long now, token;
250	bool rc = false;
251
252	if (!peer)
253		return true;
254
255	token = peer->rate_tokens;
256	now = jiffies;
257	token += now - peer->rate_last;
258	peer->rate_last = now;
259	if (token > XRLIM_BURST_FACTOR * timeout)
260		token = XRLIM_BURST_FACTOR * timeout;
261	if (token >= timeout) {
262		token -= timeout;
263		rc = true;
264	}
265	peer->rate_tokens = token;
266	return rc;
267}
268EXPORT_SYMBOL(inet_peer_xrlim_allow);
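Worked example for the token bucket above, assuming HZ = 1000 and timeout = HZ (both illustrative; the real ICMP code feeds a sysctl-controlled timeout): tokens accrue at one per jiffy of idle time and are capped at XRLIM_BURST_FACTOR * timeout = 6000. A freshly created peer has rate_last primed to jiffies - 60*HZ, so its first call sees about 60,000 accrued jiffies, is capped at 6000, and therefore starts with a full burst: six messages pass back to back (each costing 1000 tokens), after which roughly one message per second is allowed.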
269
270void inetpeer_invalidate_tree(struct inet_peer_base *base)
271{
272	struct rb_node *p = rb_first(&base->rb_root);
273
274	while (p) {
275		struct inet_peer *peer = rb_entry(p, struct inet_peer, rb_node);
276
277		p = rb_next(p);
278		rb_erase(&peer->rb_node, &base->rb_root);
279		inet_putpeer(peer);
280		cond_resched();
281	}
282
283	base->total = 0;
284}
285EXPORT_SYMBOL(inetpeer_invalidate_tree);
net/ipv4/inetpeer.c (Linux v6.8)
  1/*
  2 *		INETPEER - A storage for permanent information about peers
  3 *
  4 *  This source is covered by the GNU GPL, the same as all kernel sources.
  5 *
  6 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
  7 */
  8
  9#include <linux/cache.h>
 10#include <linux/module.h>
 11#include <linux/types.h>
 12#include <linux/slab.h>
 13#include <linux/interrupt.h>
 14#include <linux/spinlock.h>
 15#include <linux/random.h>
 16#include <linux/timer.h>
 17#include <linux/time.h>
 18#include <linux/kernel.h>
 19#include <linux/mm.h>
 20#include <linux/net.h>
 21#include <linux/workqueue.h>
 22#include <net/ip.h>
 23#include <net/inetpeer.h>
 24#include <net/secure_seq.h>
 25
 26/*
 27 *  Theory of operations.
 28 *  We keep one entry for each peer IP address.  The nodes contain long-lived
 29 *  information about the peer which doesn't depend on routes.
 30 *
 31 *  Nodes are removed only when the reference counter goes to 0.
 32 *  When that happens, the node may be removed once a sufficient amount of
 33 *  time has passed since its last use.  A less-recently-used entry can
 34 *  also be removed if the pool is overloaded, i.e. if the total number of
 35 *  entries is greater than or equal to the threshold.
 36 *
 37 *  The node pool is organised as an RB tree.
 38 *  Such an implementation has been chosen not just for fun.  It's a way to
 39 *  prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 40 *  number of long-lived nodes in a single hash slot would significantly delay
 41 *  lookups performed with BHs disabled.
 42 *
 43 *  Serialisation issues.
 44 *  1.  Nodes may appear in the tree only with the pool lock held.
 45 *  2.  Nodes may disappear from the tree only with the pool lock held
 46 *      AND reference count being 0.
 47 *  3.  Global variable peer_total is modified under the pool lock.
 48 *  4.  struct inet_peer fields modification:
 49 *		rb_node: pool lock
 50 *		refcnt: atomically against modifications on other CPU;
 51 *		   usually under some other lock to prevent node disappearing
 52 *		daddr: unchangeable
 53 */
 54
 55static struct kmem_cache *peer_cachep __ro_after_init;
 56
 57void inet_peer_base_init(struct inet_peer_base *bp)
 58{
 59	bp->rb_root = RB_ROOT;
 60	seqlock_init(&bp->lock);
 61	bp->total = 0;
 62}
 63EXPORT_SYMBOL_GPL(inet_peer_base_init);
 64
 65#define PEER_MAX_GC 32
 66
 67/* Exported for sysctl_net_ipv4.  */
 68int inet_peer_threshold __read_mostly;	/* start to throw entries more
 69					 * aggressively at this stage */
 70int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
 71int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */
 72
 73/* Called from ip_output.c:ip_init  */
 74void __init inet_initpeers(void)
 75{
 76	u64 nr_entries;
 77
 78	 /* 1% of physical memory */
 79	nr_entries = div64_ul((u64)totalram_pages() << PAGE_SHIFT,
 80			      100 * L1_CACHE_ALIGN(sizeof(struct inet_peer)));
 81
 82	inet_peer_threshold = clamp_val(nr_entries, 4096, 65536 + 128);
 83
 84	peer_cachep = kmem_cache_create("inet_peer_cache",
 85			sizeof(struct inet_peer),
 86			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
 87			NULL);
 88}
 89
 90/* Called with rcu_read_lock() or base->lock held */
 91static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
 92				struct inet_peer_base *base,
 93				unsigned int seq,
 94				struct inet_peer *gc_stack[],
 95				unsigned int *gc_cnt,
 96				struct rb_node **parent_p,
 97				struct rb_node ***pp_p)
 98{
 99	struct rb_node **pp, *parent, *next;
100	struct inet_peer *p;
101
102	pp = &base->rb_root.rb_node;
103	parent = NULL;
104	while (1) {
105		int cmp;
106
107		next = rcu_dereference_raw(*pp);
108		if (!next)
109			break;
110		parent = next;
111		p = rb_entry(parent, struct inet_peer, rb_node);
112		cmp = inetpeer_addr_cmp(daddr, &p->daddr);
113		if (cmp == 0) {
114			if (!refcount_inc_not_zero(&p->refcnt))
115				break;
116			return p;
117		}
118		if (gc_stack) {
119			if (*gc_cnt < PEER_MAX_GC)
120				gc_stack[(*gc_cnt)++] = p;
121		} else if (unlikely(read_seqretry(&base->lock, seq))) {
122			break;
123		}
124		if (cmp == -1)
125			pp = &next->rb_left;
126		else
127			pp = &next->rb_right;
128	}
129	*parent_p = parent;
130	*pp_p = pp;
131	return NULL;
132}
133
134static void inetpeer_free_rcu(struct rcu_head *head)
135{
136	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
137}
138
139/* perform garbage collection on all items stacked during a lookup */
140static void inet_peer_gc(struct inet_peer_base *base,
141			 struct inet_peer *gc_stack[],
142			 unsigned int gc_cnt)
143{
144	int peer_threshold, peer_maxttl, peer_minttl;
145	struct inet_peer *p;
146	__u32 delta, ttl;
147	int i;
148
149	peer_threshold = READ_ONCE(inet_peer_threshold);
150	peer_maxttl = READ_ONCE(inet_peer_maxttl);
151	peer_minttl = READ_ONCE(inet_peer_minttl);
152
153	if (base->total >= peer_threshold)
154		ttl = 0; /* be aggressive */
155	else
156		ttl = peer_maxttl - (peer_maxttl - peer_minttl) / HZ *
157			base->total / peer_threshold * HZ;
158	for (i = 0; i < gc_cnt; i++) {
159		p = gc_stack[i];
160
161		/* The READ_ONCE() pairs with the WRITE_ONCE()
162		 * in inet_putpeer()
163		 */
164		delta = (__u32)jiffies - READ_ONCE(p->dtime);
165
166		if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
167			gc_stack[i] = NULL;
168	}
169	for (i = 0; i < gc_cnt; i++) {
170		p = gc_stack[i];
171		if (p) {
172			rb_erase(&p->rb_node, &base->rb_root);
173			base->total--;
174			call_rcu(&p->rcu, inetpeer_free_rcu);
175		}
176	}
177}
178
179struct inet_peer *inet_getpeer(struct inet_peer_base *base,
180			       const struct inetpeer_addr *daddr,
181			       int create)
182{
183	struct inet_peer *p, *gc_stack[PEER_MAX_GC];
184	struct rb_node **pp, *parent;
185	unsigned int gc_cnt, seq;
186	int invalidated;
187
188	/* Attempt a lockless lookup first.
189	 * Because of a concurrent writer, we might not find an existing entry.
190	 */
191	rcu_read_lock();
192	seq = read_seqbegin(&base->lock);
193	p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);
194	invalidated = read_seqretry(&base->lock, seq);
195	rcu_read_unlock();
196
197	if (p)
198		return p;
199
200	/* If no writer did a change during our lookup, we can return early. */
201	if (!create && !invalidated)
202		return NULL;
203
204	/* retry an exact lookup, this time taking the lock first.
205	 * At least the nodes should now be hot in our cache.
206	 */
207	parent = NULL;
208	write_seqlock_bh(&base->lock);
209
210	gc_cnt = 0;
211	p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
212	if (!p && create) {
213		p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
214		if (p) {
215			p->daddr = *daddr;
216			p->dtime = (__u32)jiffies;
217			refcount_set(&p->refcnt, 2);
218			atomic_set(&p->rid, 0);
219			p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
220			p->rate_tokens = 0;
221			p->n_redirects = 0;
222			/* 60*HZ is arbitrary, but chosen high enough that the first
223			 * calculation of tokens is at its maximum.
224			 */
225			p->rate_last = jiffies - 60*HZ;
226
227			rb_link_node(&p->rb_node, parent, pp);
228			rb_insert_color(&p->rb_node, &base->rb_root);
229			base->total++;
230		}
231	}
232	if (gc_cnt)
233		inet_peer_gc(base, gc_stack, gc_cnt);
234	write_sequnlock_bh(&base->lock);
235
236	return p;
237}
238EXPORT_SYMBOL_GPL(inet_getpeer);
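In this older version the API is refcounted: a hit in lookup() takes a reference with refcount_inc_not_zero(), a newly created entry starts with refcnt == 2 (one for the tree, one for the caller), and every successful inet_getpeer() must therefore be balanced by inet_putpeer(). A hypothetical caller sketch under those assumptions:

/* Hypothetical v6.8-style caller: create the entry if missing, use it,
 * then drop the reference that inet_getpeer() handed back.
 */
static void example_touch_peer(struct inet_peer_base *base,
			       const struct inetpeer_addr *daddr)
{
	struct inet_peer *peer = inet_getpeer(base, daddr, 1 /* create */);

	if (!peer)
		return;				/* not found and/or allocation failed */

	inet_peer_xrlim_allow(peer, HZ);	/* any use of the entry */
	inet_putpeer(peer);			/* release the lookup reference */
}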
239
240void inet_putpeer(struct inet_peer *p)
241{
242	/* The WRITE_ONCE() pairs with itself (we run lockless)
243	 * and the READ_ONCE() in inet_peer_gc()
244	 */
245	WRITE_ONCE(p->dtime, (__u32)jiffies);
246
247	if (refcount_dec_and_test(&p->refcnt))
248		call_rcu(&p->rcu, inetpeer_free_rcu);
249}
250EXPORT_SYMBOL_GPL(inet_putpeer);
251
252/*
253 *	Check transmit rate limitation for given message.
254 *	The rate information is held in the inet_peer entries now.
255 *	This function is generic and could be used for other purposes
256 *	too. It uses a Token bucket filter as suggested by Alexey Kuznetsov.
257 *
258 *	Note that the same inet_peer fields are modified by functions in
259 *	route.c too, but these work for packet destinations while xrlim_allow
260 *	works for icmp destinations. This means the rate limiting information
261 *	for one "ip object" is shared - and these ICMPs are twice limited:
262 *	by source and by destination.
263 *
264 *	RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
265 *			  SHOULD allow setting of rate limits
266 *
267 * 	Shared between ICMPv4 and ICMPv6.
268 */
269#define XRLIM_BURST_FACTOR 6
270bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
271{
272	unsigned long now, token;
273	bool rc = false;
274
275	if (!peer)
276		return true;
277
278	token = peer->rate_tokens;
279	now = jiffies;
280	token += now - peer->rate_last;
281	peer->rate_last = now;
282	if (token > XRLIM_BURST_FACTOR * timeout)
283		token = XRLIM_BURST_FACTOR * timeout;
284	if (token >= timeout) {
285		token -= timeout;
286		rc = true;
287	}
288	peer->rate_tokens = token;
289	return rc;
290}
291EXPORT_SYMBOL(inet_peer_xrlim_allow);
292
293void inetpeer_invalidate_tree(struct inet_peer_base *base)
294{
295	struct rb_node *p = rb_first(&base->rb_root);
296
297	while (p) {
298		struct inet_peer *peer = rb_entry(p, struct inet_peer, rb_node);
299
300		p = rb_next(p);
301		rb_erase(&peer->rb_node, &base->rb_root);
302		inet_putpeer(peer);
303		cond_resched();
304	}
305
306	base->total = 0;
307}
308EXPORT_SYMBOL(inetpeer_invalidate_tree);