net/core/gen_estimator.c: the kernel's simple rate estimator, shown below at two versions (v6.13.7 and v4.6).
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * net/sched/gen_estimator.c	Simple rate estimator.
  4 *
  5 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  6 *		Eric Dumazet <edumazet@google.com>
  7 *
  8 * Changes:
  9 *              Jamal Hadi Salim - moved it to net/core and reshuffled
 10 *              names to make it usable in general net subsystem.
 11 */
 12
 13#include <linux/uaccess.h>
 14#include <linux/bitops.h>
 15#include <linux/module.h>
 16#include <linux/types.h>
 17#include <linux/kernel.h>
 18#include <linux/jiffies.h>
 19#include <linux/string.h>
 20#include <linux/mm.h>
 21#include <linux/socket.h>
 22#include <linux/sockios.h>
 23#include <linux/in.h>
 24#include <linux/errno.h>
 25#include <linux/interrupt.h>
 26#include <linux/netdevice.h>
 27#include <linux/skbuff.h>
 28#include <linux/rtnetlink.h>
 29#include <linux/init.h>
 30#include <linux/slab.h>
 31#include <linux/seqlock.h>
 32#include <net/sock.h>
 33#include <net/gen_stats.h>
 34
 35/* This code is NOT intended to be used for statistics collection,
 36 * its purpose is to provide a base for statistical multiplexing
 37 * for controlled load service.
 38 * If you need only statistics, run a user level daemon which
 39 * periodically reads byte counters.
 40 */
 41
 42struct net_rate_estimator {
 43	struct gnet_stats_basic_sync	*bstats;
 44	spinlock_t		*stats_lock;
 45	bool			running;
 46	struct gnet_stats_basic_sync __percpu *cpu_bstats;
 47	u8			ewma_log;
 48	u8			intvl_log; /* period : (250ms << intvl_log) */
 49
 50	seqcount_t		seq;
 51	u64			last_packets;
 52	u64			last_bytes;
 53
 54	u64			avpps;
 55	u64			avbps;
 56
 57	unsigned long           next_jiffies;
 58	struct timer_list       timer;
 59	struct rcu_head		rcu;
 60};
 61
 62static void est_fetch_counters(struct net_rate_estimator *e,
 63			       struct gnet_stats_basic_sync *b)
 64{
 65	gnet_stats_basic_sync_init(b);
 66	if (e->stats_lock)
 67		spin_lock(e->stats_lock);
 68
 69	gnet_stats_add_basic(b, e->cpu_bstats, e->bstats, e->running);
 70
 71	if (e->stats_lock)
 72		spin_unlock(e->stats_lock);
 73
 74}
 75
 76static void est_timer(struct timer_list *t)
 77{
 78	struct net_rate_estimator *est = from_timer(est, t, timer);
 79	struct gnet_stats_basic_sync b;
 80	u64 b_bytes, b_packets;
 81	u64 rate, brate;
 82
 83	est_fetch_counters(est, &b);
 84	b_bytes = u64_stats_read(&b.bytes);
 85	b_packets = u64_stats_read(&b.packets);
 86
 87	brate = (b_bytes - est->last_bytes) << (10 - est->intvl_log);
 88	brate = (brate >> est->ewma_log) - (est->avbps >> est->ewma_log);
 89
 90	rate = (b_packets - est->last_packets) << (10 - est->intvl_log);
 91	rate = (rate >> est->ewma_log) - (est->avpps >> est->ewma_log);
 92
 93	write_seqcount_begin(&est->seq);
 94	est->avbps += brate;
 95	est->avpps += rate;
 96	write_seqcount_end(&est->seq);
 97
 98	est->last_bytes = b_bytes;
 99	est->last_packets = b_packets;
100
101	est->next_jiffies += ((HZ/4) << est->intvl_log);
102
103	if (unlikely(time_after_eq(jiffies, est->next_jiffies))) {
104		/* Ouch... timer was delayed. */
105		est->next_jiffies = jiffies + 1;
106	}
107	mod_timer(&est->timer, est->next_jiffies);
108}
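/* Editor's note, not part of the kernel source: a worked example of the
 * fixed-point arithmetic in est_timer() above, assuming the hypothetical
 * but valid settings intvl_log = 2 (a 1 second period) and ewma_log = 3
 * (weight W = 1/8).  If 1,000,000 bytes arrived during the last period:
 *
 *   brate = 1,000,000 << (10 - 2) = 1,000,000 * 256
 *
 * i.e. the measured byte rate scaled by 2^8 (the period is
 * 2^(intvl_log - 2) seconds, so shifting by 10 - intvl_log always yields
 * a rate in units of 1/256 byte per second).  The two shifts by ewma_log
 * then move avbps roughly one eighth of the way toward that value, which
 * is the EWMA  avbps += (brate - avbps) / 8  in fixed point;
 * gen_estimator_read() removes the 2^8 scaling again with ">> 8".
 */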
109
110/**
111 * gen_new_estimator - create a new rate estimator
112 * @bstats: basic statistics
113 * @cpu_bstats: bstats per cpu
114 * @rate_est: rate estimator statistics
115 * @lock: lock for statistics and control path
116 * @running: true if @bstats represents a running qdisc, thus @bstats'
117 *           internal values might change during basic reads. Only used
118 *           if @cpu_bstats is NULL
119 * @opt: rate estimator configuration TLV
120 *
121 * Creates a new rate estimator with &bstats as source and &rate_est
122 * as destination. A new timer with the interval specified in the
123 * configuration TLV is created. Upon each interval, the latest statistics
124 * will be read from &bstats and the estimated rate will be stored in
125 * &rate_est with the statistics lock grabbed during this period.
126 *
127 * Returns 0 on success or a negative error code.
128 *
129 */
130int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
131		      struct gnet_stats_basic_sync __percpu *cpu_bstats,
132		      struct net_rate_estimator __rcu **rate_est,
133		      spinlock_t *lock,
134		      bool running,
135		      struct nlattr *opt)
136{
137	struct gnet_estimator *parm = nla_data(opt);
138	struct net_rate_estimator *old, *est;
139	struct gnet_stats_basic_sync b;
140	int intvl_log;
141
142	if (nla_len(opt) < sizeof(*parm))
143		return -EINVAL;
144
145	/* allowed timer periods are :
146	 * -2 : 250ms,   -1 : 500ms,    0 : 1 sec
147	 *  1 : 2 sec,    2 : 4 sec,    3 : 8 sec
148	 */
149	if (parm->interval < -2 || parm->interval > 3)
150		return -EINVAL;
151
152	if (parm->ewma_log == 0 || parm->ewma_log >= 31)
153		return -EINVAL;
154
155	est = kzalloc(sizeof(*est), GFP_KERNEL);
156	if (!est)
157		return -ENOBUFS;
158
159	seqcount_init(&est->seq);
160	intvl_log = parm->interval + 2;
161	est->bstats = bstats;
162	est->stats_lock = lock;
163	est->running  = running;
164	est->ewma_log = parm->ewma_log;
165	est->intvl_log = intvl_log;
166	est->cpu_bstats = cpu_bstats;
167
168	if (lock)
169		local_bh_disable();
170	est_fetch_counters(est, &b);
171	if (lock)
172		local_bh_enable();
173	est->last_bytes = u64_stats_read(&b.bytes);
174	est->last_packets = u64_stats_read(&b.packets);
175
176	if (lock)
177		spin_lock_bh(lock);
178	old = rcu_dereference_protected(*rate_est, 1);
179	if (old) {
180		del_timer_sync(&old->timer);
181		est->avbps = old->avbps;
182		est->avpps = old->avpps;
183	}
184
185	est->next_jiffies = jiffies + ((HZ/4) << intvl_log);
186	timer_setup(&est->timer, est_timer, 0);
187	mod_timer(&est->timer, est->next_jiffies);
188
189	rcu_assign_pointer(*rate_est, est);
190	if (lock)
191		spin_unlock_bh(lock);
192	if (old)
193		kfree_rcu(old, rcu);
194	return 0;
195}
196EXPORT_SYMBOL(gen_new_estimator);
197
198/**
199 * gen_kill_estimator - remove a rate estimator
200 * @rate_est: rate estimator
201 *
202 * Removes the rate estimator.
203 *
204 */
205void gen_kill_estimator(struct net_rate_estimator __rcu **rate_est)
206{
207	struct net_rate_estimator *est;
208
209	est = unrcu_pointer(xchg(rate_est, NULL));
210	if (est) {
211		timer_shutdown_sync(&est->timer);
212		kfree_rcu(est, rcu);
213	}
214}
215EXPORT_SYMBOL(gen_kill_estimator);
216
217/**
218 * gen_replace_estimator - replace rate estimator configuration
219 * @bstats: basic statistics
220 * @cpu_bstats: bstats per cpu
221 * @rate_est: rate estimator statistics
222 * @lock: lock for statistics and control path
223 * @running: true if @bstats represents a running qdisc, thus @bstats'
224 *           internal values might change during basic reads. Only used
225 *           if @cpu_bstats is NULL
226 * @opt: rate estimator configuration TLV
227 *
228 * Replaces the configuration of a rate estimator by calling
229 * gen_kill_estimator() and gen_new_estimator().
230 *
231 * Returns 0 on success or a negative error code.
232 */
233int gen_replace_estimator(struct gnet_stats_basic_sync *bstats,
234			  struct gnet_stats_basic_sync __percpu *cpu_bstats,
235			  struct net_rate_estimator __rcu **rate_est,
236			  spinlock_t *lock,
237			  bool running, struct nlattr *opt)
238{
239	return gen_new_estimator(bstats, cpu_bstats, rate_est,
240				 lock, running, opt);
241}
242EXPORT_SYMBOL(gen_replace_estimator);
243
244/**
245 * gen_estimator_active - test if estimator is currently in use
246 * @rate_est: rate estimator
247 *
248 * Returns true if estimator is active, and false if not.
249 */
250bool gen_estimator_active(struct net_rate_estimator __rcu **rate_est)
251{
252	return !!rcu_access_pointer(*rate_est);
253}
254EXPORT_SYMBOL(gen_estimator_active);
255
256bool gen_estimator_read(struct net_rate_estimator __rcu **rate_est,
257			struct gnet_stats_rate_est64 *sample)
258{
259	struct net_rate_estimator *est;
260	unsigned seq;
261
262	rcu_read_lock();
263	est = rcu_dereference(*rate_est);
264	if (!est) {
265		rcu_read_unlock();
266		return false;
267	}
268
269	do {
270		seq = read_seqcount_begin(&est->seq);
271		sample->bps = est->avbps >> 8;
272		sample->pps = est->avpps >> 8;
273	} while (read_seqcount_retry(&est->seq, seq));
274
275	rcu_read_unlock();
276	return true;
277}
278EXPORT_SYMBOL(gen_estimator_read);
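The kernel-doc comments above describe gen_new_estimator(), gen_kill_estimator() and gen_estimator_read() only in the abstract. The sketch below shows how an in-kernel user of this v6.13 API might wire them together; it is an editor's illustration, loosely modeled on existing callers such as net/netfilter/xt_RATEEST.c, not code from this file. The struct my_entity, the my_* helpers and the chosen interval/ewma_log values are hypothetical, and error handling is kept minimal.

/* Hypothetical user of the v6.13 gen_estimator API (illustration only). */
#include <linux/spinlock.h>
#include <linux/printk.h>
#include <linux/pkt_sched.h>	/* struct gnet_estimator */
#include <net/netlink.h>	/* nla_attr_size() */
#include <net/gen_stats.h>	/* gen_new_estimator() and friends */

struct my_entity {				/* hypothetical owner of the counters */
	struct gnet_stats_basic_sync	bstats;	/* bumped on the datapath */
	struct net_rate_estimator __rcu	*rate_est;
	spinlock_t			lock;
};

static int my_entity_attach_estimator(struct my_entity *m)
{
	/* Build the config TLV in the kernel, the way xt_RATEEST does:
	 * interval 0 selects a 1 s period, ewma_log 3 a weight of 1/8. */
	struct {
		struct nlattr		opt;
		struct gnet_estimator	est;
	} cfg = {
		.opt = {
			.nla_len	= nla_attr_size(sizeof(cfg.est)),
			.nla_type	= TCA_STATS_RATE_EST,
		},
		.est = {
			.interval	= 0,
			.ewma_log	= 3,
		},
	};

	spin_lock_init(&m->lock);
	gnet_stats_basic_sync_init(&m->bstats);

	return gen_new_estimator(&m->bstats, NULL, &m->rate_est,
				 &m->lock, false, &cfg.opt);
}

static void my_entity_show_rate(struct my_entity *m)
{
	struct gnet_stats_rate_est64 sample;

	/* gen_estimator_read() returns false if no estimator is attached. */
	if (gen_estimator_read(&m->rate_est, &sample))
		pr_info("rate: %llu bytes/s, %llu packets/s\n",
			sample.bps, sample.pps);
}

static void my_entity_detach_estimator(struct my_entity *m)
{
	gen_kill_estimator(&m->rate_est);
}

Note that the older v4.6 interface shown below takes a struct gnet_stats_rate_est64 pointer directly and has no running argument, so this sketch applies only to the newer API.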
v4.6
 
  1/*
  2 * net/sched/gen_estimator.c	Simple rate estimator.
  3 *
  4 *		This program is free software; you can redistribute it and/or
  5 *		modify it under the terms of the GNU General Public License
  6 *		as published by the Free Software Foundation; either version
  7 *		2 of the License, or (at your option) any later version.
  8 *
  9 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 10 *
 11 * Changes:
 12 *              Jamal Hadi Salim - moved it to net/core and reshuffled
 13 *              names to make it usable in general net subsystem.
 14 */
 15
 16#include <asm/uaccess.h>
 17#include <linux/bitops.h>
 18#include <linux/module.h>
 19#include <linux/types.h>
 20#include <linux/kernel.h>
 21#include <linux/jiffies.h>
 22#include <linux/string.h>
 23#include <linux/mm.h>
 24#include <linux/socket.h>
 25#include <linux/sockios.h>
 26#include <linux/in.h>
 27#include <linux/errno.h>
 28#include <linux/interrupt.h>
 29#include <linux/netdevice.h>
 30#include <linux/skbuff.h>
 31#include <linux/rtnetlink.h>
 32#include <linux/init.h>
 33#include <linux/rbtree.h>
 34#include <linux/slab.h>
 35#include <net/sock.h>
 36#include <net/gen_stats.h>
 37
 38/*
 39   This code is NOT intended to be used for statistics collection,
 40   its purpose is to provide a base for statistical multiplexing
 41   for controlled load service.
 42   If you need only statistics, run a user level daemon which
 43   periodically reads byte counters.
 44
 45   Unfortunately, rate estimation is not a very easy task.
 46   F.e. I did not find a simple way to estimate the current peak rate
 47   and even failed to formulate the problem 8)8)
 48
 49   So I preferred not to build an estimator into the scheduler,
 50   but run this task separately.
 51   Ideally, it should be kernel thread(s), but for now it runs
 52   from timers, which puts an apparent upper bound on the number of rated
 53   flows, has minimal overhead when that number is small, and is enough
 54   to handle controlled load service and sets of aggregates.
 55
 56   We measure rate over A=(1<<interval) seconds and evaluate EWMA:
 57
 58   avrate = avrate*(1-W) + rate*W
 59
 60   where W is chosen as negative power of 2: W = 2^(-ewma_log)
 61
 62   The resulting time constant is:
 63
 64   T = A/(-ln(1-W))
 65
 66
 67   NOTES.
 68
 69   * avbps and avpps are scaled by 2^5.
 70   * both values are reported as 32 bit unsigned values. bps can
 71     overflow for fast links : max speed being 34360Mbit/sec
 72   * Minimal interval is HZ/4=250msec (it is the greatest common divisor
 73     for HZ=100 and HZ=1024 8)), maximal interval
 74     is (HZ*2^EST_MAX_INTERVAL)/4 = 8sec. Shorter intervals
 75     are too expensive, longer ones can be implemented
 76     at user level painlessly.
 77 */
 78
 79#define EST_MAX_INTERVAL	5
 80
 81struct gen_estimator
 82{
 83	struct list_head	list;
 84	struct gnet_stats_basic_packed	*bstats;
 85	struct gnet_stats_rate_est64	*rate_est;
 86	spinlock_t		*stats_lock;
 87	int			ewma_log;
 88	u32			last_packets;
 89	unsigned long		avpps;
 90	u64			last_bytes;
 91	u64			avbps;
 92	struct rcu_head		e_rcu;
 93	struct rb_node		node;
 94	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
 95	struct rcu_head		head;
 96};
 97
 98struct gen_estimator_head
 99{
100	struct timer_list	timer;
101	struct list_head	list;
102};
103
104static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];
105
106/* Protects against NULL dereference */
107static DEFINE_RWLOCK(est_lock);
108
109/* Protects against soft lockup during large deletion */
110static struct rb_root est_root = RB_ROOT;
111static DEFINE_SPINLOCK(est_tree_lock);
112
113static void est_timer(unsigned long arg)
114{
115	int idx = (int)arg;
116	struct gen_estimator *e;
117
118	rcu_read_lock();
119	list_for_each_entry_rcu(e, &elist[idx].list, list) {
120		struct gnet_stats_basic_packed b = {0};
121		unsigned long rate;
122		u64 brate;
123
124		spin_lock(e->stats_lock);
125		read_lock(&est_lock);
126		if (e->bstats == NULL)
127			goto skip;
128
129		__gnet_stats_copy_basic(&b, e->cpu_bstats, e->bstats);
130
131		brate = (b.bytes - e->last_bytes)<<(7 - idx);
132		e->last_bytes = b.bytes;
133		e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log);
134		e->rate_est->bps = (e->avbps+0xF)>>5;
135
136		rate = b.packets - e->last_packets;
137		rate <<= (7 - idx);
138		e->last_packets = b.packets;
139		e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log);
140		e->rate_est->pps = (e->avpps + 0xF) >> 5;
141skip:
142		read_unlock(&est_lock);
143		spin_unlock(e->stats_lock);
144	}
145
146	if (!list_empty(&elist[idx].list))
147		mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));
148	rcu_read_unlock();
149}
150
151static void gen_add_node(struct gen_estimator *est)
152{
153	struct rb_node **p = &est_root.rb_node, *parent = NULL;
154
155	while (*p) {
156		struct gen_estimator *e;
157
158		parent = *p;
159		e = rb_entry(parent, struct gen_estimator, node);
160
161		if (est->bstats > e->bstats)
162			p = &parent->rb_right;
163		else
164			p = &parent->rb_left;
165	}
166	rb_link_node(&est->node, parent, p);
167	rb_insert_color(&est->node, &est_root);
168}
169
170static
171struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats,
172				    const struct gnet_stats_rate_est64 *rate_est)
173{
174	struct rb_node *p = est_root.rb_node;
175
176	while (p) {
177		struct gen_estimator *e;
178
179		e = rb_entry(p, struct gen_estimator, node);
180
181		if (bstats > e->bstats)
182			p = p->rb_right;
183		else if (bstats < e->bstats || rate_est != e->rate_est)
184			p = p->rb_left;
185		else
186			return e;
187	}
188	return NULL;
189}
190
191/**
192 * gen_new_estimator - create a new rate estimator
193 * @bstats: basic statistics
194 * @cpu_bstats: bstats per cpu
195 * @rate_est: rate estimator statistics
196 * @stats_lock: statistics lock
197 * @opt: rate estimator configuration TLV
198 *
199 * Creates a new rate estimator with &bstats as source and &rate_est
200 * as destination. A new timer with the interval specified in the
201 * configuration TLV is created. Upon each interval, the latest statistics
202 * will be read from &bstats and the estimated rate will be stored in
203 * &rate_est with the statistics lock grabbed during this period.
204 *
205 * Returns 0 on success or a negative error code.
206 *
207 */
208int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
209		      struct gnet_stats_basic_cpu __percpu *cpu_bstats,
210		      struct gnet_stats_rate_est64 *rate_est,
211		      spinlock_t *stats_lock,
212		      struct nlattr *opt)
213{
214	struct gen_estimator *est;
215	struct gnet_estimator *parm = nla_data(opt);
216	struct gnet_stats_basic_packed b = {0};
217	int idx;
218
219	if (nla_len(opt) < sizeof(*parm))
220		return -EINVAL;
221
222	if (parm->interval < -2 || parm->interval > 3)
223		return -EINVAL;
224
225	est = kzalloc(sizeof(*est), GFP_KERNEL);
226	if (est == NULL)
227		return -ENOBUFS;
228
229	__gnet_stats_copy_basic(&b, cpu_bstats, bstats);
230
231	idx = parm->interval + 2;
232	est->bstats = bstats;
233	est->rate_est = rate_est;
234	est->stats_lock = stats_lock;
235	est->ewma_log = parm->ewma_log;
236	est->last_bytes = b.bytes;
237	est->avbps = rate_est->bps<<5;
238	est->last_packets = b.packets;
239	est->avpps = rate_est->pps<<10;
240	est->cpu_bstats = cpu_bstats;
241
242	spin_lock_bh(&est_tree_lock);
243	if (!elist[idx].timer.function) {
244		INIT_LIST_HEAD(&elist[idx].list);
245		setup_timer(&elist[idx].timer, est_timer, idx);
246	}
247
248	if (list_empty(&elist[idx].list))
249		mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));
250
251	list_add_rcu(&est->list, &elist[idx].list);
252	gen_add_node(est);
253	spin_unlock_bh(&est_tree_lock);
254
255	return 0;
256}
257EXPORT_SYMBOL(gen_new_estimator);
258
259/**
260 * gen_kill_estimator - remove a rate estimator
261 * @bstats: basic statistics
262 * @rate_est: rate estimator statistics
263 *
264 * Removes the rate estimator specified by &bstats and &rate_est.
265 *
266 * Note : Caller should respect an RCU grace period before freeing stats_lock
267 */
268void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
269			struct gnet_stats_rate_est64 *rate_est)
270{
271	struct gen_estimator *e;
272
273	spin_lock_bh(&est_tree_lock);
274	while ((e = gen_find_node(bstats, rate_est))) {
275		rb_erase(&e->node, &est_root);
276
277		write_lock(&est_lock);
278		e->bstats = NULL;
279		write_unlock(&est_lock);
280
281		list_del_rcu(&e->list);
282		kfree_rcu(e, e_rcu);
283	}
284	spin_unlock_bh(&est_tree_lock);
285}
286EXPORT_SYMBOL(gen_kill_estimator);
287
288/**
289 * gen_replace_estimator - replace rate estimator configuration
290 * @bstats: basic statistics
291 * @cpu_bstats: bstats per cpu
292 * @rate_est: rate estimator statistics
293 * @stats_lock: statistics lock
294 * @opt: rate estimator configuration TLV
295 *
296 * Replaces the configuration of a rate estimator by calling
297 * gen_kill_estimator() and gen_new_estimator().
298 *
299 * Returns 0 on success or a negative error code.
300 */
301int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
302			  struct gnet_stats_basic_cpu __percpu *cpu_bstats,
303			  struct gnet_stats_rate_est64 *rate_est,
304			  spinlock_t *stats_lock, struct nlattr *opt)
305{
306	gen_kill_estimator(bstats, rate_est);
307	return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, opt);
308}
309EXPORT_SYMBOL(gen_replace_estimator);
310
311/**
312 * gen_estimator_active - test if estimator is currently in use
313 * @bstats: basic statistics
314 * @rate_est: rate estimator statistics
315 *
316 * Returns true if estimator is active, and false if not.
317 */
318bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
319			  const struct gnet_stats_rate_est64 *rate_est)
320{
321	bool res;
322
323	ASSERT_RTNL();
324
325	spin_lock_bh(&est_tree_lock);
326	res = gen_find_node(bstats, rate_est) != NULL;
327	spin_unlock_bh(&est_tree_lock);
328
329	return res;
330}
331EXPORT_SYMBOL(gen_estimator_active);
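To make the EWMA description in the v4.6 header comment concrete, here is a small stand-alone user-space calculation; it is an editor's illustration rather than part of either kernel version. It evaluates the time constant T = A / (-ln(1 - W)) for W = 2^-ewma_log, assuming a hypothetical measurement window A of 1 second (interval = 0); for small W the constant is roughly 2^ewma_log seconds.

/* User-space illustration of the EWMA time constant (not kernel code). */
#include <math.h>
#include <stdio.h>

int main(void)
{
	double A = 1.0;		/* measurement window in seconds (interval = 0) */

	for (int ewma_log = 1; ewma_log <= 5; ewma_log++) {
		double W = pow(2.0, -ewma_log);		/* EWMA weight */
		double T = A / -log(1.0 - W);		/* time constant */

		printf("ewma_log=%d  W=%.5f  T=%.2f s\n", ewma_log, W, T);
	}
	return 0;
}

Built with "cc ewma.c -lm", this prints T of about 1.44 s for ewma_log = 1 up to about 31.50 s for ewma_log = 5, which is why larger ewma_log values give smoother but slower-reacting estimates.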