v5.14.15
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/gen_estimator.c	Simple rate estimator.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Eric Dumazet <edumazet@google.com>
 *
 * Changes:
 *              Jamal Hadi Salim - moved it to net/core and reshuffled
 *              names to make it usable in general net subsystem.
 */

#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <net/sock.h>
#include <net/gen_stats.h>

/* This code is NOT intended to be used for statistics collection,
 * its purpose is to provide a base for statistical multiplexing
 * for controlled load service.
 * If you need only statistics, run a user level daemon which
 * periodically reads byte counters.
 */

struct net_rate_estimator {
	struct gnet_stats_basic_packed	*bstats;
	spinlock_t		*stats_lock;
	seqcount_t		*running;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	u8			ewma_log;
	u8			intvl_log; /* period : (250ms << intvl_log) */

	seqcount_t		seq;
	u64			last_packets;
	u64			last_bytes;

	u64			avpps;
	u64			avbps;

	unsigned long           next_jiffies;
	struct timer_list       timer;
	struct rcu_head		rcu;
};

static void est_fetch_counters(struct net_rate_estimator *e,
			       struct gnet_stats_basic_packed *b)
{
	memset(b, 0, sizeof(*b));
	if (e->stats_lock)
		spin_lock(e->stats_lock);

	__gnet_stats_copy_basic(e->running, b, e->cpu_bstats, e->bstats);

	if (e->stats_lock)
		spin_unlock(e->stats_lock);

}

static void est_timer(struct timer_list *t)
{
	struct net_rate_estimator *est = from_timer(est, t, timer);
	struct gnet_stats_basic_packed b;
	u64 rate, brate;

	est_fetch_counters(est, &b);
	brate = (b.bytes - est->last_bytes) << (10 - est->intvl_log);
	brate = (brate >> est->ewma_log) - (est->avbps >> est->ewma_log);

	rate = (b.packets - est->last_packets) << (10 - est->intvl_log);
	rate = (rate >> est->ewma_log) - (est->avpps >> est->ewma_log);

	write_seqcount_begin(&est->seq);
	est->avbps += brate;
	est->avpps += rate;
	write_seqcount_end(&est->seq);

	est->last_bytes = b.bytes;
	est->last_packets = b.packets;

	est->next_jiffies += ((HZ/4) << est->intvl_log);

	if (unlikely(time_after_eq(jiffies, est->next_jiffies))) {
		/* Ouch... timer was delayed. */
		est->next_jiffies = jiffies + 1;
	}
	mod_timer(&est->timer, est->next_jiffies);
}
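
The updates in est_timer() are a fixed-point EWMA: with W = 2^-ewma_log, each period effectively computes avg = avg*(1-W) + rate*W using only shifts. The << (10 - intvl_log) converts a per-period byte delta into bytes/sec scaled by 2^8 (the period is 250ms << intvl_log), and gen_estimator_read() below undoes that with >> 8. A stand-alone sketch of the same arithmetic, fed made-up per-period byte counts:

#include <stdio.h>
#include <stdint.h>

/* Stand-alone sketch of the shift-based EWMA above, with invented
 * sample values. intvl_log = 2 means a 1 s period (250 ms << 2).
 */
int main(void)
{
	unsigned int intvl_log = 2, ewma_log = 3;	/* W = 1/8 */
	uint64_t avbps = 0, bytes = 0, last_bytes = 0;
	uint64_t deltas[] = { 125000, 250000, 375000, 500000 };

	for (int i = 0; i < 4; i++) {
		uint64_t brate;

		bytes += deltas[i];
		/* delta << (10 - intvl_log) == bytes/sec scaled by 2^8 */
		brate = (bytes - last_bytes) << (10 - intvl_log);
		avbps += (brate >> ewma_log) - (avbps >> ewma_log);
		last_bytes = bytes;
		printf("avbps >> 8 = %llu bytes/sec\n",
		       (unsigned long long)(avbps >> 8));
	}
	return 0;
}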

/**
 * gen_new_estimator - create a new rate estimator
 * @bstats: basic statistics
 * @cpu_bstats: bstats per cpu
 * @rate_est: rate estimator statistics
 * @lock: lock for statistics and control path
 * @running: qdisc running seqcount
 * @opt: rate estimator configuration TLV
 *
 * Creates a new rate estimator with &bstats as source and &rate_est
 * as destination. A new timer with the interval specified in the
 * configuration TLV is created. Upon each interval, the latest statistics
 * will be read from &bstats and the estimated rate will be stored in
 * &rate_est with the statistics lock grabbed during this period.
 *
 * Returns 0 on success or a negative error code.
 *
 */
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
		      struct gnet_stats_basic_cpu __percpu *cpu_bstats,
		      struct net_rate_estimator __rcu **rate_est,
		      spinlock_t *lock,
		      seqcount_t *running,
		      struct nlattr *opt)
{
	struct gnet_estimator *parm = nla_data(opt);
	struct net_rate_estimator *old, *est;
	struct gnet_stats_basic_packed b;
	int intvl_log;

	if (nla_len(opt) < sizeof(*parm))
		return -EINVAL;

	/* allowed timer periods are :
	 * -2 : 250ms,   -1 : 500ms,    0 : 1 sec
	 *  1 : 2 sec,    2 : 4 sec,    3 : 8 sec
	 */
	if (parm->interval < -2 || parm->interval > 3)
		return -EINVAL;

	if (parm->ewma_log == 0 || parm->ewma_log >= 31)
		return -EINVAL;

	est = kzalloc(sizeof(*est), GFP_KERNEL);
	if (!est)
		return -ENOBUFS;

	seqcount_init(&est->seq);
	intvl_log = parm->interval + 2;
	est->bstats = bstats;
	est->stats_lock = lock;
	est->running  = running;
	est->ewma_log = parm->ewma_log;
	est->intvl_log = intvl_log;
	est->cpu_bstats = cpu_bstats;

	if (lock)
		local_bh_disable();
	est_fetch_counters(est, &b);
	if (lock)
		local_bh_enable();
	est->last_bytes = b.bytes;
	est->last_packets = b.packets;

	if (lock)
		spin_lock_bh(lock);
	old = rcu_dereference_protected(*rate_est, 1);
	if (old) {
		del_timer_sync(&old->timer);
		est->avbps = old->avbps;
		est->avpps = old->avpps;
	}

	est->next_jiffies = jiffies + ((HZ/4) << intvl_log);
	timer_setup(&est->timer, est_timer, 0);
	mod_timer(&est->timer, est->next_jiffies);

	rcu_assign_pointer(*rate_est, est);
	if (lock)
		spin_unlock_bh(lock);
	if (old)
		kfree_rcu(old, rcu);
	return 0;
}
EXPORT_SYMBOL(gen_new_estimator);
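
For context on the parameters, here is a hedged sketch of a caller, modeled on how the qdisc core around this kernel version wires a TCA_RATE TLV into this helper. qdisc_root_sleeping_running() and the struct Qdisc field names are recalled from memory, not from this file, and the helper name is made up; with per-cpu counters the stats lock may be NULL:

#include <net/sch_generic.h>

/* Sketch only: attach an estimator to a qdisc's counters when userspace
 * supplied a TCA_RATE TLV. Error handling trimmed.
 */
static int example_attach_estimator(struct Qdisc *sch, struct nlattr *rate_tlv)
{
	return gen_new_estimator(&sch->bstats, sch->cpu_bstats,
				 &sch->rate_est,
				 NULL,	/* per-cpu counters: no stats lock */
				 qdisc_root_sleeping_running(sch),
				 rate_tlv);
}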

/**
 * gen_kill_estimator - remove a rate estimator
 * @rate_est: rate estimator
 *
 * Removes the rate estimator.
 *
 */
void gen_kill_estimator(struct net_rate_estimator __rcu **rate_est)
{
	struct net_rate_estimator *est;

	est = xchg((__force struct net_rate_estimator **)rate_est, NULL);
	if (est) {
		del_timer_sync(&est->timer);
		kfree_rcu(est, rcu);
	}
}
EXPORT_SYMBOL(gen_kill_estimator);

/**
 * gen_replace_estimator - replace rate estimator configuration
 * @bstats: basic statistics
 * @cpu_bstats: bstats per cpu
 * @rate_est: rate estimator statistics
 * @lock: lock for statistics and control path
 * @running: qdisc running seqcount (might be NULL)
 * @opt: rate estimator configuration TLV
 *
 * Replaces the configuration of a rate estimator by calling
 * gen_kill_estimator() and gen_new_estimator().
 *
 * Returns 0 on success or a negative error code.
 */
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
			  struct gnet_stats_basic_cpu __percpu *cpu_bstats,
			  struct net_rate_estimator __rcu **rate_est,
			  spinlock_t *lock,
			  seqcount_t *running, struct nlattr *opt)
{
	return gen_new_estimator(bstats, cpu_bstats, rate_est,
				 lock, running, opt);
}
EXPORT_SYMBOL(gen_replace_estimator);

/**
 * gen_estimator_active - test if estimator is currently in use
 * @rate_est: rate estimator
 *
 * Returns true if estimator is active, and false if not.
 */
bool gen_estimator_active(struct net_rate_estimator __rcu **rate_est)
{
	return !!rcu_access_pointer(*rate_est);
}
EXPORT_SYMBOL(gen_estimator_active);

bool gen_estimator_read(struct net_rate_estimator __rcu **rate_est,
			struct gnet_stats_rate_est64 *sample)
{
	struct net_rate_estimator *est;
	unsigned seq;

	rcu_read_lock();
	est = rcu_dereference(*rate_est);
	if (!est) {
		rcu_read_unlock();
		return false;
	}

	do {
		seq = read_seqcount_begin(&est->seq);
		sample->bps = est->avbps >> 8;
		sample->pps = est->avpps >> 8;
	} while (read_seqcount_retry(&est->seq, seq));

	rcu_read_unlock();
	return true;
}
EXPORT_SYMBOL(gen_estimator_read);
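
Readers retry under est->seq until they observe a consistent bps/pps pair; the in-tree consumer is gnet_stats_copy_rate_est() in net/core/gen_stats.c. A hedged sketch of such a caller (the helper name is made up):

/* Hypothetical caller: log an estimator's current rates, doing nothing
 * when no estimator is attached.
 */
static void example_report_rate(struct net_rate_estimator __rcu **rate_est)
{
	struct gnet_stats_rate_est64 sample = {};

	if (gen_estimator_read(rate_est, &sample))
		pr_info("rate: %llu bytes/sec, %llu packets/sec\n",
			(unsigned long long)sample.bps,
			(unsigned long long)sample.pps);
}
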
v3.5.6
 
/*
 * net/sched/gen_estimator.c	Simple rate estimator.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *              Jamal Hadi Salim - moved it to net/core and reshuffled
 *              names to make it usable in general net subsystem.
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/gen_stats.h>

/*
   This code is NOT intended to be used for statistics collection,
   its purpose is to provide a base for statistical multiplexing
   for controlled load service.
   If you need only statistics, run a user level daemon which
   periodically reads byte counters.

   Unfortunately, rate estimation is not a very easy task.
   F.e. I did not find a simple way to estimate the current peak rate
   and even failed to formulate the problem 8)8)

   So I preferred not to build an estimator into the scheduler,
   but run this task separately.
   Ideally, it should be kernel thread(s), but for now it runs
   from timers, which puts apparent top bounds on the number of rated
   flows, has minimal overhead on small numbers of flows, but is enough
   to handle controlled load service, sets of aggregates.

   We measure rate over A=(1<<interval) seconds and evaluate EWMA:

   avrate = avrate*(1-W) + rate*W

   where W is chosen as negative power of 2: W = 2^(-ewma_log)

   The resulting time constant is:

   T = A/(-ln(1-W))


   NOTES.

   * avbps is scaled by 2^5, avpps is scaled by 2^10.
   * both values are reported as 32 bit unsigned values. bps can
     overflow for fast links : max speed being 34360Mbit/sec
   * Minimal interval is HZ/4=250msec (it is the greatest common divisor
     for HZ=100 and HZ=1024 8)), maximal interval
     is (HZ*2^EST_MAX_INTERVAL)/4 = 8sec. Shorter intervals
     are too expensive, longer ones can be implemented
     at user level painlessly.
 */
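
A worked instance of the formulas above: interval = 0 gives A = 1 s, and ewma_log = 2 gives W = 2^-2 = 0.25, so T = 1/(-ln(0.75)) ≈ 3.5 s. A stand-alone sketch (not kernel code) that tabulates T across the allowed intervals:

#include <math.h>
#include <stdio.h>

/* Stand-alone: time constant T = A / (-ln(1 - W)) for each allowed
 * interval (-2..3), with W = 2^(-ewma_log). Build with -lm.
 */
int main(void)
{
	int ewma_log = 2;
	double W = 1.0 / (1 << ewma_log);	/* 0.25 */

	for (int interval = -2; interval <= 3; interval++) {
		double A = pow(2.0, interval);	/* measurement period, sec */

		printf("interval %2d: A = %.2f s, T = %.2f s\n",
		       interval, A, A / (-log(1.0 - W)));
	}
	return 0;
}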

#define EST_MAX_INTERVAL	5

struct gen_estimator
{
	struct list_head	list;
	struct gnet_stats_basic_packed	*bstats;
	struct gnet_stats_rate_est	*rate_est;
	spinlock_t		*stats_lock;
	int			ewma_log;
	u64			last_bytes;
	u64			avbps;
	u32			last_packets;
	u32			avpps;
	struct rcu_head		e_rcu;
	struct rb_node		node;
};

struct gen_estimator_head
{
	struct timer_list	timer;
	struct list_head	list;
};

static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];

/* Protects against NULL dereference */
static DEFINE_RWLOCK(est_lock);

/* Protects against soft lockup during large deletion */
static struct rb_root est_root = RB_ROOT;
static DEFINE_SPINLOCK(est_tree_lock);

static void est_timer(unsigned long arg)
{
	int idx = (int)arg;
	struct gen_estimator *e;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &elist[idx].list, list) {
		u64 nbytes;
		u64 brate;
		u32 npackets;
		u32 rate;

		spin_lock(e->stats_lock);
		read_lock(&est_lock);
		if (e->bstats == NULL)
			goto skip;

		nbytes = e->bstats->bytes;
		npackets = e->bstats->packets;
		brate = (nbytes - e->last_bytes)<<(7 - idx);
		e->last_bytes = nbytes;
		e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log);
		e->rate_est->bps = (e->avbps+0xF)>>5;

		rate = (npackets - e->last_packets)<<(12 - idx);
		e->last_packets = npackets;
		e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log);
		e->rate_est->pps = (e->avpps+0x1FF)>>10;
skip:
		read_unlock(&est_lock);
		spin_unlock(e->stats_lock);
	}

	if (!list_empty(&elist[idx].list))
		mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));
	rcu_read_unlock();
}
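
The shift counts here encode the scaling promised in the NOTES block: slot idx fires every 2^(idx-2) seconds, so (delta << (7 - idx)) is bytes/sec scaled by 2^5 and (delta << (12 - idx)) is packets/sec scaled by 2^10; the (e->avbps+0xF)>>5 and (e->avpps+0x1FF)>>10 stores round off the fraction bits when publishing bps/pps. A stand-alone check of the bytes/sec identity, with an invented byte count:

#include <stdio.h>
#include <stdint.h>

/* Stand-alone check of the v3.5.6 scaling: slot idx fires every
 * 2^(idx-2) seconds, so (delta << (7 - idx)) is bytes/sec scaled by 2^5.
 */
int main(void)
{
	uint64_t delta = 1000000;	/* bytes counted in one period */

	for (int idx = 0; idx <= 5; idx++) {
		double period = (1 << idx) / 4.0;		/* seconds */
		uint64_t bps = (delta << (7 - idx)) >> 5;	/* kernel's view */

		printf("idx %d: period %.2fs, bps %llu (expect %.0f)\n",
		       idx, period, (unsigned long long)bps, delta / period);
	}
	return 0;
}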

static void gen_add_node(struct gen_estimator *est)
{
	struct rb_node **p = &est_root.rb_node, *parent = NULL;

	while (*p) {
		struct gen_estimator *e;

		parent = *p;
		e = rb_entry(parent, struct gen_estimator, node);

		if (est->bstats > e->bstats)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&est->node, parent, p);
	rb_insert_color(&est->node, &est_root);
}

static
struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats,
				    const struct gnet_stats_rate_est *rate_est)
{
	struct rb_node *p = est_root.rb_node;

	while (p) {
		struct gen_estimator *e;

		e = rb_entry(p, struct gen_estimator, node);

		if (bstats > e->bstats)
			p = p->rb_right;
		else if (bstats < e->bstats || rate_est != e->rate_est)
			p = p->rb_left;
		else
			return e;
	}
	return NULL;
}

/**
 * gen_new_estimator - create a new rate estimator
 * @bstats: basic statistics
 * @rate_est: rate estimator statistics
 * @stats_lock: statistics lock
 * @opt: rate estimator configuration TLV
 *
 * Creates a new rate estimator with &bstats as source and &rate_est
 * as destination. A new timer with the interval specified in the
 * configuration TLV is created. Upon each interval, the latest statistics
 * will be read from &bstats and the estimated rate will be stored in
 * &rate_est with the statistics lock grabbed during this period.
 *
 * Returns 0 on success or a negative error code.
 *
 */
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
		      struct gnet_stats_rate_est *rate_est,
		      spinlock_t *stats_lock,
		      struct nlattr *opt)
{
	struct gen_estimator *est;
	struct gnet_estimator *parm = nla_data(opt);
	int idx;

	if (nla_len(opt) < sizeof(*parm))
		return -EINVAL;

	if (parm->interval < -2 || parm->interval > 3)
		return -EINVAL;

	est = kzalloc(sizeof(*est), GFP_KERNEL);
	if (est == NULL)
		return -ENOBUFS;

	idx = parm->interval + 2;
	est->bstats = bstats;
	est->rate_est = rate_est;
	est->stats_lock = stats_lock;
	est->ewma_log = parm->ewma_log;
	est->last_bytes = bstats->bytes;
	est->avbps = rate_est->bps<<5;
	est->last_packets = bstats->packets;
	est->avpps = rate_est->pps<<10;

	spin_lock_bh(&est_tree_lock);
	if (!elist[idx].timer.function) {
		INIT_LIST_HEAD(&elist[idx].list);
		setup_timer(&elist[idx].timer, est_timer, idx);
	}

	if (list_empty(&elist[idx].list))
		mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));

	list_add_rcu(&est->list, &elist[idx].list);
	gen_add_node(est);
	spin_unlock_bh(&est_tree_lock);

	return 0;
}
EXPORT_SYMBOL(gen_new_estimator);

/**
 * gen_kill_estimator - remove a rate estimator
 * @bstats: basic statistics
 * @rate_est: rate estimator statistics
 *
 * Removes the rate estimator specified by &bstats and &rate_est.
 *
 * Note : Caller should respect an RCU grace period before freeing stats_lock
 */
void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
			struct gnet_stats_rate_est *rate_est)
{
	struct gen_estimator *e;

	spin_lock_bh(&est_tree_lock);
	while ((e = gen_find_node(bstats, rate_est))) {
		rb_erase(&e->node, &est_root);

		write_lock(&est_lock);
		e->bstats = NULL;
		write_unlock(&est_lock);

		list_del_rcu(&e->list);
		kfree_rcu(e, e_rcu);
	}
	spin_unlock_bh(&est_tree_lock);
}
EXPORT_SYMBOL(gen_kill_estimator);

/**
 * gen_replace_estimator - replace rate estimator configuration
 * @bstats: basic statistics
 * @rate_est: rate estimator statistics
 * @stats_lock: statistics lock
 * @opt: rate estimator configuration TLV
 *
 * Replaces the configuration of a rate estimator by calling
 * gen_kill_estimator() and gen_new_estimator().
 *
 * Returns 0 on success or a negative error code.
 */
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
			  struct gnet_stats_rate_est *rate_est,
			  spinlock_t *stats_lock, struct nlattr *opt)
{
	gen_kill_estimator(bstats, rate_est);
	return gen_new_estimator(bstats, rate_est, stats_lock, opt);
}
EXPORT_SYMBOL(gen_replace_estimator);

/**
 * gen_estimator_active - test if estimator is currently in use
 * @bstats: basic statistics
 * @rate_est: rate estimator statistics
 *
 * Returns true if estimator is active, and false if not.
 */
bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
			  const struct gnet_stats_rate_est *rate_est)
{
	bool res;

	ASSERT_RTNL();

	spin_lock_bh(&est_tree_lock);
	res = gen_find_node(bstats, rate_est) != NULL;
	spin_unlock_bh(&est_tree_lock);

	return res;
}
EXPORT_SYMBOL(gen_estimator_active);