net/core/gen_estimator.c, v3.1
/*
 * net/sched/gen_estimator.c	Simple rate estimator.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *              Jamal Hadi Salim - moved it to net/core and reshuffled
 *              names to make it usable in general net subsystem.
 */
 15
 16#include <asm/uaccess.h>
 17#include <asm/system.h>
 18#include <linux/bitops.h>
 19#include <linux/module.h>
 20#include <linux/types.h>
 21#include <linux/kernel.h>
 22#include <linux/jiffies.h>
 23#include <linux/string.h>
 24#include <linux/mm.h>
 25#include <linux/socket.h>
 26#include <linux/sockios.h>
 27#include <linux/in.h>
 28#include <linux/errno.h>
 29#include <linux/interrupt.h>
 30#include <linux/netdevice.h>
 31#include <linux/skbuff.h>
 32#include <linux/rtnetlink.h>
 33#include <linux/init.h>
 34#include <linux/rbtree.h>
 35#include <linux/slab.h>
 36#include <net/sock.h>
 37#include <net/gen_stats.h>
 38
/*
   This code is NOT intended to be used for statistics collection,
   its purpose is to provide a base for statistical multiplexing
   for controlled load service.
   If you need only statistics, run a user level daemon which
   periodically reads byte counters.

   Unfortunately, rate estimation is not a very easy task.
   E.g., I did not find a simple way to estimate the current peak rate
   and even failed to formulate the problem 8)8)

   So I preferred not to build an estimator into the scheduler,
   but to run this task separately.
   Ideally, it should be kernel thread(s), but for now it runs
   from timers, which puts an apparent upper bound on the number of rated
   flows, has minimal overhead when that number is small, but is enough
   to handle controlled load service and sets of aggregates.

   We measure rate over A=(1<<interval) seconds and evaluate EWMA:

   avrate = avrate*(1-W) + rate*W

   where W is chosen as a negative power of 2: W = 2^(-ewma_log)

   The resulting time constant is:

   T = A/(-ln(1-W))


   NOTES.

   * avbps is scaled by 2^5, avpps is scaled by 2^10.
   * both values are reported as 32 bit unsigned values. bps can
     overflow for fast links: the maximum representable speed is
     34360 Mbit/sec.
   * Minimal interval is HZ/4=250msec (it is the greatest common divisor
     for HZ=100 and HZ=1024 8)), maximal interval
     is (HZ*2^EST_MAX_INTERVAL)/4 = 8sec. Shorter intervals
     are too expensive, longer ones can be implemented
     at user level painlessly.
 */
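/*
 * Editor's illustrative example (not part of the original source), to make
 * the fixed-point arithmetic above concrete. Assume ewma_log = 2 (so
 * W = 2^-2 = 0.25), idx = 0 (250 msec timer) and a flow carrying a steady
 * 1000 bytes per tick, i.e. a true rate of 4000 bytes/sec:
 *
 *	brate  = (nbytes - last_bytes) << (7 - idx)
 *	       = 1000 << 7 = 128000		(4000 bytes/sec scaled by 2^5)
 *	avbps += (brate >> ewma_log) - (avbps >> ewma_log)
 *	       = 0 + (128000 >> 2) - 0 = 32000	(first tick, avbps started at 0)
 *	bps    = (avbps + 0xF) >> 5 = 1000
 *
 * Every tick moves avbps a quarter of the way toward brate, so the
 * reported bps converges geometrically on 4000: 1000, 1750, 2312, 2734, ...
 */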
#define EST_MAX_INTERVAL	5

struct gen_estimator
{
	struct list_head	list;
	struct gnet_stats_basic_packed	*bstats;
	struct gnet_stats_rate_est	*rate_est;
	spinlock_t		*stats_lock;
	int			ewma_log;
	u64			last_bytes;
	u64			avbps;
	u32			last_packets;
	u32			avpps;
	struct rcu_head		e_rcu;
	struct rb_node		node;
};

struct gen_estimator_head
{
	struct timer_list	timer;
	struct list_head	list;
};

static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];

/* Protects against NULL dereference */
static DEFINE_RWLOCK(est_lock);

/* Protects against soft lockup during large deletion */
static struct rb_root est_root = RB_ROOT;
static DEFINE_SPINLOCK(est_tree_lock);
static void est_timer(unsigned long arg)
{
	int idx = (int)arg;
	struct gen_estimator *e;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &elist[idx].list, list) {
		u64 nbytes;
		u64 brate;
		u32 npackets;
		u32 rate;

		spin_lock(e->stats_lock);
		read_lock(&est_lock);
		if (e->bstats == NULL)
			goto skip;

		nbytes = e->bstats->bytes;
		npackets = e->bstats->packets;
		brate = (nbytes - e->last_bytes)<<(7 - idx);
		e->last_bytes = nbytes;
		e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log);
		e->rate_est->bps = (e->avbps+0xF)>>5;

		rate = (npackets - e->last_packets)<<(12 - idx);
		e->last_packets = npackets;
		e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log);
		e->rate_est->pps = (e->avpps+0x1FF)>>10;
skip:
		read_unlock(&est_lock);
		spin_unlock(e->stats_lock);
	}

	if (!list_empty(&elist[idx].list))
		mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));
	rcu_read_unlock();
}
static void gen_add_node(struct gen_estimator *est)
{
	struct rb_node **p = &est_root.rb_node, *parent = NULL;

	while (*p) {
		struct gen_estimator *e;

		parent = *p;
		e = rb_entry(parent, struct gen_estimator, node);

		if (est->bstats > e->bstats)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&est->node, parent, p);
	rb_insert_color(&est->node, &est_root);
}

static
struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats,
				    const struct gnet_stats_rate_est *rate_est)
{
	struct rb_node *p = est_root.rb_node;

	while (p) {
		struct gen_estimator *e;

		e = rb_entry(p, struct gen_estimator, node);

		if (bstats > e->bstats)
			p = p->rb_right;
		else if (bstats < e->bstats || rate_est != e->rate_est)
			p = p->rb_left;
		else
			return e;
	}
	return NULL;
}
/**
 * gen_new_estimator - create a new rate estimator
 * @bstats: basic statistics
 * @rate_est: rate estimator statistics
 * @stats_lock: statistics lock
 * @opt: rate estimator configuration TLV
 *
 * Creates a new rate estimator with &bstats as source and &rate_est
 * as destination. A new timer with the interval specified in the
 * configuration TLV is created. Upon each interval, the latest statistics
 * will be read from &bstats and the estimated rate will be stored in
 * &rate_est with the statistics lock grabbed during this period.
 *
 * Returns 0 on success or a negative error code.
 *
 */
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
		      struct gnet_stats_rate_est *rate_est,
		      spinlock_t *stats_lock,
		      struct nlattr *opt)
{
	struct gen_estimator *est;
	struct gnet_estimator *parm = nla_data(opt);
	int idx;

	if (nla_len(opt) < sizeof(*parm))
		return -EINVAL;

	if (parm->interval < -2 || parm->interval > 3)
		return -EINVAL;

	est = kzalloc(sizeof(*est), GFP_KERNEL);
	if (est == NULL)
		return -ENOBUFS;

	idx = parm->interval + 2;
	est->bstats = bstats;
	est->rate_est = rate_est;
	est->stats_lock = stats_lock;
	est->ewma_log = parm->ewma_log;
	est->last_bytes = bstats->bytes;
	est->avbps = rate_est->bps<<5;
	est->last_packets = bstats->packets;
	est->avpps = rate_est->pps<<10;

	spin_lock_bh(&est_tree_lock);
	if (!elist[idx].timer.function) {
		INIT_LIST_HEAD(&elist[idx].list);
		setup_timer(&elist[idx].timer, est_timer, idx);
	}

	if (list_empty(&elist[idx].list))
		mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));

	list_add_rcu(&est->list, &elist[idx].list);
	gen_add_node(est);
	spin_unlock_bh(&est_tree_lock);

	return 0;
}
EXPORT_SYMBOL(gen_new_estimator);
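/*
 * Editor's note (illustrative, not part of the original source): the TLV
 * carries a struct gnet_estimator, whose interval and ewma_log fields are
 * read above. interval in [-2, 3] maps to idx = interval + 2 in [0, 5],
 * giving a timer period of (HZ/4) << idx, i.e. 250 msec * 2^idx, from
 * 250 msec up to 8 sec. A hedged sketch of a caller, modeled on how
 * qdiscs attach estimators (the real call sites live in
 * net/sched/sch_api.c):
 *
 *	err = gen_new_estimator(&sch->bstats, &sch->rate_est,
 *				qdisc_root_sleeping_lock(sch),
 *				tca[TCA_RATE]);
 */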
/**
 * gen_kill_estimator - remove a rate estimator
 * @bstats: basic statistics
 * @rate_est: rate estimator statistics
 *
 * Removes the rate estimator specified by &bstats and &rate_est.
 *
 * Note: callers should respect an RCU grace period before freeing
 * stats_lock.
 */
void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
			struct gnet_stats_rate_est *rate_est)
{
	struct gen_estimator *e;

	spin_lock_bh(&est_tree_lock);
	while ((e = gen_find_node(bstats, rate_est))) {
		rb_erase(&e->node, &est_root);

		write_lock(&est_lock);
		e->bstats = NULL;
		write_unlock(&est_lock);

		list_del_rcu(&e->list);
		kfree_rcu(e, e_rcu);
	}
	spin_unlock_bh(&est_tree_lock);
}
EXPORT_SYMBOL(gen_kill_estimator);
/**
 * gen_replace_estimator - replace rate estimator configuration
 * @bstats: basic statistics
 * @rate_est: rate estimator statistics
 * @stats_lock: statistics lock
 * @opt: rate estimator configuration TLV
 *
 * Replaces the configuration of a rate estimator by calling
 * gen_kill_estimator() and gen_new_estimator().
 *
 * Returns 0 on success or a negative error code.
 */
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
			  struct gnet_stats_rate_est *rate_est,
			  spinlock_t *stats_lock, struct nlattr *opt)
{
	gen_kill_estimator(bstats, rate_est);
	return gen_new_estimator(bstats, rate_est, stats_lock, opt);
}
EXPORT_SYMBOL(gen_replace_estimator);
/**
 * gen_estimator_active - test if estimator is currently in use
 * @bstats: basic statistics
 * @rate_est: rate estimator statistics
 *
 * Returns true if estimator is active, and false if not.
 */
bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
			  const struct gnet_stats_rate_est *rate_est)
{
	bool res;

	ASSERT_RTNL();

	spin_lock_bh(&est_tree_lock);
	res = gen_find_node(bstats, rate_est) != NULL;
	spin_unlock_bh(&est_tree_lock);

	return res;
}
EXPORT_SYMBOL(gen_estimator_active);
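Editor's aside before the v4.6 listing: a minimal self-contained user-space
sketch, assuming nothing beyond standard C, that replays the fixed-point EWMA
arithmetic from est_timer() above on synthetic counters. All names here are
local to the sketch, not kernel APIs:

#include <stdio.h>
#include <stdint.h>

/* Replays the v3.1 byte-rate arithmetic for idx = 0 (250 msec ticks)
 * and ewma_log = 2, feeding in a steady 1000 bytes per tick. */
int main(void)
{
	uint64_t last_bytes = 0, avbps = 0, nbytes = 0;
	int ewma_log = 2, idx = 0;

	for (int tick = 1; tick <= 8; tick++) {
		uint64_t brate;

		nbytes += 1000;			/* 1000 bytes per 250 msec */
		brate = (nbytes - last_bytes) << (7 - idx);
		last_bytes = nbytes;
		avbps += (brate >> ewma_log) - (avbps >> ewma_log);
		printf("tick %d: bps = %llu\n", tick,
		       (unsigned long long)((avbps + 0xF) >> 5));
	}
	return 0;	/* the printed bps converges toward 4000 bytes/sec */
}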
net/core/gen_estimator.c, v4.6
/*
 * net/sched/gen_estimator.c	Simple rate estimator.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *              Jamal Hadi Salim - moved it to net/core and reshuffled
 *              names to make it usable in general net subsystem.
 */
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/gen_stats.h>

/*
   This code is NOT intended to be used for statistics collection,
   its purpose is to provide a base for statistical multiplexing
   for controlled load service.
   If you need only statistics, run a user level daemon which
   periodically reads byte counters.

   Unfortunately, rate estimation is not a very easy task.
   E.g., I did not find a simple way to estimate the current peak rate
   and even failed to formulate the problem 8)8)

   So I preferred not to build an estimator into the scheduler,
   but to run this task separately.
   Ideally, it should be kernel thread(s), but for now it runs
   from timers, which puts an apparent upper bound on the number of rated
   flows, has minimal overhead when that number is small, but is enough
   to handle controlled load service and sets of aggregates.

   We measure rate over A=(1<<interval) seconds and evaluate EWMA:

   avrate = avrate*(1-W) + rate*W

   where W is chosen as a negative power of 2: W = 2^(-ewma_log)

   The resulting time constant is:

   T = A/(-ln(1-W))


   NOTES.

   * avbps and avpps are scaled by 2^5.
   * both values are reported as 32 bit unsigned values. bps can
     overflow for fast links: the maximum representable speed is
     34360 Mbit/sec.
   * Minimal interval is HZ/4=250msec (it is the greatest common divisor
     for HZ=100 and HZ=1024 8)), maximal interval
     is (HZ*2^EST_MAX_INTERVAL)/4 = 8sec. Shorter intervals
     are too expensive, longer ones can be implemented
     at user level painlessly.
 */
#define EST_MAX_INTERVAL	5

struct gen_estimator
{
	struct list_head	list;
	struct gnet_stats_basic_packed	*bstats;
	struct gnet_stats_rate_est64	*rate_est;
	spinlock_t		*stats_lock;
	int			ewma_log;
	u32			last_packets;
	unsigned long		avpps;
	u64			last_bytes;
	u64			avbps;
	struct rcu_head		e_rcu;
	struct rb_node		node;
	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
	struct rcu_head		head;
};

struct gen_estimator_head
{
	struct timer_list	timer;
	struct list_head	list;
};

static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];

/* Protects against NULL dereference */
static DEFINE_RWLOCK(est_lock);

/* Protects against soft lockup during large deletion */
static struct rb_root est_root = RB_ROOT;
static DEFINE_SPINLOCK(est_tree_lock);
static void est_timer(unsigned long arg)
{
	int idx = (int)arg;
	struct gen_estimator *e;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &elist[idx].list, list) {
		struct gnet_stats_basic_packed b = {0};
		unsigned long rate;
		u64 brate;

		spin_lock(e->stats_lock);
		read_lock(&est_lock);
		if (e->bstats == NULL)
			goto skip;

		/* snapshot the counters, summing per-cpu stats when present */
		__gnet_stats_copy_basic(&b, e->cpu_bstats, e->bstats);

		brate = (b.bytes - e->last_bytes)<<(7 - idx);
		e->last_bytes = b.bytes;
		e->avbps += (brate >> e->ewma_log) - (e->avbps >> e->ewma_log);
		e->rate_est->bps = (e->avbps+0xF)>>5;

		rate = b.packets - e->last_packets;
		rate <<= (7 - idx);
		e->last_packets = b.packets;
		e->avpps += (rate >> e->ewma_log) - (e->avpps >> e->ewma_log);
		e->rate_est->pps = (e->avpps + 0xF) >> 5;
skip:
		read_unlock(&est_lock);
		spin_unlock(e->stats_lock);
	}

	if (!list_empty(&elist[idx].list))
		mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));
	rcu_read_unlock();
}
static void gen_add_node(struct gen_estimator *est)
{
	struct rb_node **p = &est_root.rb_node, *parent = NULL;

	while (*p) {
		struct gen_estimator *e;

		parent = *p;
		e = rb_entry(parent, struct gen_estimator, node);

		if (est->bstats > e->bstats)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&est->node, parent, p);
	rb_insert_color(&est->node, &est_root);
}

static
struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats,
				    const struct gnet_stats_rate_est64 *rate_est)
{
	struct rb_node *p = est_root.rb_node;

	while (p) {
		struct gen_estimator *e;

		e = rb_entry(p, struct gen_estimator, node);

		if (bstats > e->bstats)
			p = p->rb_right;
		else if (bstats < e->bstats || rate_est != e->rate_est)
			p = p->rb_left;
		else
			return e;
	}
	return NULL;
}
/**
 * gen_new_estimator - create a new rate estimator
 * @bstats: basic statistics
 * @cpu_bstats: bstats per cpu
 * @rate_est: rate estimator statistics
 * @stats_lock: statistics lock
 * @opt: rate estimator configuration TLV
 *
 * Creates a new rate estimator with &bstats as source and &rate_est
 * as destination. A new timer with the interval specified in the
 * configuration TLV is created. Upon each interval, the latest statistics
 * will be read from &bstats and the estimated rate will be stored in
 * &rate_est with the statistics lock grabbed during this period.
 *
 * Returns 0 on success or a negative error code.
 *
 */
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
		      struct gnet_stats_basic_cpu __percpu *cpu_bstats,
		      struct gnet_stats_rate_est64 *rate_est,
		      spinlock_t *stats_lock,
		      struct nlattr *opt)
{
	struct gen_estimator *est;
	struct gnet_estimator *parm = nla_data(opt);
	struct gnet_stats_basic_packed b = {0};
	int idx;

	if (nla_len(opt) < sizeof(*parm))
		return -EINVAL;

	if (parm->interval < -2 || parm->interval > 3)
		return -EINVAL;

	est = kzalloc(sizeof(*est), GFP_KERNEL);
	if (est == NULL)
		return -ENOBUFS;

	__gnet_stats_copy_basic(&b, cpu_bstats, bstats);

	idx = parm->interval + 2;
	est->bstats = bstats;
	est->rate_est = rate_est;
	est->stats_lock = stats_lock;
	est->ewma_log = parm->ewma_log;
	est->last_bytes = b.bytes;
	est->avbps = rate_est->bps<<5;
	est->last_packets = b.packets;
	est->avpps = rate_est->pps<<10;
	est->cpu_bstats = cpu_bstats;

	spin_lock_bh(&est_tree_lock);
	if (!elist[idx].timer.function) {
		INIT_LIST_HEAD(&elist[idx].list);
		setup_timer(&elist[idx].timer, est_timer, idx);
	}

	if (list_empty(&elist[idx].list))
		mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));

	list_add_rcu(&est->list, &elist[idx].list);
	gen_add_node(est);
	spin_unlock_bh(&est_tree_lock);

	return 0;
}
EXPORT_SYMBOL(gen_new_estimator);
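/*
 * Editor's sketch (illustrative, not part of the original source): with the
 * per-cpu variant a caller passes its percpu counters as @cpu_bstats, and
 * __gnet_stats_copy_basic() folds them together before each EWMA step.
 * Modeled loosely on how qdiscs attach estimators in net/sched/sch_api.c:
 *
 *	err = gen_new_estimator(&sch->bstats, sch->cpu_bstats,
 *				&sch->rate_est,
 *				qdisc_root_sleeping_lock(sch),
 *				tca[TCA_RATE]);
 *
 * Passing NULL for @cpu_bstats keeps the old single-counter behaviour.
 */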
/**
 * gen_kill_estimator - remove a rate estimator
 * @bstats: basic statistics
 * @rate_est: rate estimator statistics
 *
 * Removes the rate estimator specified by &bstats and &rate_est.
 *
 * Note: callers should respect an RCU grace period before freeing
 * stats_lock.
 */
void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
			struct gnet_stats_rate_est64 *rate_est)
{
	struct gen_estimator *e;

	spin_lock_bh(&est_tree_lock);
	while ((e = gen_find_node(bstats, rate_est))) {
		rb_erase(&e->node, &est_root);

		write_lock(&est_lock);
		e->bstats = NULL;
		write_unlock(&est_lock);

		list_del_rcu(&e->list);
		kfree_rcu(e, e_rcu);
	}
	spin_unlock_bh(&est_tree_lock);
}
EXPORT_SYMBOL(gen_kill_estimator);
/**
 * gen_replace_estimator - replace rate estimator configuration
 * @bstats: basic statistics
 * @cpu_bstats: bstats per cpu
 * @rate_est: rate estimator statistics
 * @stats_lock: statistics lock
 * @opt: rate estimator configuration TLV
 *
 * Replaces the configuration of a rate estimator by calling
 * gen_kill_estimator() and gen_new_estimator().
 *
 * Returns 0 on success or a negative error code.
 */
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
			  struct gnet_stats_basic_cpu __percpu *cpu_bstats,
			  struct gnet_stats_rate_est64 *rate_est,
			  spinlock_t *stats_lock, struct nlattr *opt)
{
	gen_kill_estimator(bstats, rate_est);
	return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, opt);
}
EXPORT_SYMBOL(gen_replace_estimator);
/**
 * gen_estimator_active - test if estimator is currently in use
 * @bstats: basic statistics
 * @rate_est: rate estimator statistics
 *
 * Returns true if estimator is active, and false if not.
 */
bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
			  const struct gnet_stats_rate_est64 *rate_est)
{
	bool res;

	ASSERT_RTNL();

	spin_lock_bh(&est_tree_lock);
	res = gen_find_node(bstats, rate_est) != NULL;
	spin_unlock_bh(&est_tree_lock);

	return res;
}
EXPORT_SYMBOL(gen_estimator_active);
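What changed between the two listings (editor's note, derived only from the
code above): v4.6 snapshots counters through __gnet_stats_copy_basic(), so
per-cpu byte and packet counts are summed before the EWMA step, and it
reports into the 64-bit struct gnet_stats_rate_est64. It also rescales the
packet-rate accumulator. In v3.1, avpps is a u32 scaled by 2^10, so the
largest reportable rate is

	2^32 / 2^10 = 4194304 pps, roughly 4.2 Mpps,

whereas v4.6 keeps avpps in an unsigned long scaled by 2^5, lifting the
ceiling to about 2^27, roughly 134 Mpps, even on 32-bit, and far higher on
64-bit builds.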