// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2013 Cisco Systems, Inc.
 *
 * Author: Vijay Subramanian <vijaynsu@cisco.com>
 * Author: Mythili Prabhu <mysuryan@cisco.com>
 *
 * ECN support added by Naeem Khademi <naeemk@ifi.uio.no>
 * University of Oslo, Norway.
 *
 * References:
 * RFC 8033: https://tools.ietf.org/html/rfc8033
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/pie.h>

/* private data for the Qdisc */
struct pie_sched_data {
	struct pie_vars vars;
	struct pie_params params;
	struct pie_stats stats;
	struct timer_list adapt_timer;
	struct Qdisc *sch;
};

bool pie_drop_early(struct Qdisc *sch, struct pie_params *params,
		    struct pie_vars *vars, u32 backlog, u32 packet_size)
{
	u64 rnd;
	u64 local_prob = vars->prob;
	u32 mtu = psched_mtu(qdisc_dev(sch));

	/* If there is still burst allowance left, skip random early drop */
	if (vars->burst_time > 0)
		return false;

	/* If current delay is less than half of target, and
	 * if drop prob is low already, disable early_drop
	 */
	if ((vars->qdelay < params->target / 2) &&
	    (vars->prob < MAX_PROB / 5))
		return false;

	/* If we have fewer than 2 mtu-sized packets, disable pie_drop_early,
	 * similar to min_th in RED
	 */
	if (backlog < 2 * mtu)
		return false;
	/* If bytemode is turned on, use packet size to compute the new
	 * probability. Smaller packets will have a lower drop prob in this case
	 */
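	/* For example, with bytemode on, a packet one third of the MTU in
	 * size is dropped with roughly one third of the computed probability.
	 */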
	if (params->bytemode && packet_size <= mtu)
		local_prob = (u64)packet_size * div_u64(local_prob, mtu);
	else
		local_prob = vars->prob;

	if (local_prob == 0)
		vars->accu_prob = 0;
	else
		vars->accu_prob += local_prob;

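	/* Derandomization (see RFC 8033): instead of an independent coin flip
	 * per packet, accumulate the drop probability across packets; never
	 * drop while the accumulated value is below 0.85, and always drop
	 * once it reaches 8.5.
	 */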
	if (vars->accu_prob < (MAX_PROB / 100) * 85)
		return false;
	if (vars->accu_prob >= (MAX_PROB / 2) * 17)
		return true;

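	/* Draw a 64-bit random value and shift it down by one byte so it
	 * spans the same 56-bit range as the scaled drop probability.
	 */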
	get_random_bytes(&rnd, 8);
	if ((rnd >> BITS_PER_BYTE) < local_prob) {
		vars->accu_prob = 0;
		return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(pie_drop_early);

static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	bool enqueue = false;

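	/* Tail drop if the queue is already at its configured limit; this is
	 * accounted below as both an overlimit event and a drop.
	 */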
	if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
		q->stats.overlimit++;
		goto out;
	}

	if (!pie_drop_early(sch, &q->params, &q->vars, sch->qstats.backlog,
			    skb->len)) {
		enqueue = true;
	} else if (q->params.ecn && (q->vars.prob <= MAX_PROB / 10) &&
		   INET_ECN_set_ce(skb)) {
		/* If packet is ecn capable, mark it if drop probability
		 * is lower than 10%, else drop it.
		 */
		q->stats.ecn_mark++;
		enqueue = true;
	}

	/* we can enqueue the packet */
	if (enqueue) {
		/* Set enqueue time only when dq_rate_estimator is disabled. */
		if (!q->params.dq_rate_estimator)
			pie_set_enqueue_time(skb);

		q->stats.packets_in++;
		if (qdisc_qlen(sch) > q->stats.maxq)
			q->stats.maxq = qdisc_qlen(sch);

		return qdisc_enqueue_tail(skb, sch);
	}

out:
	q->stats.dropped++;
	q->vars.accu_prob = 0;
	return qdisc_drop(skb, sch, to_free);
}

static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
	[TCA_PIE_TARGET]		= {.type = NLA_U32},
	[TCA_PIE_LIMIT]			= {.type = NLA_U32},
	[TCA_PIE_TUPDATE]		= {.type = NLA_U32},
	[TCA_PIE_ALPHA]			= {.type = NLA_U32},
	[TCA_PIE_BETA]			= {.type = NLA_U32},
	[TCA_PIE_ECN]			= {.type = NLA_U32},
	[TCA_PIE_BYTEMODE]		= {.type = NLA_U32},
	[TCA_PIE_DQ_RATE_ESTIMATOR]	= {.type = NLA_U32},
};

static int pie_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_PIE_MAX + 1];
	unsigned int qlen, dropped = 0;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_PIE_MAX, opt, pie_policy,
					  NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	/* convert from microseconds to pschedtime */
	if (tb[TCA_PIE_TARGET]) {
		/* target is in us */
		u32 target = nla_get_u32(tb[TCA_PIE_TARGET]);

		/* convert to pschedtime */
		WRITE_ONCE(q->params.target,
			   PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC));
	}

	/* tupdate is in jiffies */
	if (tb[TCA_PIE_TUPDATE])
		WRITE_ONCE(q->params.tupdate,
			   usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE])));

	if (tb[TCA_PIE_LIMIT]) {
		u32 limit = nla_get_u32(tb[TCA_PIE_LIMIT]);

		WRITE_ONCE(q->params.limit, limit);
		WRITE_ONCE(sch->limit, limit);
	}

	if (tb[TCA_PIE_ALPHA])
		WRITE_ONCE(q->params.alpha, nla_get_u32(tb[TCA_PIE_ALPHA]));

	if (tb[TCA_PIE_BETA])
		WRITE_ONCE(q->params.beta, nla_get_u32(tb[TCA_PIE_BETA]));

	if (tb[TCA_PIE_ECN])
		WRITE_ONCE(q->params.ecn, nla_get_u32(tb[TCA_PIE_ECN]));

	if (tb[TCA_PIE_BYTEMODE])
		WRITE_ONCE(q->params.bytemode,
			   nla_get_u32(tb[TCA_PIE_BYTEMODE]));

	if (tb[TCA_PIE_DQ_RATE_ESTIMATOR])
		WRITE_ONCE(q->params.dq_rate_estimator,
			   nla_get_u32(tb[TCA_PIE_DQ_RATE_ESTIMATOR]));

	/* Drop excess packets if new limit is lower */
	qlen = sch->q.qlen;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

		dropped += qdisc_pkt_len(skb);
		qdisc_qstats_backlog_dec(sch, skb);
		rtnl_qdisc_drop(skb, sch);
	}
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	sch_tree_unlock(sch);
	return 0;
}

void pie_process_dequeue(struct sk_buff *skb, struct pie_params *params,
			 struct pie_vars *vars, u32 backlog)
{
	psched_time_t now = psched_get_time();
	u32 dtime = 0;

	/* If dq_rate_estimator is disabled, calculate qdelay using the
	 * packet timestamp.
	 */
	if (!params->dq_rate_estimator) {
		vars->qdelay = now - pie_get_enqueue_time(skb);

		if (vars->dq_tstamp != DTIME_INVALID)
			dtime = now - vars->dq_tstamp;

		vars->dq_tstamp = now;

		if (backlog == 0)
			vars->qdelay = 0;

		if (dtime == 0)
			return;

		goto burst_allowance_reduction;
	}

	/* If current queue is about 10 packets or more and dq_count is unset,
	 * we have enough packets to calculate the drain rate. Save
	 * current time as dq_tstamp and start measurement cycle.
	 */
	if (backlog >= QUEUE_THRESHOLD && vars->dq_count == DQCOUNT_INVALID) {
		vars->dq_tstamp = psched_get_time();
		vars->dq_count = 0;
	}

	/* Calculate the average drain rate from this value. If queue length
	 * has receded to a small value, viz., <= QUEUE_THRESHOLD bytes, reset
	 * the dq_count to -1 as we don't have enough packets to calculate the
	 * drain rate anymore. The following if block is entered only when we
	 * have a substantial queue built up (QUEUE_THRESHOLD bytes or more)
	 * and we calculate the drain rate for the threshold here. dq_count is
	 * in bytes, time difference in psched_time, hence rate is in
	 * bytes/psched_time.
	 */
	if (vars->dq_count != DQCOUNT_INVALID) {
		vars->dq_count += skb->len;

		if (vars->dq_count >= QUEUE_THRESHOLD) {
			u32 count = vars->dq_count << PIE_SCALE;

			dtime = now - vars->dq_tstamp;

			if (dtime == 0)
				return;

			count = count / dtime;

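			/* Exponentially weighted moving average with a
			 * weight of 1/8: avg = 7/8 * avg + 1/8 * sample.
			 */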
			if (vars->avg_dq_rate == 0)
				vars->avg_dq_rate = count;
			else
				vars->avg_dq_rate =
				    (vars->avg_dq_rate -
				     (vars->avg_dq_rate >> 3)) + (count >> 3);

			/* If the queue has receded below the threshold, we hold
			 * on to the last drain rate calculated, else we reset
			 * dq_count to 0 to re-enter the if block when the next
			 * packet is dequeued
			 */
			if (backlog < QUEUE_THRESHOLD) {
				vars->dq_count = DQCOUNT_INVALID;
			} else {
				vars->dq_count = 0;
				vars->dq_tstamp = psched_get_time();
			}

			goto burst_allowance_reduction;
		}
	}

	return;

burst_allowance_reduction:
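	/* Drain the configured burst allowance by the time elapsed since the
	 * previous dequeue; pie_drop_early() skips early drop until it
	 * reaches zero.
	 */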
	if (vars->burst_time > 0) {
		if (vars->burst_time > dtime)
			vars->burst_time -= dtime;
		else
			vars->burst_time = 0;
	}
}
EXPORT_SYMBOL_GPL(pie_process_dequeue);

void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars,
			       u32 backlog)
{
	psched_time_t qdelay = 0;	/* in pschedtime */
	psched_time_t qdelay_old = 0;	/* in pschedtime */
	s64 delta = 0;		/* determines the change in probability */
	u64 oldprob;
	u64 alpha, beta;
	u32 power;
	bool update_prob = true;

	if (params->dq_rate_estimator) {
		qdelay_old = vars->qdelay;
		vars->qdelay_old = vars->qdelay;

		if (vars->avg_dq_rate > 0)
			qdelay = (backlog << PIE_SCALE) / vars->avg_dq_rate;
		else
			qdelay = 0;
	} else {
		qdelay = vars->qdelay;
		qdelay_old = vars->qdelay_old;
	}

	/* If qdelay is zero and backlog is not, it means backlog is very small,
	 * so we do not update probability in this round.
	 */
	if (qdelay == 0 && backlog != 0)
		update_prob = false;

	/* In the algorithm, alpha and beta are between 0 and 2 with typical
	 * value for alpha as 0.125. In this implementation, we use values 0-32
	 * passed from user space to represent this. Also, alpha and beta have
	 * unit of HZ and need to be scaled before they can be used to update
	 * probability. alpha/beta are updated locally below by scaling down
	 * by 16 to come to 0-2 range.
	 */
	alpha = ((u64)params->alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
	beta = ((u64)params->beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
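	/* For example, a user-space value of 2 maps to 2/16 = 0.125, and a
	 * value of 20 maps to 20/16 = 1.25.
	 */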

	/* We scale alpha and beta differently depending on how heavy the
	 * congestion is. Please see RFC 8033 for details.
	 */
	if (vars->prob < MAX_PROB / 10) {
		alpha >>= 1;
		beta >>= 1;

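		/* Below 1% drop probability, each further decade of headroom
		 * scales alpha and beta down by an additional factor of 4.
		 */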
		power = 100;
		while (vars->prob < div_u64(MAX_PROB, power) &&
		       power <= 1000000) {
			alpha >>= 2;
			beta >>= 2;
			power *= 10;
		}
	}

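	/* PI controller update: the alpha term reacts to the deviation of the
	 * current delay from the target, the beta term to the delay trend
	 * since the last update.
	 */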
	/* alpha and beta should be between 0 and 32, in multiples of 1/16 */
	delta += alpha * (qdelay - params->target);
	delta += beta * (qdelay - qdelay_old);

	oldprob = vars->prob;

	/* to ensure we increase probability in steps of no more than 2% */
	if (delta > (s64)(MAX_PROB / (100 / 2)) &&
	    vars->prob >= MAX_PROB / 10)
		delta = (MAX_PROB / 100) * 2;

	/* Non-linear drop:
	 * Tune drop probability to increase quickly for high delays (>= 250ms)
	 * 250ms is derived through experiments and provides error protection
	 */

	if (qdelay > (PSCHED_NS2TICKS(250 * NSEC_PER_MSEC)))
		delta += MAX_PROB / (100 / 2);

	vars->prob += delta;

	if (delta > 0) {
		/* prevent overflow */
		if (vars->prob < oldprob) {
			vars->prob = MAX_PROB;
			/* Prevent normalization error. If probability is at
			 * maximum value already, we normalize it here, and
			 * skip the check to do a non-linear drop in the next
			 * section.
			 */
			update_prob = false;
		}
	} else {
		/* prevent underflow */
		if (vars->prob > oldprob)
			vars->prob = 0;
	}

	/* Non-linear drop in probability: Reduce drop probability quickly if
	 * delay is 0 for 2 consecutive Tupdate periods.
	 */

	if (qdelay == 0 && qdelay_old == 0 && update_prob)
		/* Reduce drop probability to 98.4% of its current value */
		vars->prob -= vars->prob / 64;

	vars->qdelay = qdelay;
	vars->backlog_old = backlog;

	/* We restart the measurement cycle if the following conditions are met
	 * 1. If the delay has been low for 2 consecutive Tupdate periods
	 * 2. Calculated drop probability is zero
	 * 3. If dq_rate_estimator is enabled, we have at least one estimate
	 *    for the avg_dq_rate, i.e., it is a non-zero value
	 */
	if ((vars->qdelay < params->target / 2) &&
	    (vars->qdelay_old < params->target / 2) &&
	    vars->prob == 0 &&
	    (!params->dq_rate_estimator || vars->avg_dq_rate > 0)) {
		pie_vars_init(vars);
	}

	if (!params->dq_rate_estimator)
		vars->qdelay_old = qdelay;
}
EXPORT_SYMBOL_GPL(pie_calculate_probability);

static void pie_timer(struct timer_list *t)
{
	struct pie_sched_data *q = from_timer(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock;

	rcu_read_lock();
	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
	spin_lock(root_lock);
	pie_calculate_probability(&q->params, &q->vars, sch->qstats.backlog);

	/* reset the timer to fire after 'tupdate'. tupdate is in jiffies. */
	if (q->params.tupdate)
		mod_timer(&q->adapt_timer, jiffies + q->params.tupdate);
	spin_unlock(root_lock);
	rcu_read_unlock();
}

static int pie_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	pie_params_init(&q->params);
	pie_vars_init(&q->vars);
	sch->limit = q->params.limit;

	q->sch = sch;
	timer_setup(&q->adapt_timer, pie_timer, 0);

	if (opt) {
		int err = pie_change(sch, opt, extack);

		if (err)
			return err;
	}

	mod_timer(&q->adapt_timer, jiffies + HZ / 2);
	return 0;
}

static int pie_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!opts)
		goto nla_put_failure;

	/* convert target from pschedtime to us */
	if (nla_put_u32(skb, TCA_PIE_TARGET,
			((u32)PSCHED_TICKS2NS(READ_ONCE(q->params.target))) /
			NSEC_PER_USEC) ||
	    nla_put_u32(skb, TCA_PIE_LIMIT, READ_ONCE(sch->limit)) ||
	    nla_put_u32(skb, TCA_PIE_TUPDATE,
			jiffies_to_usecs(READ_ONCE(q->params.tupdate))) ||
	    nla_put_u32(skb, TCA_PIE_ALPHA, READ_ONCE(q->params.alpha)) ||
	    nla_put_u32(skb, TCA_PIE_BETA, READ_ONCE(q->params.beta)) ||
	    nla_put_u32(skb, TCA_PIE_ECN, READ_ONCE(q->params.ecn)) ||
	    nla_put_u32(skb, TCA_PIE_BYTEMODE,
			READ_ONCE(q->params.bytemode)) ||
	    nla_put_u32(skb, TCA_PIE_DQ_RATE_ESTIMATOR,
			READ_ONCE(q->params.dq_rate_estimator)))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -1;
}

static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct tc_pie_xstats st = {
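		/* the internal probability is kept in a 56-bit range; scale
		 * it back up to the full 64-bit range reported to user space
		 */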
		.prob		= q->vars.prob << BITS_PER_BYTE,
		.delay		= ((u32)PSCHED_TICKS2NS(q->vars.qdelay)) /
				   NSEC_PER_USEC,
		.packets_in	= q->stats.packets_in,
		.overlimit	= q->stats.overlimit,
		.maxq		= q->stats.maxq,
		.dropped	= q->stats.dropped,
		.ecn_mark	= q->stats.ecn_mark,
	};

	/* avg_dq_rate is only valid if dq_rate_estimator is enabled */
	st.dq_rate_estimating = q->params.dq_rate_estimator;

	/* unscale and return dq_rate in bytes per sec */
	if (q->params.dq_rate_estimator)
		st.avg_dq_rate = q->vars.avg_dq_rate *
				 (PSCHED_TICKS_PER_SEC) >> PIE_SCALE;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = qdisc_dequeue_head(sch);

	if (!skb)
		return NULL;

	pie_process_dequeue(skb, &q->params, &q->vars, sch->qstats.backlog);
	return skb;
}

static void pie_reset(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	pie_vars_init(&q->vars);
}

static void pie_destroy(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	q->params.tupdate = 0;
	del_timer_sync(&q->adapt_timer);
}

static struct Qdisc_ops pie_qdisc_ops __read_mostly = {
	.id		= "pie",
	.priv_size	= sizeof(struct pie_sched_data),
	.enqueue	= pie_qdisc_enqueue,
	.dequeue	= pie_qdisc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= pie_init,
	.destroy	= pie_destroy,
	.reset		= pie_reset,
	.change		= pie_change,
	.dump		= pie_dump,
	.dump_stats	= pie_dump_stats,
	.owner		= THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("pie");

static int __init pie_module_init(void)
{
	return register_qdisc(&pie_qdisc_ops);
}

static void __exit pie_module_exit(void)
{
	unregister_qdisc(&pie_qdisc_ops);
}

module_init(pie_module_init);
module_exit(pie_module_exit);

MODULE_DESCRIPTION("Proportional Integral controller Enhanced (PIE) scheduler");
MODULE_AUTHOR("Vijay Subramanian");
MODULE_AUTHOR("Mythili Prabhu");
MODULE_LICENSE("GPL");