/* Copyright (C) 2013 Cisco Systems, Inc, 2013.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Author: Vijay Subramanian <vijaynsu@cisco.com>
 * Author: Mythili Prabhu <mysuryan@cisco.com>
 *
 * ECN support is added by Naeem Khademi <naeemk@ifi.uio.no>
 * University of Oslo, Norway.
 *
 * References:
 * IETF draft submission: http://tools.ietf.org/html/draft-pan-aqm-pie-00
 * IEEE Conference on High Performance Switching and Routing 2013:
 * "PIE: A Lightweight Control Scheme to Address the Bufferbloat Problem"
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define QUEUE_THRESHOLD 10000
#define DQCOUNT_INVALID -1
#define MAX_PROB 0xffffffff
#define PIE_SCALE 8

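/* Illustrative note (not part of the original source): MAX_PROB maps a
 * drop probability of 1.0 onto the full u32 range, so vars.prob is a
 * fixed-point fraction of 0xffffffff. For example, a prob of 0x20000000
 * is 0x20000000 / 0xffffffff, i.e. roughly a 12.5% drop probability.
 * PIE_SCALE adds 8 fractional bits to the drain-rate arithmetic below.
 */
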
/* parameters used */
struct pie_params {
	psched_time_t target;	/* user specified target delay in pschedtime */
	u32 tupdate;		/* timer frequency (in jiffies) */
	u32 limit;		/* number of packets that can be enqueued */
	u32 alpha;		/* alpha and beta are between 0 and 32 */
	u32 beta;		/* and are used for shift relative to 1 */
	bool ecn;		/* true if ecn is enabled */
	bool bytemode;		/* to scale drop early prob based on pkt size */
};

/* variables used */
struct pie_vars {
	u32 prob;		/* probability but scaled by u32 limit. */
	psched_time_t burst_time;
	psched_time_t qdelay;
	psched_time_t qdelay_old;
	u64 dq_count;		/* measured in bytes */
	psched_time_t dq_tstamp;	/* start of drain-rate measurement cycle */
	u32 avg_dq_rate;	/* bytes per pschedtime tick, scaled */
	u32 qlen_old;		/* in bytes */
};

/* statistics gathering */
struct pie_stats {
	u32 packets_in;		/* total number of packets enqueued */
	u32 dropped;		/* packets dropped due to pie_action */
	u32 overlimit;		/* dropped due to lack of space in queue */
	u32 maxq;		/* maximum queue size */
	u32 ecn_mark;		/* packets marked with ECN */
};

/* private data for the Qdisc */
struct pie_sched_data {
	struct pie_params params;
	struct pie_vars vars;
	struct pie_stats stats;
	struct timer_list adapt_timer;
};

static void pie_params_init(struct pie_params *params)
{
	params->alpha = 2;
	params->beta = 20;
	params->tupdate = usecs_to_jiffies(30 * USEC_PER_MSEC);	/* 30 ms */
	params->limit = 1000;	/* default of 1000 packets */
	params->target = PSCHED_NS2TICKS(20 * NSEC_PER_MSEC);	/* 20 ms */
	params->ecn = false;
	params->bytemode = false;
}

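/* For illustration only (not part of the original source): these
 * defaults correspond to configuring the qdisc from user space with
 * something like
 *
 *	tc qdisc add dev eth0 root pie limit 1000 target 20ms \
 *		tupdate 30ms alpha 2 beta 20
 *
 * where the device name is hypothetical; see tc-pie(8) for the exact
 * option names.
 */
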
static void pie_vars_init(struct pie_vars *vars)
{
	vars->dq_count = DQCOUNT_INVALID;
	vars->avg_dq_rate = 0;
	/* default of 100 ms in pschedtime */
	vars->burst_time = PSCHED_NS2TICKS(100 * NSEC_PER_MSEC);
}

static bool drop_early(struct Qdisc *sch, u32 packet_size)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	u32 rnd;
	u32 local_prob = q->vars.prob;
	u32 mtu = psched_mtu(qdisc_dev(sch));

	/* If there is still burst allowance left skip random early drop */
	if (q->vars.burst_time > 0)
		return false;

	/* If current delay is less than half of target, and
	 * if drop prob is low already, disable early_drop
	 */
	if ((q->vars.qdelay < q->params.target / 2) &&
	    (q->vars.prob < MAX_PROB / 5))
		return false;

	/* If we have fewer than 2 mtu-sized packets, disable drop_early,
	 * similar to min_th in RED
	 */
	if (sch->qstats.backlog < 2 * mtu)
		return false;

	/* If bytemode is turned on, use packet size to compute new
	 * probability. Smaller packets will have lower drop prob in this case
	 */
	if (q->params.bytemode && packet_size <= mtu)
		local_prob = (local_prob / mtu) * packet_size;
	else
		local_prob = q->vars.prob;

	rnd = prandom_u32();
	if (rnd < local_prob)
		return true;

	return false;
}

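/* Rough worked example of the comparison above, added for illustration
 * (numbers are hypothetical, not from the original source): with prob
 * at MAX_PROB / 4, prandom_u32() falls below local_prob about 25% of
 * the time, so on average one packet in four is early-dropped. In
 * bytemode, a 500-byte packet against a 1500-byte MTU scales
 * local_prob down to about a third of that.
 */
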
static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	bool enqueue = false;

	if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
		q->stats.overlimit++;
		goto out;
	}

	if (!drop_early(sch, skb->len)) {
		enqueue = true;
	} else if (q->params.ecn && (q->vars.prob <= MAX_PROB / 10) &&
		   INET_ECN_set_ce(skb)) {
		/* If packet is ecn capable, mark it if drop probability
		 * is lower than 10%, else drop it.
		 */
		q->stats.ecn_mark++;
		enqueue = true;
	}

	/* we can enqueue the packet */
	if (enqueue) {
		q->stats.packets_in++;
		if (qdisc_qlen(sch) > q->stats.maxq)
			q->stats.maxq = qdisc_qlen(sch);

		return qdisc_enqueue_tail(skb, sch);
	}

out:
	q->stats.dropped++;
	return qdisc_drop(skb, sch, to_free);
}

static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
	[TCA_PIE_TARGET] = {.type = NLA_U32},
	[TCA_PIE_LIMIT] = {.type = NLA_U32},
	[TCA_PIE_TUPDATE] = {.type = NLA_U32},
	[TCA_PIE_ALPHA] = {.type = NLA_U32},
	[TCA_PIE_BETA] = {.type = NLA_U32},
	[TCA_PIE_ECN] = {.type = NLA_U32},
	[TCA_PIE_BYTEMODE] = {.type = NLA_U32},
};

static int pie_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_PIE_MAX + 1];
	unsigned int qlen, dropped = 0;
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_PIE_MAX, opt, pie_policy);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	/* convert from microseconds to pschedtime */
	if (tb[TCA_PIE_TARGET]) {
		/* target is in us */
		u32 target = nla_get_u32(tb[TCA_PIE_TARGET]);

		/* convert to pschedtime */
		q->params.target = PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC);
	}

	/* tupdate is in jiffies */
	if (tb[TCA_PIE_TUPDATE])
		q->params.tupdate = usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE]));

	if (tb[TCA_PIE_LIMIT]) {
		u32 limit = nla_get_u32(tb[TCA_PIE_LIMIT]);

		q->params.limit = limit;
		sch->limit = limit;
	}

	if (tb[TCA_PIE_ALPHA])
		q->params.alpha = nla_get_u32(tb[TCA_PIE_ALPHA]);

	if (tb[TCA_PIE_BETA])
		q->params.beta = nla_get_u32(tb[TCA_PIE_BETA]);

	if (tb[TCA_PIE_ECN])
		q->params.ecn = nla_get_u32(tb[TCA_PIE_ECN]);

	if (tb[TCA_PIE_BYTEMODE])
		q->params.bytemode = nla_get_u32(tb[TCA_PIE_BYTEMODE]);

	/* Drop excess packets if new limit is lower */
	qlen = sch->q.qlen;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

		dropped += qdisc_pkt_len(skb);
		qdisc_qstats_backlog_dec(sch, skb);
		rtnl_qdisc_drop(skb, sch);
	}
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	sch_tree_unlock(sch);
	return 0;
}

static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	int qlen = sch->qstats.backlog;	/* current queue size in bytes */

	/* If current queue is about 10 packets or more and dq_count is unset
	 * we have enough packets to calculate the drain rate. Save
	 * current time as dq_tstamp and start measurement cycle.
	 */
	if (qlen >= QUEUE_THRESHOLD && q->vars.dq_count == DQCOUNT_INVALID) {
		q->vars.dq_tstamp = psched_get_time();
		q->vars.dq_count = 0;
	}

	/* Calculate the average drain rate from this value. If queue length
	 * has receded to a small value viz., <= QUEUE_THRESHOLD bytes, reset
	 * the dq_count to -1 as we don't have enough packets to calculate the
	 * drain rate anymore. The following if block is entered only when we
	 * have a substantial queue built up (QUEUE_THRESHOLD bytes or more)
	 * and we calculate the drain rate for the threshold here. dq_count is
	 * in bytes, time difference in psched_time, hence rate is in
	 * bytes/psched_time.
	 */
	if (q->vars.dq_count != DQCOUNT_INVALID) {
		q->vars.dq_count += skb->len;

		if (q->vars.dq_count >= QUEUE_THRESHOLD) {
			psched_time_t now = psched_get_time();
			u32 dtime = now - q->vars.dq_tstamp;
			u32 count = q->vars.dq_count << PIE_SCALE;

			if (dtime == 0)
				return;

			count = count / dtime;

			if (q->vars.avg_dq_rate == 0)
				q->vars.avg_dq_rate = count;
			else
				q->vars.avg_dq_rate =
				    (q->vars.avg_dq_rate -
				     (q->vars.avg_dq_rate >> 3)) + (count >> 3);

			/* If the queue has receded below the threshold, we hold
			 * on to the last drain rate calculated, else we reset
			 * dq_count to 0 to re-enter the if block when the next
			 * packet is dequeued
			 */
			if (qlen < QUEUE_THRESHOLD) {
				q->vars.dq_count = DQCOUNT_INVALID;
			} else {
				q->vars.dq_count = 0;
				q->vars.dq_tstamp = psched_get_time();
			}

			if (q->vars.burst_time > 0) {
				if (q->vars.burst_time > dtime)
					q->vars.burst_time -= dtime;
				else
					q->vars.burst_time = 0;
			}
		}
	}
}

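/* The avg_dq_rate update above is an exponentially weighted moving
 * average with weight 1/8: avg = 7/8 * avg + 1/8 * count. As a rough
 * illustrative example (numbers invented, not from the source), an old
 * average of 800 and a new sample of 1600 yield 700 + 200 = 900.
 */
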
static void calculate_probability(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	u32 qlen = sch->qstats.backlog;	/* queue size in bytes */
	psched_time_t qdelay = 0;	/* in pschedtime */
	psched_time_t qdelay_old = q->vars.qdelay;	/* in pschedtime */
	s32 delta = 0;		/* determines the change in probability */
	u32 oldprob;
	u32 alpha, beta;
	bool update_prob = true;

	q->vars.qdelay_old = q->vars.qdelay;

	if (q->vars.avg_dq_rate > 0)
		qdelay = (qlen << PIE_SCALE) / q->vars.avg_dq_rate;
	else
		qdelay = 0;

	/* If qdelay is zero and qlen is not, it means qlen is very small,
	 * less than dequeue_rate, so we do not update probability in this
	 * round.
	 */
	if (qdelay == 0 && qlen != 0)
		update_prob = false;

	/* In the algorithm, alpha and beta are between 0 and 2 with typical
	 * value for alpha as 0.125. In this implementation, we use values 0-32
	 * passed from user space to represent this. Also, alpha and beta have
	 * unit of HZ and need to be scaled before they can be used to update
	 * probability. alpha/beta are updated locally below by 1) scaling them
	 * appropriately 2) scaling down by 16 to come to 0-2 range.
	 * Please see paper for details.
	 *
	 * We scale alpha and beta differently depending on whether we are in
	 * light, medium or high dropping mode.
	 */
	if (q->vars.prob < MAX_PROB / 100) {
		alpha =
		    (q->params.alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 7;
		beta =
		    (q->params.beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 7;
	} else if (q->vars.prob < MAX_PROB / 10) {
		alpha =
		    (q->params.alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 5;
		beta =
		    (q->params.beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 5;
	} else {
		alpha =
		    (q->params.alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
		beta =
		    (q->params.beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
	}

	/* alpha and beta should be between 0 and 32, in multiples of 1/16 */
	delta += alpha * ((qdelay - q->params.target));
	delta += beta * ((qdelay - qdelay_old));

	oldprob = q->vars.prob;

	/* to ensure we increase probability in steps of no more than 2% */
	if (delta > (s32)(MAX_PROB / (100 / 2)) &&
	    q->vars.prob >= MAX_PROB / 10)
		delta = (MAX_PROB / 100) * 2;

	/* Non-linear drop:
	 * Tune drop probability to increase quickly for high delays (>= 250ms)
	 * 250ms is derived through experiments and provides error protection
	 */

	if (qdelay > (PSCHED_NS2TICKS(250 * NSEC_PER_MSEC)))
		delta += MAX_PROB / (100 / 2);

	q->vars.prob += delta;

	if (delta > 0) {
		/* prevent overflow */
		if (q->vars.prob < oldprob) {
			q->vars.prob = MAX_PROB;
			/* Prevent normalization error. If probability is at
			 * maximum value already, we normalize it here, and
			 * skip the check to do a non-linear drop in the next
			 * section.
			 */
			update_prob = false;
		}
	} else {
		/* prevent underflow */
		if (q->vars.prob > oldprob)
			q->vars.prob = 0;
	}

	/* Non-linear drop in probability: Reduce drop probability quickly if
	 * delay is 0 for 2 consecutive Tupdate periods.
	 */

	if ((qdelay == 0) && (qdelay_old == 0) && update_prob)
		q->vars.prob = (q->vars.prob * 98) / 100;

	q->vars.qdelay = qdelay;
	q->vars.qlen_old = qlen;

	/* We restart the measurement cycle if the following conditions are met
	 * 1. If the delay has been low for 2 consecutive Tupdate periods
	 * 2. Calculated drop probability is zero
	 * 3. We have at least one estimate for the avg_dq_rate, i.e., it is a
	 *    non-zero value
	 */
	if ((q->vars.qdelay < q->params.target / 2) &&
	    (q->vars.qdelay_old < q->params.target / 2) &&
	    (q->vars.prob == 0) &&
	    (q->vars.avg_dq_rate > 0))
		pie_vars_init(&q->vars);
}

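/* Worked example, added for illustration (numbers are hypothetical):
 * with the default alpha of 2 in user units, the base coefficient is
 * 2/16 = 0.125, the typical value from the PIE paper. The extra shifts
 * above further divide it by 8 in light dropping mode (prob < 1%) or
 * by 2 in medium mode (prob < 10%), so small probabilities adapt
 * gently and large ones adapt at full strength.
 */
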
static void pie_timer(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct pie_sched_data *q = qdisc_priv(sch);
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	calculate_probability(sch);

	/* reset the timer to fire after 'tupdate'. tupdate is in jiffies. */
	if (q->params.tupdate)
		mod_timer(&q->adapt_timer, jiffies + q->params.tupdate);
	spin_unlock(root_lock);
}

static int pie_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	pie_params_init(&q->params);
	pie_vars_init(&q->vars);
	sch->limit = q->params.limit;

	setup_timer(&q->adapt_timer, pie_timer, (unsigned long)sch);

	if (opt) {
		int err = pie_change(sch, opt);

		if (err)
			return err;
	}

	mod_timer(&q->adapt_timer, jiffies + HZ / 2);
	return 0;
}

static int pie_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	/* convert target from pschedtime to us */
	if (nla_put_u32(skb, TCA_PIE_TARGET,
			((u32)PSCHED_TICKS2NS(q->params.target)) /
			NSEC_PER_USEC) ||
	    nla_put_u32(skb, TCA_PIE_LIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_PIE_TUPDATE, jiffies_to_usecs(q->params.tupdate)) ||
	    nla_put_u32(skb, TCA_PIE_ALPHA, q->params.alpha) ||
	    nla_put_u32(skb, TCA_PIE_BETA, q->params.beta) ||
	    nla_put_u32(skb, TCA_PIE_ECN, q->params.ecn) ||
	    nla_put_u32(skb, TCA_PIE_BYTEMODE, q->params.bytemode))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -1;
}

static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct tc_pie_xstats st = {
		.prob = q->vars.prob,
		.delay = ((u32)PSCHED_TICKS2NS(q->vars.qdelay)) /
			 NSEC_PER_USEC,
		/* unscale and return dq_rate in bytes per sec */
		.avg_dq_rate = q->vars.avg_dq_rate *
			       (PSCHED_TICKS_PER_SEC) >> PIE_SCALE,
		.packets_in = q->stats.packets_in,
		.overlimit = q->stats.overlimit,
		.maxq = q->stats.maxq,
		.dropped = q->stats.dropped,
		.ecn_mark = q->stats.ecn_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;

	skb = qdisc_dequeue_head(sch);
	if (!skb)
		return NULL;

	pie_process_dequeue(sch, skb);
	return skb;
}

static void pie_reset(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	pie_vars_init(&q->vars);
}

static void pie_destroy(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	q->params.tupdate = 0;
	del_timer_sync(&q->adapt_timer);
}

static struct Qdisc_ops pie_qdisc_ops __read_mostly = {
	.id		= "pie",
	.priv_size	= sizeof(struct pie_sched_data),
	.enqueue	= pie_qdisc_enqueue,
	.dequeue	= pie_qdisc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= pie_init,
	.destroy	= pie_destroy,
	.reset		= pie_reset,
	.change		= pie_change,
	.dump		= pie_dump,
	.dump_stats	= pie_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init pie_module_init(void)
{
	return register_qdisc(&pie_qdisc_ops);
}

static void __exit pie_module_exit(void)
{
	unregister_qdisc(&pie_qdisc_ops);
}

module_init(pie_module_init);
module_exit(pie_module_exit);

MODULE_DESCRIPTION("Proportional Integral controller Enhanced (PIE) scheduler");
MODULE_AUTHOR("Vijay Subramanian");
MODULE_AUTHOR("Mythili Prabhu");
MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2013 Cisco Systems, Inc, 2013.
 *
 * Author: Vijay Subramanian <vijaynsu@cisco.com>
 * Author: Mythili Prabhu <mysuryan@cisco.com>
 *
 * ECN support is added by Naeem Khademi <naeemk@ifi.uio.no>
 * University of Oslo, Norway.
 *
 * References:
 * RFC 8033: https://tools.ietf.org/html/rfc8033
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/pie.h>

/* private data for the Qdisc */
struct pie_sched_data {
	struct pie_vars vars;
	struct pie_params params;
	struct pie_stats stats;
	struct timer_list adapt_timer;
	struct Qdisc *sch;
};

bool pie_drop_early(struct Qdisc *sch, struct pie_params *params,
		    struct pie_vars *vars, u32 backlog, u32 packet_size)
{
	u64 rnd;
	u64 local_prob = vars->prob;
	u32 mtu = psched_mtu(qdisc_dev(sch));

	/* If there is still burst allowance left skip random early drop */
	if (vars->burst_time > 0)
		return false;

	/* If current delay is less than half of target, and
	 * if drop prob is low already, disable early_drop
	 */
	if ((vars->qdelay < params->target / 2) &&
	    (vars->prob < MAX_PROB / 5))
		return false;

	/* If we have fewer than 2 mtu-sized packets, disable pie_drop_early,
	 * similar to min_th in RED
	 */
	if (backlog < 2 * mtu)
		return false;

	/* If bytemode is turned on, use packet size to compute new
	 * probability. Smaller packets will have lower drop prob in this case
	 */
	if (params->bytemode && packet_size <= mtu)
		local_prob = (u64)packet_size * div_u64(local_prob, mtu);
	else
		local_prob = vars->prob;

	if (local_prob == 0)
		vars->accu_prob = 0;
	else
		vars->accu_prob += local_prob;

	if (vars->accu_prob < (MAX_PROB / 100) * 85)
		return false;
	if (vars->accu_prob >= (MAX_PROB / 2) * 17)
		return true;

	get_random_bytes(&rnd, 8);
	if ((rnd >> BITS_PER_BYTE) < local_prob) {
		vars->accu_prob = 0;
		return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(pie_drop_early);

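/* Note on the accu_prob checks above (explanatory, not from the
 * original source): they implement the derandomization suggested in
 * RFC 8033. An accumulated probability below 0.85 (encoded here as
 * (MAX_PROB / 100) * 85) never drops, and one at or above 8.5
 * (encoded as (MAX_PROB / 2) * 17) always drops, so drops are spaced
 * more evenly than a pure per-packet coin flip would give.
 */
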
static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	bool enqueue = false;

	if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
		q->stats.overlimit++;
		goto out;
	}

	if (!pie_drop_early(sch, &q->params, &q->vars, sch->qstats.backlog,
			    skb->len)) {
		enqueue = true;
	} else if (q->params.ecn && (q->vars.prob <= MAX_PROB / 10) &&
		   INET_ECN_set_ce(skb)) {
		/* If packet is ecn capable, mark it if drop probability
		 * is lower than 10%, else drop it.
		 */
		q->stats.ecn_mark++;
		enqueue = true;
	}

	/* we can enqueue the packet */
	if (enqueue) {
		/* Set enqueue time only when dq_rate_estimator is disabled. */
		if (!q->params.dq_rate_estimator)
			pie_set_enqueue_time(skb);

		q->stats.packets_in++;
		if (qdisc_qlen(sch) > q->stats.maxq)
			q->stats.maxq = qdisc_qlen(sch);

		return qdisc_enqueue_tail(skb, sch);
	}

out:
	q->stats.dropped++;
	q->vars.accu_prob = 0;
	return qdisc_drop(skb, sch, to_free);
}

static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
	[TCA_PIE_TARGET] = {.type = NLA_U32},
	[TCA_PIE_LIMIT] = {.type = NLA_U32},
	[TCA_PIE_TUPDATE] = {.type = NLA_U32},
	[TCA_PIE_ALPHA] = {.type = NLA_U32},
	[TCA_PIE_BETA] = {.type = NLA_U32},
	[TCA_PIE_ECN] = {.type = NLA_U32},
	[TCA_PIE_BYTEMODE] = {.type = NLA_U32},
	[TCA_PIE_DQ_RATE_ESTIMATOR] = {.type = NLA_U32},
};

static int pie_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_PIE_MAX + 1];
	unsigned int qlen, dropped = 0;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_PIE_MAX, opt, pie_policy,
					  NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	/* convert from microseconds to pschedtime */
	if (tb[TCA_PIE_TARGET]) {
		/* target is in us */
		u32 target = nla_get_u32(tb[TCA_PIE_TARGET]);

		/* convert to pschedtime */
		WRITE_ONCE(q->params.target,
			   PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC));
	}

	/* tupdate is in jiffies */
	if (tb[TCA_PIE_TUPDATE])
		WRITE_ONCE(q->params.tupdate,
			   usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE])));

	if (tb[TCA_PIE_LIMIT]) {
		u32 limit = nla_get_u32(tb[TCA_PIE_LIMIT]);

		WRITE_ONCE(q->params.limit, limit);
		WRITE_ONCE(sch->limit, limit);
	}

	if (tb[TCA_PIE_ALPHA])
		WRITE_ONCE(q->params.alpha, nla_get_u32(tb[TCA_PIE_ALPHA]));

	if (tb[TCA_PIE_BETA])
		WRITE_ONCE(q->params.beta, nla_get_u32(tb[TCA_PIE_BETA]));

	if (tb[TCA_PIE_ECN])
		WRITE_ONCE(q->params.ecn, nla_get_u32(tb[TCA_PIE_ECN]));

	if (tb[TCA_PIE_BYTEMODE])
		WRITE_ONCE(q->params.bytemode,
			   nla_get_u32(tb[TCA_PIE_BYTEMODE]));

	if (tb[TCA_PIE_DQ_RATE_ESTIMATOR])
		WRITE_ONCE(q->params.dq_rate_estimator,
			   nla_get_u32(tb[TCA_PIE_DQ_RATE_ESTIMATOR]));

	/* Drop excess packets if new limit is lower */
	qlen = sch->q.qlen;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

		dropped += qdisc_pkt_len(skb);
		qdisc_qstats_backlog_dec(sch, skb);
		rtnl_qdisc_drop(skb, sch);
	}
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	sch_tree_unlock(sch);
	return 0;
}

void pie_process_dequeue(struct sk_buff *skb, struct pie_params *params,
			 struct pie_vars *vars, u32 backlog)
{
	psched_time_t now = psched_get_time();
	u32 dtime = 0;

	/* If dq_rate_estimator is disabled, calculate qdelay using the
	 * packet timestamp.
	 */
	if (!params->dq_rate_estimator) {
		vars->qdelay = now - pie_get_enqueue_time(skb);

		if (vars->dq_tstamp != DTIME_INVALID)
			dtime = now - vars->dq_tstamp;

		vars->dq_tstamp = now;

		if (backlog == 0)
			vars->qdelay = 0;

		if (dtime == 0)
			return;

		goto burst_allowance_reduction;
	}

	/* If current queue is about 10 packets or more and dq_count is unset
	 * we have enough packets to calculate the drain rate. Save
	 * current time as dq_tstamp and start measurement cycle.
	 */
	if (backlog >= QUEUE_THRESHOLD && vars->dq_count == DQCOUNT_INVALID) {
		vars->dq_tstamp = psched_get_time();
		vars->dq_count = 0;
	}

	/* Calculate the average drain rate from this value. If queue length
	 * has receded to a small value viz., <= QUEUE_THRESHOLD bytes, reset
	 * the dq_count to -1 as we don't have enough packets to calculate the
	 * drain rate anymore. The following if block is entered only when we
	 * have a substantial queue built up (QUEUE_THRESHOLD bytes or more)
	 * and we calculate the drain rate for the threshold here. dq_count is
	 * in bytes, time difference in psched_time, hence rate is in
	 * bytes/psched_time.
	 */
	if (vars->dq_count != DQCOUNT_INVALID) {
		vars->dq_count += skb->len;

		if (vars->dq_count >= QUEUE_THRESHOLD) {
			u32 count = vars->dq_count << PIE_SCALE;

			dtime = now - vars->dq_tstamp;

			if (dtime == 0)
				return;

			count = count / dtime;

			if (vars->avg_dq_rate == 0)
				vars->avg_dq_rate = count;
			else
				vars->avg_dq_rate =
				    (vars->avg_dq_rate -
				     (vars->avg_dq_rate >> 3)) + (count >> 3);

			/* If the queue has receded below the threshold, we hold
			 * on to the last drain rate calculated, else we reset
			 * dq_count to 0 to re-enter the if block when the next
			 * packet is dequeued
			 */
			if (backlog < QUEUE_THRESHOLD) {
				vars->dq_count = DQCOUNT_INVALID;
			} else {
				vars->dq_count = 0;
				vars->dq_tstamp = psched_get_time();
			}

			goto burst_allowance_reduction;
		}
	}

	return;

burst_allowance_reduction:
	if (vars->burst_time > 0) {
		if (vars->burst_time > dtime)
			vars->burst_time -= dtime;
		else
			vars->burst_time = 0;
	}
}
EXPORT_SYMBOL_GPL(pie_process_dequeue);

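/* Summary of the two modes above, added for clarity: with
 * dq_rate_estimator disabled, queue delay is measured directly from
 * per-packet enqueue timestamps; with it enabled, delay is derived
 * later in pie_calculate_probability() as backlog / avg_dq_rate,
 * which is effectively Little's law applied to the byte backlog.
 */
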
void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars,
			       u32 backlog)
{
	psched_time_t qdelay = 0;	/* in pschedtime */
	psched_time_t qdelay_old = 0;	/* in pschedtime */
	s64 delta = 0;		/* determines the change in probability */
	u64 oldprob;
	u64 alpha, beta;
	u32 power;
	bool update_prob = true;

	if (params->dq_rate_estimator) {
		qdelay_old = vars->qdelay;
		vars->qdelay_old = vars->qdelay;

		if (vars->avg_dq_rate > 0)
			qdelay = (backlog << PIE_SCALE) / vars->avg_dq_rate;
		else
			qdelay = 0;
	} else {
		qdelay = vars->qdelay;
		qdelay_old = vars->qdelay_old;
	}

	/* If qdelay is zero and backlog is not, it means backlog is very
	 * small, so we do not update probability in this round.
	 */
	if (qdelay == 0 && backlog != 0)
		update_prob = false;

	/* In the algorithm, alpha and beta are between 0 and 2 with typical
	 * value for alpha as 0.125. In this implementation, we use values 0-32
	 * passed from user space to represent this. Also, alpha and beta have
	 * unit of HZ and need to be scaled before they can be used to update
	 * probability. alpha/beta are updated locally below by scaling down
	 * by 16 to come to 0-2 range.
	 */
	alpha = ((u64)params->alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
	beta = ((u64)params->beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;

	/* We scale alpha and beta differently depending on how heavy the
	 * congestion is. Please see RFC 8033 for details.
	 */
	if (vars->prob < MAX_PROB / 10) {
		alpha >>= 1;
		beta >>= 1;

		power = 100;
		while (vars->prob < div_u64(MAX_PROB, power) &&
		       power <= 1000000) {
			alpha >>= 2;
			beta >>= 2;
			power *= 10;
		}
	}

	/* alpha and beta should be between 0 and 32, in multiples of 1/16 */
	delta += alpha * (qdelay - params->target);
	delta += beta * (qdelay - qdelay_old);

	oldprob = vars->prob;

	/* to ensure we increase probability in steps of no more than 2% */
	if (delta > (s64)(MAX_PROB / (100 / 2)) &&
	    vars->prob >= MAX_PROB / 10)
		delta = (MAX_PROB / 100) * 2;

	/* Non-linear drop:
	 * Tune drop probability to increase quickly for high delays (>= 250ms)
	 * 250ms is derived through experiments and provides error protection
	 */

	if (qdelay > (PSCHED_NS2TICKS(250 * NSEC_PER_MSEC)))
		delta += MAX_PROB / (100 / 2);

	vars->prob += delta;

	if (delta > 0) {
		/* prevent overflow */
		if (vars->prob < oldprob) {
			vars->prob = MAX_PROB;
			/* Prevent normalization error. If probability is at
			 * maximum value already, we normalize it here, and
			 * skip the check to do a non-linear drop in the next
			 * section.
			 */
			update_prob = false;
		}
	} else {
		/* prevent underflow */
		if (vars->prob > oldprob)
			vars->prob = 0;
	}

	/* Non-linear drop in probability: Reduce drop probability quickly if
	 * delay is 0 for 2 consecutive Tupdate periods.
	 */

	if (qdelay == 0 && qdelay_old == 0 && update_prob)
		/* Reduce drop probability to 98.4% of its current value */
		vars->prob -= vars->prob / 64;

	vars->qdelay = qdelay;
	vars->backlog_old = backlog;

	/* We restart the measurement cycle if the following conditions are met
	 * 1. If the delay has been low for 2 consecutive Tupdate periods
	 * 2. Calculated drop probability is zero
	 * 3. If the dq_rate_estimator is enabled, we have at least one
	 *    estimate for the avg_dq_rate, i.e., it is a non-zero value
	 */
	if ((vars->qdelay < params->target / 2) &&
	    (vars->qdelay_old < params->target / 2) &&
	    vars->prob == 0 &&
	    (!params->dq_rate_estimator || vars->avg_dq_rate > 0)) {
		pie_vars_init(vars);
	}

	if (!params->dq_rate_estimator)
		vars->qdelay_old = qdelay;
}
EXPORT_SYMBOL_GPL(pie_calculate_probability);

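/* Rough guide to the auto-tuning loop above (illustrative, following
 * RFC 8033): relative to the base coefficients, prob < 10% halves
 * alpha/beta, prob < 1% divides them by 8, prob < 0.1% by 32, and so
 * on by a further factor of 4 per decade, down to prob < 0.0001%.
 */
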
static void pie_timer(struct timer_list *t)
{
	struct pie_sched_data *q = from_timer(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock;

	rcu_read_lock();
	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
	spin_lock(root_lock);
	pie_calculate_probability(&q->params, &q->vars, sch->qstats.backlog);

	/* reset the timer to fire after 'tupdate'. tupdate is in jiffies. */
	if (q->params.tupdate)
		mod_timer(&q->adapt_timer, jiffies + q->params.tupdate);
	spin_unlock(root_lock);
	rcu_read_unlock();
}

static int pie_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	pie_params_init(&q->params);
	pie_vars_init(&q->vars);
	sch->limit = q->params.limit;

	q->sch = sch;
	timer_setup(&q->adapt_timer, pie_timer, 0);

	if (opt) {
		int err = pie_change(sch, opt, extack);

		if (err)
			return err;
	}

	mod_timer(&q->adapt_timer, jiffies + HZ / 2);
	return 0;
}

static int pie_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!opts)
		goto nla_put_failure;

	/* convert target from pschedtime to us */
	if (nla_put_u32(skb, TCA_PIE_TARGET,
			((u32)PSCHED_TICKS2NS(READ_ONCE(q->params.target))) /
			NSEC_PER_USEC) ||
	    nla_put_u32(skb, TCA_PIE_LIMIT, READ_ONCE(sch->limit)) ||
	    nla_put_u32(skb, TCA_PIE_TUPDATE,
			jiffies_to_usecs(READ_ONCE(q->params.tupdate))) ||
	    nla_put_u32(skb, TCA_PIE_ALPHA, READ_ONCE(q->params.alpha)) ||
	    nla_put_u32(skb, TCA_PIE_BETA, READ_ONCE(q->params.beta)) ||
	    nla_put_u32(skb, TCA_PIE_ECN, READ_ONCE(q->params.ecn)) ||
	    nla_put_u32(skb, TCA_PIE_BYTEMODE,
			READ_ONCE(q->params.bytemode)) ||
	    nla_put_u32(skb, TCA_PIE_DQ_RATE_ESTIMATOR,
			READ_ONCE(q->params.dq_rate_estimator)))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -1;
}

static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct tc_pie_xstats st = {
		.prob		= q->vars.prob << BITS_PER_BYTE,
		.delay		= ((u32)PSCHED_TICKS2NS(q->vars.qdelay)) /
				  NSEC_PER_USEC,
		.packets_in	= q->stats.packets_in,
		.overlimit	= q->stats.overlimit,
		.maxq		= q->stats.maxq,
		.dropped	= q->stats.dropped,
		.ecn_mark	= q->stats.ecn_mark,
	};

	/* avg_dq_rate is only valid if dq_rate_estimator is enabled */
	st.dq_rate_estimating = q->params.dq_rate_estimator;

	/* unscale and return dq_rate in bytes per sec */
	if (q->params.dq_rate_estimator)
		st.avg_dq_rate = q->vars.avg_dq_rate *
				 (PSCHED_TICKS_PER_SEC) >> PIE_SCALE;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

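/* Unscaling sketch for pie_dump_stats() above (illustrative, not from
 * the original source): avg_dq_rate is kept in bytes per psched tick
 * with PIE_SCALE (8) extra fractional bits, so multiplying by
 * PSCHED_TICKS_PER_SEC and shifting right by PIE_SCALE yields plain
 * bytes per second for user space.
 */
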
static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = qdisc_dequeue_head(sch);

	if (!skb)
		return NULL;

	pie_process_dequeue(skb, &q->params, &q->vars, sch->qstats.backlog);
	return skb;
}

static void pie_reset(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	pie_vars_init(&q->vars);
}

static void pie_destroy(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	q->params.tupdate = 0;
	del_timer_sync(&q->adapt_timer);
}

static struct Qdisc_ops pie_qdisc_ops __read_mostly = {
	.id		= "pie",
	.priv_size	= sizeof(struct pie_sched_data),
	.enqueue	= pie_qdisc_enqueue,
	.dequeue	= pie_qdisc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= pie_init,
	.destroy	= pie_destroy,
	.reset		= pie_reset,
	.change		= pie_change,
	.dump		= pie_dump,
	.dump_stats	= pie_dump_stats,
	.owner		= THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("pie");

static int __init pie_module_init(void)
{
	return register_qdisc(&pie_qdisc_ops);
}

static void __exit pie_module_exit(void)
{
	unregister_qdisc(&pie_qdisc_ops);
}

module_init(pie_module_init);
module_exit(pie_module_exit);

MODULE_DESCRIPTION("Proportional Integral controller Enhanced (PIE) scheduler");
MODULE_AUTHOR("Vijay Subramanian");
MODULE_AUTHOR("Mythili Prabhu");
MODULE_LICENSE("GPL");