// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2013 Cisco Systems, Inc, 2013.
 *
 * Author: Vijay Subramanian <vijaynsu@cisco.com>
 * Author: Mythili Prabhu <mysuryan@cisco.com>
 *
 * ECN support is added by Naeem Khademi <naeemk@ifi.uio.no>
 * University of Oslo, Norway.
 *
 * References:
 * RFC 8033: https://tools.ietf.org/html/rfc8033
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define QUEUE_THRESHOLD 16384
#define DQCOUNT_INVALID -1
#define MAX_PROB 0xffffffffffffffff
#define PIE_SCALE 8

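/* Explanatory note, not from the original file: MAX_PROB is the all-ones
 * 64-bit value and stands for a drop probability of 100%, so MAX_PROB / 10
 * is 10% and so on. QUEUE_THRESHOLD is in bytes (16 KB, roughly ten
 * full-sized Ethernet frames) and gates the drain-rate measurement below.
 * PIE_SCALE = 8 means byte rates are kept left-shifted by 8 bits (1/256
 * fixed-point steps) so the integer division in pie_process_dequeue()
 * retains some fractional precision.
 */
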
/* parameters used */
struct pie_params {
	psched_time_t target;	/* user specified target delay in pschedtime */
	u32 tupdate;		/* timer frequency (in jiffies) */
	u32 limit;		/* number of packets that can be enqueued */
	u32 alpha;		/* alpha and beta are between 0 and 32 */
	u32 beta;		/* and are used for shift relative to 1 */
	bool ecn;		/* true if ecn is enabled */
	bool bytemode;		/* to scale drop early prob based on pkt size */
};

/* variables used */
struct pie_vars {
	u64 prob;		/* probability, scaled to the full u64 range */
	psched_time_t burst_time;
	psched_time_t qdelay;
	psched_time_t qdelay_old;
	u64 dq_count;		/* measured in bytes */
	psched_time_t dq_tstamp;	/* start of drain-rate measurement */
	u64 accu_prob;		/* accumulated drop probability */
	u32 avg_dq_rate;	/* bytes per pschedtime tick, scaled */
	u32 qlen_old;		/* in bytes */
	u8 accu_prob_overflows;	/* overflows of accu_prob */
};

/* statistics gathering */
struct pie_stats {
	u32 packets_in;		/* total number of packets enqueued */
	u32 dropped;		/* packets dropped due to pie_action */
	u32 overlimit;		/* dropped due to lack of space in queue */
	u32 maxq;		/* maximum queue size */
	u32 ecn_mark;		/* packets marked with ECN */
};

/* private data for the Qdisc */
struct pie_sched_data {
	struct pie_params params;
	struct pie_vars vars;
	struct pie_stats stats;
	struct timer_list adapt_timer;
	struct Qdisc *sch;
};

static void pie_params_init(struct pie_params *params)
{
	params->alpha = 2;
	params->beta = 20;
	params->tupdate = usecs_to_jiffies(15 * USEC_PER_MSEC);	/* 15 ms */
	params->limit = 1000;	/* default of 1000 packets */
	params->target = PSCHED_NS2TICKS(15 * NSEC_PER_MSEC);	/* 15 ms */
	params->ecn = false;
	params->bytemode = false;
}

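/* For illustration only (not part of the original file): these defaults
 * correspond roughly to configuring the qdisc from user space with
 * something like
 *
 *	tc qdisc add dev eth0 root pie limit 1000 target 15ms \
 *		tupdate 15ms alpha 2 beta 20
 *
 * where eth0 is a placeholder device name; ecn and bytemode remain
 * disabled unless requested explicitly.
 */
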
static void pie_vars_init(struct pie_vars *vars)
{
	vars->dq_count = DQCOUNT_INVALID;
	vars->accu_prob = 0;
	vars->avg_dq_rate = 0;
	/* default of 150 ms in pschedtime */
	vars->burst_time = PSCHED_NS2TICKS(150 * NSEC_PER_MSEC);
	vars->accu_prob_overflows = 0;
}

static bool drop_early(struct Qdisc *sch, u32 packet_size)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	u64 rnd;
	u64 local_prob = q->vars.prob;
	u32 mtu = psched_mtu(qdisc_dev(sch));

	/* If there is still burst allowance left, skip random early drop */
	if (q->vars.burst_time > 0)
		return false;

	/* If current delay is less than half of target, and
	 * if drop prob is low already, disable early_drop
	 */
	if ((q->vars.qdelay < q->params.target / 2) &&
	    (q->vars.prob < MAX_PROB / 5))
		return false;

	/* If we have fewer than 2 mtu-sized packets, disable drop_early,
	 * similar to min_th in RED
	 */
	if (sch->qstats.backlog < 2 * mtu)
		return false;

	/* If bytemode is turned on, use packet size to compute new
	 * probability. Smaller packets will have lower drop prob in this case
	 */
	if (q->params.bytemode && packet_size <= mtu)
		local_prob = (u64)packet_size * div_u64(local_prob, mtu);
	else
		local_prob = q->vars.prob;

	if (local_prob == 0) {
		q->vars.accu_prob = 0;
		q->vars.accu_prob_overflows = 0;
	}

	if (local_prob > MAX_PROB - q->vars.accu_prob)
		q->vars.accu_prob_overflows++;

	q->vars.accu_prob += local_prob;

	if (q->vars.accu_prob_overflows == 0 &&
	    q->vars.accu_prob < (MAX_PROB / 100) * 85)
		return false;
	if (q->vars.accu_prob_overflows == 8 &&
	    q->vars.accu_prob >= MAX_PROB / 2)
		return true;

	prandom_bytes(&rnd, 8);
	if (rnd < local_prob) {
		q->vars.accu_prob = 0;
		q->vars.accu_prob_overflows = 0;
		return true;
	}

	return false;
}

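/* Worked example of the derandomization above (explanatory comment, not
 * from the original file): accu_prob sums local_prob over consecutive
 * enqueues, with accu_prob_overflows counting 64-bit wraparounds, so the
 * pair together tracks the expected number of drops accumulated so far.
 * Per RFC 8033 section 5.1, drops are suppressed while that sum is below
 * 0.85 (no overflow and accu_prob < 85% of MAX_PROB) and forced once it
 * reaches 8.5 (eight wraps plus MAX_PROB / 2). With prob at a steady 10%,
 * for instance, the first 8 packets after a reset are never dropped, and
 * a drop is guaranteed by roughly the 85th packet even if every random
 * draw goes the packet's way.
 */
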
static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	bool enqueue = false;

	if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
		q->stats.overlimit++;
		goto out;
	}

	if (!drop_early(sch, skb->len)) {
		enqueue = true;
	} else if (q->params.ecn && (q->vars.prob <= MAX_PROB / 10) &&
		   INET_ECN_set_ce(skb)) {
		/* If packet is ecn capable, mark it if drop probability
		 * is lower than 10%, else drop it.
		 */
		q->stats.ecn_mark++;
		enqueue = true;
	}

	/* we can enqueue the packet */
	if (enqueue) {
		q->stats.packets_in++;
		if (qdisc_qlen(sch) > q->stats.maxq)
			q->stats.maxq = qdisc_qlen(sch);

		return qdisc_enqueue_tail(skb, sch);
	}

out:
	q->stats.dropped++;
	q->vars.accu_prob = 0;
	q->vars.accu_prob_overflows = 0;
	return qdisc_drop(skb, sch, to_free);
}

static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
	[TCA_PIE_TARGET] = {.type = NLA_U32},
	[TCA_PIE_LIMIT] = {.type = NLA_U32},
	[TCA_PIE_TUPDATE] = {.type = NLA_U32},
	[TCA_PIE_ALPHA] = {.type = NLA_U32},
	[TCA_PIE_BETA] = {.type = NLA_U32},
	[TCA_PIE_ECN] = {.type = NLA_U32},
	[TCA_PIE_BYTEMODE] = {.type = NLA_U32},
};

static int pie_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_PIE_MAX + 1];
	unsigned int qlen, dropped = 0;
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_PIE_MAX, opt, pie_policy,
					  NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	/* convert from microseconds to pschedtime */
	if (tb[TCA_PIE_TARGET]) {
		/* target is in us */
		u32 target = nla_get_u32(tb[TCA_PIE_TARGET]);

		/* convert to pschedtime */
		q->params.target = PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC);
	}

	/* tupdate is in jiffies */
	if (tb[TCA_PIE_TUPDATE])
		q->params.tupdate =
			usecs_to_jiffies(nla_get_u32(tb[TCA_PIE_TUPDATE]));

	if (tb[TCA_PIE_LIMIT]) {
		u32 limit = nla_get_u32(tb[TCA_PIE_LIMIT]);

		q->params.limit = limit;
		sch->limit = limit;
	}

	if (tb[TCA_PIE_ALPHA])
		q->params.alpha = nla_get_u32(tb[TCA_PIE_ALPHA]);

	if (tb[TCA_PIE_BETA])
		q->params.beta = nla_get_u32(tb[TCA_PIE_BETA]);

	if (tb[TCA_PIE_ECN])
		q->params.ecn = nla_get_u32(tb[TCA_PIE_ECN]);

	if (tb[TCA_PIE_BYTEMODE])
		q->params.bytemode = nla_get_u32(tb[TCA_PIE_BYTEMODE]);

	/* Drop excess packets if new limit is lower */
	qlen = sch->q.qlen;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

		dropped += qdisc_pkt_len(skb);
		qdisc_qstats_backlog_dec(sch, skb);
		rtnl_qdisc_drop(skb, sch);
	}
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	sch_tree_unlock(sch);
	return 0;
}

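/* Unit-conversion example (explanatory, not from the original file):
 * pschedtime ticks are nanoseconds right-shifted by PSCHED_SHIFT (6),
 * i.e. one tick is 64 ns. A TCA_PIE_TARGET of 15000 us therefore becomes
 * 15,000,000 ns >> 6 = 234,375 ticks, and pie_dump() inverts this with
 * PSCHED_TICKS2NS() / NSEC_PER_USEC to report 15000 us back to user
 * space.
 */
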
static void pie_process_dequeue(struct Qdisc *sch, struct sk_buff *skb)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	int qlen = sch->qstats.backlog;	/* current queue size in bytes */

	/* If current queue is about 10 packets or more and dq_count is unset
	 * we have enough packets to calculate the drain rate. Save
	 * current time as dq_tstamp and start measurement cycle.
	 */
	if (qlen >= QUEUE_THRESHOLD && q->vars.dq_count == DQCOUNT_INVALID) {
		q->vars.dq_tstamp = psched_get_time();
		q->vars.dq_count = 0;
	}

	/* Calculate the average drain rate from this value. If queue length
	 * has receded to a small value viz., <= QUEUE_THRESHOLD bytes, reset
	 * the dq_count to -1 as we don't have enough packets to calculate the
	 * drain rate anymore. The following if block is entered only when we
	 * have a substantial queue built up (QUEUE_THRESHOLD bytes or more)
	 * and we calculate the drain rate for the threshold here. dq_count is
	 * in bytes, time difference in psched_time, hence rate is in
	 * bytes/psched_time.
	 */
	if (q->vars.dq_count != DQCOUNT_INVALID) {
		q->vars.dq_count += skb->len;

		if (q->vars.dq_count >= QUEUE_THRESHOLD) {
			psched_time_t now = psched_get_time();
			u32 dtime = now - q->vars.dq_tstamp;
			u32 count = q->vars.dq_count << PIE_SCALE;

			if (dtime == 0)
				return;

			count = count / dtime;

			if (q->vars.avg_dq_rate == 0)
				q->vars.avg_dq_rate = count;
			else
				q->vars.avg_dq_rate =
				    (q->vars.avg_dq_rate -
				     (q->vars.avg_dq_rate >> 3)) + (count >> 3);

			/* If the queue has receded below the threshold, we hold
			 * on to the last drain rate calculated, else we reset
			 * dq_count to 0 to re-enter the if block when the next
			 * packet is dequeued
			 */
			if (qlen < QUEUE_THRESHOLD) {
				q->vars.dq_count = DQCOUNT_INVALID;
			} else {
				q->vars.dq_count = 0;
				q->vars.dq_tstamp = psched_get_time();
			}

			if (q->vars.burst_time > 0) {
				if (q->vars.burst_time > dtime)
					q->vars.burst_time -= dtime;
				else
					q->vars.burst_time = 0;
			}
		}
	}
}

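/* Drain-rate smoothing example (explanatory, not from the original file):
 * the update above is an exponentially weighted moving average with
 * weight 1/8, i.e. avg = 7/8 * avg + 1/8 * sample, written with shifts to
 * stay in integer arithmetic. For instance, if avg_dq_rate holds 800 and
 * a measurement cycle yields count = 1600, the new value is
 * 800 - 100 + 200 = 900.
 */
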
static void calculate_probability(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	u32 qlen = sch->qstats.backlog;	/* queue size in bytes */
	psched_time_t qdelay = 0;	/* in pschedtime */
	psched_time_t qdelay_old = q->vars.qdelay;	/* in pschedtime */
	s64 delta = 0;		/* determines the change in probability */
	u64 oldprob;
	u64 alpha, beta;
	u32 power;
	bool update_prob = true;

	q->vars.qdelay_old = q->vars.qdelay;

	if (q->vars.avg_dq_rate > 0)
		qdelay = (qlen << PIE_SCALE) / q->vars.avg_dq_rate;
	else
		qdelay = 0;

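	/* Delay-estimate note (explanatory, not from the original file):
	 * this is a Little's-law style division, queued bytes over drain
	 * rate. Since avg_dq_rate carries bytes << PIE_SCALE per tick and
	 * qlen is shifted by the same PIE_SCALE here, the scale factors
	 * cancel and qdelay comes out directly in pschedtime ticks.
	 */
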
	/* If qdelay is zero and qlen is not, it means qlen is very small,
	 * less than dequeue_rate, so we do not update probability in this
	 * round.
	 */
	if (qdelay == 0 && qlen != 0)
		update_prob = false;

	/* In the algorithm, alpha and beta are between 0 and 2 with typical
	 * value for alpha as 0.125. In this implementation, we use values 0-32
	 * passed from user space to represent this. Also, alpha and beta have
	 * unit of HZ and need to be scaled before they can be used to update
	 * probability. alpha/beta are updated locally below by scaling down
	 * by 16 to come to 0-2 range.
	 */
	alpha = ((u64)q->params.alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;
	beta = ((u64)q->params.beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4;

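	/* Scaling example (explanatory, not from the original file): the
	 * user-space default alpha = 2 means 2/16 = 0.125 Hz, the typical
	 * value from RFC 8033, and beta = 20 means 20/16 = 1.25 Hz. The
	 * factor MAX_PROB / PSCHED_TICKS_PER_SEC converts these per-second
	 * gains so that multiplying a delay error measured in pschedtime
	 * ticks yields a delta on the MAX_PROB probability scale.
	 */
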
	/* We scale alpha and beta differently depending on how heavy the
	 * congestion is. Please see RFC 8033 for details.
	 */
	if (q->vars.prob < MAX_PROB / 10) {
		alpha >>= 1;
		beta >>= 1;

		power = 100;
		while (q->vars.prob < div_u64(MAX_PROB, power) &&
		       power <= 1000000) {
			alpha >>= 2;
			beta >>= 2;
			power *= 10;
		}
	}

	/* alpha and beta should be between 0 and 32, in multiples of 1/16 */
	delta += alpha * (u64)(qdelay - q->params.target);
	delta += beta * (u64)(qdelay - qdelay_old);

	oldprob = q->vars.prob;

	/* to ensure we increase probability in steps of no more than 2% */
	if (delta > (s64)(MAX_PROB / (100 / 2)) &&
	    q->vars.prob >= MAX_PROB / 10)
		delta = (MAX_PROB / 100) * 2;

	/* Non-linear drop:
	 * Tune drop probability to increase quickly for high delays (>= 250 ms).
	 * 250 ms is derived through experiments and provides error protection.
	 */

	if (qdelay > (PSCHED_NS2TICKS(250 * NSEC_PER_MSEC)))
		delta += MAX_PROB / (100 / 2);

	q->vars.prob += delta;

	if (delta > 0) {
		/* prevent overflow */
		if (q->vars.prob < oldprob) {
			q->vars.prob = MAX_PROB;
			/* Prevent normalization error. If probability is at
			 * maximum value already, we normalize it here, and
			 * skip the check to do a non-linear drop in the next
			 * section.
			 */
			update_prob = false;
		}
	} else {
		/* prevent underflow */
		if (q->vars.prob > oldprob)
			q->vars.prob = 0;
	}

	/* Non-linear drop in probability: Reduce drop probability quickly if
	 * delay is 0 for 2 consecutive Tupdate periods.
	 */

	if (qdelay == 0 && qdelay_old == 0 && update_prob)
		/* Reduce drop probability to 98.4% */
		q->vars.prob -= q->vars.prob / 64u;

	q->vars.qdelay = qdelay;
	q->vars.qlen_old = qlen;

	/* We restart the measurement cycle if the following conditions are met
	 * 1. If the delay has been low for 2 consecutive Tupdate periods
	 * 2. Calculated drop probability is zero
	 * 3. We have at least one estimate for the avg_dq_rate, i.e., it
	 *    is a non-zero value
	 */
	if ((q->vars.qdelay < q->params.target / 2) &&
	    (q->vars.qdelay_old < q->params.target / 2) &&
	    q->vars.prob == 0 &&
	    q->vars.avg_dq_rate > 0)
		pie_vars_init(&q->vars);
}

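/* Single-update walkthrough (explanatory, not from the original file):
 * suppose target is 15 ms, the new qdelay estimate is 20 ms and the
 * previous one was 18 ms. Then delta = alpha * 5 ms + beta * 2 ms (times
 * in ticks, result on the MAX_PROB scale); with prob already >= 10% the
 * increase is clamped to 2% of MAX_PROB per Tupdate, and a delay above
 * 250 ms would add another 2% on top. Conversely, the
 * prob < MAX_PROB / 10 branch above shrinks alpha and beta by further
 * powers of four as prob falls through 1%, 0.1% and so on, so small
 * probabilities are adjusted gently.
 */
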
static void pie_timer(struct timer_list *t)
{
	struct pie_sched_data *q = from_timer(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	calculate_probability(sch);

	/* reset the timer to fire after 'tupdate'. tupdate is in jiffies. */
	if (q->params.tupdate)
		mod_timer(&q->adapt_timer, jiffies + q->params.tupdate);
	spin_unlock(root_lock);
}

static int pie_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	pie_params_init(&q->params);
	pie_vars_init(&q->vars);
	sch->limit = q->params.limit;

	q->sch = sch;
	timer_setup(&q->adapt_timer, pie_timer, 0);

	if (opt) {
		int err = pie_change(sch, opt, extack);

		if (err)
			return err;
	}

	mod_timer(&q->adapt_timer, jiffies + HZ / 2);
	return 0;
}

static int pie_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!opts)
		goto nla_put_failure;

	/* convert target from pschedtime to us */
	if (nla_put_u32(skb, TCA_PIE_TARGET,
			((u32)PSCHED_TICKS2NS(q->params.target)) /
			NSEC_PER_USEC) ||
	    nla_put_u32(skb, TCA_PIE_LIMIT, sch->limit) ||
	    nla_put_u32(skb, TCA_PIE_TUPDATE,
			jiffies_to_usecs(q->params.tupdate)) ||
	    nla_put_u32(skb, TCA_PIE_ALPHA, q->params.alpha) ||
	    nla_put_u32(skb, TCA_PIE_BETA, q->params.beta) ||
	    nla_put_u32(skb, TCA_PIE_ECN, q->params.ecn) ||
	    nla_put_u32(skb, TCA_PIE_BYTEMODE, q->params.bytemode))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -1;
}

static int pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct pie_sched_data *q = qdisc_priv(sch);
	struct tc_pie_xstats st = {
		.prob		= q->vars.prob,
		.delay		= ((u32)PSCHED_TICKS2NS(q->vars.qdelay)) /
				  NSEC_PER_USEC,
		/* unscale and return dq_rate in bytes per sec */
		.avg_dq_rate	= q->vars.avg_dq_rate *
				  (PSCHED_TICKS_PER_SEC) >> PIE_SCALE,
		.packets_in	= q->stats.packets_in,
		.overlimit	= q->stats.overlimit,
		.maxq		= q->stats.maxq,
		.dropped	= q->stats.dropped,
		.ecn_mark	= q->stats.ecn_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

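/* Stats unscaling example (explanatory, not from the original file): with
 * one pschedtime tick = 64 ns there are 15,625,000 ticks per second, so
 * the reported rate is avg_dq_rate * 15,625,000 >> 8, roughly
 * avg_dq_rate * 61,035 bytes per second. An avg_dq_rate of 900 would
 * therefore be reported as about 54.9 MB/s.
 */
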
static struct sk_buff *pie_qdisc_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb = qdisc_dequeue_head(sch);

	if (!skb)
		return NULL;

	pie_process_dequeue(sch, skb);
	return skb;
}

static void pie_reset(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	pie_vars_init(&q->vars);
}

static void pie_destroy(struct Qdisc *sch)
{
	struct pie_sched_data *q = qdisc_priv(sch);

	q->params.tupdate = 0;
	del_timer_sync(&q->adapt_timer);
}

static struct Qdisc_ops pie_qdisc_ops __read_mostly = {
	.id		= "pie",
	.priv_size	= sizeof(struct pie_sched_data),
	.enqueue	= pie_qdisc_enqueue,
	.dequeue	= pie_qdisc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= pie_init,
	.destroy	= pie_destroy,
	.reset		= pie_reset,
	.change		= pie_change,
	.dump		= pie_dump,
	.dump_stats	= pie_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init pie_module_init(void)
{
	return register_qdisc(&pie_qdisc_ops);
}

static void __exit pie_module_exit(void)
{
	unregister_qdisc(&pie_qdisc_ops);
}

module_init(pie_module_init);
module_exit(pie_module_exit);

MODULE_DESCRIPTION("Proportional Integral controller Enhanced (PIE) scheduler");
MODULE_AUTHOR("Vijay Subramanian");
MODULE_AUTHOR("Mythili Prabhu");
MODULE_LICENSE("GPL");