1/*
2 * net/sched/sch_cbq.c Class-Based Queueing discipline.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 *
11 */
12
13#include <linux/module.h>
14#include <linux/slab.h>
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/string.h>
18#include <linux/errno.h>
19#include <linux/skbuff.h>
20#include <net/netlink.h>
21#include <net/pkt_sched.h>
22
23
24/* Class-Based Queueing (CBQ) algorithm.
25 =======================================
26
27 Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
28 Management Models for Packet Networks",
29 IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995
30
31 [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995
32
33 [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
34 Parameters", 1996
35
36 [4] Sally Floyd and Michael Speer, "Experimental Results
37 for Class-Based Queueing", 1998, not published.
38
39 -----------------------------------------------------------------------
40
41 Algorithm skeleton was taken from NS simulator cbq.cc.
42 If someone wants to check this code against the LBL version,
43 he should take into account that ONLY the skeleton was borrowed,
44 the implementation is different. Particularly:
45
46 --- The WRR algorithm is different. Our version looks more
47 reasonable (I hope) and works when quanta are allowed to be
48 less than MTU, which is always the case when real time classes
49 have small rates. Note that the statement of [3] is
50 incomplete: delay may actually be estimated even if class
51 per-round allotment is less than MTU. Namely, if per-round
52 allotment is W*r_i, and r_1+...+r_k = r < 1
53
54 delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B
55
56 In the worst case we have IntServ estimate with D = W*r+k*MTU
57 and C = MTU*r. The proof (if correct at all) is trivial.
58
59
60 --- It seems that cbq-2.0 is not very accurate. At least, I cannot
61 interpret some places, which look like wrong translations
62 from NS. Anyone is advised to find these differences
63 and explain to me, why I am wrong 8).
64
65 --- Linux has no EOI event, so we cannot estimate true class
66 idle time. The workaround is to consider the next dequeue event
67 as a sign that the previous packet has finished. This is wrong because of
68 internal device queueing, but on a permanently loaded link it is true.
69 Moreover, combined with clock integrator, this scheme looks
70 very close to an ideal solution. */
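/* Worked example of the bound above (illustrative numbers only, they are
 * not taken from [1]-[4]): let B = 1.25 Mbyte/s (10 Mbit/s), MTU = 1500
 * bytes, k = 4 classes with r = r_1+...+r_4 = 0.5 and an allotment scale
 * W = 8000 bytes, so W*r = 4000 bytes. A class with r_i = 0.05 gets a
 * per-round allotment W*r_i = 400 bytes, well below MTU; reading [x] as
 * rounding up, [MTU/(W*r_i)] = [1500/400] = 4 and
 *
 *	delay_i <= (4*4000 + 4000 + 4*1500)/1250000 s ~= 20.8 ms,
 *
 * i.e. the bound stays finite even though the per-round allotment is
 * smaller than MTU.
 */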
71
72struct cbq_sched_data;
73
74
75struct cbq_class {
76 struct Qdisc_class_common common;
77 struct cbq_class *next_alive; /* next class with backlog in this priority band */
78
79/* Parameters */
80 unsigned char priority; /* class priority */
81 unsigned char priority2; /* priority to be used after overlimit */
82 unsigned char ewma_log; /* time constant for idle time calculation */
83 unsigned char ovl_strategy;
84#ifdef CONFIG_NET_CLS_ACT
85 unsigned char police;
86#endif
87
88 u32 defmap;
89
90 /* Link-sharing scheduler parameters */
91 long maxidle; /* Class parameters: see below. */
92 long offtime;
93 long minidle;
94 u32 avpkt;
95 struct qdisc_rate_table *R_tab;
96
97 /* Overlimit strategy parameters */
98 void (*overlimit)(struct cbq_class *cl);
99 psched_tdiff_t penalty;
100
101 /* General scheduler (WRR) parameters */
102 long allot;
103 long quantum; /* Allotment per WRR round */
104 long weight; /* Relative allotment: see below */
105
106 struct Qdisc *qdisc; /* Ptr to CBQ discipline */
107 struct cbq_class *split; /* Ptr to split node */
108 struct cbq_class *share; /* Ptr to LS parent in the class tree */
109 struct cbq_class *tparent; /* Ptr to tree parent in the class tree */
110 struct cbq_class *borrow; /* NULL if class is bandwidth limited;
111 parent otherwise */
112 struct cbq_class *sibling; /* Sibling chain */
113 struct cbq_class *children; /* Pointer to children chain */
114
115 struct Qdisc *q; /* Elementary queueing discipline */
116
117
118/* Variables */
119 unsigned char cpriority; /* Effective priority */
120 unsigned char delayed;
121 unsigned char level; /* level of the class in hierarchy:
122 0 for leaf classes, and maximal
123 level of children + 1 for nodes.
124 */
125
126 psched_time_t last; /* Last end of service */
127 psched_time_t undertime;
128 long avgidle;
129 long deficit; /* Saved deficit for WRR */
130 psched_time_t penalized;
131 struct gnet_stats_basic_packed bstats;
132 struct gnet_stats_queue qstats;
133 struct gnet_stats_rate_est64 rate_est;
134 struct tc_cbq_xstats xstats;
135
136 struct tcf_proto __rcu *filter_list;
137
138 int refcnt;
139 int filters;
140
141 struct cbq_class *defaults[TC_PRIO_MAX + 1];
142};
143
144struct cbq_sched_data {
145 struct Qdisc_class_hash clhash; /* Hash table of all classes */
146 int nclasses[TC_CBQ_MAXPRIO + 1];
147 unsigned int quanta[TC_CBQ_MAXPRIO + 1];
148
149 struct cbq_class link;
150
151 unsigned int activemask;
152 struct cbq_class *active[TC_CBQ_MAXPRIO + 1]; /* List of all classes
153 with backlog */
154
155#ifdef CONFIG_NET_CLS_ACT
156 struct cbq_class *rx_class;
157#endif
158 struct cbq_class *tx_class;
159 struct cbq_class *tx_borrowed;
160 int tx_len;
161 psched_time_t now; /* Cached timestamp */
162 unsigned int pmask;
163
164 struct hrtimer delay_timer;
165 struct qdisc_watchdog watchdog; /* Watchdog timer,
166 started when CBQ has
167 backlog, but cannot
168 transmit just now */
169 psched_tdiff_t wd_expires;
170 int toplevel;
171 u32 hgenerator;
172};
173
174
175#define L2T(cl, len) qdisc_l2t((cl)->R_tab, len)
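/* L2T ("length to time") converts a packet length into its transmission
 * time at the class's configured rate, using the precomputed rate table
 * attached to the class.
 */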
176
177static inline struct cbq_class *
178cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
179{
180 struct Qdisc_class_common *clc;
181
182 clc = qdisc_class_find(&q->clhash, classid);
183 if (clc == NULL)
184 return NULL;
185 return container_of(clc, struct cbq_class, common);
186}
187
188#ifdef CONFIG_NET_CLS_ACT
189
190static struct cbq_class *
191cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
192{
193 struct cbq_class *cl;
194
195 for (cl = this->tparent; cl; cl = cl->tparent) {
196 struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];
197
198 if (new != NULL && new != this)
199 return new;
200 }
201 return NULL;
202}
203
204#endif
205
206/* Classify packet. The procedure is pretty complicated, but
207 * it allows us to combine link sharing and priority scheduling
208 * transparently.
209 *
210 * Namely, you can put link sharing rules (f.e. route based) at root of CBQ,
211 * so that it resolves to split nodes. Then packets are classified
212 * by logical priority, or a more specific classifier may be attached
213 * to the split node.
214 */
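/* Illustrative walk (the handles below are hypothetical, not defaults):
 * with sch->handle 1: and skb->priority set to 1:10, step 1 returns class
 * 1:10 directly. Otherwise the root class's filters run; if they select an
 * inner (split) class, that class's own filters and defmap are consulted
 * on the next iteration, until a leaf (level 0) class is found or the
 * fallback defaults apply.
 */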
215
216static struct cbq_class *
217cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
218{
219 struct cbq_sched_data *q = qdisc_priv(sch);
220 struct cbq_class *head = &q->link;
221 struct cbq_class **defmap;
222 struct cbq_class *cl = NULL;
223 u32 prio = skb->priority;
224 struct tcf_proto *fl;
225 struct tcf_result res;
226
227 /*
228 * Step 1. If skb->priority points to one of our classes, use it.
229 */
230 if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
231 (cl = cbq_class_lookup(q, prio)) != NULL)
232 return cl;
233
234 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
235 for (;;) {
236 int result = 0;
237 defmap = head->defaults;
238
239 fl = rcu_dereference_bh(head->filter_list);
240 /*
241 * Step 2+n. Apply classifier.
242 */
243 result = tc_classify(skb, fl, &res, true);
244 if (!fl || result < 0)
245 goto fallback;
246
247 cl = (void *)res.class;
248 if (!cl) {
249 if (TC_H_MAJ(res.classid))
250 cl = cbq_class_lookup(q, res.classid);
251 else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
252 cl = defmap[TC_PRIO_BESTEFFORT];
253
254 if (cl == NULL)
255 goto fallback;
256 }
257 if (cl->level >= head->level)
258 goto fallback;
259#ifdef CONFIG_NET_CLS_ACT
260 switch (result) {
261 case TC_ACT_QUEUED:
262 case TC_ACT_STOLEN:
263 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
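 /* fall through: stolen/queued packets also return NULL below */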
264 case TC_ACT_SHOT:
265 return NULL;
266 case TC_ACT_RECLASSIFY:
267 return cbq_reclassify(skb, cl);
268 }
269#endif
270 if (cl->level == 0)
271 return cl;
272
273 /*
274 * Step 3+n. If classifier selected a link sharing class,
275 * apply agency specific classifier.
276 * Repeat this procedure until we hit a leaf node.
277 */
278 head = cl;
279 }
280
281fallback:
282 cl = head;
283
284 /*
285 * Step 4. No success...
286 */
287 if (TC_H_MAJ(prio) == 0 &&
288 !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
289 !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
290 return head;
291
292 return cl;
293}
294
295/*
296 * A packet has just been enqueued on an empty class.
297 * cbq_activate_class adds the class to the tail of the active class list
298 * of its priority band.
299 */
300
301static inline void cbq_activate_class(struct cbq_class *cl)
302{
303 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
304 int prio = cl->cpriority;
305 struct cbq_class *cl_tail;
306
307 cl_tail = q->active[prio];
308 q->active[prio] = cl;
309
310 if (cl_tail != NULL) {
311 cl->next_alive = cl_tail->next_alive;
312 cl_tail->next_alive = cl;
313 } else {
314 cl->next_alive = cl;
315 q->activemask |= (1<<prio);
316 }
317}
318
319/*
320 * Unlink class from active chain.
321 * Note that this same procedure is done directly in cbq_dequeue*
322 * during round-robin procedure.
323 */
324
325static void cbq_deactivate_class(struct cbq_class *this)
326{
327 struct cbq_sched_data *q = qdisc_priv(this->qdisc);
328 int prio = this->cpriority;
329 struct cbq_class *cl;
330 struct cbq_class *cl_prev = q->active[prio];
331
332 do {
333 cl = cl_prev->next_alive;
334 if (cl == this) {
335 cl_prev->next_alive = cl->next_alive;
336 cl->next_alive = NULL;
337
338 if (cl == q->active[prio]) {
339 q->active[prio] = cl_prev;
340 if (cl == q->active[prio]) {
341 q->active[prio] = NULL;
342 q->activemask &= ~(1<<prio);
343 return;
344 }
345 }
346 return;
347 }
348 } while ((cl_prev = cl) != q->active[prio]);
349}
350
351static void
352cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
353{
354 int toplevel = q->toplevel;
355
356 if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) {
357 psched_time_t now = psched_get_time();
358
359 do {
360 if (cl->undertime < now) {
361 q->toplevel = cl->level;
362 return;
363 }
364 } while ((cl = cl->borrow) != NULL && toplevel > cl->level);
365 }
366}
367
368static int
369cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
370{
371 struct cbq_sched_data *q = qdisc_priv(sch);
372 int uninitialized_var(ret);
373 struct cbq_class *cl = cbq_classify(skb, sch, &ret);
374
375#ifdef CONFIG_NET_CLS_ACT
376 q->rx_class = cl;
377#endif
378 if (cl == NULL) {
379 if (ret & __NET_XMIT_BYPASS)
380 qdisc_qstats_drop(sch);
381 kfree_skb(skb);
382 return ret;
383 }
384
385#ifdef CONFIG_NET_CLS_ACT
386 cl->q->__parent = sch;
387#endif
388 ret = qdisc_enqueue(skb, cl->q);
389 if (ret == NET_XMIT_SUCCESS) {
390 sch->q.qlen++;
391 cbq_mark_toplevel(q, cl);
392 if (!cl->next_alive)
393 cbq_activate_class(cl);
394 return ret;
395 }
396
397 if (net_xmit_drop_count(ret)) {
398 qdisc_qstats_drop(sch);
399 cbq_mark_toplevel(q, cl);
400 cl->qstats.drops++;
401 }
402 return ret;
403}
404
405/* Overlimit actions */
406
407/* TC_CBQ_OVL_CLASSIC: (default) penalize leaf class by adding offtime */
408
409static void cbq_ovl_classic(struct cbq_class *cl)
410{
411 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
412 psched_tdiff_t delay = cl->undertime - q->now;
413
414 if (!cl->delayed) {
415 delay += cl->offtime;
416
417 /*
418 * Class goes to sleep, so that it will have no
419 * chance to work avgidle. Let's forgive it 8)
420 *
421 * BTW cbq-2.0 has a bug in this
422 * place, apparently they forgot to shift it by cl->ewma_log.
423 */
424 if (cl->avgidle < 0)
425 delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
426 if (cl->avgidle < cl->minidle)
427 cl->avgidle = cl->minidle;
428 if (delay <= 0)
429 delay = 1;
430 cl->undertime = q->now + delay;
431
432 cl->xstats.overactions++;
433 cl->delayed = 1;
434 }
435 if (q->wd_expires == 0 || q->wd_expires > delay)
436 q->wd_expires = delay;
437
438 /* Dirty work! We must schedule wakeups based on
439 * real available rate, rather than leaf rate,
440 * which may be tiny (even zero).
441 */
442 if (q->toplevel == TC_CBQ_MAXLEVEL) {
443 struct cbq_class *b;
444 psched_tdiff_t base_delay = q->wd_expires;
445
446 for (b = cl->borrow; b; b = b->borrow) {
447 delay = b->undertime - q->now;
448 if (delay < base_delay) {
449 if (delay <= 0)
450 delay = 1;
451 base_delay = delay;
452 }
453 }
454
455 q->wd_expires = base_delay;
456 }
457}
458
459/* TC_CBQ_OVL_RCLASSIC: penalize classes in the hierarchy by offtime
460 * when they go overlimit
461 */
462
463static void cbq_ovl_rclassic(struct cbq_class *cl)
464{
465 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
466 struct cbq_class *this = cl;
467
468 do {
469 if (cl->level > q->toplevel) {
470 cl = NULL;
471 break;
472 }
473 } while ((cl = cl->borrow) != NULL);
474
475 if (cl == NULL)
476 cl = this;
477 cbq_ovl_classic(cl);
478}
479
480/* TC_CBQ_OVL_DELAY: delay until the class goes underlimit */
481
482static void cbq_ovl_delay(struct cbq_class *cl)
483{
484 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
485 psched_tdiff_t delay = cl->undertime - q->now;
486
487 if (test_bit(__QDISC_STATE_DEACTIVATED,
488 &qdisc_root_sleeping(cl->qdisc)->state))
489 return;
490
491 if (!cl->delayed) {
492 psched_time_t sched = q->now;
493 ktime_t expires;
494
495 delay += cl->offtime;
496 if (cl->avgidle < 0)
497 delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
498 if (cl->avgidle < cl->minidle)
499 cl->avgidle = cl->minidle;
500 cl->undertime = q->now + delay;
501
502 if (delay > 0) {
503 sched += delay + cl->penalty;
504 cl->penalized = sched;
505 cl->cpriority = TC_CBQ_MAXPRIO;
506 q->pmask |= (1<<TC_CBQ_MAXPRIO);
507
508 expires = ns_to_ktime(PSCHED_TICKS2NS(sched));
509 if (hrtimer_try_to_cancel(&q->delay_timer) &&
510 ktime_to_ns(ktime_sub(
511 hrtimer_get_expires(&q->delay_timer),
512 expires)) > 0)
513 hrtimer_set_expires(&q->delay_timer, expires);
514 hrtimer_restart(&q->delay_timer);
515 cl->delayed = 1;
516 cl->xstats.overactions++;
517 return;
518 }
519 delay = 1;
520 }
521 if (q->wd_expires == 0 || q->wd_expires > delay)
522 q->wd_expires = delay;
523}
524
525/* TC_CBQ_OVL_LOWPRIO: penalize class by lowering its priority band */
526
527static void cbq_ovl_lowprio(struct cbq_class *cl)
528{
529 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
530
531 cl->penalized = q->now + cl->penalty;
532
533 if (cl->cpriority != cl->priority2) {
534 cl->cpriority = cl->priority2;
535 q->pmask |= (1<<cl->cpriority);
536 cl->xstats.overactions++;
537 }
538 cbq_ovl_classic(cl);
539}
540
541/* TC_CBQ_OVL_DROP: penalize class by dropping */
542
543static void cbq_ovl_drop(struct cbq_class *cl)
544{
545 if (cl->q->ops->drop)
546 if (cl->q->ops->drop(cl->q))
547 cl->qdisc->q.qlen--;
548 cl->xstats.overactions++;
549 cbq_ovl_classic(cl);
550}
551
552static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
553 psched_time_t now)
554{
555 struct cbq_class *cl;
556 struct cbq_class *cl_prev = q->active[prio];
557 psched_time_t sched = now;
558
559 if (cl_prev == NULL)
560 return 0;
561
562 do {
563 cl = cl_prev->next_alive;
564 if (now - cl->penalized > 0) {
565 cl_prev->next_alive = cl->next_alive;
566 cl->next_alive = NULL;
567 cl->cpriority = cl->priority;
568 cl->delayed = 0;
569 cbq_activate_class(cl);
570
571 if (cl == q->active[prio]) {
572 q->active[prio] = cl_prev;
573 if (cl == q->active[prio]) {
574 q->active[prio] = NULL;
575 return 0;
576 }
577 }
578
579 cl = cl_prev->next_alive;
580 } else if (sched - cl->penalized > 0)
581 sched = cl->penalized;
582 } while ((cl_prev = cl) != q->active[prio]);
583
584 return sched - now;
585}
586
587static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
588{
589 struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
590 delay_timer);
591 struct Qdisc *sch = q->watchdog.qdisc;
592 psched_time_t now;
593 psched_tdiff_t delay = 0;
594 unsigned int pmask;
595
596 now = psched_get_time();
597
598 pmask = q->pmask;
599 q->pmask = 0;
600
601 while (pmask) {
602 int prio = ffz(~pmask);
603 psched_tdiff_t tmp;
604
605 pmask &= ~(1<<prio);
606
607 tmp = cbq_undelay_prio(q, prio, now);
608 if (tmp > 0) {
609 q->pmask |= 1<<prio;
610 if (tmp < delay || delay == 0)
611 delay = tmp;
612 }
613 }
614
615 if (delay) {
616 ktime_t time;
617
618 time = ktime_set(0, 0);
619 time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
620 hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
621 }
622
623 qdisc_unthrottled(sch);
624 __netif_schedule(qdisc_root(sch));
625 return HRTIMER_NORESTART;
626}
627
628#ifdef CONFIG_NET_CLS_ACT
629static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
630{
631 struct Qdisc *sch = child->__parent;
632 struct cbq_sched_data *q = qdisc_priv(sch);
633 struct cbq_class *cl = q->rx_class;
634
635 q->rx_class = NULL;
636
637 if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) {
638 int ret;
639
640 cbq_mark_toplevel(q, cl);
641
642 q->rx_class = cl;
643 cl->q->__parent = sch;
644
645 ret = qdisc_enqueue(skb, cl->q);
646 if (ret == NET_XMIT_SUCCESS) {
647 sch->q.qlen++;
648 if (!cl->next_alive)
649 cbq_activate_class(cl);
650 return 0;
651 }
652 if (net_xmit_drop_count(ret))
653 qdisc_qstats_drop(sch);
654 return 0;
655 }
656
657 qdisc_qstats_drop(sch);
658 return -1;
659}
660#endif
661
662/*
663 * This is a mission-critical procedure.
664 *
665 * We "regenerate" the toplevel cutoff if the transmitting class
666 * has backlog and is not regulated. This is not part of the
667 * original CBQ description, but it looks more reasonable.
668 * It may well be wrong; this question needs further investigation.
669 */
670
671static inline void
672cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
673 struct cbq_class *borrowed)
674{
675 if (cl && q->toplevel >= borrowed->level) {
676 if (cl->q->q.qlen > 1) {
677 do {
678 if (borrowed->undertime == PSCHED_PASTPERFECT) {
679 q->toplevel = borrowed->level;
680 return;
681 }
682 } while ((borrowed = borrowed->borrow) != NULL);
683 }
684#if 0
685 /* It is not necessary now. Uncommenting it
686 will save CPU cycles, but decrease fairness.
687 */
688 q->toplevel = TC_CBQ_MAXLEVEL;
689#endif
690 }
691}
692
693static void
694cbq_update(struct cbq_sched_data *q)
695{
696 struct cbq_class *this = q->tx_class;
697 struct cbq_class *cl = this;
698 int len = q->tx_len;
699 psched_time_t now;
700
701 q->tx_class = NULL;
702 /* Time integrator. We calculate EOS time
703 * by adding expected packet transmission time.
704 */
705 now = q->now + L2T(&q->link, len);
706
707 for ( ; cl; cl = cl->share) {
708 long avgidle = cl->avgidle;
709 long idle;
710
711 cl->bstats.packets++;
712 cl->bstats.bytes += len;
713
714 /*
715 * (now - last) is total time between packet right edges.
716 * (last_pktlen/rate) is "virtual" busy time, so that
717 *
718 * idle = (now - last) - last_pktlen/rate
719 */
720
721 idle = now - cl->last;
722 if ((unsigned long)idle > 128*1024*1024) {
723 avgidle = cl->maxidle;
724 } else {
725 idle -= L2T(cl, len);
726
727 /* true_avgidle := (1-W)*true_avgidle + W*idle,
728 * where W=2^{-ewma_log}. But cl->avgidle is scaled:
729 * cl->avgidle == true_avgidle/W,
730 * hence:
731 */
732 avgidle += idle - (avgidle>>cl->ewma_log);
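 /* Example with assumed values: ewma_log = 2 (W = 1/4), scaled
 * avgidle = 800 (true_avgidle = 200) and idle = 400 give
 * 800 + 400 - (800>>2) = 1000, i.e. true_avgidle becomes
 * 0.75*200 + 0.25*400 = 250, matching the formula above.
 */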
733 }
734
735 if (avgidle <= 0) {
736 /* Overlimit or at-limit */
737
738 if (avgidle < cl->minidle)
739 avgidle = cl->minidle;
740
741 cl->avgidle = avgidle;
742
743 /* Calculate expected time, when this class
744 * will be allowed to send.
745 * It will occur, when:
746 * (1-W)*true_avgidle + W*delay = 0, i.e.
747 * idle = (1/W - 1)*(-true_avgidle)
748 * or
749 * idle = (1 - W)*(-cl->avgidle);
750 */
751 idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);
752
753 /*
754 * That is not all.
755 * To maintain the rate allocated to the class,
756 * we add to undertime virtual clock,
757 * necessary to complete transmitted packet.
758 * (len/phys_bandwidth has been already passed
759 * to the moment of cbq_update)
760 */
761
762 idle -= L2T(&q->link, len);
763 idle += L2T(cl, len);
764
765 cl->undertime = now + idle;
766 } else {
767 /* Underlimit */
768
769 cl->undertime = PSCHED_PASTPERFECT;
770 if (avgidle > cl->maxidle)
771 cl->avgidle = cl->maxidle;
772 else
773 cl->avgidle = avgidle;
774 }
775 if ((s64)(now - cl->last) > 0)
776 cl->last = now;
777 }
778
779 cbq_update_toplevel(q, this, q->tx_borrowed);
780}
781
782static inline struct cbq_class *
783cbq_under_limit(struct cbq_class *cl)
784{
785 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
786 struct cbq_class *this_cl = cl;
787
788 if (cl->tparent == NULL)
789 return cl;
790
791 if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
792 cl->delayed = 0;
793 return cl;
794 }
795
796 do {
797 /* This is a very suspicious place. At the moment the overlimit
798 * action is generated for non-bounded classes
799 * only if the link is completely congested.
800 * Though this agrees with the ancestor-only paradigm,
801 * it looks very odd. In particular,
802 * it means that this chunk of code will either
803 * never be called or will result in strong amplification
804 * of burstiness. Dangerous and silly, but
805 * no other solution exists.
806 */
807 cl = cl->borrow;
808 if (!cl) {
809 this_cl->qstats.overlimits++;
810 this_cl->overlimit(this_cl);
811 return NULL;
812 }
813 if (cl->level > q->toplevel)
814 return NULL;
815 } while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);
816
817 cl->delayed = 0;
818 return cl;
819}
820
821static inline struct sk_buff *
822cbq_dequeue_prio(struct Qdisc *sch, int prio)
823{
824 struct cbq_sched_data *q = qdisc_priv(sch);
825 struct cbq_class *cl_tail, *cl_prev, *cl;
826 struct sk_buff *skb;
827 int deficit;
828
829 cl_tail = cl_prev = q->active[prio];
830 cl = cl_prev->next_alive;
831
832 do {
833 deficit = 0;
834
835 /* Start round */
836 do {
837 struct cbq_class *borrow = cl;
838
839 if (cl->q->q.qlen &&
840 (borrow = cbq_under_limit(cl)) == NULL)
841 goto skip_class;
842
843 if (cl->deficit <= 0) {
844 /* Class exhausted its allotment per
845 * this round. Switch to the next one.
846 */
847 deficit = 1;
848 cl->deficit += cl->quantum;
849 goto next_class;
850 }
851
852 skb = cl->q->dequeue(cl->q);
853
854 /* Class did not give us any skb :-(
855 * It could occur even if cl->q->q.qlen != 0
856 * f.e. if cl->q == "tbf"
857 */
858 if (skb == NULL)
859 goto skip_class;
860
861 cl->deficit -= qdisc_pkt_len(skb);
862 q->tx_class = cl;
863 q->tx_borrowed = borrow;
864 if (borrow != cl) {
865#ifndef CBQ_XSTATS_BORROWS_BYTES
866 borrow->xstats.borrows++;
867 cl->xstats.borrows++;
868#else
869 borrow->xstats.borrows += qdisc_pkt_len(skb);
870 cl->xstats.borrows += qdisc_pkt_len(skb);
871#endif
872 }
873 q->tx_len = qdisc_pkt_len(skb);
874
875 if (cl->deficit <= 0) {
876 q->active[prio] = cl;
877 cl = cl->next_alive;
878 cl->deficit += cl->quantum;
879 }
880 return skb;
881
882skip_class:
883 if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
884 /* Class is empty or penalized.
885 * Unlink it from active chain.
886 */
887 cl_prev->next_alive = cl->next_alive;
888 cl->next_alive = NULL;
889
890 /* Did cl_tail point to it? */
891 if (cl == cl_tail) {
892 /* Repair it! */
893 cl_tail = cl_prev;
894
895 /* Was it the last class in this band? */
896 if (cl == cl_tail) {
897 /* Kill the band! */
898 q->active[prio] = NULL;
899 q->activemask &= ~(1<<prio);
900 if (cl->q->q.qlen)
901 cbq_activate_class(cl);
902 return NULL;
903 }
904
905 q->active[prio] = cl_tail;
906 }
907 if (cl->q->q.qlen)
908 cbq_activate_class(cl);
909
910 cl = cl_prev;
911 }
912
913next_class:
914 cl_prev = cl;
915 cl = cl->next_alive;
916 } while (cl_prev != cl_tail);
917 } while (deficit);
918
919 q->active[prio] = cl_prev;
920
921 return NULL;
922}
923
924static inline struct sk_buff *
925cbq_dequeue_1(struct Qdisc *sch)
926{
927 struct cbq_sched_data *q = qdisc_priv(sch);
928 struct sk_buff *skb;
929 unsigned int activemask;
930
931 activemask = q->activemask & 0xFF;
932 while (activemask) {
933 int prio = ffz(~activemask);
934 activemask &= ~(1<<prio);
935 skb = cbq_dequeue_prio(sch, prio);
936 if (skb)
937 return skb;
938 }
939 return NULL;
940}
941
942static struct sk_buff *
943cbq_dequeue(struct Qdisc *sch)
944{
945 struct sk_buff *skb;
946 struct cbq_sched_data *q = qdisc_priv(sch);
947 psched_time_t now;
948
949 now = psched_get_time();
950
951 if (q->tx_class)
952 cbq_update(q);
953
954 q->now = now;
955
956 for (;;) {
957 q->wd_expires = 0;
958
959 skb = cbq_dequeue_1(sch);
960 if (skb) {
961 qdisc_bstats_update(sch, skb);
962 sch->q.qlen--;
963 qdisc_unthrottled(sch);
964 return skb;
965 }
966
967 /* All the classes are overlimit.
968 *
969 * It is possible, if:
970 *
971 * 1. Scheduler is empty.
972 * 2. Toplevel cutoff inhibited borrowing.
973 * 3. Root class is overlimit.
974 *
975 * Reset the 2nd and 3rd conditions and retry.
976 *
977 * Note that NS and cbq-2.0 are buggy here: peeking
978 * an arbitrary class is appropriate for ancestor-only
979 * sharing, but not for the toplevel algorithm.
980 *
981 * Our version is better but slower, because it requires
982 * two passes; this is unavoidable with top-level sharing.
983 */
984
985 if (q->toplevel == TC_CBQ_MAXLEVEL &&
986 q->link.undertime == PSCHED_PASTPERFECT)
987 break;
988
989 q->toplevel = TC_CBQ_MAXLEVEL;
990 q->link.undertime = PSCHED_PASTPERFECT;
991 }
992
993 /* No packets in scheduler or nobody wants to give them to us :-(
994 * Sigh... start watchdog timer in the last case.
995 */
996
997 if (sch->q.qlen) {
998 qdisc_qstats_overlimit(sch);
999 if (q->wd_expires)
1000 qdisc_watchdog_schedule(&q->watchdog,
1001 now + q->wd_expires);
1002 }
1003 return NULL;
1004}
1005
1006/* CBQ class maintenance routines */
1007
1008static void cbq_adjust_levels(struct cbq_class *this)
1009{
1010 if (this == NULL)
1011 return;
1012
1013 do {
1014 int level = 0;
1015 struct cbq_class *cl;
1016
1017 cl = this->children;
1018 if (cl) {
1019 do {
1020 if (cl->level > level)
1021 level = cl->level;
1022 } while ((cl = cl->sibling) != this->children);
1023 }
1024 this->level = level + 1;
1025 } while ((this = this->tparent) != NULL);
1026}
1027
1028static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
1029{
1030 struct cbq_class *cl;
1031 unsigned int h;
1032
1033 if (q->quanta[prio] == 0)
1034 return;
1035
1036 for (h = 0; h < q->clhash.hashsize; h++) {
1037 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
1038 /* BUGGGG... Beware! This expression suffers from
1039 * arithmetic overflows!
1040 */
1041 if (cl->priority == prio) {
1042 cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
1043 q->quanta[prio];
1044 }
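 /* Example with assumed values: two classes at this prio, both with
 * allot 1514, weights 2 and 1 (nclasses = 2, quanta = 3), get
 * quanta of 2*1514*2/3 = 2018 and 1*1514*2/3 = 1009 bytes.
 */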
1045 if (cl->quantum <= 0 ||
1046 cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
1047 pr_warn("CBQ: class %08x has bad quantum==%ld, repaired.\n",
1048 cl->common.classid, cl->quantum);
1049 cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
1050 }
1051 }
1052 }
1053}
1054
1055static void cbq_sync_defmap(struct cbq_class *cl)
1056{
1057 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
1058 struct cbq_class *split = cl->split;
1059 unsigned int h;
1060 int i;
1061
1062 if (split == NULL)
1063 return;
1064
1065 for (i = 0; i <= TC_PRIO_MAX; i++) {
1066 if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
1067 split->defaults[i] = NULL;
1068 }
1069
1070 for (i = 0; i <= TC_PRIO_MAX; i++) {
1071 int level = split->level;
1072
1073 if (split->defaults[i])
1074 continue;
1075
1076 for (h = 0; h < q->clhash.hashsize; h++) {
1077 struct cbq_class *c;
1078
1079 hlist_for_each_entry(c, &q->clhash.hash[h],
1080 common.hnode) {
1081 if (c->split == split && c->level < level &&
1082 c->defmap & (1<<i)) {
1083 split->defaults[i] = c;
1084 level = c->level;
1085 }
1086 }
1087 }
1088 }
1089}
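/* Example of the defmap convention (class ids here are hypothetical):
 * a class under split node 1:1 with defmap 0x06 becomes the default
 * destination for packets whose TC_PRIO value is 1 or 2 whenever
 * classification at 1:1 falls back to the priority defaults;
 * cbq_sync_defmap() above keeps the split node's defaults[] table
 * consistent with the defmap bits of the classes below it.
 */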
1090
1091static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
1092{
1093 struct cbq_class *split = NULL;
1094
1095 if (splitid == 0) {
1096 split = cl->split;
1097 if (!split)
1098 return;
1099 splitid = split->common.classid;
1100 }
1101
1102 if (split == NULL || split->common.classid != splitid) {
1103 for (split = cl->tparent; split; split = split->tparent)
1104 if (split->common.classid == splitid)
1105 break;
1106 }
1107
1108 if (split == NULL)
1109 return;
1110
1111 if (cl->split != split) {
1112 cl->defmap = 0;
1113 cbq_sync_defmap(cl);
1114 cl->split = split;
1115 cl->defmap = def & mask;
1116 } else
1117 cl->defmap = (cl->defmap & ~mask) | (def & mask);
1118
1119 cbq_sync_defmap(cl);
1120}
1121
1122static void cbq_unlink_class(struct cbq_class *this)
1123{
1124 struct cbq_class *cl, **clp;
1125 struct cbq_sched_data *q = qdisc_priv(this->qdisc);
1126
1127 qdisc_class_hash_remove(&q->clhash, &this->common);
1128
1129 if (this->tparent) {
1130 clp = &this->sibling;
1131 cl = *clp;
1132 do {
1133 if (cl == this) {
1134 *clp = cl->sibling;
1135 break;
1136 }
1137 clp = &cl->sibling;
1138 } while ((cl = *clp) != this->sibling);
1139
1140 if (this->tparent->children == this) {
1141 this->tparent->children = this->sibling;
1142 if (this->sibling == this)
1143 this->tparent->children = NULL;
1144 }
1145 } else {
1146 WARN_ON(this->sibling != this);
1147 }
1148}
1149
1150static void cbq_link_class(struct cbq_class *this)
1151{
1152 struct cbq_sched_data *q = qdisc_priv(this->qdisc);
1153 struct cbq_class *parent = this->tparent;
1154
1155 this->sibling = this;
1156 qdisc_class_hash_insert(&q->clhash, &this->common);
1157
1158 if (parent == NULL)
1159 return;
1160
1161 if (parent->children == NULL) {
1162 parent->children = this;
1163 } else {
1164 this->sibling = parent->children->sibling;
1165 parent->children->sibling = this;
1166 }
1167}
1168
1169static unsigned int cbq_drop(struct Qdisc *sch)
1170{
1171 struct cbq_sched_data *q = qdisc_priv(sch);
1172 struct cbq_class *cl, *cl_head;
1173 int prio;
1174 unsigned int len;
1175
1176 for (prio = TC_CBQ_MAXPRIO; prio >= 0; prio--) {
1177 cl_head = q->active[prio];
1178 if (!cl_head)
1179 continue;
1180
1181 cl = cl_head;
1182 do {
1183 if (cl->q->ops->drop && (len = cl->q->ops->drop(cl->q))) {
1184 sch->q.qlen--;
1185 if (!cl->q->q.qlen)
1186 cbq_deactivate_class(cl);
1187 return len;
1188 }
1189 } while ((cl = cl->next_alive) != cl_head);
1190 }
1191 return 0;
1192}
1193
1194static void
1195cbq_reset(struct Qdisc *sch)
1196{
1197 struct cbq_sched_data *q = qdisc_priv(sch);
1198 struct cbq_class *cl;
1199 int prio;
1200 unsigned int h;
1201
1202 q->activemask = 0;
1203 q->pmask = 0;
1204 q->tx_class = NULL;
1205 q->tx_borrowed = NULL;
1206 qdisc_watchdog_cancel(&q->watchdog);
1207 hrtimer_cancel(&q->delay_timer);
1208 q->toplevel = TC_CBQ_MAXLEVEL;
1209 q->now = psched_get_time();
1210
1211 for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
1212 q->active[prio] = NULL;
1213
1214 for (h = 0; h < q->clhash.hashsize; h++) {
1215 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
1216 qdisc_reset(cl->q);
1217
1218 cl->next_alive = NULL;
1219 cl->undertime = PSCHED_PASTPERFECT;
1220 cl->avgidle = cl->maxidle;
1221 cl->deficit = cl->quantum;
1222 cl->cpriority = cl->priority;
1223 }
1224 }
1225 sch->q.qlen = 0;
1226}
1227
1228
1229static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
1230{
1231 if (lss->change & TCF_CBQ_LSS_FLAGS) {
1232 cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
1233 cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
1234 }
1235 if (lss->change & TCF_CBQ_LSS_EWMA)
1236 cl->ewma_log = lss->ewma_log;
1237 if (lss->change & TCF_CBQ_LSS_AVPKT)
1238 cl->avpkt = lss->avpkt;
1239 if (lss->change & TCF_CBQ_LSS_MINIDLE)
1240 cl->minidle = -(long)lss->minidle;
1241 if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
1242 cl->maxidle = lss->maxidle;
1243 cl->avgidle = lss->maxidle;
1244 }
1245 if (lss->change & TCF_CBQ_LSS_OFFTIME)
1246 cl->offtime = lss->offtime;
1247 return 0;
1248}
1249
1250static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
1251{
1252 q->nclasses[cl->priority]--;
1253 q->quanta[cl->priority] -= cl->weight;
1254 cbq_normalize_quanta(q, cl->priority);
1255}
1256
1257static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
1258{
1259 q->nclasses[cl->priority]++;
1260 q->quanta[cl->priority] += cl->weight;
1261 cbq_normalize_quanta(q, cl->priority);
1262}
1263
1264static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
1265{
1266 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
1267
1268 if (wrr->allot)
1269 cl->allot = wrr->allot;
1270 if (wrr->weight)
1271 cl->weight = wrr->weight;
1272 if (wrr->priority) {
1273 cl->priority = wrr->priority - 1;
1274 cl->cpriority = cl->priority;
1275 if (cl->priority >= cl->priority2)
1276 cl->priority2 = TC_CBQ_MAXPRIO - 1;
1277 }
1278
1279 cbq_addprio(q, cl);
1280 return 0;
1281}
1282
1283static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
1284{
1285 switch (ovl->strategy) {
1286 case TC_CBQ_OVL_CLASSIC:
1287 cl->overlimit = cbq_ovl_classic;
1288 break;
1289 case TC_CBQ_OVL_DELAY:
1290 cl->overlimit = cbq_ovl_delay;
1291 break;
1292 case TC_CBQ_OVL_LOWPRIO:
1293 if (ovl->priority2 - 1 >= TC_CBQ_MAXPRIO ||
1294 ovl->priority2 - 1 <= cl->priority)
1295 return -EINVAL;
1296 cl->priority2 = ovl->priority2 - 1;
1297 cl->overlimit = cbq_ovl_lowprio;
1298 break;
1299 case TC_CBQ_OVL_DROP:
1300 cl->overlimit = cbq_ovl_drop;
1301 break;
1302 case TC_CBQ_OVL_RCLASSIC:
1303 cl->overlimit = cbq_ovl_rclassic;
1304 break;
1305 default:
1306 return -EINVAL;
1307 }
1308 cl->penalty = ovl->penalty;
1309 return 0;
1310}
1311
1312#ifdef CONFIG_NET_CLS_ACT
1313static int cbq_set_police(struct cbq_class *cl, struct tc_cbq_police *p)
1314{
1315 cl->police = p->police;
1316
1317 if (cl->q->handle) {
1318 if (p->police == TC_POLICE_RECLASSIFY)
1319 cl->q->reshape_fail = cbq_reshape_fail;
1320 else
1321 cl->q->reshape_fail = NULL;
1322 }
1323 return 0;
1324}
1325#endif
1326
1327static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
1328{
1329 cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
1330 return 0;
1331}
1332
1333static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
1334 [TCA_CBQ_LSSOPT] = { .len = sizeof(struct tc_cbq_lssopt) },
1335 [TCA_CBQ_WRROPT] = { .len = sizeof(struct tc_cbq_wrropt) },
1336 [TCA_CBQ_FOPT] = { .len = sizeof(struct tc_cbq_fopt) },
1337 [TCA_CBQ_OVL_STRATEGY] = { .len = sizeof(struct tc_cbq_ovl) },
1338 [TCA_CBQ_RATE] = { .len = sizeof(struct tc_ratespec) },
1339 [TCA_CBQ_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
1340 [TCA_CBQ_POLICE] = { .len = sizeof(struct tc_cbq_police) },
1341};
1342
1343static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
1344{
1345 struct cbq_sched_data *q = qdisc_priv(sch);
1346 struct nlattr *tb[TCA_CBQ_MAX + 1];
1347 struct tc_ratespec *r;
1348 int err;
1349
1350 err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
1351 if (err < 0)
1352 return err;
1353
1354 if (tb[TCA_CBQ_RTAB] == NULL || tb[TCA_CBQ_RATE] == NULL)
1355 return -EINVAL;
1356
1357 r = nla_data(tb[TCA_CBQ_RATE]);
1358
1359 if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
1360 return -EINVAL;
1361
1362 err = qdisc_class_hash_init(&q->clhash);
1363 if (err < 0)
1364 goto put_rtab;
1365
1366 q->link.refcnt = 1;
1367 q->link.sibling = &q->link;
1368 q->link.common.classid = sch->handle;
1369 q->link.qdisc = sch;
1370 q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1371 sch->handle);
1372 if (!q->link.q)
1373 q->link.q = &noop_qdisc;
1374
1375 q->link.priority = TC_CBQ_MAXPRIO - 1;
1376 q->link.priority2 = TC_CBQ_MAXPRIO - 1;
1377 q->link.cpriority = TC_CBQ_MAXPRIO - 1;
1378 q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
1379 q->link.overlimit = cbq_ovl_classic;
1380 q->link.allot = psched_mtu(qdisc_dev(sch));
1381 q->link.quantum = q->link.allot;
1382 q->link.weight = q->link.R_tab->rate.rate;
1383
1384 q->link.ewma_log = TC_CBQ_DEF_EWMA;
1385 q->link.avpkt = q->link.allot/2;
1386 q->link.minidle = -0x7FFFFFFF;
1387
1388 qdisc_watchdog_init(&q->watchdog, sch);
1389 hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
1390 q->delay_timer.function = cbq_undelay;
1391 q->toplevel = TC_CBQ_MAXLEVEL;
1392 q->now = psched_get_time();
1393
1394 cbq_link_class(&q->link);
1395
1396 if (tb[TCA_CBQ_LSSOPT])
1397 cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));
1398
1399 cbq_addprio(q, &q->link);
1400 return 0;
1401
1402put_rtab:
1403 qdisc_put_rtab(q->link.R_tab);
1404 return err;
1405}
1406
1407static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
1408{
1409 unsigned char *b = skb_tail_pointer(skb);
1410
1411 if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
1412 goto nla_put_failure;
1413 return skb->len;
1414
1415nla_put_failure:
1416 nlmsg_trim(skb, b);
1417 return -1;
1418}
1419
1420static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
1421{
1422 unsigned char *b = skb_tail_pointer(skb);
1423 struct tc_cbq_lssopt opt;
1424
1425 opt.flags = 0;
1426 if (cl->borrow == NULL)
1427 opt.flags |= TCF_CBQ_LSS_BOUNDED;
1428 if (cl->share == NULL)
1429 opt.flags |= TCF_CBQ_LSS_ISOLATED;
1430 opt.ewma_log = cl->ewma_log;
1431 opt.level = cl->level;
1432 opt.avpkt = cl->avpkt;
1433 opt.maxidle = cl->maxidle;
1434 opt.minidle = (u32)(-cl->minidle);
1435 opt.offtime = cl->offtime;
1436 opt.change = ~0;
1437 if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
1438 goto nla_put_failure;
1439 return skb->len;
1440
1441nla_put_failure:
1442 nlmsg_trim(skb, b);
1443 return -1;
1444}
1445
1446static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
1447{
1448 unsigned char *b = skb_tail_pointer(skb);
1449 struct tc_cbq_wrropt opt;
1450
1451 memset(&opt, 0, sizeof(opt));
1452 opt.flags = 0;
1453 opt.allot = cl->allot;
1454 opt.priority = cl->priority + 1;
1455 opt.cpriority = cl->cpriority + 1;
1456 opt.weight = cl->weight;
1457 if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
1458 goto nla_put_failure;
1459 return skb->len;
1460
1461nla_put_failure:
1462 nlmsg_trim(skb, b);
1463 return -1;
1464}
1465
1466static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
1467{
1468 unsigned char *b = skb_tail_pointer(skb);
1469 struct tc_cbq_ovl opt;
1470
1471 opt.strategy = cl->ovl_strategy;
1472 opt.priority2 = cl->priority2 + 1;
1473 opt.pad = 0;
1474 opt.penalty = cl->penalty;
1475 if (nla_put(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt))
1476 goto nla_put_failure;
1477 return skb->len;
1478
1479nla_put_failure:
1480 nlmsg_trim(skb, b);
1481 return -1;
1482}
1483
1484static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
1485{
1486 unsigned char *b = skb_tail_pointer(skb);
1487 struct tc_cbq_fopt opt;
1488
1489 if (cl->split || cl->defmap) {
1490 opt.split = cl->split ? cl->split->common.classid : 0;
1491 opt.defmap = cl->defmap;
1492 opt.defchange = ~0;
1493 if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
1494 goto nla_put_failure;
1495 }
1496 return skb->len;
1497
1498nla_put_failure:
1499 nlmsg_trim(skb, b);
1500 return -1;
1501}
1502
1503#ifdef CONFIG_NET_CLS_ACT
1504static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
1505{
1506 unsigned char *b = skb_tail_pointer(skb);
1507 struct tc_cbq_police opt;
1508
1509 if (cl->police) {
1510 opt.police = cl->police;
1511 opt.__res1 = 0;
1512 opt.__res2 = 0;
1513 if (nla_put(skb, TCA_CBQ_POLICE, sizeof(opt), &opt))
1514 goto nla_put_failure;
1515 }
1516 return skb->len;
1517
1518nla_put_failure:
1519 nlmsg_trim(skb, b);
1520 return -1;
1521}
1522#endif
1523
1524static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
1525{
1526 if (cbq_dump_lss(skb, cl) < 0 ||
1527 cbq_dump_rate(skb, cl) < 0 ||
1528 cbq_dump_wrr(skb, cl) < 0 ||
1529 cbq_dump_ovl(skb, cl) < 0 ||
1530#ifdef CONFIG_NET_CLS_ACT
1531 cbq_dump_police(skb, cl) < 0 ||
1532#endif
1533 cbq_dump_fopt(skb, cl) < 0)
1534 return -1;
1535 return 0;
1536}
1537
1538static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
1539{
1540 struct cbq_sched_data *q = qdisc_priv(sch);
1541 struct nlattr *nest;
1542
1543 nest = nla_nest_start(skb, TCA_OPTIONS);
1544 if (nest == NULL)
1545 goto nla_put_failure;
1546 if (cbq_dump_attr(skb, &q->link) < 0)
1547 goto nla_put_failure;
1548 return nla_nest_end(skb, nest);
1549
1550nla_put_failure:
1551 nla_nest_cancel(skb, nest);
1552 return -1;
1553}
1554
1555static int
1556cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
1557{
1558 struct cbq_sched_data *q = qdisc_priv(sch);
1559
1560 q->link.xstats.avgidle = q->link.avgidle;
1561 return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
1562}
1563
1564static int
1565cbq_dump_class(struct Qdisc *sch, unsigned long arg,
1566 struct sk_buff *skb, struct tcmsg *tcm)
1567{
1568 struct cbq_class *cl = (struct cbq_class *)arg;
1569 struct nlattr *nest;
1570
1571 if (cl->tparent)
1572 tcm->tcm_parent = cl->tparent->common.classid;
1573 else
1574 tcm->tcm_parent = TC_H_ROOT;
1575 tcm->tcm_handle = cl->common.classid;
1576 tcm->tcm_info = cl->q->handle;
1577
1578 nest = nla_nest_start(skb, TCA_OPTIONS);
1579 if (nest == NULL)
1580 goto nla_put_failure;
1581 if (cbq_dump_attr(skb, cl) < 0)
1582 goto nla_put_failure;
1583 return nla_nest_end(skb, nest);
1584
1585nla_put_failure:
1586 nla_nest_cancel(skb, nest);
1587 return -1;
1588}
1589
1590static int
1591cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
1592 struct gnet_dump *d)
1593{
1594 struct cbq_sched_data *q = qdisc_priv(sch);
1595 struct cbq_class *cl = (struct cbq_class *)arg;
1596
1597 cl->xstats.avgidle = cl->avgidle;
1598 cl->xstats.undertime = 0;
1599
1600 if (cl->undertime != PSCHED_PASTPERFECT)
1601 cl->xstats.undertime = cl->undertime - q->now;
1602
1603 if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
1604 gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
1605 gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
1606 return -1;
1607
1608 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1609}
1610
1611static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1612 struct Qdisc **old)
1613{
1614 struct cbq_class *cl = (struct cbq_class *)arg;
1615
1616 if (new == NULL) {
1617 new = qdisc_create_dflt(sch->dev_queue,
1618 &pfifo_qdisc_ops, cl->common.classid);
1619 if (new == NULL)
1620 return -ENOBUFS;
1621 } else {
1622#ifdef CONFIG_NET_CLS_ACT
1623 if (cl->police == TC_POLICE_RECLASSIFY)
1624 new->reshape_fail = cbq_reshape_fail;
1625#endif
1626 }
1627
1628 *old = qdisc_replace(sch, new, &cl->q);
1629 return 0;
1630}
1631
1632static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
1633{
1634 struct cbq_class *cl = (struct cbq_class *)arg;
1635
1636 return cl->q;
1637}
1638
1639static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
1640{
1641 struct cbq_class *cl = (struct cbq_class *)arg;
1642
1643 if (cl->q->q.qlen == 0)
1644 cbq_deactivate_class(cl);
1645}
1646
1647static unsigned long cbq_get(struct Qdisc *sch, u32 classid)
1648{
1649 struct cbq_sched_data *q = qdisc_priv(sch);
1650 struct cbq_class *cl = cbq_class_lookup(q, classid);
1651
1652 if (cl) {
1653 cl->refcnt++;
1654 return (unsigned long)cl;
1655 }
1656 return 0;
1657}
1658
1659static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
1660{
1661 struct cbq_sched_data *q = qdisc_priv(sch);
1662
1663 WARN_ON(cl->filters);
1664
1665 tcf_destroy_chain(&cl->filter_list);
1666 qdisc_destroy(cl->q);
1667 qdisc_put_rtab(cl->R_tab);
1668 gen_kill_estimator(&cl->bstats, &cl->rate_est);
1669 if (cl != &q->link)
1670 kfree(cl);
1671}
1672
1673static void cbq_destroy(struct Qdisc *sch)
1674{
1675 struct cbq_sched_data *q = qdisc_priv(sch);
1676 struct hlist_node *next;
1677 struct cbq_class *cl;
1678 unsigned int h;
1679
1680#ifdef CONFIG_NET_CLS_ACT
1681 q->rx_class = NULL;
1682#endif
1683 /*
1684 * Filters must be destroyed first because we don't destroy the
1685 * classes from root to leaves, which means that filters can still
1686 * be bound to classes which have been destroyed already. --TGR '04
1687 */
1688 for (h = 0; h < q->clhash.hashsize; h++) {
1689 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode)
1690 tcf_destroy_chain(&cl->filter_list);
1691 }
1692 for (h = 0; h < q->clhash.hashsize; h++) {
1693 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
1694 common.hnode)
1695 cbq_destroy_class(sch, cl);
1696 }
1697 qdisc_class_hash_destroy(&q->clhash);
1698}
1699
1700static void cbq_put(struct Qdisc *sch, unsigned long arg)
1701{
1702 struct cbq_class *cl = (struct cbq_class *)arg;
1703
1704 if (--cl->refcnt == 0) {
1705#ifdef CONFIG_NET_CLS_ACT
1706 spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
1707 struct cbq_sched_data *q = qdisc_priv(sch);
1708
1709 spin_lock_bh(root_lock);
1710 if (q->rx_class == cl)
1711 q->rx_class = NULL;
1712 spin_unlock_bh(root_lock);
1713#endif
1714
1715 cbq_destroy_class(sch, cl);
1716 }
1717}
1718
1719static int
1720cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
1721 unsigned long *arg)
1722{
1723 int err;
1724 struct cbq_sched_data *q = qdisc_priv(sch);
1725 struct cbq_class *cl = (struct cbq_class *)*arg;
1726 struct nlattr *opt = tca[TCA_OPTIONS];
1727 struct nlattr *tb[TCA_CBQ_MAX + 1];
1728 struct cbq_class *parent;
1729 struct qdisc_rate_table *rtab = NULL;
1730
1731 if (opt == NULL)
1732 return -EINVAL;
1733
1734 err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
1735 if (err < 0)
1736 return err;
1737
1738 if (cl) {
1739 /* Check parent */
1740 if (parentid) {
1741 if (cl->tparent &&
1742 cl->tparent->common.classid != parentid)
1743 return -EINVAL;
1744 if (!cl->tparent && parentid != TC_H_ROOT)
1745 return -EINVAL;
1746 }
1747
1748 if (tb[TCA_CBQ_RATE]) {
1749 rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
1750 tb[TCA_CBQ_RTAB]);
1751 if (rtab == NULL)
1752 return -EINVAL;
1753 }
1754
1755 if (tca[TCA_RATE]) {
1756 err = gen_replace_estimator(&cl->bstats, NULL,
1757 &cl->rate_est,
1758 qdisc_root_sleeping_lock(sch),
1759 tca[TCA_RATE]);
1760 if (err) {
1761 qdisc_put_rtab(rtab);
1762 return err;
1763 }
1764 }
1765
1766 /* Change class parameters */
1767 sch_tree_lock(sch);
1768
1769 if (cl->next_alive != NULL)
1770 cbq_deactivate_class(cl);
1771
1772 if (rtab) {
1773 qdisc_put_rtab(cl->R_tab);
1774 cl->R_tab = rtab;
1775 }
1776
1777 if (tb[TCA_CBQ_LSSOPT])
1778 cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
1779
1780 if (tb[TCA_CBQ_WRROPT]) {
1781 cbq_rmprio(q, cl);
1782 cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
1783 }
1784
1785 if (tb[TCA_CBQ_OVL_STRATEGY])
1786 cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY]));
1787
1788#ifdef CONFIG_NET_CLS_ACT
1789 if (tb[TCA_CBQ_POLICE])
1790 cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE]));
1791#endif
1792
1793 if (tb[TCA_CBQ_FOPT])
1794 cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
1795
1796 if (cl->q->q.qlen)
1797 cbq_activate_class(cl);
1798
1799 sch_tree_unlock(sch);
1800
1801 return 0;
1802 }
1803
1804 if (parentid == TC_H_ROOT)
1805 return -EINVAL;
1806
1807 if (tb[TCA_CBQ_WRROPT] == NULL || tb[TCA_CBQ_RATE] == NULL ||
1808 tb[TCA_CBQ_LSSOPT] == NULL)
1809 return -EINVAL;
1810
1811 rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]);
1812 if (rtab == NULL)
1813 return -EINVAL;
1814
1815 if (classid) {
1816 err = -EINVAL;
1817 if (TC_H_MAJ(classid ^ sch->handle) ||
1818 cbq_class_lookup(q, classid))
1819 goto failure;
1820 } else {
1821 int i;
1822 classid = TC_H_MAKE(sch->handle, 0x8000);
1823
1824 for (i = 0; i < 0x8000; i++) {
1825 if (++q->hgenerator >= 0x8000)
1826 q->hgenerator = 1;
1827 if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
1828 break;
1829 }
1830 err = -ENOSR;
1831 if (i >= 0x8000)
1832 goto failure;
1833 classid = classid|q->hgenerator;
1834 }
1835
1836 parent = &q->link;
1837 if (parentid) {
1838 parent = cbq_class_lookup(q, parentid);
1839 err = -EINVAL;
1840 if (parent == NULL)
1841 goto failure;
1842 }
1843
1844 err = -ENOBUFS;
1845 cl = kzalloc(sizeof(*cl), GFP_KERNEL);
1846 if (cl == NULL)
1847 goto failure;
1848
1849 if (tca[TCA_RATE]) {
1850 err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
1851 qdisc_root_sleeping_lock(sch),
1852 tca[TCA_RATE]);
1853 if (err) {
1854 kfree(cl);
1855 goto failure;
1856 }
1857 }
1858
1859 cl->R_tab = rtab;
1860 rtab = NULL;
1861 cl->refcnt = 1;
1862 cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
1863 if (!cl->q)
1864 cl->q = &noop_qdisc;
1865 cl->common.classid = classid;
1866 cl->tparent = parent;
1867 cl->qdisc = sch;
1868 cl->allot = parent->allot;
1869 cl->quantum = cl->allot;
1870 cl->weight = cl->R_tab->rate.rate;
1871
1872 sch_tree_lock(sch);
1873 cbq_link_class(cl);
1874 cl->borrow = cl->tparent;
1875 if (cl->tparent != &q->link)
1876 cl->share = cl->tparent;
1877 cbq_adjust_levels(parent);
1878 cl->minidle = -0x7FFFFFFF;
1879 cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
1880 cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
1881 if (cl->ewma_log == 0)
1882 cl->ewma_log = q->link.ewma_log;
1883 if (cl->maxidle == 0)
1884 cl->maxidle = q->link.maxidle;
1885 if (cl->avpkt == 0)
1886 cl->avpkt = q->link.avpkt;
1887 cl->overlimit = cbq_ovl_classic;
1888 if (tb[TCA_CBQ_OVL_STRATEGY])
1889 cbq_set_overlimit(cl, nla_data(tb[TCA_CBQ_OVL_STRATEGY]));
1890#ifdef CONFIG_NET_CLS_ACT
1891 if (tb[TCA_CBQ_POLICE])
1892 cbq_set_police(cl, nla_data(tb[TCA_CBQ_POLICE]));
1893#endif
1894 if (tb[TCA_CBQ_FOPT])
1895 cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
1896 sch_tree_unlock(sch);
1897
1898 qdisc_class_hash_grow(sch, &q->clhash);
1899
1900 *arg = (unsigned long)cl;
1901 return 0;
1902
1903failure:
1904 qdisc_put_rtab(rtab);
1905 return err;
1906}
1907
1908static int cbq_delete(struct Qdisc *sch, unsigned long arg)
1909{
1910 struct cbq_sched_data *q = qdisc_priv(sch);
1911 struct cbq_class *cl = (struct cbq_class *)arg;
1912 unsigned int qlen, backlog;
1913
1914 if (cl->filters || cl->children || cl == &q->link)
1915 return -EBUSY;
1916
1917 sch_tree_lock(sch);
1918
1919 qlen = cl->q->q.qlen;
1920 backlog = cl->q->qstats.backlog;
1921 qdisc_reset(cl->q);
1922 qdisc_tree_reduce_backlog(cl->q, qlen, backlog);
1923
1924 if (cl->next_alive)
1925 cbq_deactivate_class(cl);
1926
1927 if (q->tx_borrowed == cl)
1928 q->tx_borrowed = q->tx_class;
1929 if (q->tx_class == cl) {
1930 q->tx_class = NULL;
1931 q->tx_borrowed = NULL;
1932 }
1933#ifdef CONFIG_NET_CLS_ACT
1934 if (q->rx_class == cl)
1935 q->rx_class = NULL;
1936#endif
1937
1938 cbq_unlink_class(cl);
1939 cbq_adjust_levels(cl->tparent);
1940 cl->defmap = 0;
1941 cbq_sync_defmap(cl);
1942
1943 cbq_rmprio(q, cl);
1944 sch_tree_unlock(sch);
1945
1946 BUG_ON(--cl->refcnt == 0);
1947 /*
1948 * This shouldn't happen: we "hold" one cops->get() when called
1949 * from tc_ctl_tclass; the destroy method is done from cops->put().
1950 */
1951
1952 return 0;
1953}
1954
1955static struct tcf_proto __rcu **cbq_find_tcf(struct Qdisc *sch,
1956 unsigned long arg)
1957{
1958 struct cbq_sched_data *q = qdisc_priv(sch);
1959 struct cbq_class *cl = (struct cbq_class *)arg;
1960
1961 if (cl == NULL)
1962 cl = &q->link;
1963
1964 return &cl->filter_list;
1965}
1966
1967static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
1968 u32 classid)
1969{
1970 struct cbq_sched_data *q = qdisc_priv(sch);
1971 struct cbq_class *p = (struct cbq_class *)parent;
1972 struct cbq_class *cl = cbq_class_lookup(q, classid);
1973
1974 if (cl) {
1975 if (p && p->level <= cl->level)
1976 return 0;
1977 cl->filters++;
1978 return (unsigned long)cl;
1979 }
1980 return 0;
1981}
1982
1983static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
1984{
1985 struct cbq_class *cl = (struct cbq_class *)arg;
1986
1987 cl->filters--;
1988}
1989
1990static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1991{
1992 struct cbq_sched_data *q = qdisc_priv(sch);
1993 struct cbq_class *cl;
1994 unsigned int h;
1995
1996 if (arg->stop)
1997 return;
1998
1999 for (h = 0; h < q->clhash.hashsize; h++) {
2000 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
2001 if (arg->count < arg->skip) {
2002 arg->count++;
2003 continue;
2004 }
2005 if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
2006 arg->stop = 1;
2007 return;
2008 }
2009 arg->count++;
2010 }
2011 }
2012}
2013
2014static const struct Qdisc_class_ops cbq_class_ops = {
2015 .graft = cbq_graft,
2016 .leaf = cbq_leaf,
2017 .qlen_notify = cbq_qlen_notify,
2018 .get = cbq_get,
2019 .put = cbq_put,
2020 .change = cbq_change_class,
2021 .delete = cbq_delete,
2022 .walk = cbq_walk,
2023 .tcf_chain = cbq_find_tcf,
2024 .bind_tcf = cbq_bind_filter,
2025 .unbind_tcf = cbq_unbind_filter,
2026 .dump = cbq_dump_class,
2027 .dump_stats = cbq_dump_class_stats,
2028};
2029
2030static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
2031 .next = NULL,
2032 .cl_ops = &cbq_class_ops,
2033 .id = "cbq",
2034 .priv_size = sizeof(struct cbq_sched_data),
2035 .enqueue = cbq_enqueue,
2036 .dequeue = cbq_dequeue,
2037 .peek = qdisc_peek_dequeued,
2038 .drop = cbq_drop,
2039 .init = cbq_init,
2040 .reset = cbq_reset,
2041 .destroy = cbq_destroy,
2042 .change = NULL,
2043 .dump = cbq_dump,
2044 .dump_stats = cbq_dump_stats,
2045 .owner = THIS_MODULE,
2046};
2047
2048static int __init cbq_module_init(void)
2049{
2050 return register_qdisc(&cbq_qdisc_ops);
2051}
2052static void __exit cbq_module_exit(void)
2053{
2054 unregister_qdisc(&cbq_qdisc_ops);
2055}
2056module_init(cbq_module_init)
2057module_exit(cbq_module_exit)
2058MODULE_LICENSE("GPL");
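/* Example configuration from user space (illustrative values; the device
 * name, rates and class ids are assumptions, not defaults):
 *
 *	tc qdisc add dev eth0 root handle 1: cbq \
 *		bandwidth 100Mbit avpkt 1000 cell 8
 *	tc class add dev eth0 parent 1: classid 1:1 cbq \
 *		bandwidth 100Mbit rate 6Mbit weight 0.6Mbit prio 5 \
 *		allot 1514 cell 8 maxburst 20 avpkt 1000 bounded
 *
 * The first command installs CBQ as the root discipline; the second adds
 * a bounded class limited to 6Mbit. Filters ("tc filter add ...") then
 * map traffic onto such classes.
 */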
1/*
2 * net/sched/sch_cbq.c Class-Based Queueing discipline.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 *
11 */
12
13#include <linux/module.h>
14#include <linux/slab.h>
15#include <linux/types.h>
16#include <linux/kernel.h>
17#include <linux/string.h>
18#include <linux/errno.h>
19#include <linux/skbuff.h>
20#include <net/netlink.h>
21#include <net/pkt_sched.h>
22#include <net/pkt_cls.h>
23
24
25/* Class-Based Queueing (CBQ) algorithm.
26 =======================================
27
28 Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
29 Management Models for Packet Networks",
30 IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995
31
32 [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995
33
34 [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
35 Parameters", 1996
36
37 [4] Sally Floyd and Michael Speer, "Experimental Results
38 for Class-Based Queueing", 1998, not published.
39
40 -----------------------------------------------------------------------
41
42 Algorithm skeleton was taken from NS simulator cbq.cc.
43 If someone wants to check this code against the LBL version,
44 he should take into account that ONLY the skeleton was borrowed,
45 the implementation is different. Particularly:
46
47 --- The WRR algorithm is different. Our version looks more
48 reasonable (I hope) and works when quanta are allowed to be
49 less than MTU, which is always the case when real time classes
50 have small rates. Note, that the statement of [3] is
51 incomplete, delay may actually be estimated even if class
52 per-round allotment is less than MTU. Namely, if per-round
53 allotment is W*r_i, and r_1+...+r_k = r < 1
54
55 delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B
56
57 In the worst case we have IntServ estimate with D = W*r+k*MTU
58 and C = MTU*r. The proof (if correct at all) is trivial.
59
60
61 --- It seems that cbq-2.0 is not very accurate. At least, I cannot
62 interpret some places, which look like wrong translations
63 from NS. Anyone is advised to find these differences
64 and explain to me, why I am wrong 8).
65
66 --- Linux has no EOI event, so that we cannot estimate true class
67 idle time. Workaround is to consider the next dequeue event
68 as sign that previous packet is finished. This is wrong because of
69 internal device queueing, but on a permanently loaded link it is true.
70 Moreover, combined with clock integrator, this scheme looks
71 very close to an ideal solution. */
72
73struct cbq_sched_data;
74
75
76struct cbq_class {
77 struct Qdisc_class_common common;
78 struct cbq_class *next_alive; /* next class with backlog in this priority band */
79
80/* Parameters */
81 unsigned char priority; /* class priority */
82 unsigned char priority2; /* priority to be used after overlimit */
83 unsigned char ewma_log; /* time constant for idle time calculation */
84
85 u32 defmap;
86
87 /* Link-sharing scheduler parameters */
88 long maxidle; /* Class parameters: see below. */
89 long offtime;
90 long minidle;
91 u32 avpkt;
92 struct qdisc_rate_table *R_tab;
93
94 /* General scheduler (WRR) parameters */
95 long allot;
96 long quantum; /* Allotment per WRR round */
97 long weight; /* Relative allotment: see below */
98
99 struct Qdisc *qdisc; /* Ptr to CBQ discipline */
100 struct cbq_class *split; /* Ptr to split node */
101 struct cbq_class *share; /* Ptr to LS parent in the class tree */
102 struct cbq_class *tparent; /* Ptr to tree parent in the class tree */
103 struct cbq_class *borrow; /* NULL if class is bandwidth limited;
104 parent otherwise */
105 struct cbq_class *sibling; /* Sibling chain */
106 struct cbq_class *children; /* Pointer to children chain */
107
108 struct Qdisc *q; /* Elementary queueing discipline */
109
110
111/* Variables */
112 unsigned char cpriority; /* Effective priority */
113 unsigned char delayed;
114 unsigned char level; /* level of the class in hierarchy:
115 0 for leaf classes, and maximal
116 level of children + 1 for nodes.
117 */
118
119 psched_time_t last; /* Last end of service */
120 psched_time_t undertime;
121 long avgidle;
122 long deficit; /* Saved deficit for WRR */
123 psched_time_t penalized;
124 struct gnet_stats_basic_packed bstats;
125 struct gnet_stats_queue qstats;
126 struct net_rate_estimator __rcu *rate_est;
127 struct tc_cbq_xstats xstats;
128
129 struct tcf_proto __rcu *filter_list;
130 struct tcf_block *block;
131
132 int filters;
133
134 struct cbq_class *defaults[TC_PRIO_MAX + 1];
135};
136
137struct cbq_sched_data {
138 struct Qdisc_class_hash clhash; /* Hash table of all classes */
139 int nclasses[TC_CBQ_MAXPRIO + 1];
140 unsigned int quanta[TC_CBQ_MAXPRIO + 1];
141
142 struct cbq_class link;
143
144 unsigned int activemask;
145 struct cbq_class *active[TC_CBQ_MAXPRIO + 1]; /* List of all classes
146 with backlog */
147
148#ifdef CONFIG_NET_CLS_ACT
149 struct cbq_class *rx_class;
150#endif
151 struct cbq_class *tx_class;
152 struct cbq_class *tx_borrowed;
153 int tx_len;
154 psched_time_t now; /* Cached timestamp */
155 unsigned int pmask;
156
157 struct hrtimer delay_timer;
158 struct qdisc_watchdog watchdog; /* Watchdog timer,
159 started when CBQ has
160 backlog, but cannot
161 transmit just now */
162 psched_tdiff_t wd_expires;
163 int toplevel;
164 u32 hgenerator;
165};
166
167
168#define L2T(cl, len) qdisc_l2t((cl)->R_tab, len)
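/* Added note: L2T ("length to time") converts a packet length into the
 * transmission time, in psched ticks, that the class's configured rate
 * allows for it; qdisc_l2t() indexes the 256-slot rate table supplied by
 * userspace via TCA_CBQ_RTAB. All avgidle/undertime arithmetic below works
 * in these tick units.
 */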
169
170static inline struct cbq_class *
171cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
172{
173 struct Qdisc_class_common *clc;
174
175 clc = qdisc_class_find(&q->clhash, classid);
176 if (clc == NULL)
177 return NULL;
178 return container_of(clc, struct cbq_class, common);
179}
180
181#ifdef CONFIG_NET_CLS_ACT
182
183static struct cbq_class *
184cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
185{
186 struct cbq_class *cl;
187
188 for (cl = this->tparent; cl; cl = cl->tparent) {
189 struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];
190
191 if (new != NULL && new != this)
192 return new;
193 }
194 return NULL;
195}
196
197#endif
198
199/* Classify packet. The procedure is pretty complicated, but
200 * it allows us to combine link sharing and priority scheduling
201 * transparently.
202 *
203 * Namely, you can put link-sharing rules (e.g. route based) at the root of CBQ,
204 * so that it resolves to split nodes. Then packets are classified
205 * by logical priority, or a more specific classifier may be attached
206 * to the split node.
207 */
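/* Added illustration (not part of the original comment): in tc(8) terms, a
 * split/defmap setup that exercises the steps below might look roughly like
 * this (option spelling per tc-cbq, given here only as a sketch):
 *
 *   tc qdisc add dev eth0 root handle 1: cbq bandwidth 100Mbit avpkt 1000
 *   tc class add dev eth0 parent 1: classid 1:1 cbq bandwidth 100Mbit \
 *       rate 10Mbit allot 1514 prio 3 avpkt 1000 split 1:0 defmap 3f
 *
 * Class 1:1 then registers itself in the defaults[] table of split node 1:0
 * for the TC_PRIO values set in defmap, so a packet that reaches the split
 * node with one of those priorities resolves to 1:1 in Step 2+n. A socket
 * that sets skb->priority to 0x10001 would instead select 1:1 directly in
 * Step 1.
 */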
208
209static struct cbq_class *
210cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
211{
212 struct cbq_sched_data *q = qdisc_priv(sch);
213 struct cbq_class *head = &q->link;
214 struct cbq_class **defmap;
215 struct cbq_class *cl = NULL;
216 u32 prio = skb->priority;
217 struct tcf_proto *fl;
218 struct tcf_result res;
219
220 /*
221 * Step 1. If skb->priority points to one of our classes, use it.
222 */
223 if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
224 (cl = cbq_class_lookup(q, prio)) != NULL)
225 return cl;
226
227 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
228 for (;;) {
229 int result = 0;
230 defmap = head->defaults;
231
232 fl = rcu_dereference_bh(head->filter_list);
233 /*
234 * Step 2+n. Apply classifier.
235 */
236 result = tcf_classify(skb, fl, &res, true);
237 if (!fl || result < 0)
238 goto fallback;
239
240 cl = (void *)res.class;
241 if (!cl) {
242 if (TC_H_MAJ(res.classid))
243 cl = cbq_class_lookup(q, res.classid);
244 else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
245 cl = defmap[TC_PRIO_BESTEFFORT];
246
247 if (cl == NULL)
248 goto fallback;
249 }
250 if (cl->level >= head->level)
251 goto fallback;
252#ifdef CONFIG_NET_CLS_ACT
253 switch (result) {
254 case TC_ACT_QUEUED:
255 case TC_ACT_STOLEN:
256 case TC_ACT_TRAP:
257 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
258 /* fall through */
259 case TC_ACT_SHOT:
260 return NULL;
261 case TC_ACT_RECLASSIFY:
262 return cbq_reclassify(skb, cl);
263 }
264#endif
265 if (cl->level == 0)
266 return cl;
267
268 /*
269 * Step 3+n. If classifier selected a link sharing class,
270 * apply the agency-specific classifier.
271 * Repeat this procedure until we hit a leaf node.
272 */
273 head = cl;
274 }
275
276fallback:
277 cl = head;
278
279 /*
280 * Step 4. No success...
281 */
282 if (TC_H_MAJ(prio) == 0 &&
283 !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
284 !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
285 return head;
286
287 return cl;
288}
289
290/*
291 * A packet has just been enqueued on an empty class.
292 * cbq_activate_class adds the class to the tail of the active class
293 * list of its priority band.
294 */
295
296static inline void cbq_activate_class(struct cbq_class *cl)
297{
298 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
299 int prio = cl->cpriority;
300 struct cbq_class *cl_tail;
301
302 cl_tail = q->active[prio];
303 q->active[prio] = cl;
304
305 if (cl_tail != NULL) {
306 cl->next_alive = cl_tail->next_alive;
307 cl_tail->next_alive = cl;
308 } else {
309 cl->next_alive = cl;
310 q->activemask |= (1<<prio);
311 }
312}
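/* Added note: q->active[prio] always points at the *tail* of a circular,
 * singly linked list of backlogged classes; tail->next_alive is the head.
 * E.g. if A is the only active class (active == A, A->next_alive == A) and
 * B is activated, we get active == B, B->next_alive == A, A->next_alive == B:
 * the dequeue loop, which starts at active->next_alive, still visits A first
 * and the newcomer B last.
 */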
313
314/*
315 * Unlink class from active chain.
316 * Note that this same procedure is done directly in cbq_dequeue*
317 * during the round-robin procedure.
318 */
319
320static void cbq_deactivate_class(struct cbq_class *this)
321{
322 struct cbq_sched_data *q = qdisc_priv(this->qdisc);
323 int prio = this->cpriority;
324 struct cbq_class *cl;
325 struct cbq_class *cl_prev = q->active[prio];
326
327 do {
328 cl = cl_prev->next_alive;
329 if (cl == this) {
330 cl_prev->next_alive = cl->next_alive;
331 cl->next_alive = NULL;
332
333 if (cl == q->active[prio]) {
334 q->active[prio] = cl_prev;
335 if (cl == q->active[prio]) {
336 q->active[prio] = NULL;
337 q->activemask &= ~(1<<prio);
338 return;
339 }
340 }
341 return;
342 }
343 } while ((cl_prev = cl) != q->active[prio]);
344}
345
346static void
347cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
348{
349 int toplevel = q->toplevel;
350
351 if (toplevel > cl->level) {
352 psched_time_t now = psched_get_time();
353
354 do {
355 if (cl->undertime < now) {
356 q->toplevel = cl->level;
357 return;
358 }
359 } while ((cl = cl->borrow) != NULL && toplevel > cl->level);
360 }
361}
362
363static int
364cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
365 struct sk_buff **to_free)
366{
367 struct cbq_sched_data *q = qdisc_priv(sch);
368 int uninitialized_var(ret);
369 struct cbq_class *cl = cbq_classify(skb, sch, &ret);
370
371#ifdef CONFIG_NET_CLS_ACT
372 q->rx_class = cl;
373#endif
374 if (cl == NULL) {
375 if (ret & __NET_XMIT_BYPASS)
376 qdisc_qstats_drop(sch);
377 __qdisc_drop(skb, to_free);
378 return ret;
379 }
380
381 ret = qdisc_enqueue(skb, cl->q, to_free);
382 if (ret == NET_XMIT_SUCCESS) {
383 sch->q.qlen++;
384 cbq_mark_toplevel(q, cl);
385 if (!cl->next_alive)
386 cbq_activate_class(cl);
387 return ret;
388 }
389
390 if (net_xmit_drop_count(ret)) {
391 qdisc_qstats_drop(sch);
392 cbq_mark_toplevel(q, cl);
393 cl->qstats.drops++;
394 }
395 return ret;
396}
397
398/* Overlimit action: penalize leaf class by adding offtime */
399static void cbq_overlimit(struct cbq_class *cl)
400{
401 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
402 psched_tdiff_t delay = cl->undertime - q->now;
403
404 if (!cl->delayed) {
405 delay += cl->offtime;
406
407 /*
408 * The class goes to sleep, so it will have no
409 * chance to work off its avgidle. Let's forgive it 8)
410 *
411 * BTW cbq-2.0 has a bug in this
412 * place; apparently they forgot to shift it by cl->ewma_log.
413 */
414 if (cl->avgidle < 0)
415 delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
416 if (cl->avgidle < cl->minidle)
417 cl->avgidle = cl->minidle;
418 if (delay <= 0)
419 delay = 1;
420 cl->undertime = q->now + delay;
421
422 cl->xstats.overactions++;
423 cl->delayed = 1;
424 }
425 if (q->wd_expires == 0 || q->wd_expires > delay)
426 q->wd_expires = delay;
427
428 /* Dirty work! We must schedule wakeups based on
429 * the real available rate, rather than the leaf rate,
430 * which may be tiny (even zero).
431 */
432 if (q->toplevel == TC_CBQ_MAXLEVEL) {
433 struct cbq_class *b;
434 psched_tdiff_t base_delay = q->wd_expires;
435
436 for (b = cl->borrow; b; b = b->borrow) {
437 delay = b->undertime - q->now;
438 if (delay < base_delay) {
439 if (delay <= 0)
440 delay = 1;
441 base_delay = delay;
442 }
443 }
444
445 q->wd_expires = base_delay;
446 }
447}
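/* Added worked example with assumed numbers: suppose cl->undertime is 100
 * ticks ahead of q->now, cl->offtime is 50 ticks, cl->avgidle is -64 and
 * cl->ewma_log is 5 (W = 1/32). For a class that was not yet delayed,
 * delay = 100 + 50 - (64 - 64/32) = 88 ticks, i.e. most of the accumulated
 * negative avgidle is forgiven; cl->undertime is pushed 88 ticks into the
 * future and the watchdog is armed for the same span (unless another class
 * needs to wake earlier).
 */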
448
449static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
450 psched_time_t now)
451{
452 struct cbq_class *cl;
453 struct cbq_class *cl_prev = q->active[prio];
454 psched_time_t sched = now;
455
456 if (cl_prev == NULL)
457 return 0;
458
459 do {
460 cl = cl_prev->next_alive;
461 if (now - cl->penalized > 0) {
462 cl_prev->next_alive = cl->next_alive;
463 cl->next_alive = NULL;
464 cl->cpriority = cl->priority;
465 cl->delayed = 0;
466 cbq_activate_class(cl);
467
468 if (cl == q->active[prio]) {
469 q->active[prio] = cl_prev;
470 if (cl == q->active[prio]) {
471 q->active[prio] = NULL;
472 return 0;
473 }
474 }
475
476 cl = cl_prev->next_alive;
477 } else if (sched - cl->penalized > 0)
478 sched = cl->penalized;
479 } while ((cl_prev = cl) != q->active[prio]);
480
481 return sched - now;
482}
483
484static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
485{
486 struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
487 delay_timer);
488 struct Qdisc *sch = q->watchdog.qdisc;
489 psched_time_t now;
490 psched_tdiff_t delay = 0;
491 unsigned int pmask;
492
493 now = psched_get_time();
494
495 pmask = q->pmask;
496 q->pmask = 0;
497
498 while (pmask) {
499 int prio = ffz(~pmask);
500 psched_tdiff_t tmp;
501
502 pmask &= ~(1<<prio);
503
504 tmp = cbq_undelay_prio(q, prio, now);
505 if (tmp > 0) {
506 q->pmask |= 1<<prio;
507 if (tmp < delay || delay == 0)
508 delay = tmp;
509 }
510 }
511
512 if (delay) {
513 ktime_t time;
514
515 time = 0;
516 time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
517 hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
518 }
519
520 __netif_schedule(qdisc_root(sch));
521 return HRTIMER_NORESTART;
522}
523
524/*
525 * This is a mission-critical procedure.
526 *
527 * We "regenerate" the toplevel cutoff if the transmitting class
528 * has backlog and is not regulated. This is not part of the
529 * original CBQ description, but it looks more reasonable.
530 * It may well be wrong; this question needs further investigation.
531 */
532
533static inline void
534cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
535 struct cbq_class *borrowed)
536{
537 if (cl && q->toplevel >= borrowed->level) {
538 if (cl->q->q.qlen > 1) {
539 do {
540 if (borrowed->undertime == PSCHED_PASTPERFECT) {
541 q->toplevel = borrowed->level;
542 return;
543 }
544 } while ((borrowed = borrowed->borrow) != NULL);
545 }
546#if 0
547 /* It is not necessary now. Uncommenting it
548 will save CPU cycles, but decrease fairness.
549 */
550 q->toplevel = TC_CBQ_MAXLEVEL;
551#endif
552 }
553}
554
555static void
556cbq_update(struct cbq_sched_data *q)
557{
558 struct cbq_class *this = q->tx_class;
559 struct cbq_class *cl = this;
560 int len = q->tx_len;
561 psched_time_t now;
562
563 q->tx_class = NULL;
564 /* Time integrator. We calculate the EOS (end of service) time
565 * by adding the expected packet transmission time.
566 */
567 now = q->now + L2T(&q->link, len);
568
569 for ( ; cl; cl = cl->share) {
570 long avgidle = cl->avgidle;
571 long idle;
572
573 cl->bstats.packets++;
574 cl->bstats.bytes += len;
575
576 /*
577 * (now - last) is total time between packet right edges.
578 * (last_pktlen/rate) is "virtual" busy time, so that
579 *
580 * idle = (now - last) - last_pktlen/rate
581 */
582
583 idle = now - cl->last;
584 if ((unsigned long)idle > 128*1024*1024) {
585 avgidle = cl->maxidle;
586 } else {
587 idle -= L2T(cl, len);
588
589 /* true_avgidle := (1-W)*true_avgidle + W*idle,
590 * where W=2^{-ewma_log}. But cl->avgidle is scaled:
591 * cl->avgidle == true_avgidle/W,
592 * hence:
593 */
594 avgidle += idle - (avgidle>>cl->ewma_log);
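/* Added derivation: with W = 2^(-ewma_log) and cl->avgidle scaled as
 * true_avgidle/W, dividing
 *     true_avgidle' = (1-W)*true_avgidle + W*idle
 * by W gives
 *     cl->avgidle' = cl->avgidle + idle - W*cl->avgidle,
 * and W*cl->avgidle is exactly the (avgidle >> cl->ewma_log) term above.
 */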
595 }
596
597 if (avgidle <= 0) {
598 /* Overlimit or at-limit */
599
600 if (avgidle < cl->minidle)
601 avgidle = cl->minidle;
602
603 cl->avgidle = avgidle;
604
605 /* Calculate expected time, when this class
606 * will be allowed to send.
607 * It will occur, when:
608 * (1-W)*true_avgidle + W*delay = 0, i.e.
609 * idle = (1/W - 1)*(-true_avgidle)
610 * or
611 * idle = (1 - W)*(-cl->avgidle);
612 */
613 idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);
614
615 /*
616 * That is not all.
617 * To maintain the rate allocated to the class,
618 * we add to undertime the virtual clock time
619 * needed to complete the transmitted packet
620 * (len/phys_bandwidth has already elapsed by
621 * the moment cbq_update is called).
622 */
623
624 idle -= L2T(&q->link, len);
625 idle += L2T(cl, len);
626
627 cl->undertime = now + idle;
628 } else {
629 /* Underlimit */
630
631 cl->undertime = PSCHED_PASTPERFECT;
632 if (avgidle > cl->maxidle)
633 cl->avgidle = cl->maxidle;
634 else
635 cl->avgidle = avgidle;
636 }
637 if ((s64)(now - cl->last) > 0)
638 cl->last = now;
639 }
640
641 cbq_update_toplevel(q, this, q->tx_borrowed);
642}
643
644static inline struct cbq_class *
645cbq_under_limit(struct cbq_class *cl)
646{
647 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
648 struct cbq_class *this_cl = cl;
649
650 if (cl->tparent == NULL)
651 return cl;
652
653 if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
654 cl->delayed = 0;
655 return cl;
656 }
657
658 do {
659 /* This is a very suspicious place. Currently the overlimit
660 * action is generated for non-bounded classes
661 * only if the link is completely congested.
662 * Though this agrees with the ancestor-only paradigm,
663 * it looks very stupid. In particular,
664 * it means that this chunk of code will either
665 * never be called or will strongly amplify
666 * burstiness. Dangerous and silly, but
667 * no other solution exists.
668 */
669 cl = cl->borrow;
670 if (!cl) {
671 this_cl->qstats.overlimits++;
672 cbq_overlimit(this_cl);
673 return NULL;
674 }
675 if (cl->level > q->toplevel)
676 return NULL;
677 } while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);
678
679 cl->delayed = 0;
680 return cl;
681}
682
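/* Added overview: cbq_dequeue_prio() runs a deficit-style weighted round
 * robin over the circular list of backlogged classes in one priority band.
 * A class may transmit while its byte deficit is positive; each packet
 * decreases the deficit by its length, and once the deficit drops to zero
 * or below the class receives another "quantum" of bytes and the turn
 * passes on. For illustration, with a quantum of 1514 bytes a class that
 * just sent a 9000-byte aggregate will be skipped for roughly the next
 * five rounds before its deficit turns positive again.
 */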
683static inline struct sk_buff *
684cbq_dequeue_prio(struct Qdisc *sch, int prio)
685{
686 struct cbq_sched_data *q = qdisc_priv(sch);
687 struct cbq_class *cl_tail, *cl_prev, *cl;
688 struct sk_buff *skb;
689 int deficit;
690
691 cl_tail = cl_prev = q->active[prio];
692 cl = cl_prev->next_alive;
693
694 do {
695 deficit = 0;
696
697 /* Start round */
698 do {
699 struct cbq_class *borrow = cl;
700
701 if (cl->q->q.qlen &&
702 (borrow = cbq_under_limit(cl)) == NULL)
703 goto skip_class;
704
705 if (cl->deficit <= 0) {
706 /* Class exhausted its allotment per
707 * this round. Switch to the next one.
708 */
709 deficit = 1;
710 cl->deficit += cl->quantum;
711 goto next_class;
712 }
713
714 skb = cl->q->dequeue(cl->q);
715
716 /* Class did not give us any skb :-(
717 * This can occur even if cl->q->q.qlen != 0,
718 * e.g. if cl->q is a "tbf" qdisc.
719 */
720 if (skb == NULL)
721 goto skip_class;
722
723 cl->deficit -= qdisc_pkt_len(skb);
724 q->tx_class = cl;
725 q->tx_borrowed = borrow;
726 if (borrow != cl) {
727#ifndef CBQ_XSTATS_BORROWS_BYTES
728 borrow->xstats.borrows++;
729 cl->xstats.borrows++;
730#else
731 borrow->xstats.borrows += qdisc_pkt_len(skb);
732 cl->xstats.borrows += qdisc_pkt_len(skb);
733#endif
734 }
735 q->tx_len = qdisc_pkt_len(skb);
736
737 if (cl->deficit <= 0) {
738 q->active[prio] = cl;
739 cl = cl->next_alive;
740 cl->deficit += cl->quantum;
741 }
742 return skb;
743
744skip_class:
745 if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
746 /* Class is empty or penalized.
747 * Unlink it from the active chain.
748 */
749 cl_prev->next_alive = cl->next_alive;
750 cl->next_alive = NULL;
751
752 /* Did cl_tail point to it? */
753 if (cl == cl_tail) {
754 /* Repair it! */
755 cl_tail = cl_prev;
756
757 /* Was it the last class in this band? */
758 if (cl == cl_tail) {
759 /* Kill the band! */
760 q->active[prio] = NULL;
761 q->activemask &= ~(1<<prio);
762 if (cl->q->q.qlen)
763 cbq_activate_class(cl);
764 return NULL;
765 }
766
767 q->active[prio] = cl_tail;
768 }
769 if (cl->q->q.qlen)
770 cbq_activate_class(cl);
771
772 cl = cl_prev;
773 }
774
775next_class:
776 cl_prev = cl;
777 cl = cl->next_alive;
778 } while (cl_prev != cl_tail);
779 } while (deficit);
780
781 q->active[prio] = cl_prev;
782
783 return NULL;
784}
785
786static inline struct sk_buff *
787cbq_dequeue_1(struct Qdisc *sch)
788{
789 struct cbq_sched_data *q = qdisc_priv(sch);
790 struct sk_buff *skb;
791 unsigned int activemask;
792
793 activemask = q->activemask & 0xFF;
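	/* ffz(~activemask) is the index of the lowest set bit, i.e. the
	 * highest-priority band that still has backlogged classes; e.g.
	 * activemask == 0x14 scans prio 2 first and prio 4 after it.
	 */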
794 while (activemask) {
795 int prio = ffz(~activemask);
796 activemask &= ~(1<<prio);
797 skb = cbq_dequeue_prio(sch, prio);
798 if (skb)
799 return skb;
800 }
801 return NULL;
802}
803
804static struct sk_buff *
805cbq_dequeue(struct Qdisc *sch)
806{
807 struct sk_buff *skb;
808 struct cbq_sched_data *q = qdisc_priv(sch);
809 psched_time_t now;
810
811 now = psched_get_time();
812
813 if (q->tx_class)
814 cbq_update(q);
815
816 q->now = now;
817
818 for (;;) {
819 q->wd_expires = 0;
820
821 skb = cbq_dequeue_1(sch);
822 if (skb) {
823 qdisc_bstats_update(sch, skb);
824 sch->q.qlen--;
825 return skb;
826 }
827
828 /* All the classes are overlimit.
829 *
830 * This is possible if:
831 *
832 * 1. The scheduler is empty.
833 * 2. The toplevel cutoff inhibited borrowing.
834 * 3. The root class is overlimit.
835 *
836 * Reset the 2nd and 3rd conditions and retry.
837 *
838 * Note that NS and cbq-2.0 are buggy here: peeking at
839 * an arbitrary class is appropriate for ancestor-only
840 * sharing, but not for the toplevel algorithm.
841 *
842 * Our version is better but slower, because it requires
843 * two passes; that is unavoidable with top-level sharing.
844 */
845
846 if (q->toplevel == TC_CBQ_MAXLEVEL &&
847 q->link.undertime == PSCHED_PASTPERFECT)
848 break;
849
850 q->toplevel = TC_CBQ_MAXLEVEL;
851 q->link.undertime = PSCHED_PASTPERFECT;
852 }
853
854 /* No packets in scheduler or nobody wants to give them to us :-(
855 * Sigh... start the watchdog timer in the latter case.
856 */
857
858 if (sch->q.qlen) {
859 qdisc_qstats_overlimit(sch);
860 if (q->wd_expires)
861 qdisc_watchdog_schedule(&q->watchdog,
862 now + q->wd_expires);
863 }
864 return NULL;
865}
866
867 /* CBQ class maintenance routines */
868
869static void cbq_adjust_levels(struct cbq_class *this)
870{
871 if (this == NULL)
872 return;
873
874 do {
875 int level = 0;
876 struct cbq_class *cl;
877
878 cl = this->children;
879 if (cl) {
880 do {
881 if (cl->level > level)
882 level = cl->level;
883 } while ((cl = cl->sibling) != this->children);
884 }
885 this->level = level + 1;
886 } while ((this = this->tparent) != NULL);
887}
888
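/* Added worked example for the quantum formula below: the per-class quantum
 * is cl->weight * cl->allot * nclasses / sum_of_weights, so with two classes
 * in a band, allot 1514 and weights 2 and 1, the classes get per-round quanta
 * of 2018 and 1009 bytes; the band still hands out about nclasses * allot
 * bytes per round, split in proportion to weight. Note that weight defaults
 * to the class rate in bytes/sec (see the cbq_set_wrr callers), so
 * weight * allot * nclasses can overflow 32-bit arithmetic already at
 * moderate rates, which is what the BUGGGG warning is about.
 */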
889static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
890{
891 struct cbq_class *cl;
892 unsigned int h;
893
894 if (q->quanta[prio] == 0)
895 return;
896
897 for (h = 0; h < q->clhash.hashsize; h++) {
898 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
899 /* BUGGGG... Beware! This expression suffers from
900 * arithmetic overflow!
901 */
902 if (cl->priority == prio) {
903 cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
904 q->quanta[prio];
905 }
906 if (cl->quantum <= 0 ||
907 cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
908 pr_warn("CBQ: class %08x has bad quantum==%ld, repaired.\n",
909 cl->common.classid, cl->quantum);
910 cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
911 }
912 }
913 }
914}
915
916static void cbq_sync_defmap(struct cbq_class *cl)
917{
918 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
919 struct cbq_class *split = cl->split;
920 unsigned int h;
921 int i;
922
923 if (split == NULL)
924 return;
925
926 for (i = 0; i <= TC_PRIO_MAX; i++) {
927 if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
928 split->defaults[i] = NULL;
929 }
930
931 for (i = 0; i <= TC_PRIO_MAX; i++) {
932 int level = split->level;
933
934 if (split->defaults[i])
935 continue;
936
937 for (h = 0; h < q->clhash.hashsize; h++) {
938 struct cbq_class *c;
939
940 hlist_for_each_entry(c, &q->clhash.hash[h],
941 common.hnode) {
942 if (c->split == split && c->level < level &&
943 c->defmap & (1<<i)) {
944 split->defaults[i] = c;
945 level = c->level;
946 }
947 }
948 }
949 }
950}
951
952static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
953{
954 struct cbq_class *split = NULL;
955
956 if (splitid == 0) {
957 split = cl->split;
958 if (!split)
959 return;
960 splitid = split->common.classid;
961 }
962
963 if (split == NULL || split->common.classid != splitid) {
964 for (split = cl->tparent; split; split = split->tparent)
965 if (split->common.classid == splitid)
966 break;
967 }
968
969 if (split == NULL)
970 return;
971
972 if (cl->split != split) {
973 cl->defmap = 0;
974 cbq_sync_defmap(cl);
975 cl->split = split;
976 cl->defmap = def & mask;
977 } else
978 cl->defmap = (cl->defmap & ~mask) | (def & mask);
979
980 cbq_sync_defmap(cl);
981}
982
983static void cbq_unlink_class(struct cbq_class *this)
984{
985 struct cbq_class *cl, **clp;
986 struct cbq_sched_data *q = qdisc_priv(this->qdisc);
987
988 qdisc_class_hash_remove(&q->clhash, &this->common);
989
990 if (this->tparent) {
991 clp = &this->sibling;
992 cl = *clp;
993 do {
994 if (cl == this) {
995 *clp = cl->sibling;
996 break;
997 }
998 clp = &cl->sibling;
999 } while ((cl = *clp) != this->sibling);
1000
1001 if (this->tparent->children == this) {
1002 this->tparent->children = this->sibling;
1003 if (this->sibling == this)
1004 this->tparent->children = NULL;
1005 }
1006 } else {
1007 WARN_ON(this->sibling != this);
1008 }
1009}
1010
1011static void cbq_link_class(struct cbq_class *this)
1012{
1013 struct cbq_sched_data *q = qdisc_priv(this->qdisc);
1014 struct cbq_class *parent = this->tparent;
1015
1016 this->sibling = this;
1017 qdisc_class_hash_insert(&q->clhash, &this->common);
1018
1019 if (parent == NULL)
1020 return;
1021
1022 if (parent->children == NULL) {
1023 parent->children = this;
1024 } else {
1025 this->sibling = parent->children->sibling;
1026 parent->children->sibling = this;
1027 }
1028}
1029
1030static void
1031cbq_reset(struct Qdisc *sch)
1032{
1033 struct cbq_sched_data *q = qdisc_priv(sch);
1034 struct cbq_class *cl;
1035 int prio;
1036 unsigned int h;
1037
1038 q->activemask = 0;
1039 q->pmask = 0;
1040 q->tx_class = NULL;
1041 q->tx_borrowed = NULL;
1042 qdisc_watchdog_cancel(&q->watchdog);
1043 hrtimer_cancel(&q->delay_timer);
1044 q->toplevel = TC_CBQ_MAXLEVEL;
1045 q->now = psched_get_time();
1046
1047 for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
1048 q->active[prio] = NULL;
1049
1050 for (h = 0; h < q->clhash.hashsize; h++) {
1051 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
1052 qdisc_reset(cl->q);
1053
1054 cl->next_alive = NULL;
1055 cl->undertime = PSCHED_PASTPERFECT;
1056 cl->avgidle = cl->maxidle;
1057 cl->deficit = cl->quantum;
1058 cl->cpriority = cl->priority;
1059 }
1060 }
1061 sch->q.qlen = 0;
1062}
1063
1064
1065static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
1066{
1067 if (lss->change & TCF_CBQ_LSS_FLAGS) {
1068 cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
1069 cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
1070 }
1071 if (lss->change & TCF_CBQ_LSS_EWMA)
1072 cl->ewma_log = lss->ewma_log;
1073 if (lss->change & TCF_CBQ_LSS_AVPKT)
1074 cl->avpkt = lss->avpkt;
1075 if (lss->change & TCF_CBQ_LSS_MINIDLE)
1076 cl->minidle = -(long)lss->minidle;
1077 if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
1078 cl->maxidle = lss->maxidle;
1079 cl->avgidle = lss->maxidle;
1080 }
1081 if (lss->change & TCF_CBQ_LSS_OFFTIME)
1082 cl->offtime = lss->offtime;
1083 return 0;
1084}
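/* Added note: the LSS flags map onto the two pointers used elsewhere:
 * TCF_CBQ_LSS_ISOLATED clears cl->share, so transmissions by this class are
 * not propagated up the link-sharing chain in cbq_update(), and
 * TCF_CBQ_LSS_BOUNDED clears cl->borrow, so cbq_under_limit() can never
 * borrow bandwidth from an ancestor. These correspond to the "isolated" and
 * "bounded" class flags in tc(8).
 */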
1085
1086static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
1087{
1088 q->nclasses[cl->priority]--;
1089 q->quanta[cl->priority] -= cl->weight;
1090 cbq_normalize_quanta(q, cl->priority);
1091}
1092
1093static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
1094{
1095 q->nclasses[cl->priority]++;
1096 q->quanta[cl->priority] += cl->weight;
1097 cbq_normalize_quanta(q, cl->priority);
1098}
1099
1100static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
1101{
1102 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
1103
1104 if (wrr->allot)
1105 cl->allot = wrr->allot;
1106 if (wrr->weight)
1107 cl->weight = wrr->weight;
1108 if (wrr->priority) {
1109 cl->priority = wrr->priority - 1;
1110 cl->cpriority = cl->priority;
1111 if (cl->priority >= cl->priority2)
1112 cl->priority2 = TC_CBQ_MAXPRIO - 1;
1113 }
1114
1115 cbq_addprio(q, cl);
1116 return 0;
1117}
1118
1119static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
1120{
1121 cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
1122 return 0;
1123}
1124
1125static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
1126 [TCA_CBQ_LSSOPT] = { .len = sizeof(struct tc_cbq_lssopt) },
1127 [TCA_CBQ_WRROPT] = { .len = sizeof(struct tc_cbq_wrropt) },
1128 [TCA_CBQ_FOPT] = { .len = sizeof(struct tc_cbq_fopt) },
1129 [TCA_CBQ_OVL_STRATEGY] = { .len = sizeof(struct tc_cbq_ovl) },
1130 [TCA_CBQ_RATE] = { .len = sizeof(struct tc_ratespec) },
1131 [TCA_CBQ_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
1132 [TCA_CBQ_POLICE] = { .len = sizeof(struct tc_cbq_police) },
1133};
1134
1135static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
1136 struct netlink_ext_ack *extack)
1137{
1138 struct cbq_sched_data *q = qdisc_priv(sch);
1139 struct nlattr *tb[TCA_CBQ_MAX + 1];
1140 struct tc_ratespec *r;
1141 int err;
1142
1143 qdisc_watchdog_init(&q->watchdog, sch);
1144 hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
1145 q->delay_timer.function = cbq_undelay;
1146
1147 if (!opt) {
1148 NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
1149 return -EINVAL;
1150 }
1151
1152 err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, extack);
1153 if (err < 0)
1154 return err;
1155
1156 if (!tb[TCA_CBQ_RTAB] || !tb[TCA_CBQ_RATE]) {
1157 NL_SET_ERR_MSG(extack, "Rate specification missing or incomplete");
1158 return -EINVAL;
1159 }
1160
1161 r = nla_data(tb[TCA_CBQ_RATE]);
1162
1163 q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB], extack);
1164 if (!q->link.R_tab)
1165 return -EINVAL;
1166
1167 err = tcf_block_get(&q->link.block, &q->link.filter_list, sch, extack);
1168 if (err)
1169 goto put_rtab;
1170
1171 err = qdisc_class_hash_init(&q->clhash);
1172 if (err < 0)
1173 goto put_block;
1174
1175 q->link.sibling = &q->link;
1176 q->link.common.classid = sch->handle;
1177 q->link.qdisc = sch;
1178 q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1179 sch->handle, NULL);
1180 if (!q->link.q)
1181 q->link.q = &noop_qdisc;
1182 else
1183 qdisc_hash_add(q->link.q, true);
1184
1185 q->link.priority = TC_CBQ_MAXPRIO - 1;
1186 q->link.priority2 = TC_CBQ_MAXPRIO - 1;
1187 q->link.cpriority = TC_CBQ_MAXPRIO - 1;
1188 q->link.allot = psched_mtu(qdisc_dev(sch));
1189 q->link.quantum = q->link.allot;
1190 q->link.weight = q->link.R_tab->rate.rate;
1191
1192 q->link.ewma_log = TC_CBQ_DEF_EWMA;
1193 q->link.avpkt = q->link.allot/2;
1194 q->link.minidle = -0x7FFFFFFF;
1195
1196 q->toplevel = TC_CBQ_MAXLEVEL;
1197 q->now = psched_get_time();
1198
1199 cbq_link_class(&q->link);
1200
1201 if (tb[TCA_CBQ_LSSOPT])
1202 cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));
1203
1204 cbq_addprio(q, &q->link);
1205 return 0;
1206
1207put_block:
1208 tcf_block_put(q->link.block);
1209
1210put_rtab:
1211 qdisc_put_rtab(q->link.R_tab);
1212 return err;
1213}
1214
1215static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
1216{
1217 unsigned char *b = skb_tail_pointer(skb);
1218
1219 if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
1220 goto nla_put_failure;
1221 return skb->len;
1222
1223nla_put_failure:
1224 nlmsg_trim(skb, b);
1225 return -1;
1226}
1227
1228static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
1229{
1230 unsigned char *b = skb_tail_pointer(skb);
1231 struct tc_cbq_lssopt opt;
1232
1233 opt.flags = 0;
1234 if (cl->borrow == NULL)
1235 opt.flags |= TCF_CBQ_LSS_BOUNDED;
1236 if (cl->share == NULL)
1237 opt.flags |= TCF_CBQ_LSS_ISOLATED;
1238 opt.ewma_log = cl->ewma_log;
1239 opt.level = cl->level;
1240 opt.avpkt = cl->avpkt;
1241 opt.maxidle = cl->maxidle;
1242 opt.minidle = (u32)(-cl->minidle);
1243 opt.offtime = cl->offtime;
1244 opt.change = ~0;
1245 if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
1246 goto nla_put_failure;
1247 return skb->len;
1248
1249nla_put_failure:
1250 nlmsg_trim(skb, b);
1251 return -1;
1252}
1253
1254static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
1255{
1256 unsigned char *b = skb_tail_pointer(skb);
1257 struct tc_cbq_wrropt opt;
1258
1259 memset(&opt, 0, sizeof(opt));
1260 opt.flags = 0;
1261 opt.allot = cl->allot;
1262 opt.priority = cl->priority + 1;
1263 opt.cpriority = cl->cpriority + 1;
1264 opt.weight = cl->weight;
1265 if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
1266 goto nla_put_failure;
1267 return skb->len;
1268
1269nla_put_failure:
1270 nlmsg_trim(skb, b);
1271 return -1;
1272}
1273
1274static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
1275{
1276 unsigned char *b = skb_tail_pointer(skb);
1277 struct tc_cbq_fopt opt;
1278
1279 if (cl->split || cl->defmap) {
1280 opt.split = cl->split ? cl->split->common.classid : 0;
1281 opt.defmap = cl->defmap;
1282 opt.defchange = ~0;
1283 if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
1284 goto nla_put_failure;
1285 }
1286 return skb->len;
1287
1288nla_put_failure:
1289 nlmsg_trim(skb, b);
1290 return -1;
1291}
1292
1293static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
1294{
1295 if (cbq_dump_lss(skb, cl) < 0 ||
1296 cbq_dump_rate(skb, cl) < 0 ||
1297 cbq_dump_wrr(skb, cl) < 0 ||
1298 cbq_dump_fopt(skb, cl) < 0)
1299 return -1;
1300 return 0;
1301}
1302
1303static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
1304{
1305 struct cbq_sched_data *q = qdisc_priv(sch);
1306 struct nlattr *nest;
1307
1308 nest = nla_nest_start(skb, TCA_OPTIONS);
1309 if (nest == NULL)
1310 goto nla_put_failure;
1311 if (cbq_dump_attr(skb, &q->link) < 0)
1312 goto nla_put_failure;
1313 return nla_nest_end(skb, nest);
1314
1315nla_put_failure:
1316 nla_nest_cancel(skb, nest);
1317 return -1;
1318}
1319
1320static int
1321cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
1322{
1323 struct cbq_sched_data *q = qdisc_priv(sch);
1324
1325 q->link.xstats.avgidle = q->link.avgidle;
1326 return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
1327}
1328
1329static int
1330cbq_dump_class(struct Qdisc *sch, unsigned long arg,
1331 struct sk_buff *skb, struct tcmsg *tcm)
1332{
1333 struct cbq_class *cl = (struct cbq_class *)arg;
1334 struct nlattr *nest;
1335
1336 if (cl->tparent)
1337 tcm->tcm_parent = cl->tparent->common.classid;
1338 else
1339 tcm->tcm_parent = TC_H_ROOT;
1340 tcm->tcm_handle = cl->common.classid;
1341 tcm->tcm_info = cl->q->handle;
1342
1343 nest = nla_nest_start(skb, TCA_OPTIONS);
1344 if (nest == NULL)
1345 goto nla_put_failure;
1346 if (cbq_dump_attr(skb, cl) < 0)
1347 goto nla_put_failure;
1348 return nla_nest_end(skb, nest);
1349
1350nla_put_failure:
1351 nla_nest_cancel(skb, nest);
1352 return -1;
1353}
1354
1355static int
1356cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
1357 struct gnet_dump *d)
1358{
1359 struct cbq_sched_data *q = qdisc_priv(sch);
1360 struct cbq_class *cl = (struct cbq_class *)arg;
1361
1362 cl->xstats.avgidle = cl->avgidle;
1363 cl->xstats.undertime = 0;
1364
1365 if (cl->undertime != PSCHED_PASTPERFECT)
1366 cl->xstats.undertime = cl->undertime - q->now;
1367
1368 if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
1369 d, NULL, &cl->bstats) < 0 ||
1370 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
1371 gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
1372 return -1;
1373
1374 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1375}
1376
1377static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1378 struct Qdisc **old, struct netlink_ext_ack *extack)
1379{
1380 struct cbq_class *cl = (struct cbq_class *)arg;
1381
1382 if (new == NULL) {
1383 new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1384 cl->common.classid, extack);
1385 if (new == NULL)
1386 return -ENOBUFS;
1387 }
1388
1389 *old = qdisc_replace(sch, new, &cl->q);
1390 return 0;
1391}
1392
1393static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
1394{
1395 struct cbq_class *cl = (struct cbq_class *)arg;
1396
1397 return cl->q;
1398}
1399
1400static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
1401{
1402 struct cbq_class *cl = (struct cbq_class *)arg;
1403
1404 cbq_deactivate_class(cl);
1405}
1406
1407static unsigned long cbq_find(struct Qdisc *sch, u32 classid)
1408{
1409 struct cbq_sched_data *q = qdisc_priv(sch);
1410
1411 return (unsigned long)cbq_class_lookup(q, classid);
1412}
1413
1414static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
1415{
1416 struct cbq_sched_data *q = qdisc_priv(sch);
1417
1418 WARN_ON(cl->filters);
1419
1420 tcf_block_put(cl->block);
1421 qdisc_destroy(cl->q);
1422 qdisc_put_rtab(cl->R_tab);
1423 gen_kill_estimator(&cl->rate_est);
1424 if (cl != &q->link)
1425 kfree(cl);
1426}
1427
1428static void cbq_destroy(struct Qdisc *sch)
1429{
1430 struct cbq_sched_data *q = qdisc_priv(sch);
1431 struct hlist_node *next;
1432 struct cbq_class *cl;
1433 unsigned int h;
1434
1435#ifdef CONFIG_NET_CLS_ACT
1436 q->rx_class = NULL;
1437#endif
1438 /*
1439 * Filters must be destroyed first because we don't destroy the
1440 * classes from root to leaves, which means that filters can still
1441 * be bound to classes which have been destroyed already. --TGR '04
1442 */
1443 for (h = 0; h < q->clhash.hashsize; h++) {
1444 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
1445 tcf_block_put(cl->block);
1446 cl->block = NULL;
1447 }
1448 }
1449 for (h = 0; h < q->clhash.hashsize; h++) {
1450 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
1451 common.hnode)
1452 cbq_destroy_class(sch, cl);
1453 }
1454 qdisc_class_hash_destroy(&q->clhash);
1455}
1456
1457static int
1458cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
1459 unsigned long *arg, struct netlink_ext_ack *extack)
1460{
1461 int err;
1462 struct cbq_sched_data *q = qdisc_priv(sch);
1463 struct cbq_class *cl = (struct cbq_class *)*arg;
1464 struct nlattr *opt = tca[TCA_OPTIONS];
1465 struct nlattr *tb[TCA_CBQ_MAX + 1];
1466 struct cbq_class *parent;
1467 struct qdisc_rate_table *rtab = NULL;
1468
1469 if (!opt) {
1470 NL_SET_ERR_MSG(extack, "Mandatory qdisc options missing");
1471 return -EINVAL;
1472 }
1473
1474 err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, extack);
1475 if (err < 0)
1476 return err;
1477
1478 if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE]) {
1479 NL_SET_ERR_MSG(extack, "Neither overlimit strategy nor policing attributes can be used for changing class params");
1480 return -EOPNOTSUPP;
1481 }
1482
1483 if (cl) {
1484 /* Check parent */
1485 if (parentid) {
1486 if (cl->tparent &&
1487 cl->tparent->common.classid != parentid) {
1488 NL_SET_ERR_MSG(extack, "Invalid parent id");
1489 return -EINVAL;
1490 }
1491 if (!cl->tparent && parentid != TC_H_ROOT) {
1492 NL_SET_ERR_MSG(extack, "Parent must be root");
1493 return -EINVAL;
1494 }
1495 }
1496
1497 if (tb[TCA_CBQ_RATE]) {
1498 rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
1499 tb[TCA_CBQ_RTAB], extack);
1500 if (rtab == NULL)
1501 return -EINVAL;
1502 }
1503
1504 if (tca[TCA_RATE]) {
1505 err = gen_replace_estimator(&cl->bstats, NULL,
1506 &cl->rate_est,
1507 NULL,
1508 qdisc_root_sleeping_running(sch),
1509 tca[TCA_RATE]);
1510 if (err) {
1511 NL_SET_ERR_MSG(extack, "Failed to replace specified rate estimator");
1512 qdisc_put_rtab(rtab);
1513 return err;
1514 }
1515 }
1516
1517 /* Change class parameters */
1518 sch_tree_lock(sch);
1519
1520 if (cl->next_alive != NULL)
1521 cbq_deactivate_class(cl);
1522
1523 if (rtab) {
1524 qdisc_put_rtab(cl->R_tab);
1525 cl->R_tab = rtab;
1526 }
1527
1528 if (tb[TCA_CBQ_LSSOPT])
1529 cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
1530
1531 if (tb[TCA_CBQ_WRROPT]) {
1532 cbq_rmprio(q, cl);
1533 cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
1534 }
1535
1536 if (tb[TCA_CBQ_FOPT])
1537 cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
1538
1539 if (cl->q->q.qlen)
1540 cbq_activate_class(cl);
1541
1542 sch_tree_unlock(sch);
1543
1544 return 0;
1545 }
1546
1547 if (parentid == TC_H_ROOT)
1548 return -EINVAL;
1549
1550 if (!tb[TCA_CBQ_WRROPT] || !tb[TCA_CBQ_RATE] || !tb[TCA_CBQ_LSSOPT]) {
1551 NL_SET_ERR_MSG(extack, "The WRR, rate and link-sharing (LSS) attributes MUST all be specified");
1552 return -EINVAL;
1553 }
1554
1555 rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB],
1556 extack);
1557 if (rtab == NULL)
1558 return -EINVAL;
1559
1560 if (classid) {
1561 err = -EINVAL;
1562 if (TC_H_MAJ(classid ^ sch->handle) ||
1563 cbq_class_lookup(q, classid)) {
1564 NL_SET_ERR_MSG(extack, "Specified classid is invalid or the class already exists");
1565 goto failure;
1566 }
1567 } else {
1568 int i;
1569 classid = TC_H_MAKE(sch->handle, 0x8000);
1570
1571 for (i = 0; i < 0x8000; i++) {
1572 if (++q->hgenerator >= 0x8000)
1573 q->hgenerator = 1;
1574 if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
1575 break;
1576 }
1577 err = -ENOSR;
1578 if (i >= 0x8000) {
1579 NL_SET_ERR_MSG(extack, "Unable to generate classid");
1580 goto failure;
1581 }
1582 classid = classid|q->hgenerator;
1583 }
1584
1585 parent = &q->link;
1586 if (parentid) {
1587 parent = cbq_class_lookup(q, parentid);
1588 err = -EINVAL;
1589 if (!parent) {
1590 NL_SET_ERR_MSG(extack, "Failed to find parentid");
1591 goto failure;
1592 }
1593 }
1594
1595 err = -ENOBUFS;
1596 cl = kzalloc(sizeof(*cl), GFP_KERNEL);
1597 if (cl == NULL)
1598 goto failure;
1599
1600 err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
1601 if (err) {
1602 kfree(cl);
1603 return err;
1604 }
1605
1606 if (tca[TCA_RATE]) {
1607 err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
1608 NULL,
1609 qdisc_root_sleeping_running(sch),
1610 tca[TCA_RATE]);
1611 if (err) {
1612 NL_SET_ERR_MSG(extack, "Couldn't create new estimator");
1613 tcf_block_put(cl->block);
1614 kfree(cl);
1615 goto failure;
1616 }
1617 }
1618
1619 cl->R_tab = rtab;
1620 rtab = NULL;
1621 cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
1622 NULL);
1623 if (!cl->q)
1624 cl->q = &noop_qdisc;
1625 else
1626 qdisc_hash_add(cl->q, true);
1627
1628 cl->common.classid = classid;
1629 cl->tparent = parent;
1630 cl->qdisc = sch;
1631 cl->allot = parent->allot;
1632 cl->quantum = cl->allot;
1633 cl->weight = cl->R_tab->rate.rate;
1634
1635 sch_tree_lock(sch);
1636 cbq_link_class(cl);
1637 cl->borrow = cl->tparent;
1638 if (cl->tparent != &q->link)
1639 cl->share = cl->tparent;
1640 cbq_adjust_levels(parent);
1641 cl->minidle = -0x7FFFFFFF;
1642 cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
1643 cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
1644 if (cl->ewma_log == 0)
1645 cl->ewma_log = q->link.ewma_log;
1646 if (cl->maxidle == 0)
1647 cl->maxidle = q->link.maxidle;
1648 if (cl->avpkt == 0)
1649 cl->avpkt = q->link.avpkt;
1650 if (tb[TCA_CBQ_FOPT])
1651 cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
1652 sch_tree_unlock(sch);
1653
1654 qdisc_class_hash_grow(sch, &q->clhash);
1655
1656 *arg = (unsigned long)cl;
1657 return 0;
1658
1659failure:
1660 qdisc_put_rtab(rtab);
1661 return err;
1662}
1663
1664static int cbq_delete(struct Qdisc *sch, unsigned long arg)
1665{
1666 struct cbq_sched_data *q = qdisc_priv(sch);
1667 struct cbq_class *cl = (struct cbq_class *)arg;
1668 unsigned int qlen, backlog;
1669
1670 if (cl->filters || cl->children || cl == &q->link)
1671 return -EBUSY;
1672
1673 sch_tree_lock(sch);
1674
1675 qlen = cl->q->q.qlen;
1676 backlog = cl->q->qstats.backlog;
1677 qdisc_reset(cl->q);
1678 qdisc_tree_reduce_backlog(cl->q, qlen, backlog);
1679
1680 if (cl->next_alive)
1681 cbq_deactivate_class(cl);
1682
1683 if (q->tx_borrowed == cl)
1684 q->tx_borrowed = q->tx_class;
1685 if (q->tx_class == cl) {
1686 q->tx_class = NULL;
1687 q->tx_borrowed = NULL;
1688 }
1689#ifdef CONFIG_NET_CLS_ACT
1690 if (q->rx_class == cl)
1691 q->rx_class = NULL;
1692#endif
1693
1694 cbq_unlink_class(cl);
1695 cbq_adjust_levels(cl->tparent);
1696 cl->defmap = 0;
1697 cbq_sync_defmap(cl);
1698
1699 cbq_rmprio(q, cl);
1700 sch_tree_unlock(sch);
1701
1702 cbq_destroy_class(sch, cl);
1703 return 0;
1704}
1705
1706static struct tcf_block *cbq_tcf_block(struct Qdisc *sch, unsigned long arg,
1707 struct netlink_ext_ack *extack)
1708{
1709 struct cbq_sched_data *q = qdisc_priv(sch);
1710 struct cbq_class *cl = (struct cbq_class *)arg;
1711
1712 if (cl == NULL)
1713 cl = &q->link;
1714
1715 return cl->block;
1716}
1717
1718static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
1719 u32 classid)
1720{
1721 struct cbq_sched_data *q = qdisc_priv(sch);
1722 struct cbq_class *p = (struct cbq_class *)parent;
1723 struct cbq_class *cl = cbq_class_lookup(q, classid);
1724
1725 if (cl) {
1726 if (p && p->level <= cl->level)
1727 return 0;
1728 cl->filters++;
1729 return (unsigned long)cl;
1730 }
1731 return 0;
1732}
1733
1734static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
1735{
1736 struct cbq_class *cl = (struct cbq_class *)arg;
1737
1738 cl->filters--;
1739}
1740
1741static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1742{
1743 struct cbq_sched_data *q = qdisc_priv(sch);
1744 struct cbq_class *cl;
1745 unsigned int h;
1746
1747 if (arg->stop)
1748 return;
1749
1750 for (h = 0; h < q->clhash.hashsize; h++) {
1751 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
1752 if (arg->count < arg->skip) {
1753 arg->count++;
1754 continue;
1755 }
1756 if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
1757 arg->stop = 1;
1758 return;
1759 }
1760 arg->count++;
1761 }
1762 }
1763}
1764
1765static const struct Qdisc_class_ops cbq_class_ops = {
1766 .graft = cbq_graft,
1767 .leaf = cbq_leaf,
1768 .qlen_notify = cbq_qlen_notify,
1769 .find = cbq_find,
1770 .change = cbq_change_class,
1771 .delete = cbq_delete,
1772 .walk = cbq_walk,
1773 .tcf_block = cbq_tcf_block,
1774 .bind_tcf = cbq_bind_filter,
1775 .unbind_tcf = cbq_unbind_filter,
1776 .dump = cbq_dump_class,
1777 .dump_stats = cbq_dump_class_stats,
1778};
1779
1780static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
1781 .next = NULL,
1782 .cl_ops = &cbq_class_ops,
1783 .id = "cbq",
1784 .priv_size = sizeof(struct cbq_sched_data),
1785 .enqueue = cbq_enqueue,
1786 .dequeue = cbq_dequeue,
1787 .peek = qdisc_peek_dequeued,
1788 .init = cbq_init,
1789 .reset = cbq_reset,
1790 .destroy = cbq_destroy,
1791 .change = NULL,
1792 .dump = cbq_dump,
1793 .dump_stats = cbq_dump_stats,
1794 .owner = THIS_MODULE,
1795};
1796
1797static int __init cbq_module_init(void)
1798{
1799 return register_qdisc(&cbq_qdisc_ops);
1800}
1801static void __exit cbq_module_exit(void)
1802{
1803 unregister_qdisc(&cbq_qdisc_ops);
1804}
1805module_init(cbq_module_init)
1806module_exit(cbq_module_exit)
1807MODULE_LICENSE("GPL");