// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_tbf.c	Token Bucket Filter queue.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
 *						 original idea by Martin Devera
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/gso.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>


/* Simple Token Bucket Filter.
	=======================================

	SOURCE.
	-------

	None.

	Description.
	------------

	A data flow obeys TBF with rate R and depth B, if for any
	time interval t_i...t_f the number of transmitted bits
	does not exceed B + R*(t_f-t_i).

	Packetized version of this definition:
	The sequence of packets of sizes s_i served at moments t_i
	obeys TBF, if for any i<=k:

	s_i+....+s_k <= B + R*(t_k - t_i)

	Algorithm.
	----------

	Let N(t_i) be B/R initially and N(t) grow continuously with time as:

	N(t+delta) = min{B/R, N(t) + delta}

	If the first packet in queue has length S, it may be
	transmitted only at the time t_* when S/R <= N(t_*),
	and in this case N(t) jumps:

	N(t_* + 0) = N(t_* - 0) - S/R.



	Actually, QoS requires two TBFs to be applied to a data stream.
	One of them controls the steady-state burst size, while the other,
	with rate P (peak rate) and depth M (equal to the link MTU),
	limits bursts on a smaller time scale.

	It is easy to see that P>R and B>M. If P is infinite, this double
	TBF is equivalent to a single one.

	When TBF works in reshaping mode, latency is estimated as:

	lat = max ((L-B)/R, (L-M)/P)


	NOTES.
	------

	If TBF throttles, it starts a watchdog timer, which will wake it up
	when it is ready to transmit.
	Note that the minimal timer resolution is 1/HZ.
	If no new packets arrive during this period,
	or if the device is not awakened by EOI for some previous packet,
	TBF can stop its activity for 1/HZ.


	This means that with depth B, the maximal rate is

	R_crit = B*HZ

	E.g. for 10Mbit ethernet and HZ=100 the minimal allowed B is ~10Kbytes.

	Note that the peak-rate TBF is much stricter: with MTU 1500
	P_crit = 150Kbytes/sec. So, if you need greater peak
	rates, use a platform with HZ=1000, such as Alpha :-)

	With classful TBF, limit is just kept for backwards compatibility.
	It is passed to the default bfifo qdisc - if the inner qdisc is
	changed the limit is not effective anymore.
*/
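
/* The algorithm above, as a minimal standalone sketch (illustrative only,
 * not part of the kernel build; all names below are hypothetical, not
 * kernel API).  Credit is kept in nanoseconds of transmit time, as the
 * real code does, so "depth B" becomes a buffer of B/R nanoseconds:
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	struct bucket {
 *		int64_t  tokens;	// current credit, ns of transmit time
 *		int64_t  buffer;	// depth B expressed as B/R, in ns
 *		uint64_t rate;		// R, bytes per second
 *		int64_t  t_c;		// last update timestamp, ns
 *	};
 *
 *	// Length to time: how many ns of credit a len-byte packet costs.
 *	static int64_t l2t_ns(const struct bucket *b, uint32_t len)
 *	{
 *		return (int64_t)(len * 1000000000ULL / b->rate);
 *	}
 *
 *	// Refill from elapsed time, clamp at the depth, pay for the packet.
 *	// Mirrors the toks arithmetic in tbf_dequeue() below.
 *	static bool bucket_send(struct bucket *b, uint32_t len, int64_t now)
 *	{
 *		int64_t toks = now - b->t_c;
 *
 *		if (toks > b->buffer)
 *			toks = b->buffer;
 *		toks += b->tokens;
 *		if (toks > b->buffer)
 *			toks = b->buffer;
 *		toks -= l2t_ns(b, len);
 *		if (toks < 0)
 *			return false;	// throttled; retry after -toks ns
 *		b->t_c = now;
 *		b->tokens = toks;
 *		return true;
 *	}
 */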

struct tbf_sched_data {
/* Parameters */
	u32		limit;		/* Maximal length of backlog: bytes */
	u32		max_size;
	s64		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
	s64		mtu;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;

/* Variables */
	s64		tokens;		/* Current number of B tokens */
	s64		ptokens;	/* Current number of P tokens */
	s64		t_c;		/* Time check-point */
	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
	struct qdisc_watchdog watchdog;	/* Watchdog timer */
};


/* Time to Length: convert a time in ns to a length in bytes,
 * to determine how many bytes can be sent in the given time.
 */
static u64 psched_ns_t2l(const struct psched_ratecfg *r,
			 u64 time_in_ns)
{
	/* The formula is:
	 * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC
	 */
	u64 len = time_in_ns * r->rate_bytes_ps;

	do_div(len, NSEC_PER_SEC);

	if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) {
		do_div(len, 53);
		len = len * 48;
	}

	if (len > r->overhead)
		len -= r->overhead;
	else
		len = 0;

	return len;
}
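
/* Worked example (illustrative numbers): with r->rate_bytes_ps = 125000000
 * (1 Gbit/s) and time_in_ns = 1000000 (1 ms), len = 125000 bytes.  On an
 * ATM link layer the same interval carries only whole 53-byte cells with
 * 48 payload bytes each: 125000 / 53 = 2358 cells, so 2358 * 48 = 113184
 * usable bytes, before the configured per-packet overhead is subtracted.
 */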

static void tbf_offload_change(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_tbf_qopt_offload qopt;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	qopt.command = TC_TBF_REPLACE;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	qopt.replace_params.rate = q->rate;
	qopt.replace_params.max_size = q->max_size;
	qopt.replace_params.qstats = &sch->qstats;

	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TBF, &qopt);
}

static void tbf_offload_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_tbf_qopt_offload qopt;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	qopt.command = TC_TBF_DESTROY;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TBF, &qopt);
}

static int tbf_offload_dump(struct Qdisc *sch)
{
	struct tc_tbf_qopt_offload qopt;

	qopt.command = TC_TBF_STATS;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	qopt.stats.bstats = &sch->bstats;
	qopt.stats.qstats = &sch->qstats;

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_TBF, &qopt);
}

static void tbf_offload_graft(struct Qdisc *sch, struct Qdisc *new,
			      struct Qdisc *old, struct netlink_ext_ack *extack)
{
	struct tc_tbf_qopt_offload graft_offload = {
		.handle		= sch->handle,
		.parent		= sch->parent,
		.child_handle	= new->handle,
		.command	= TC_TBF_GRAFT,
	};

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, old,
				   TC_SETUP_QDISC_TBF, &graft_offload, extack);
}

/* GSO packet is too big, segment it so that tbf can transmit
 * each segment in time
 */
static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *segs, *nskb;
	netdev_features_t features = netif_skb_features(skb);
	unsigned int len = 0, prev_len = qdisc_pkt_len(skb), seg_len;
	int ret, nb;

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs))
		return qdisc_drop(skb, sch, to_free);

	nb = 0;
	skb_list_walk_safe(segs, segs, nskb) {
		skb_mark_not_on_list(segs);
		seg_len = segs->len;
		qdisc_skb_cb(segs)->pkt_len = seg_len;
		ret = qdisc_enqueue(segs, q->qdisc, to_free);
		if (ret != NET_XMIT_SUCCESS) {
			if (net_xmit_drop_count(ret))
				qdisc_qstats_drop(sch);
		} else {
			nb++;
			len += seg_len;
		}
	}
	sch->q.qlen += nb;
	sch->qstats.backlog += len;
	if (nb > 0) {
		qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
		consume_skb(skb);
		return NET_XMIT_SUCCESS;
	}

	kfree_skb(skb);
	return NET_XMIT_DROP;
}
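
/* Accounting in tbf_segment(), worked through (illustrative numbers): a
 * 45000-byte GSO skb was billed to the ancestors as one packet of 45000
 * bytes.  If it splits into nb = 30 segments of 1520 bytes each (headers
 * are replicated per segment), len = 45600, and
 * qdisc_tree_reduce_backlog(sch, 1 - 30, 45000 - 45600) tells the
 * ancestors to account for 29 more packets and 600 more bytes.
 */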

static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	unsigned int len = qdisc_pkt_len(skb);
	int ret;
	if (qdisc_pkt_len(skb) > q->max_size) {
		if (skb_is_gso(skb) &&
		    skb_gso_validate_mac_len(skb, q->max_size))
			return tbf_segment(skb, sch, to_free);
		return qdisc_drop(skb, sch, to_free);
	}
	ret = qdisc_enqueue(skb, q->qdisc, to_free);
	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret))
			qdisc_qstats_drop(sch);
		return ret;
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static bool tbf_peak_present(const struct tbf_sched_data *q)
{
	return q->peak.rate_bytes_ps;
}

static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		s64 now;
		s64 toks;
		s64 ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = ktime_get_ns();
		toks = min_t(s64, now - q->t_c, q->buffer);

		if (tbf_peak_present(q)) {
			ptoks = toks + q->ptokens;
			if (ptoks > q->mtu)
				ptoks = q->mtu;
			ptoks -= (s64) psched_l2t_ns(&q->peak, len);
		}
		toks += q->tokens;
		if (toks > q->buffer)
			toks = q->buffer;
		toks -= (s64) psched_l2t_ns(&q->rate, len);

		if ((toks|ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   now + max_t(long, -toks, -ptoks));

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
		   but it is wrong in principle.
		   We MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		qdisc_qstats_overlimit(sch);
	}
	return NULL;
}
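
/* Dequeue arithmetic, worked through (illustrative numbers): at rate
 * R = 125000 bytes/s, a 1500-byte packet costs psched_l2t_ns() = 12 ms
 * of credit.  If only 8 ms is available, toks ends up at -4 ms, the
 * packet stays queued, and the watchdog is scheduled 4 ms ahead (max_t
 * picks the larger of the rate and peak-rate deficits), by which time
 * enough credit has accrued.
 */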

static void tbf_reset(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	q->t_c = ktime_get_ns();
	q->tokens = q->buffer;
	q->ptokens = q->mtu;
	qdisc_watchdog_cancel(&q->watchdog);
}

static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
	[TCA_TBF_PARMS]	= { .len = sizeof(struct tc_tbf_qopt) },
	[TCA_TBF_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_PTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_TBF_RATE64]	= { .type = NLA_U64 },
	[TCA_TBF_PRATE64]	= { .type = NLA_U64 },
	[TCA_TBF_BURST] = { .type = NLA_U32 },
	[TCA_TBF_PBURST] = { .type = NLA_U32 },
};
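
/* How these attributes are typically produced (illustrative tc usage):
 *
 *	tc qdisc add dev eth0 root tbf rate 1mbit burst 10kb latency 50ms
 *
 * fills TCA_TBF_PARMS (rate, a buffer derived from burst, a limit derived
 * from latency).  Rates of 2^32 bytes/s or more arrive separately in
 * TCA_TBF_RATE64/TCA_TBF_PRATE64, and newer tc versions may also pass the
 * raw burst sizes via TCA_TBF_BURST/TCA_TBF_PBURST.
 */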

static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	int err;
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_TBF_MAX + 1];
	struct tc_tbf_qopt *qopt;
	struct Qdisc *child = NULL;
	struct Qdisc *old = NULL;
	struct psched_ratecfg rate;
	struct psched_ratecfg peak;
	u64 max_size;
	s64 buffer, mtu;
	u64 rate64 = 0, prate64 = 0;

	err = nla_parse_nested_deprecated(tb, TCA_TBF_MAX, opt, tbf_policy,
					  NULL);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_TBF_PARMS] == NULL)
		goto done;

	qopt = nla_data(tb[TCA_TBF_PARMS]);
	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
					      tb[TCA_TBF_RTAB],
					      NULL));

	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
					      tb[TCA_TBF_PTAB],
					      NULL));

	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);

	if (tb[TCA_TBF_RATE64])
		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);

	if (tb[TCA_TBF_BURST]) {
		max_size = nla_get_u32(tb[TCA_TBF_BURST]);
		buffer = psched_l2t_ns(&rate, max_size);
	} else {
		max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
	}

	if (qopt->peakrate.rate) {
		if (tb[TCA_TBF_PRATE64])
			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
		psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
		if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
			pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equal to rate %llu!\n",
					    peak.rate_bytes_ps, rate.rate_bytes_ps);
			err = -EINVAL;
			goto done;
		}

		if (tb[TCA_TBF_PBURST]) {
			u32 pburst = nla_get_u32(tb[TCA_TBF_PBURST]);
			max_size = min_t(u32, max_size, pburst);
			mtu = psched_l2t_ns(&peak, pburst);
		} else {
			max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
		}
	} else {
		memset(&peak, 0, sizeof(peak));
	}

	if (max_size < psched_mtu(qdisc_dev(sch)))
		pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u)!\n",
				    max_size, qdisc_dev(sch)->name,
				    psched_mtu(qdisc_dev(sch)));

	if (!max_size) {
		err = -EINVAL;
		goto done;
	}

	if (q->qdisc != &noop_qdisc) {
		err = fifo_set_limit(q->qdisc, qopt->limit);
		if (err)
			goto done;
	} else if (qopt->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit,
					 extack);
		if (IS_ERR(child)) {
			err = PTR_ERR(child);
			goto done;
		}

		/* child is fifo, no need to check for noop_qdisc */
		qdisc_hash_add(child, true);
	}

	sch_tree_lock(sch);
	if (child) {
		qdisc_tree_flush_backlog(q->qdisc);
		old = q->qdisc;
		q->qdisc = child;
	}
	q->limit = qopt->limit;
	if (tb[TCA_TBF_PBURST])
		q->mtu = mtu;
	else
		q->mtu = PSCHED_TICKS2NS(qopt->mtu);
	q->max_size = max_size;
	if (tb[TCA_TBF_BURST])
		q->buffer = buffer;
	else
		q->buffer = PSCHED_TICKS2NS(qopt->buffer);
	q->tokens = q->buffer;
	q->ptokens = q->mtu;

	memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
	memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));

	sch_tree_unlock(sch);
	qdisc_put(old);
	err = 0;

	tbf_offload_change(sch);
done:
	return err;
}
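
/* Burst/buffer duality in tbf_change(), worked through (illustrative
 * numbers): with rate = 125000 bytes/s, TCA_TBF_BURST = 12500 bytes gives
 * buffer = psched_l2t_ns(&rate, 12500) = 100000000 ns (100 ms of tokens);
 * conversely a 100 ms buffer maps back to max_size =
 * psched_ns_t2l(&rate, buffer) = 12500 bytes.  The two attributes
 * describe the same bucket depth in different units.
 */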

static int tbf_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_init(&q->watchdog, sch);
	q->qdisc = &noop_qdisc;

	if (!opt)
		return -EINVAL;

	q->t_c = ktime_get_ns();

	return tbf_change(sch, opt, extack);
}

static void tbf_destroy(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	tbf_offload_destroy(sch);
	qdisc_put(q->qdisc);
}

static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_tbf_qopt opt;
	int err;

	err = tbf_offload_dump(sch);
	if (err)
		return err;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	opt.limit = q->limit;
	psched_ratecfg_getrate(&opt.rate, &q->rate);
	if (tbf_peak_present(q))
		psched_ratecfg_getrate(&opt.peakrate, &q->peak);
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	opt.mtu = PSCHED_NS2TICKS(q->mtu);
	opt.buffer = PSCHED_NS2TICKS(q->buffer);
	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
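	/* 2^32 bytes/s is roughly 34 Gbit/s; rates at or above that do not
	 * fit the legacy 32-bit fields and are exported through the 64-bit
	 * attributes below.
	 */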
	if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;
	if (tbf_peak_present(q) &&
	    q->peak.rate_bytes_ps >= (1ULL << 32) &&
	    nla_put_u64_64bit(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps,
			      TCA_TBF_PAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);

	tbf_offload_graft(sch, new, *old, extack);
	return 0;
}

static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct tbf_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long tbf_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		tc_qdisc_stats_dump(sch, 1, walker);
	}
}

static const struct Qdisc_class_ops tbf_class_ops = {
	.graft		= tbf_graft,
	.leaf		= tbf_leaf,
	.find		= tbf_find,
	.walk		= tbf_walk,
	.dump		= tbf_dump_class,
};

static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
	.next		= NULL,
	.cl_ops		= &tbf_class_ops,
	.id		= "tbf",
	.priv_size	= sizeof(struct tbf_sched_data),
	.enqueue	= tbf_enqueue,
	.dequeue	= tbf_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= tbf_init,
	.reset		= tbf_reset,
	.destroy	= tbf_destroy,
	.change		= tbf_change,
	.dump		= tbf_dump,
	.owner		= THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("tbf");

static int __init tbf_module_init(void)
{
	return register_qdisc(&tbf_qdisc_ops);
}

static void __exit tbf_module_exit(void)
{
	unregister_qdisc(&tbf_qdisc_ops);
}
module_init(tbf_module_init)
module_exit(tbf_module_exit)
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Token Bucket Filter qdisc");