v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * net/sched/sch_tbf.c	Token Bucket Filter queue.
  4 *
  5 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  6 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
  7 *						 original idea by Martin Devera
  8 */
  9
 10#include <linux/module.h>
 11#include <linux/types.h>
 12#include <linux/kernel.h>
 13#include <linux/string.h>
 14#include <linux/errno.h>
 15#include <linux/skbuff.h>
 16#include <net/gso.h>
 17#include <net/netlink.h>
 18#include <net/sch_generic.h>
 19#include <net/pkt_cls.h>
 20#include <net/pkt_sched.h>
 21
 22
 23/*	Simple Token Bucket Filter.
 24	=======================================
 25
 26	SOURCE.
 27	-------
 28
 29	None.
 30
 31	Description.
 32	------------
 33
 34	A data flow obeys TBF with rate R and depth B, if for any
 35	time interval t_i...t_f the number of transmitted bits
 36	does not exceed B + R*(t_f-t_i).
 37
 38	Packetized version of this definition:
 39	The sequence of packets of sizes s_i served at moments t_i
 40	obeys TBF, if for any i<=k:
 41
 42	s_i+....+s_k <= B + R*(t_k - t_i)
 43
 44	Algorithm.
 45	----------
 46
 47	Let N(t_i) be B/R initially and N(t) grow continuously with time as:
 48
 49	N(t+delta) = min{B/R, N(t) + delta}
 50
 51	If the first packet in queue has length S, it may be
 52	transmitted only at the time t_* when S/R <= N(t_*),
 53	and in this case N(t) jumps:
 54
 55	N(t_* + 0) = N(t_* - 0) - S/R.
 56
 57
 58
 59	Actually, QoS requires two TBF to be applied to a data stream.
 60	One of them controls steady state burst size, another
 61	one with rate P (peak rate) and depth M (equal to link MTU)
 62	limits bursts at a smaller time scale.
 63
 64	It is easy to see that P>R, and B>M. If P is infinity, this double
 65	TBF is equivalent to a single one.
 66
 67	When TBF works in reshaping mode, latency is estimated as:
 68
 69	lat = max ((L-B)/R, (L-M)/P)
 70
 71
 72	NOTES.
 73	------
 74
 75	If TBF throttles, it starts a watchdog timer, which will wake it up
 76	when it is ready to transmit.
 77	Note that the minimal timer resolution is 1/HZ.
 78	If no new packets arrive during this period,
 79	or if the device is not awakened by an EOI for some previous packet,
 80	TBF can stop its activity for 1/HZ.
 81
 82
 83	This means, that with depth B, the maximal rate is
 84
 85	R_crit = B*HZ
 86
 87	F.e. for 10Mbit ethernet and HZ=100 the minimal allowed B is ~10Kbytes.
 88
 89	Note that the peak rate TBF is much more tough: with MTU 1500
 90	P_crit = 150Kbytes/sec. So, if you need greater peak
 91	rates, use alpha with HZ=1000 :-)
 92
 93	With classful TBF, limit is just kept for backwards compatibility.
 94	It is passed to the default bfifo qdisc - if the inner qdisc is
 95	changed the limit is not effective anymore.
 96*/
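
The admission rule described above can be condensed into a small stand-alone sketch. This is an illustration only, not kernel code: the names tb_state, tb_l2t_ns and tb_admit are invented here, and tokens are bookkept in nanoseconds of transmission time, the same convention q->tokens and q->buffer use further down.

#include <stdbool.h>
#include <stdint.h>

struct tb_state {
	uint64_t rate_Bps;	/* R, bytes per second */
	int64_t  buffer_ns;	/* bucket depth B expressed as B/R, in ns */
	int64_t  tokens_ns;	/* N(t), current credit in ns */
	uint64_t t_c_ns;	/* time of the last update */
};

/* length -> time: how long a packet of 'len' bytes occupies the link */
static int64_t tb_l2t_ns(const struct tb_state *tb, uint32_t len)
{
	return (int64_t)(len * 1000000000ULL / tb->rate_Bps);
}

/* May a packet of 'len' bytes go out at 'now_ns'?  If yes, charge the
 * bucket; if no, leave the state untouched and let the caller wait.
 */
static bool tb_admit(struct tb_state *tb, uint64_t now_ns, uint32_t len)
{
	int64_t toks = (int64_t)(now_ns - tb->t_c_ns) + tb->tokens_ns;

	if (toks > tb->buffer_ns)		/* N(t) saturates at B/R */
		toks = tb->buffer_ns;
	toks -= tb_l2t_ns(tb, len);		/* N(t_*+0) = N(t_*-0) - S/R */
	if (toks < 0)
		return false;			/* throttle for -toks ns */
	tb->t_c_ns = now_ns;
	tb->tokens_ns = toks;
	return true;
}

tbf_dequeue() below follows exactly this pattern, with a second (peak-rate) bucket checked in parallel when one is configured.
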
 97
 98struct tbf_sched_data {
 99/* Parameters */
100	u32		limit;		/* Maximal length of backlog: bytes */
101	u32		max_size;
102	s64		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
103	s64		mtu;
104	struct psched_ratecfg rate;
105	struct psched_ratecfg peak;
106
107/* Variables */
108	s64	tokens;			/* Current number of B tokens */
109	s64	ptokens;		/* Current number of P tokens */
110	s64	t_c;			/* Time check-point */
111	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
112	struct qdisc_watchdog watchdog;	/* Watchdog timer */
113};
114
115
116/* Time to Length, convert time in ns to length in bytes
117 * to determine how many bytes can be sent in a given time.
118 */
119static u64 psched_ns_t2l(const struct psched_ratecfg *r,
120			 u64 time_in_ns)
121{
122	/* The formula is :
123	 * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC
124	 */
125	u64 len = time_in_ns * r->rate_bytes_ps;
126
127	do_div(len, NSEC_PER_SEC);
128
129	if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) {
130		do_div(len, 53);
131		len = len * 48;
132	}
133
134	if (len > r->overhead)
135		len -= r->overhead;
136	else
137		len = 0;
138
139	return len;
140}
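
A quick worked example of the conversion above, with figures chosen purely for illustration: at rate_bytes_ps = 125000000 (1 Gbit/s) and time_in_ns = 1000000 (1 ms),

	len = 1000000 * 125000000 / NSEC_PER_SEC = 125000 bytes

and on a TC_LINKLAYER_ATM link, where each 53-byte cell carries 48 bytes of payload, the correction gives

	len = (125000 / 53) * 48 = 2358 * 48 = 113184 bytes

before the configured per-packet overhead, if any, is subtracted.
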
141
142static void tbf_offload_change(struct Qdisc *sch)
143{
144	struct tbf_sched_data *q = qdisc_priv(sch);
145	struct net_device *dev = qdisc_dev(sch);
146	struct tc_tbf_qopt_offload qopt;
147
148	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
149		return;
150
151	qopt.command = TC_TBF_REPLACE;
152	qopt.handle = sch->handle;
153	qopt.parent = sch->parent;
154	qopt.replace_params.rate = q->rate;
155	qopt.replace_params.max_size = q->max_size;
156	qopt.replace_params.qstats = &sch->qstats;
157
158	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TBF, &qopt);
159}
160
161static void tbf_offload_destroy(struct Qdisc *sch)
162{
163	struct net_device *dev = qdisc_dev(sch);
164	struct tc_tbf_qopt_offload qopt;
165
166	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
167		return;
168
169	qopt.command = TC_TBF_DESTROY;
170	qopt.handle = sch->handle;
171	qopt.parent = sch->parent;
172	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TBF, &qopt);
173}
174
175static int tbf_offload_dump(struct Qdisc *sch)
176{
177	struct tc_tbf_qopt_offload qopt;
178
179	qopt.command = TC_TBF_STATS;
180	qopt.handle = sch->handle;
181	qopt.parent = sch->parent;
182	qopt.stats.bstats = &sch->bstats;
183	qopt.stats.qstats = &sch->qstats;
184
185	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_TBF, &qopt);
186}
187
188static void tbf_offload_graft(struct Qdisc *sch, struct Qdisc *new,
189			      struct Qdisc *old, struct netlink_ext_ack *extack)
190{
191	struct tc_tbf_qopt_offload graft_offload = {
192		.handle		= sch->handle,
193		.parent		= sch->parent,
194		.child_handle	= new->handle,
195		.command	= TC_TBF_GRAFT,
196	};
197
198	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, old,
199				   TC_SETUP_QDISC_TBF, &graft_offload, extack);
200}
201
202/* GSO packet is too big, segment it so that tbf can transmit
203 * each segment in time
204 */
205static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
206		       struct sk_buff **to_free)
207{
208	struct tbf_sched_data *q = qdisc_priv(sch);
209	struct sk_buff *segs, *nskb;
210	netdev_features_t features = netif_skb_features(skb);
211	unsigned int len = 0, prev_len = qdisc_pkt_len(skb), seg_len;
212	int ret, nb;
213
214	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
215
216	if (IS_ERR_OR_NULL(segs))
217		return qdisc_drop(skb, sch, to_free);
218
219	nb = 0;
220	skb_list_walk_safe(segs, segs, nskb) {
221		skb_mark_not_on_list(segs);
222		seg_len = segs->len;
223		qdisc_skb_cb(segs)->pkt_len = seg_len;
224		ret = qdisc_enqueue(segs, q->qdisc, to_free);
225		if (ret != NET_XMIT_SUCCESS) {
226			if (net_xmit_drop_count(ret))
227				qdisc_qstats_drop(sch);
228		} else {
229			nb++;
230			len += seg_len;
231		}
232	}
233	sch->q.qlen += nb;
234	sch->qstats.backlog += len;
235	if (nb > 0) {
236		qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
237		consume_skb(skb);
238		return NET_XMIT_SUCCESS;
239	}
240
241	kfree_skb(skb);
242	return NET_XMIT_DROP;
243}
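
The bookkeeping at the end of tbf_segment() is easy to misread: the parent hierarchy already counted the original GSO skb once (one packet, prev_len bytes), but the child now holds nb packets totalling len bytes, so qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len) reduces the ancestors' counters by a negative amount, i.e. grows them by nb - 1 packets and len - prev_len bytes. With illustrative numbers: if a 43000-byte GSO skb is resegmented into 30 packets whose qdisc_pkt_len() values sum to 44500 bytes (segmentation replicates the headers), the ancestors end up charged 29 extra packets and 1500 extra bytes.
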
244
245static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
246		       struct sk_buff **to_free)
247{
248	struct tbf_sched_data *q = qdisc_priv(sch);
249	unsigned int len = qdisc_pkt_len(skb);
250	int ret;
251
252	if (qdisc_pkt_len(skb) > q->max_size) {
253		if (skb_is_gso(skb) &&
254		    skb_gso_validate_mac_len(skb, q->max_size))
255			return tbf_segment(skb, sch, to_free);
256		return qdisc_drop(skb, sch, to_free);
257	}
258	ret = qdisc_enqueue(skb, q->qdisc, to_free);
259	if (ret != NET_XMIT_SUCCESS) {
260		if (net_xmit_drop_count(ret))
261			qdisc_qstats_drop(sch);
262		return ret;
263	}
264
265	sch->qstats.backlog += len;
266	sch->q.qlen++;
267	return NET_XMIT_SUCCESS;
268}
269
270static bool tbf_peak_present(const struct tbf_sched_data *q)
271{
272	return q->peak.rate_bytes_ps;
273}
274
275static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
276{
277	struct tbf_sched_data *q = qdisc_priv(sch);
278	struct sk_buff *skb;
279
280	skb = q->qdisc->ops->peek(q->qdisc);
281
282	if (skb) {
283		s64 now;
284		s64 toks;
285		s64 ptoks = 0;
286		unsigned int len = qdisc_pkt_len(skb);
287
288		now = ktime_get_ns();
289		toks = min_t(s64, now - q->t_c, q->buffer);
290
291		if (tbf_peak_present(q)) {
292			ptoks = toks + q->ptokens;
293			if (ptoks > q->mtu)
294				ptoks = q->mtu;
295			ptoks -= (s64) psched_l2t_ns(&q->peak, len);
296		}
297		toks += q->tokens;
298		if (toks > q->buffer)
299			toks = q->buffer;
300		toks -= (s64) psched_l2t_ns(&q->rate, len);
301
302		if ((toks|ptoks) >= 0) {
303			skb = qdisc_dequeue_peeked(q->qdisc);
304			if (unlikely(!skb))
305				return NULL;
306
307			q->t_c = now;
308			q->tokens = toks;
309			q->ptokens = ptoks;
310			qdisc_qstats_backlog_dec(sch, skb);
311			sch->q.qlen--;
312			qdisc_bstats_update(sch, skb);
313			return skb;
314		}
315
316		qdisc_watchdog_schedule_ns(&q->watchdog,
317					   now + max_t(long, -toks, -ptoks));
318
319		/* Maybe we have a shorter packet in the queue,
320		   which can be sent now. It sounds cool,
321		   but, however, this is wrong in principle.
322		   We MUST NOT reorder packets under these circumstances.
323
324		   Really, if we split the flow into independent
325		   subflows, it would be a very good solution.
326		   This is the main idea of all FQ algorithms
327		   (cf. CSZ, HPFQ, HFSC)
328		 */
329
330		qdisc_qstats_overlimit(sch);
331	}
332	return NULL;
333}
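
Two details in tbf_dequeue() are worth spelling out. toks and ptoks are signed 64-bit credits in nanoseconds, so the test (toks | ptoks) >= 0 is a branch-free way of asking whether both are non-negative: the OR of two s64 values has its sign bit clear only if neither operand does. And when the packet cannot go out yet, the more negative of the two credits is the remaining wait, hence the watchdog is armed at now + max(-toks, -ptoks). A minimal stand-alone check of the sign trick (illustration only, nothing here is kernel API):

#include <assert.h>
#include <stdint.h>

/* For signed integers, (a | b) >= 0 is equivalent to a >= 0 && b >= 0:
 * the bitwise OR has its sign bit set iff at least one operand has.
 */
static int both_nonneg(int64_t a, int64_t b)
{
	return (a | b) >= 0;
}

int main(void)
{
	assert( both_nonneg(0, 0));
	assert( both_nonneg(5, 123456789));
	assert(!both_nonneg(-1, 123456789));
	assert(!both_nonneg(5, -42));
	assert(!both_nonneg(-1, -42));
	return 0;
}
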
334
335static void tbf_reset(struct Qdisc *sch)
336{
337	struct tbf_sched_data *q = qdisc_priv(sch);
338
339	qdisc_reset(q->qdisc);
340	q->t_c = ktime_get_ns();
341	q->tokens = q->buffer;
342	q->ptokens = q->mtu;
343	qdisc_watchdog_cancel(&q->watchdog);
344}
345
346static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
347	[TCA_TBF_PARMS]	= { .len = sizeof(struct tc_tbf_qopt) },
348	[TCA_TBF_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
349	[TCA_TBF_PTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
350	[TCA_TBF_RATE64]	= { .type = NLA_U64 },
351	[TCA_TBF_PRATE64]	= { .type = NLA_U64 },
352	[TCA_TBF_BURST] = { .type = NLA_U32 },
353	[TCA_TBF_PBURST] = { .type = NLA_U32 },
354};
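
These attributes are what tc fills in from the command line: TCA_TBF_PARMS carries the classic rate/peakrate/buffer/mtu/limit structure, TCA_TBF_RATE64 and TCA_TBF_PRATE64 extend the rates past 32 bits, and TCA_TBF_BURST/TCA_TBF_PBURST let userspace pass the burst sizes directly in bytes rather than encoded as times. As a typical, purely illustrative invocation, something like "tc qdisc add dev eth0 root tbf rate 10mbit burst 10kb latency 70ms" ends up parsed by tbf_change() below.
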
355
356static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
357		      struct netlink_ext_ack *extack)
358{
359	int err;
360	struct tbf_sched_data *q = qdisc_priv(sch);
361	struct nlattr *tb[TCA_TBF_MAX + 1];
362	struct tc_tbf_qopt *qopt;
363	struct Qdisc *child = NULL;
364	struct Qdisc *old = NULL;
365	struct psched_ratecfg rate;
366	struct psched_ratecfg peak;
367	u64 max_size;
368	s64 buffer, mtu;
369	u64 rate64 = 0, prate64 = 0;
370
371	err = nla_parse_nested_deprecated(tb, TCA_TBF_MAX, opt, tbf_policy,
372					  NULL);
373	if (err < 0)
374		return err;
375
376	err = -EINVAL;
377	if (tb[TCA_TBF_PARMS] == NULL)
378		goto done;
379
380	qopt = nla_data(tb[TCA_TBF_PARMS]);
381	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
382		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
383					      tb[TCA_TBF_RTAB],
384					      NULL));
385
386	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
387			qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
388						      tb[TCA_TBF_PTAB],
389						      NULL));
390
391	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
392	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);
393
394	if (tb[TCA_TBF_RATE64])
395		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
396	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);
397
398	if (tb[TCA_TBF_BURST]) {
399		max_size = nla_get_u32(tb[TCA_TBF_BURST]);
400		buffer = psched_l2t_ns(&rate, max_size);
401	} else {
402		max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
403	}
404
405	if (qopt->peakrate.rate) {
406		if (tb[TCA_TBF_PRATE64])
407			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
408		psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
409		if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
410			pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equals to rate %llu !\n",
411					peak.rate_bytes_ps, rate.rate_bytes_ps);
412			err = -EINVAL;
413			goto done;
414		}
415
416		if (tb[TCA_TBF_PBURST]) {
417			u32 pburst = nla_get_u32(tb[TCA_TBF_PBURST]);
418			max_size = min_t(u32, max_size, pburst);
419			mtu = psched_l2t_ns(&peak, pburst);
420		} else {
421			max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
422		}
423	} else {
424		memset(&peak, 0, sizeof(peak));
425	}
426
427	if (max_size < psched_mtu(qdisc_dev(sch)))
428		pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n",
429				    max_size, qdisc_dev(sch)->name,
430				    psched_mtu(qdisc_dev(sch)));
431
432	if (!max_size) {
433		err = -EINVAL;
434		goto done;
435	}
436
437	if (q->qdisc != &noop_qdisc) {
438		err = fifo_set_limit(q->qdisc, qopt->limit);
439		if (err)
440			goto done;
441	} else if (qopt->limit > 0) {
442		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit,
443					 extack);
444		if (IS_ERR(child)) {
445			err = PTR_ERR(child);
446			goto done;
447		}
448
449		/* child is fifo, no need to check for noop_qdisc */
450		qdisc_hash_add(child, true);
451	}
452
453	sch_tree_lock(sch);
454	if (child) {
455		qdisc_tree_flush_backlog(q->qdisc);
456		old = q->qdisc;
457		q->qdisc = child;
458	}
459	q->limit = qopt->limit;
460	if (tb[TCA_TBF_PBURST])
461		q->mtu = mtu;
462	else
463		q->mtu = PSCHED_TICKS2NS(qopt->mtu);
464	q->max_size = max_size;
465	if (tb[TCA_TBF_BURST])
466		q->buffer = buffer;
467	else
468		q->buffer = PSCHED_TICKS2NS(qopt->buffer);
469	q->tokens = q->buffer;
470	q->ptokens = q->mtu;
471
472	memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
473	memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));
474
475	sch_tree_unlock(sch);
476	qdisc_put(old);
477	err = 0;
478
479	tbf_offload_change(sch);
480done:
481	return err;
482}
483
484static int tbf_init(struct Qdisc *sch, struct nlattr *opt,
485		    struct netlink_ext_ack *extack)
486{
487	struct tbf_sched_data *q = qdisc_priv(sch);
488
489	qdisc_watchdog_init(&q->watchdog, sch);
490	q->qdisc = &noop_qdisc;
491
492	if (!opt)
493		return -EINVAL;
494
495	q->t_c = ktime_get_ns();
496
497	return tbf_change(sch, opt, extack);
498}
499
500static void tbf_destroy(struct Qdisc *sch)
501{
502	struct tbf_sched_data *q = qdisc_priv(sch);
503
504	qdisc_watchdog_cancel(&q->watchdog);
505	tbf_offload_destroy(sch);
506	qdisc_put(q->qdisc);
507}
508
509static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
510{
511	struct tbf_sched_data *q = qdisc_priv(sch);
512	struct nlattr *nest;
513	struct tc_tbf_qopt opt;
514	int err;
515
516	err = tbf_offload_dump(sch);
517	if (err)
518		return err;
519
520	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
521	if (nest == NULL)
522		goto nla_put_failure;
523
524	opt.limit = q->limit;
525	psched_ratecfg_getrate(&opt.rate, &q->rate);
526	if (tbf_peak_present(q))
527		psched_ratecfg_getrate(&opt.peakrate, &q->peak);
528	else
529		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
530	opt.mtu = PSCHED_NS2TICKS(q->mtu);
531	opt.buffer = PSCHED_NS2TICKS(q->buffer);
532	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
533		goto nla_put_failure;
534	if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
535	    nla_put_u64_64bit(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps,
536			      TCA_TBF_PAD))
537		goto nla_put_failure;
538	if (tbf_peak_present(q) &&
539	    q->peak.rate_bytes_ps >= (1ULL << 32) &&
540	    nla_put_u64_64bit(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps,
541			      TCA_TBF_PAD))
542		goto nla_put_failure;
543
544	return nla_nest_end(skb, nest);
545
546nla_put_failure:
547	nla_nest_cancel(skb, nest);
548	return -1;
549}
550
551static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
552			  struct sk_buff *skb, struct tcmsg *tcm)
553{
554	struct tbf_sched_data *q = qdisc_priv(sch);
555
556	tcm->tcm_handle |= TC_H_MIN(1);
557	tcm->tcm_info = q->qdisc->handle;
558
559	return 0;
560}
561
562static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
563		     struct Qdisc **old, struct netlink_ext_ack *extack)
564{
565	struct tbf_sched_data *q = qdisc_priv(sch);
566
567	if (new == NULL)
568		new = &noop_qdisc;
569
570	*old = qdisc_replace(sch, new, &q->qdisc);
571
572	tbf_offload_graft(sch, new, *old, extack);
573	return 0;
574}
575
576static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
577{
578	struct tbf_sched_data *q = qdisc_priv(sch);
579	return q->qdisc;
580}
581
582static unsigned long tbf_find(struct Qdisc *sch, u32 classid)
583{
584	return 1;
585}
586
587static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
588{
589	if (!walker->stop) {
590		tc_qdisc_stats_dump(sch, 1, walker);
591	}
592}
593
594static const struct Qdisc_class_ops tbf_class_ops = {
595	.graft		=	tbf_graft,
596	.leaf		=	tbf_leaf,
597	.find		=	tbf_find,
598	.walk		=	tbf_walk,
599	.dump		=	tbf_dump_class,
600};
601
602static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
603	.next		=	NULL,
604	.cl_ops		=	&tbf_class_ops,
605	.id		=	"tbf",
606	.priv_size	=	sizeof(struct tbf_sched_data),
607	.enqueue	=	tbf_enqueue,
608	.dequeue	=	tbf_dequeue,
609	.peek		=	qdisc_peek_dequeued,
610	.init		=	tbf_init,
611	.reset		=	tbf_reset,
612	.destroy	=	tbf_destroy,
613	.change		=	tbf_change,
614	.dump		=	tbf_dump,
615	.owner		=	THIS_MODULE,
616};
617MODULE_ALIAS_NET_SCH("tbf");
618
619static int __init tbf_module_init(void)
620{
621	return register_qdisc(&tbf_qdisc_ops);
622}
623
624static void __exit tbf_module_exit(void)
625{
626	unregister_qdisc(&tbf_qdisc_ops);
627}
628module_init(tbf_module_init)
629module_exit(tbf_module_exit)
630MODULE_LICENSE("GPL");
631MODULE_DESCRIPTION("Token Bucket Filter qdisc");
v5.4
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * net/sched/sch_tbf.c	Token Bucket Filter queue.
  4 *
  5 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  6 *		Dmitry Torokhov <dtor@mail.ru> - allow attaching inner qdiscs -
  7 *						 original idea by Martin Devera
  8 */
  9
 10#include <linux/module.h>
 11#include <linux/types.h>
 12#include <linux/kernel.h>
 13#include <linux/string.h>
 14#include <linux/errno.h>
 15#include <linux/skbuff.h>
 16#include <net/netlink.h>
 17#include <net/sch_generic.h>
 18#include <net/pkt_sched.h>
 19
 20
 21/*	Simple Token Bucket Filter.
 22	=======================================
 23
 24	SOURCE.
 25	-------
 26
 27	None.
 28
 29	Description.
 30	------------
 31
 32	A data flow obeys TBF with rate R and depth B, if for any
 33	time interval t_i...t_f the number of transmitted bits
 34	does not exceed B + R*(t_f-t_i).
 35
 36	Packetized version of this definition:
 37	The sequence of packets of sizes s_i served at moments t_i
 38	obeys TBF, if for any i<=k:
 39
 40	s_i+....+s_k <= B + R*(t_k - t_i)
 41
 42	Algorithm.
 43	----------
 44
 45	Let N(t_i) be B/R initially and N(t) grow continuously with time as:
 46
 47	N(t+delta) = min{B/R, N(t) + delta}
 48
 49	If the first packet in queue has length S, it may be
 50	transmitted only at the time t_* when S/R <= N(t_*),
 51	and in this case N(t) jumps:
 52
 53	N(t_* + 0) = N(t_* - 0) - S/R.
 54
 55
 56
 57	Actually, QoS requires two TBF to be applied to a data stream.
 58	One of them controls steady state burst size, another
 59	one with rate P (peak rate) and depth M (equal to link MTU)
 60	limits bursts at a smaller time scale.
 61
 62	It is easy to see that P>R, and B>M. If P is infinity, this double
 63	TBF is equivalent to a single one.
 64
 65	When TBF works in reshaping mode, latency is estimated as:
 66
 67	lat = max ((L-B)/R, (L-M)/P)
 68
 69
 70	NOTES.
 71	------
 72
 73	If TBF throttles, it starts a watchdog timer, which will wake it up
 74	when it is ready to transmit.
 75	Note that the minimal timer resolution is 1/HZ.
 76	If no new packets arrive during this period,
 77	or if the device is not awakened by an EOI for some previous packet,
 78	TBF can stop its activity for 1/HZ.
 79
 80
 81	This means, that with depth B, the maximal rate is
 82
 83	R_crit = B*HZ
 84
 85	F.e. for 10Mbit ethernet and HZ=100 the minimal allowed B is ~10Kbytes.
 86
 87	Note that the peak rate TBF is much more tough: with MTU 1500
 88	P_crit = 150Kbytes/sec. So, if you need greater peak
 89	rates, use alpha with HZ=1000 :-)
 90
 91	With classful TBF, limit is just kept for backwards compatibility.
 92	It is passed to the default bfifo qdisc - if the inner qdisc is
 93	changed the limit is not effective anymore.
 94*/
 95
 96struct tbf_sched_data {
 97/* Parameters */
 98	u32		limit;		/* Maximal length of backlog: bytes */
 99	u32		max_size;
100	s64		buffer;		/* Token bucket depth/rate: MUST BE >= MTU/B */
101	s64		mtu;
102	struct psched_ratecfg rate;
103	struct psched_ratecfg peak;
104
105/* Variables */
106	s64	tokens;			/* Current number of B tokens */
107	s64	ptokens;		/* Current number of P tokens */
108	s64	t_c;			/* Time check-point */
109	struct Qdisc	*qdisc;		/* Inner qdisc, default - bfifo queue */
110	struct qdisc_watchdog watchdog;	/* Watchdog timer */
111};
112
113
114/* Time to Length, convert time in ns to length in bytes
115 * to determine how many bytes can be sent in a given time.
116 */
117static u64 psched_ns_t2l(const struct psched_ratecfg *r,
118			 u64 time_in_ns)
119{
120	/* The formula is :
121	 * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC
122	 */
123	u64 len = time_in_ns * r->rate_bytes_ps;
124
125	do_div(len, NSEC_PER_SEC);
126
127	if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) {
128		do_div(len, 53);
129		len = len * 48;
130	}
131
132	if (len > r->overhead)
133		len -= r->overhead;
134	else
135		len = 0;
136
137	return len;
138}
139
140/* GSO packet is too big, segment it so that tbf can transmit
141 * each segment in time
142 */
143static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
144		       struct sk_buff **to_free)
145{
146	struct tbf_sched_data *q = qdisc_priv(sch);
147	struct sk_buff *segs, *nskb;
148	netdev_features_t features = netif_skb_features(skb);
149	unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
150	int ret, nb;
151
152	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
153
154	if (IS_ERR_OR_NULL(segs))
155		return qdisc_drop(skb, sch, to_free);
156
157	nb = 0;
158	while (segs) {
159		nskb = segs->next;
160		skb_mark_not_on_list(segs);
161		qdisc_skb_cb(segs)->pkt_len = segs->len;
162		len += segs->len;
163		ret = qdisc_enqueue(segs, q->qdisc, to_free);
164		if (ret != NET_XMIT_SUCCESS) {
165			if (net_xmit_drop_count(ret))
166				qdisc_qstats_drop(sch);
167		} else {
168			nb++;
169		}
170		segs = nskb;
171	}
172	sch->q.qlen += nb;
173	if (nb > 1)
174		qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
175	consume_skb(skb);
176	return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
177}
178
179static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
180		       struct sk_buff **to_free)
181{
182	struct tbf_sched_data *q = qdisc_priv(sch);
183	unsigned int len = qdisc_pkt_len(skb);
184	int ret;
185
186	if (qdisc_pkt_len(skb) > q->max_size) {
187		if (skb_is_gso(skb) &&
188		    skb_gso_validate_mac_len(skb, q->max_size))
189			return tbf_segment(skb, sch, to_free);
190		return qdisc_drop(skb, sch, to_free);
191	}
192	ret = qdisc_enqueue(skb, q->qdisc, to_free);
193	if (ret != NET_XMIT_SUCCESS) {
194		if (net_xmit_drop_count(ret))
195			qdisc_qstats_drop(sch);
196		return ret;
197	}
198
199	sch->qstats.backlog += len;
200	sch->q.qlen++;
201	return NET_XMIT_SUCCESS;
202}
203
204static bool tbf_peak_present(const struct tbf_sched_data *q)
205{
206	return q->peak.rate_bytes_ps;
207}
208
209static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
210{
211	struct tbf_sched_data *q = qdisc_priv(sch);
212	struct sk_buff *skb;
213
214	skb = q->qdisc->ops->peek(q->qdisc);
215
216	if (skb) {
217		s64 now;
218		s64 toks;
219		s64 ptoks = 0;
220		unsigned int len = qdisc_pkt_len(skb);
221
222		now = ktime_get_ns();
223		toks = min_t(s64, now - q->t_c, q->buffer);
224
225		if (tbf_peak_present(q)) {
226			ptoks = toks + q->ptokens;
227			if (ptoks > q->mtu)
228				ptoks = q->mtu;
229			ptoks -= (s64) psched_l2t_ns(&q->peak, len);
230		}
231		toks += q->tokens;
232		if (toks > q->buffer)
233			toks = q->buffer;
234		toks -= (s64) psched_l2t_ns(&q->rate, len);
235
236		if ((toks|ptoks) >= 0) {
237			skb = qdisc_dequeue_peeked(q->qdisc);
238			if (unlikely(!skb))
239				return NULL;
240
241			q->t_c = now;
242			q->tokens = toks;
243			q->ptokens = ptoks;
244			qdisc_qstats_backlog_dec(sch, skb);
245			sch->q.qlen--;
246			qdisc_bstats_update(sch, skb);
247			return skb;
248		}
249
250		qdisc_watchdog_schedule_ns(&q->watchdog,
251					   now + max_t(long, -toks, -ptoks));
252
253		/* Maybe we have a shorter packet in the queue,
254		   which can be sent now. It sounds cool,
255		   but, however, this is wrong in principle.
256		   We MUST NOT reorder packets under these circumstances.
257
258		   Really, if we split the flow into independent
259		   subflows, it would be a very good solution.
260		   This is the main idea of all FQ algorithms
261		   (cf. CSZ, HPFQ, HFSC)
262		 */
263
264		qdisc_qstats_overlimit(sch);
265	}
266	return NULL;
267}
268
269static void tbf_reset(struct Qdisc *sch)
270{
271	struct tbf_sched_data *q = qdisc_priv(sch);
272
273	qdisc_reset(q->qdisc);
274	sch->qstats.backlog = 0;
275	sch->q.qlen = 0;
276	q->t_c = ktime_get_ns();
277	q->tokens = q->buffer;
278	q->ptokens = q->mtu;
279	qdisc_watchdog_cancel(&q->watchdog);
280}
281
282static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
283	[TCA_TBF_PARMS]	= { .len = sizeof(struct tc_tbf_qopt) },
284	[TCA_TBF_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
285	[TCA_TBF_PTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
286	[TCA_TBF_RATE64]	= { .type = NLA_U64 },
287	[TCA_TBF_PRATE64]	= { .type = NLA_U64 },
288	[TCA_TBF_BURST] = { .type = NLA_U32 },
289	[TCA_TBF_PBURST] = { .type = NLA_U32 },
290};
291
292static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
293		      struct netlink_ext_ack *extack)
294{
295	int err;
296	struct tbf_sched_data *q = qdisc_priv(sch);
297	struct nlattr *tb[TCA_TBF_MAX + 1];
298	struct tc_tbf_qopt *qopt;
299	struct Qdisc *child = NULL;
300	struct psched_ratecfg rate;
301	struct psched_ratecfg peak;
302	u64 max_size;
303	s64 buffer, mtu;
304	u64 rate64 = 0, prate64 = 0;
305
306	err = nla_parse_nested_deprecated(tb, TCA_TBF_MAX, opt, tbf_policy,
307					  NULL);
308	if (err < 0)
309		return err;
310
311	err = -EINVAL;
312	if (tb[TCA_TBF_PARMS] == NULL)
313		goto done;
314
315	qopt = nla_data(tb[TCA_TBF_PARMS]);
316	if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
317		qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
318					      tb[TCA_TBF_RTAB],
319					      NULL));
320
321	if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
322			qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
323						      tb[TCA_TBF_PTAB],
324						      NULL));
325
326	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
327	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);
328
329	if (tb[TCA_TBF_RATE64])
330		rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
331	psched_ratecfg_precompute(&rate, &qopt->rate, rate64);
332
333	if (tb[TCA_TBF_BURST]) {
334		max_size = nla_get_u32(tb[TCA_TBF_BURST]);
335		buffer = psched_l2t_ns(&rate, max_size);
336	} else {
337		max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
338	}
339
340	if (qopt->peakrate.rate) {
341		if (tb[TCA_TBF_PRATE64])
342			prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
343		psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
344		if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
345			pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equals to rate %llu !\n",
346					peak.rate_bytes_ps, rate.rate_bytes_ps);
347			err = -EINVAL;
348			goto done;
349		}
350
351		if (tb[TCA_TBF_PBURST]) {
352			u32 pburst = nla_get_u32(tb[TCA_TBF_PBURST]);
353			max_size = min_t(u32, max_size, pburst);
354			mtu = psched_l2t_ns(&peak, pburst);
355		} else {
356			max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
357		}
358	} else {
359		memset(&peak, 0, sizeof(peak));
360	}
361
362	if (max_size < psched_mtu(qdisc_dev(sch)))
363		pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n",
364				    max_size, qdisc_dev(sch)->name,
365				    psched_mtu(qdisc_dev(sch)));
366
367	if (!max_size) {
368		err = -EINVAL;
369		goto done;
370	}
371
372	if (q->qdisc != &noop_qdisc) {
373		err = fifo_set_limit(q->qdisc, qopt->limit);
374		if (err)
375			goto done;
376	} else if (qopt->limit > 0) {
377		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit,
378					 extack);
379		if (IS_ERR(child)) {
380			err = PTR_ERR(child);
381			goto done;
382		}
383
384		/* child is fifo, no need to check for noop_qdisc */
385		qdisc_hash_add(child, true);
386	}
387
388	sch_tree_lock(sch);
389	if (child) {
390		qdisc_tree_flush_backlog(q->qdisc);
391		qdisc_put(q->qdisc);
392		q->qdisc = child;
393	}
394	q->limit = qopt->limit;
395	if (tb[TCA_TBF_PBURST])
396		q->mtu = mtu;
397	else
398		q->mtu = PSCHED_TICKS2NS(qopt->mtu);
399	q->max_size = max_size;
400	if (tb[TCA_TBF_BURST])
401		q->buffer = buffer;
402	else
403		q->buffer = PSCHED_TICKS2NS(qopt->buffer);
404	q->tokens = q->buffer;
405	q->ptokens = q->mtu;
406
407	memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
408	memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));
409
410	sch_tree_unlock(sch);
411	err = 0;
412done:
413	return err;
414}
415
416static int tbf_init(struct Qdisc *sch, struct nlattr *opt,
417		    struct netlink_ext_ack *extack)
418{
419	struct tbf_sched_data *q = qdisc_priv(sch);
420
421	qdisc_watchdog_init(&q->watchdog, sch);
422	q->qdisc = &noop_qdisc;
423
424	if (!opt)
425		return -EINVAL;
426
427	q->t_c = ktime_get_ns();
428
429	return tbf_change(sch, opt, extack);
430}
431
432static void tbf_destroy(struct Qdisc *sch)
433{
434	struct tbf_sched_data *q = qdisc_priv(sch);
435
436	qdisc_watchdog_cancel(&q->watchdog);
437	qdisc_put(q->qdisc);
438}
439
440static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
441{
442	struct tbf_sched_data *q = qdisc_priv(sch);
443	struct nlattr *nest;
444	struct tc_tbf_qopt opt;
445
446	sch->qstats.backlog = q->qdisc->qstats.backlog;
447	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
448	if (nest == NULL)
449		goto nla_put_failure;
450
451	opt.limit = q->limit;
452	psched_ratecfg_getrate(&opt.rate, &q->rate);
453	if (tbf_peak_present(q))
454		psched_ratecfg_getrate(&opt.peakrate, &q->peak);
455	else
456		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
457	opt.mtu = PSCHED_NS2TICKS(q->mtu);
458	opt.buffer = PSCHED_NS2TICKS(q->buffer);
459	if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
460		goto nla_put_failure;
461	if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
462	    nla_put_u64_64bit(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps,
463			      TCA_TBF_PAD))
464		goto nla_put_failure;
465	if (tbf_peak_present(q) &&
466	    q->peak.rate_bytes_ps >= (1ULL << 32) &&
467	    nla_put_u64_64bit(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps,
468			      TCA_TBF_PAD))
469		goto nla_put_failure;
470
471	return nla_nest_end(skb, nest);
472
473nla_put_failure:
474	nla_nest_cancel(skb, nest);
475	return -1;
476}
477
478static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
479			  struct sk_buff *skb, struct tcmsg *tcm)
480{
481	struct tbf_sched_data *q = qdisc_priv(sch);
482
483	tcm->tcm_handle |= TC_H_MIN(1);
484	tcm->tcm_info = q->qdisc->handle;
485
486	return 0;
487}
488
489static int tbf_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
490		     struct Qdisc **old, struct netlink_ext_ack *extack)
491{
492	struct tbf_sched_data *q = qdisc_priv(sch);
493
494	if (new == NULL)
495		new = &noop_qdisc;
496
497	*old = qdisc_replace(sch, new, &q->qdisc);
498	return 0;
499}
500
501static struct Qdisc *tbf_leaf(struct Qdisc *sch, unsigned long arg)
502{
503	struct tbf_sched_data *q = qdisc_priv(sch);
504	return q->qdisc;
505}
506
507static unsigned long tbf_find(struct Qdisc *sch, u32 classid)
508{
509	return 1;
510}
511
512static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
513{
514	if (!walker->stop) {
515		if (walker->count >= walker->skip)
516			if (walker->fn(sch, 1, walker) < 0) {
517				walker->stop = 1;
518				return;
519			}
520		walker->count++;
521	}
522}
523
524static const struct Qdisc_class_ops tbf_class_ops = {
525	.graft		=	tbf_graft,
526	.leaf		=	tbf_leaf,
527	.find		=	tbf_find,
528	.walk		=	tbf_walk,
529	.dump		=	tbf_dump_class,
530};
531
532static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
533	.next		=	NULL,
534	.cl_ops		=	&tbf_class_ops,
535	.id		=	"tbf",
536	.priv_size	=	sizeof(struct tbf_sched_data),
537	.enqueue	=	tbf_enqueue,
538	.dequeue	=	tbf_dequeue,
539	.peek		=	qdisc_peek_dequeued,
540	.init		=	tbf_init,
541	.reset		=	tbf_reset,
542	.destroy	=	tbf_destroy,
543	.change		=	tbf_change,
544	.dump		=	tbf_dump,
545	.owner		=	THIS_MODULE,
546};
547
548static int __init tbf_module_init(void)
549{
550	return register_qdisc(&tbf_qdisc_ops);
551}
552
553static void __exit tbf_module_exit(void)
554{
555	unregister_qdisc(&tbf_qdisc_ops);
556}
557module_init(tbf_module_init)
558module_exit(tbf_module_exit)
559MODULE_LICENSE("GPL");