// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Codel - The Controlled-Delay Active Queue Management algorithm
 *
 *  Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
 *  Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
 *
 *  Implemented on Linux by:
 *  Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
 *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/prefetch.h>
#include <net/pkt_sched.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>

#define DEFAULT_CODEL_LIMIT 1000

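/* Per-qdisc private state, carved out of the Qdisc allocation via
 * .priv_size below and reached with qdisc_priv(sch).
 */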
struct codel_sched_data {
	struct codel_params	params;		/* tunables: target, interval, ecn, ce_threshold */
	struct codel_vars	vars;		/* CoDel control-law state */
	struct codel_stats	stats;		/* drop/mark counters */
	u32			drop_overlimit;	/* tail drops at enqueue time */
};

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from the queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (skb) {
		sch->qstats.backlog -= qdisc_pkt_len(skb);
		prefetch(&skb->end); /* we'll need skb_shinfo() */
	}
	return skb;
}

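/* Drop callback handed to codel_dequeue(): frees the packet and records
 * the drop in the qdisc's drop counter.
 */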
static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	kfree_skb(skb);
	qdisc_qstats_drop(sch);
}

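/* Dequeue entry point. codel_dequeue() runs the CoDel control law on the
 * head of the queue, dropping (or ECN-marking, if enabled) packets whose
 * sojourn time has exceeded target for longer than interval, before
 * handing one back to be transmitted.
 */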
static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,
			    &q->stats, qdisc_pkt_len, codel_get_enqueue_time,
			    drop_func, dequeue_func);

	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it to the next round.
	 */
	if (q->stats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->stats.drop_count,
					  q->stats.drop_len);
		q->stats.drop_count = 0;
		q->stats.drop_len = 0;
	}
	if (skb)
		qdisc_bstats_update(sch, skb);
	return skb;
}

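/* Enqueue entry point: timestamp the packet so its sojourn time can be
 * measured at dequeue, and tail-drop once the configured packet limit is
 * reached.
 */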
static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			       struct sk_buff **to_free)
{
	struct codel_sched_data *q;

	if (likely(qdisc_qlen(sch) < sch->limit)) {
		codel_set_enqueue_time(skb);
		return qdisc_enqueue_tail(skb, sch);
	}
	q = qdisc_priv(sch);
	q->drop_overlimit++;
	return qdisc_drop(skb, sch, to_free);
}

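/* Netlink attribute policy: every tunable is a u32; target, interval and
 * ce_threshold arrive from userspace expressed in microseconds.
 */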
static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
	[TCA_CODEL_TARGET]	 = { .type = NLA_U32 },
	[TCA_CODEL_LIMIT]	 = { .type = NLA_U32 },
	[TCA_CODEL_INTERVAL]	 = { .type = NLA_U32 },
	[TCA_CODEL_ECN]		 = { .type = NLA_U32 },
	[TCA_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
};

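/* codel_change() is typically driven from userspace by iproute2, e.g.
 * (the device name is illustrative only):
 *
 *	tc qdisc change dev eth0 root codel target 5ms interval 100ms ecn
 *
 * tc converts the times to microseconds; the WRITE_ONCE() stores below
 * rescale them to codel_time_t, i.e. nanoseconds >> CODEL_SHIFT.
 */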
static int codel_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CODEL_MAX + 1];
	unsigned int qlen, dropped = 0;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_CODEL_MAX, opt,
					  codel_policy, NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	if (tb[TCA_CODEL_TARGET]) {
		u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);

		WRITE_ONCE(q->params.target,
			   ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_CODEL_CE_THRESHOLD]);

		WRITE_ONCE(q->params.ce_threshold,
			   (val * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_CODEL_INTERVAL]) {
		u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);

		WRITE_ONCE(q->params.interval,
			   ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_CODEL_LIMIT])
		WRITE_ONCE(sch->limit,
			   nla_get_u32(tb[TCA_CODEL_LIMIT]));

	if (tb[TCA_CODEL_ECN])
		WRITE_ONCE(q->params.ecn,
			   !!nla_get_u32(tb[TCA_CODEL_ECN]));

	qlen = sch->q.qlen;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

		dropped += qdisc_pkt_len(skb);
		qdisc_qstats_backlog_dec(sch, skb);
		rtnl_qdisc_drop(skb, sch);
	}
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	sch_tree_unlock(sch);
	return 0;
}

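/* Instantiated on "tc qdisc add ... codel": set the defaults, then apply
 * any attributes supplied at creation time through codel_change().
 */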
static int codel_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	sch->limit = DEFAULT_CODEL_LIMIT;

	codel_params_init(&q->params);
	codel_vars_init(&q->vars);
	codel_stats_init(&q->stats);
	q->params.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		int err = codel_change(sch, opt, extack);

		if (err)
			return err;
	}

	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;

	return 0;
}

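/* Report the current configuration back to userspace; times are converted
 * from codel_time_t back to microseconds, and ce_threshold is reported
 * only when it has actually been enabled.
 */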
static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	codel_time_t ce_threshold;
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CODEL_TARGET,
			codel_time_to_us(READ_ONCE(q->params.target))) ||
	    nla_put_u32(skb, TCA_CODEL_LIMIT,
			READ_ONCE(sch->limit)) ||
	    nla_put_u32(skb, TCA_CODEL_INTERVAL,
			codel_time_to_us(READ_ONCE(q->params.interval))) ||
	    nla_put_u32(skb, TCA_CODEL_ECN,
			READ_ONCE(q->params.ecn)))
		goto nla_put_failure;
	ce_threshold = READ_ONCE(q->params.ce_threshold);
	if (ce_threshold != CODEL_DISABLED_THRESHOLD &&
	    nla_put_u32(skb, TCA_CODEL_CE_THRESHOLD,
			codel_time_to_us(ce_threshold)))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -1;
}

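/* Export internal state for "tc -s qdisc show": drop and mark counts, the
 * most recently measured sojourn time (ldelay) and, while in dropping
 * state, the signed time until the next scheduled drop.
 */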
static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	const struct codel_sched_data *q = qdisc_priv(sch);
	struct tc_codel_xstats st = {
		.maxpacket	= q->stats.maxpacket,
		.count		= q->vars.count,
		.lastcount	= q->vars.lastcount,
		.drop_overlimit = q->drop_overlimit,
		.ldelay		= codel_time_to_us(q->vars.ldelay),
		.dropping	= q->vars.dropping,
		.ecn_mark	= q->stats.ecn_mark,
		.ce_mark	= q->stats.ce_mark,
	};

	if (q->vars.dropping) {
		codel_tdiff_t delta = q->vars.drop_next - codel_get_time();

		if (delta >= 0)
			st.drop_next = codel_time_to_us(delta);
		else
			st.drop_next = -codel_time_to_us(-delta);
	}

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

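/* Purge all queued packets and restart the CoDel state machine; the
 * configured parameters are deliberately preserved.
 */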
static void codel_reset(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	codel_vars_init(&q->vars);
}

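/* Wiring into the scheduler core. Note that .peek uses the generic
 * qdisc_peek_dequeued(), which dequeues a packet and parks it in
 * sch->gso_skb so the following ->dequeue can return it unchanged.
 */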
static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
	.id		=	"codel",
	.priv_size	=	sizeof(struct codel_sched_data),

	.enqueue	=	codel_qdisc_enqueue,
	.dequeue	=	codel_qdisc_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	codel_init,
	.reset		=	codel_reset,
	.change		=	codel_change,
	.dump		=	codel_dump,
	.dump_stats	=	codel_dump_stats,
	.owner		=	THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("codel");

static int __init codel_module_init(void)
{
	return register_qdisc(&codel_qdisc_ops);
}

static void __exit codel_module_exit(void)
{
	unregister_qdisc(&codel_qdisc_ops);
}

module_init(codel_module_init)
module_exit(codel_module_exit)

MODULE_DESCRIPTION("Controlled Delay queue discipline")
MODULE_AUTHOR("Dave Taht");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("Dual BSD/GPL");
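
/* Typical usage sketch from userspace (device name illustrative only):
 *
 *	modprobe sch_codel	# or auto-loaded via the net-sch alias above
 *	tc qdisc add dev eth0 root codel limit 1000 target 5ms interval 100ms
 *	tc -s qdisc show dev eth0	# stats come from codel_dump_stats()
 */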