// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Codel - The Controlled-Delay Active Queue Management algorithm
 *
 * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
 * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
 *
 * Implemented on Linux by:
 * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
 * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/prefetch.h>
#include <net/pkt_sched.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>

#define DEFAULT_CODEL_LIMIT 1000
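
/* Illustrative configuration from userspace (an example assuming the
 * iproute2 "tc" tool; the values shown are examples, not
 * recommendations):
 *
 *	tc qdisc add dev eth0 root codel limit 1000 \
 *		target 5ms interval 100ms ecn
 */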

struct codel_sched_data {
	struct codel_params	params;		/* codel knobs: target, interval, ecn, ... */
	struct codel_vars	vars;		/* control-law state machine */
	struct codel_stats	stats;		/* drop/mark counters */
	u32			drop_overlimit;	/* tail-drops because sch->limit was hit */
};

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (skb) {
		sch->qstats.backlog -= qdisc_pkt_len(skb);
		prefetch(&skb->end); /* we'll need skb_shinfo() */
	}
	return skb;
}

static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	kfree_skb(skb);
	qdisc_qstats_drop(sch);
}

static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,
			    &q->stats, qdisc_pkt_len, codel_get_enqueue_time,
			    drop_func, dequeue_func);

	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->stats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
		q->stats.drop_count = 0;
		q->stats.drop_len = 0;
	}
	if (skb)
		qdisc_bstats_update(sch, skb);
	return skb;
}

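/* Timestamp packets at enqueue so codel_dequeue() can later measure
 * their sojourn time; once the queue already holds sch->limit packets,
 * tail-drop and account the drop in drop_overlimit.
 */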
static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			       struct sk_buff **to_free)
{
	struct codel_sched_data *q;

	if (likely(qdisc_qlen(sch) < sch->limit)) {
		codel_set_enqueue_time(skb);
		return qdisc_enqueue_tail(skb, sch);
	}
	q = qdisc_priv(sch);
	q->drop_overlimit++;
	return qdisc_drop(skb, sch, to_free);
}

static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
	[TCA_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_CODEL_ECN]		= { .type = NLA_U32 },
	[TCA_CODEL_CE_THRESHOLD]= { .type = NLA_U32 },
};

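/* Netlink attributes carry times in microseconds, while codel stores
 * codel_time_t values, i.e. nanoseconds shifted right by CODEL_SHIFT
 * (see include/net/codel.h); hence the scaling below.
 */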
static int codel_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CODEL_MAX + 1];
	unsigned int qlen, dropped = 0;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_CODEL_MAX, opt,
					  codel_policy, NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	if (tb[TCA_CODEL_TARGET]) {
		u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);

		WRITE_ONCE(q->params.target,
			   ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_CODEL_CE_THRESHOLD]);

		WRITE_ONCE(q->params.ce_threshold,
			   (val * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_CODEL_INTERVAL]) {
		u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);

		WRITE_ONCE(q->params.interval,
			   ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_CODEL_LIMIT])
		WRITE_ONCE(sch->limit,
			   nla_get_u32(tb[TCA_CODEL_LIMIT]));

	if (tb[TCA_CODEL_ECN])
		WRITE_ONCE(q->params.ecn,
			   !!nla_get_u32(tb[TCA_CODEL_ECN]));

	/* If the new limit shrank the queue, drop from the head and let
	 * parent qdiscs fix up their qlen/backlog accounting.
	 */
	qlen = sch->q.qlen;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

		dropped += qdisc_pkt_len(skb);
		qdisc_qstats_backlog_dec(sch, skb);
		rtnl_qdisc_drop(skb, sch);
	}
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	sch_tree_unlock(sch);
	return 0;
}

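/* codel_params_init() supplies the algorithm defaults (per
 * include/net/codel_impl.h: 5 ms target, 100 ms interval, CE threshold
 * disabled); the packet limit defaults to DEFAULT_CODEL_LIMIT.
 */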
static int codel_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	sch->limit = DEFAULT_CODEL_LIMIT;

	codel_params_init(&q->params);
	codel_vars_init(&q->vars);
	codel_stats_init(&q->stats);
	q->params.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		int err = codel_change(sch, opt, extack);

		if (err)
			return err;
	}

	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;

	return 0;
}

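/* Dumps can run without the qdisc tree lock; READ_ONCE() below pairs
 * with the WRITE_ONCE() updates in codel_change() so each parameter is
 * read without tearing.
 */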
static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	codel_time_t ce_threshold;
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CODEL_TARGET,
			codel_time_to_us(READ_ONCE(q->params.target))) ||
	    nla_put_u32(skb, TCA_CODEL_LIMIT,
			READ_ONCE(sch->limit)) ||
	    nla_put_u32(skb, TCA_CODEL_INTERVAL,
			codel_time_to_us(READ_ONCE(q->params.interval))) ||
	    nla_put_u32(skb, TCA_CODEL_ECN,
			READ_ONCE(q->params.ecn)))
		goto nla_put_failure;
	ce_threshold = READ_ONCE(q->params.ce_threshold);
	if (ce_threshold != CODEL_DISABLED_THRESHOLD &&
	    nla_put_u32(skb, TCA_CODEL_CE_THRESHOLD,
			codel_time_to_us(ce_threshold)))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -1;
}

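/* Fill tc_codel_xstats for "tc -s qdisc show"; drop_next is reported
 * as a signed microsecond offset from now, negative once the next
 * scheduled drop time has already passed.
 */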
static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	const struct codel_sched_data *q = qdisc_priv(sch);
	struct tc_codel_xstats st = {
		.maxpacket	= q->stats.maxpacket,
		.count		= q->vars.count,
		.lastcount	= q->vars.lastcount,
		.drop_overlimit = q->drop_overlimit,
		.ldelay		= codel_time_to_us(q->vars.ldelay),
		.dropping	= q->vars.dropping,
		.ecn_mark	= q->stats.ecn_mark,
		.ce_mark	= q->stats.ce_mark,
	};

	if (q->vars.dropping) {
		codel_tdiff_t delta = q->vars.drop_next - codel_get_time();

		if (delta >= 0)
			st.drop_next = codel_time_to_us(delta);
		else
			st.drop_next = -codel_time_to_us(-delta);
	}

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static void codel_reset(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	codel_vars_init(&q->vars);
}

static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
	.id		= "codel",
	.priv_size	= sizeof(struct codel_sched_data),

	.enqueue	= codel_qdisc_enqueue,
	.dequeue	= codel_qdisc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= codel_init,
	.reset		= codel_reset,
	.change		= codel_change,
	.dump		= codel_dump,
	.dump_stats	= codel_dump_stats,
	.owner		= THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("codel");

static int __init codel_module_init(void)
{
	return register_qdisc(&codel_qdisc_ops);
}

static void __exit codel_module_exit(void)
{
	unregister_qdisc(&codel_qdisc_ops);
}

module_init(codel_module_init)
module_exit(codel_module_exit)

MODULE_DESCRIPTION("Controlled Delay queue discipline");
MODULE_AUTHOR("Dave Taht");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("Dual BSD/GPL");