// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Codel - The Controlled-Delay Active Queue Management algorithm
 *
 * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
 * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
 *
 * Implemented on Linux by:
 * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
 * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */
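
/* CoDel in brief: each packet is stamped with its enqueue time; at
 * dequeue, its sojourn time is compared against a target delay. If the
 * minimum sojourn time stays above target for at least one interval,
 * packets are dropped (or ECN-marked when "ecn" is set) at a frequency
 * that grows with the square root of the drop count, until the delay
 * falls back below target.
 *
 * Example setup (iproute2 tc; the values are illustrative, not
 * mandated by this file):
 *	tc qdisc add dev eth0 root codel limit 1000 target 5ms \
 *		interval 100ms ecn
 */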

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/prefetch.h>
#include <net/pkt_sched.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>

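/* Default hard limit on the queue, in packets; tc's "limit" parameter
 * (TCA_CODEL_LIMIT) overrides it in codel_change().
 */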
#define DEFAULT_CODEL_LIMIT 1000

struct codel_sched_data {
	struct codel_params	params;	/* tunables: target, interval, ecn, ce_threshold */
	struct codel_vars	vars;	/* control-law state (count, dropping, drop_next, ...) */
	struct codel_stats	stats;	/* drop/mark counters shared with codel_dequeue() */
	u32			drop_overlimit;	/* tail drops because sch->limit was hit */
};

/* This is the function called from codel_dequeue() to actually pull a
 * packet off the queue. The qdisc backlog is adjusted here; per-tree
 * drop accounting is deferred to codel_qdisc_dequeue() below.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

	if (skb) {
		sch->qstats.backlog -= qdisc_pkt_len(skb);
		prefetch(&skb->end); /* we'll need skb_shinfo() */
	}
	return skb;
}

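/* Callback used by codel_dequeue() to dispose of a packet it decided
 * to drop: free the skb and account it in the qdisc drop stats.
 */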
static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	kfree_skb(skb);
	qdisc_qstats_drop(sch);
}

static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,
			    &q->stats, qdisc_pkt_len, codel_get_enqueue_time,
			    drop_func, dequeue_func);

	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->stats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
		q->stats.drop_count = 0;
		q->stats.drop_len = 0;
	}
	if (skb)
		qdisc_bstats_update(sch, skb);
	return skb;
}

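/* Stamp each packet with its enqueue time so the dequeue path can
 * compute its sojourn time; once sch->limit is reached, drop at the
 * tail instead.
 */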
static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			       struct sk_buff **to_free)
{
	struct codel_sched_data *q;

	if (likely(qdisc_qlen(sch) < sch->limit)) {
		codel_set_enqueue_time(skb);
		return qdisc_enqueue_tail(skb, sch);
	}
	q = qdisc_priv(sch);
	q->drop_overlimit++;
	return qdisc_drop(skb, sch, to_free);
}

static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
	[TCA_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_CODEL_ECN]		= { .type = NLA_U32 },
	[TCA_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
};

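/* Time-valued attributes (target, interval, ce_threshold) arrive in
 * microseconds over netlink and are stored as codel_time_t, i.e.
 * nanoseconds >> CODEL_SHIFT.
 */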
static int codel_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CODEL_MAX + 1];
	unsigned int qlen, dropped = 0;
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_CODEL_MAX, opt,
					  codel_policy, NULL);
	if (err < 0)
		return err;

	sch_tree_lock(sch);

	if (tb[TCA_CODEL_TARGET]) {
		u32 target = nla_get_u32(tb[TCA_CODEL_TARGET]);

		WRITE_ONCE(q->params.target,
			   ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_CODEL_CE_THRESHOLD]);

		WRITE_ONCE(q->params.ce_threshold,
			   (val * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_CODEL_INTERVAL]) {
		u32 interval = nla_get_u32(tb[TCA_CODEL_INTERVAL]);

		WRITE_ONCE(q->params.interval,
			   ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT);
	}

	if (tb[TCA_CODEL_LIMIT])
		WRITE_ONCE(sch->limit,
			   nla_get_u32(tb[TCA_CODEL_LIMIT]));

	if (tb[TCA_CODEL_ECN])
		WRITE_ONCE(q->params.ecn,
			   !!nla_get_u32(tb[TCA_CODEL_ECN]));

	/* The limit may have been lowered; drop any excess packets and
	 * report the freed qlen/backlog up the qdisc tree.
	 */
	qlen = sch->q.qlen;
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

		dropped += qdisc_pkt_len(skb);
		qdisc_qstats_backlog_dec(sch, skb);
		rtnl_qdisc_drop(skb, sch);
	}
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	sch_tree_unlock(sch);
	return 0;
}

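/* Set defaults, apply any user-supplied attributes, then decide
 * whether the fast path may bypass this qdisc: with a limit of zero
 * every packet must be dropped, so bypass has to stay disabled.
 */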
static int codel_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	sch->limit = DEFAULT_CODEL_LIMIT;

	codel_params_init(&q->params);
	codel_vars_init(&q->vars);
	codel_stats_init(&q->stats);
	q->params.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		int err = codel_change(sch, opt, extack);

		if (err)
			return err;
	}

	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;

	return 0;
}

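/* Report the current configuration back over netlink. READ_ONCE()
 * pairs with the WRITE_ONCE() stores in codel_change(); dumps may run
 * without the qdisc tree lock.
 */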
static int codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct codel_sched_data *q = qdisc_priv(sch);
	codel_time_t ce_threshold;
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_CODEL_TARGET,
			codel_time_to_us(READ_ONCE(q->params.target))) ||
	    nla_put_u32(skb, TCA_CODEL_LIMIT,
			READ_ONCE(sch->limit)) ||
	    nla_put_u32(skb, TCA_CODEL_INTERVAL,
			codel_time_to_us(READ_ONCE(q->params.interval))) ||
	    nla_put_u32(skb, TCA_CODEL_ECN,
			READ_ONCE(q->params.ecn)))
		goto nla_put_failure;
	ce_threshold = READ_ONCE(q->params.ce_threshold);
	if (ce_threshold != CODEL_DISABLED_THRESHOLD &&
	    nla_put_u32(skb, TCA_CODEL_CE_THRESHOLD,
			codel_time_to_us(ce_threshold)))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -1;
}

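/* Export internal state as struct tc_codel_xstats. drop_next is
 * reported as a signed microsecond offset from the current time.
 */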
static int codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	const struct codel_sched_data *q = qdisc_priv(sch);
	struct tc_codel_xstats st = {
		.maxpacket	= q->stats.maxpacket,
		.count		= q->vars.count,
		.lastcount	= q->vars.lastcount,
		.drop_overlimit = q->drop_overlimit,
		.ldelay		= codel_time_to_us(q->vars.ldelay),
		.dropping	= q->vars.dropping,
		.ecn_mark	= q->stats.ecn_mark,
		.ce_mark	= q->stats.ce_mark,
	};

	if (q->vars.dropping) {
		codel_tdiff_t delta = q->vars.drop_next - codel_get_time();

		if (delta >= 0)
			st.drop_next = codel_time_to_us(delta);
		else
			st.drop_next = -codel_time_to_us(-delta);
	}

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

/* Flush all queued packets and restart the control law from scratch. */
static void codel_reset(struct Qdisc *sch)
{
	struct codel_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	codel_vars_init(&q->vars);
}

static struct Qdisc_ops codel_qdisc_ops __read_mostly = {
	.id		=	"codel",
	.priv_size	=	sizeof(struct codel_sched_data),

	.enqueue	=	codel_qdisc_enqueue,
	.dequeue	=	codel_qdisc_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	codel_init,
	.reset		=	codel_reset,
	.change		=	codel_change,
	.dump		=	codel_dump,
	.dump_stats	=	codel_dump_stats,
	.owner		=	THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("codel");

static int __init codel_module_init(void)
{
	return register_qdisc(&codel_qdisc_ops);
}

static void __exit codel_module_exit(void)
{
	unregister_qdisc(&codel_qdisc_ops);
}

module_init(codel_module_init)
module_exit(codel_module_exit)

MODULE_DESCRIPTION("Controlled Delay queue discipline");
MODULE_AUTHOR("Dave Taht");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("Dual BSD/GPL");