// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_skbprio.c  SKB Priority Queue.
 *
 * Authors:     Nishanth Devarajan, <ndev2021@gmail.com>
 *              Cody Doucette, <doucette@bu.edu>
 *              original idea by Michel Machado, Cody Doucette, and Qiaobin Fu
 */

#include <linux/string.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/inet_ecn.h>

/*                      SKB Priority Queue
 *                =================================
 *
 * Skbprio (SKB Priority Queue) is a queueing discipline that prioritizes
 * packets according to their skb->priority field. Under congestion,
 * Skbprio drops already-enqueued lower priority packets to make space
 * available for higher priority packets; it was conceived as a solution
 * for denial-of-service defenses that need to route packets with different
 * priorities as a means to overcome DoS attacks.
 */

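/* Example setup (illustrative only; the device name and limit value are
 * placeholders):
 *
 *      # tc qdisc add dev eth0 root skbprio limit 64
 *
 * skb->priority is normally set by the sending socket, e.g. via
 * setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio)), or by a
 * tc filter/action before the packet reaches this qdisc; the value is
 * clamped to SKBPRIO_MAX_PRIORITY - 1 at enqueue time.
 */
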
struct skbprio_sched_data {
        /* Queue state. */
        struct sk_buff_head qdiscs[SKBPRIO_MAX_PRIORITY];
        struct gnet_stats_queue qstats[SKBPRIO_MAX_PRIORITY];
        u16 highest_prio;       /* highest non-empty band; 0 when idle */
        u16 lowest_prio;        /* lowest non-empty band; SKBPRIO_MAX_PRIORITY - 1 when idle */
};

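/* Band-rescan helpers: when a band drains, walk inward from the cached
 * highest_prio/lowest_prio to find the next non-empty band, falling back
 * to the idle defaults (0 and SKBPRIO_MAX_PRIORITY - 1) when nothing is
 * queued between the bounds.
 */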
static u16 calc_new_high_prio(const struct skbprio_sched_data *q)
{
        int prio;

        for (prio = q->highest_prio - 1; prio >= q->lowest_prio; prio--) {
                if (!skb_queue_empty(&q->qdiscs[prio]))
                        return prio;
        }

        /* SKB queue is empty, return 0 (default highest priority setting). */
        return 0;
}

static u16 calc_new_low_prio(const struct skbprio_sched_data *q)
{
        int prio;

        for (prio = q->lowest_prio + 1; prio <= q->highest_prio; prio++) {
                if (!skb_queue_empty(&q->qdiscs[prio]))
                        return prio;
        }

        /* SKB queue is empty, return SKBPRIO_MAX_PRIORITY - 1
         * (default lowest priority setting).
         */
        return SKBPRIO_MAX_PRIORITY - 1;
}

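/* Enqueue policy: below sch->limit, simply append to the band given by
 * skb->priority.  At the limit, a packet is accepted only if its band is
 * strictly higher than the lowest non-empty band; the tail of that lowest
 * band is then dropped to make room and NET_XMIT_CN is returned.  For
 * example, with the queue full and bands {0, 0, 5} occupied, an arriving
 * priority-2 packet is enqueued and one priority-0 packet is dropped,
 * while an arriving priority-0 packet is dropped outright.
 */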
static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                           struct sk_buff **to_free)
{
        const unsigned int max_priority = SKBPRIO_MAX_PRIORITY - 1;
        struct skbprio_sched_data *q = qdisc_priv(sch);
        struct sk_buff_head *qdisc;
        struct sk_buff_head *lp_qdisc;
        struct sk_buff *to_drop;
        u16 prio, lp;

        /* Obtain the priority of @skb. */
        prio = min(skb->priority, max_priority);

        qdisc = &q->qdiscs[prio];

        /* sch->limit can change under us from skbprio_change() */
        if (sch->q.qlen < READ_ONCE(sch->limit)) {
                __skb_queue_tail(qdisc, skb);
                qdisc_qstats_backlog_inc(sch, skb);
                q->qstats[prio].backlog += qdisc_pkt_len(skb);

                /* Check to update highest and lowest priorities. */
                if (prio > q->highest_prio)
                        q->highest_prio = prio;

                if (prio < q->lowest_prio)
                        q->lowest_prio = prio;

                sch->q.qlen++;
                return NET_XMIT_SUCCESS;
        }

        /* If this packet has the lowest priority, drop it. */
        lp = q->lowest_prio;
        if (prio <= lp) {
                q->qstats[prio].drops++;
                q->qstats[prio].overlimits++;
                return qdisc_drop(skb, sch, to_free);
        }

        __skb_queue_tail(qdisc, skb);
        qdisc_qstats_backlog_inc(sch, skb);
        q->qstats[prio].backlog += qdisc_pkt_len(skb);

        /* Drop the packet at the tail of the lowest priority qdisc. */
        lp_qdisc = &q->qdiscs[lp];
        to_drop = __skb_dequeue_tail(lp_qdisc);
        BUG_ON(!to_drop);
        qdisc_qstats_backlog_dec(sch, to_drop);
        qdisc_drop(to_drop, sch, to_free);

        q->qstats[lp].backlog -= qdisc_pkt_len(to_drop);
        q->qstats[lp].drops++;
        q->qstats[lp].overlimits++;

        /* Check to update highest and lowest priorities. */
        if (skb_queue_empty(lp_qdisc)) {
                if (q->lowest_prio == q->highest_prio) {
                        /* The incoming packet is the only packet in queue. */
                        BUG_ON(sch->q.qlen != 1);
                        q->lowest_prio = prio;
                        q->highest_prio = prio;
                } else {
                        q->lowest_prio = calc_new_low_prio(q);
                }
        }

        if (prio > q->highest_prio)
                q->highest_prio = prio;

        return NET_XMIT_CN;
}

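/* Dequeue always serves the head of the highest non-empty band, then lowers
 * the cached highest_prio (or resets both bounds when the qdisc becomes
 * empty) if that band has just drained.
 */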
static struct sk_buff *skbprio_dequeue(struct Qdisc *sch)
{
        struct skbprio_sched_data *q = qdisc_priv(sch);
        struct sk_buff_head *hpq = &q->qdiscs[q->highest_prio];
        struct sk_buff *skb = __skb_dequeue(hpq);

        if (unlikely(!skb))
                return NULL;

        sch->q.qlen--;
        qdisc_qstats_backlog_dec(sch, skb);
        qdisc_bstats_update(sch, skb);

        q->qstats[q->highest_prio].backlog -= qdisc_pkt_len(skb);

        /* Update highest priority field. */
        if (skb_queue_empty(hpq)) {
                if (q->lowest_prio == q->highest_prio) {
                        BUG_ON(sch->q.qlen);
                        q->highest_prio = 0;
                        q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1;
                } else {
                        q->highest_prio = calc_new_high_prio(q);
                }
        }
        return skb;
}

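/* Change the only tunable, the queue limit.  The netlink payload is
 * validated against the size of struct tc_skbprio_qopt; the limit is
 * written with WRITE_ONCE() to pair with the READ_ONCE() in the enqueue
 * and dump paths, which may observe it without serialization.
 */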
static int skbprio_change(struct Qdisc *sch, struct nlattr *opt,
                          struct netlink_ext_ack *extack)
{
        struct tc_skbprio_qopt *ctl = nla_data(opt);

        if (opt->nla_len != nla_attr_size(sizeof(*ctl)))
                return -EINVAL;

        WRITE_ONCE(sch->limit, ctl->limit);
        return 0;
}

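/* Set up one FIFO per priority band and a default limit of 64 packets;
 * options supplied at qdisc creation are handled by skbprio_change().
 */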
static int skbprio_init(struct Qdisc *sch, struct nlattr *opt,
                        struct netlink_ext_ack *extack)
{
        struct skbprio_sched_data *q = qdisc_priv(sch);
        int prio;

        /* Initialise all queues, one for each possible priority. */
        for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++)
                __skb_queue_head_init(&q->qdiscs[prio]);

        memset(&q->qstats, 0, sizeof(q->qstats));
        q->highest_prio = 0;
        q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1;
        sch->limit = 64;
        if (!opt)
                return 0;

        return skbprio_change(sch, opt, extack);
}

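/* Report the current limit back to user space as a struct tc_skbprio_qopt. */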
static int skbprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct tc_skbprio_qopt opt;

        opt.limit = READ_ONCE(sch->limit);

        if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
                return -1;

        return skb->len;
}

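/* Reset purges every band and restores the initial priority bookkeeping
 * and per-band statistics; destroy only needs to free any packets that
 * are still queued.
 */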
static void skbprio_reset(struct Qdisc *sch)
{
        struct skbprio_sched_data *q = qdisc_priv(sch);
        int prio;

        for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++)
                __skb_queue_purge(&q->qdiscs[prio]);

        memset(&q->qstats, 0, sizeof(q->qstats));
        q->highest_prio = 0;
        q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1;
}

static void skbprio_destroy(struct Qdisc *sch)
{
        struct skbprio_sched_data *q = qdisc_priv(sch);
        int prio;

        for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++)
                __skb_queue_purge(&q->qdiscs[prio]);
}

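/* Skbprio does not support grafting child qdiscs or attaching filters;
 * the class operations below merely expose one read-only pseudo-class per
 * priority band (minor numbers 1 to SKBPRIO_MAX_PRIORITY) so that
 * per-band queue statistics can be dumped.
 */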
static struct Qdisc *skbprio_leaf(struct Qdisc *sch, unsigned long arg)
{
        return NULL;
}

static unsigned long skbprio_find(struct Qdisc *sch, u32 classid)
{
        return 0;
}

static int skbprio_dump_class(struct Qdisc *sch, unsigned long cl,
                              struct sk_buff *skb, struct tcmsg *tcm)
{
        tcm->tcm_handle |= TC_H_MIN(cl);
        return 0;
}

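/* Class minor number cl maps to priority band cl - 1. */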
static int skbprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
                                    struct gnet_dump *d)
{
        struct skbprio_sched_data *q = qdisc_priv(sch);

        if (gnet_stats_copy_queue(d, NULL, &q->qstats[cl - 1],
                                  q->qstats[cl - 1].qlen) < 0)
                return -1;
        return 0;
}

static void skbprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
        unsigned int i;

        if (arg->stop)
                return;

        for (i = 0; i < SKBPRIO_MAX_PRIORITY; i++) {
                if (!tc_qdisc_stats_dump(sch, i + 1, arg))
                        break;
        }
}

static const struct Qdisc_class_ops skbprio_class_ops = {
        .leaf           =       skbprio_leaf,
        .find           =       skbprio_find,
        .dump           =       skbprio_dump_class,
        .dump_stats     =       skbprio_dump_class_stats,
        .walk           =       skbprio_walk,
};

static struct Qdisc_ops skbprio_qdisc_ops __read_mostly = {
        .cl_ops         =       &skbprio_class_ops,
        .id             =       "skbprio",
        .priv_size      =       sizeof(struct skbprio_sched_data),
        .enqueue        =       skbprio_enqueue,
        .dequeue        =       skbprio_dequeue,
        .peek           =       qdisc_peek_dequeued,
        .init           =       skbprio_init,
        .reset          =       skbprio_reset,
        .change         =       skbprio_change,
        .dump           =       skbprio_dump,
        .destroy        =       skbprio_destroy,
        .owner          =       THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("skbprio");

static int __init skbprio_module_init(void)
{
        return register_qdisc(&skbprio_qdisc_ops);
}

static void __exit skbprio_module_exit(void)
{
        unregister_qdisc(&skbprio_qdisc_ops);
}

module_init(skbprio_module_init)
module_exit(skbprio_module_exit)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SKB priority based scheduling qdisc");