// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_skbprio.c  SKB Priority Queue.
 *
 * Authors:	Nishanth Devarajan, <ndev2021@gmail.com>
 *		Cody Doucette, <doucette@bu.edu>
 *	        original idea by Michel Machado, Cody Doucette, and Qiaobin Fu
 */

#include <linux/string.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/inet_ecn.h>

/*		SKB Priority Queue
 *	=================================
 *
 * Skbprio (SKB Priority Queue) is a queueing discipline that prioritizes
 * packets according to their skb->priority field. Under congestion,
 * Skbprio drops already-enqueued lower priority packets to make space
 * available for higher priority packets; it was conceived as a solution
 * for denial-of-service defenses that need to route packets with different
 * priorities as a means to overcome DoS attacks.
 */
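
/* Example configuration (illustrative sketch; assumes an iproute2 build
 * with skbprio support and a device named eth0):
 *
 *	tc qdisc add dev eth0 root handle 1: skbprio limit 64
 *
 * Senders can then steer packets into the higher-priority queues by raising
 * skb->priority, e.g. via setsockopt(SOL_SOCKET, SO_PRIORITY, ...) on the
 * transmitting socket.
 */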

struct skbprio_sched_data {
	/* Queue state. */
	struct sk_buff_head qdiscs[SKBPRIO_MAX_PRIORITY];
	struct gnet_stats_queue qstats[SKBPRIO_MAX_PRIORITY];
	u16 highest_prio;
	u16 lowest_prio;
};

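/* Scan downward from the previous highest priority for the next non-empty
 * queue and return its index; 0 (the default) if every queue is empty.
 */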
static u16 calc_new_high_prio(const struct skbprio_sched_data *q)
{
	int prio;

	for (prio = q->highest_prio - 1; prio >= q->lowest_prio; prio--) {
		if (!skb_queue_empty(&q->qdiscs[prio]))
			return prio;
	}

	/* SKB queue is empty, return 0 (default highest priority setting). */
	return 0;
}

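/* Scan upward from the previous lowest priority for the next non-empty
 * queue and return its index; SKBPRIO_MAX_PRIORITY - 1 (the default) if
 * every queue is empty.
 */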
static u16 calc_new_low_prio(const struct skbprio_sched_data *q)
{
	int prio;

	for (prio = q->lowest_prio + 1; prio <= q->highest_prio; prio++) {
		if (!skb_queue_empty(&q->qdiscs[prio]))
			return prio;
	}

	/* SKB queue is empty, return SKBPRIO_MAX_PRIORITY - 1
	 * (default lowest priority setting).
	 */
	return SKBPRIO_MAX_PRIORITY - 1;
}

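/* Enqueue @skb on the FIFO matching its (clamped) priority. Below the
 * configured limit packets are simply appended. At the limit, a packet
 * whose priority does not exceed the lowest occupied priority is dropped
 * outright; otherwise it is queued and the tail of the lowest-priority
 * FIFO is dropped to make room, returning NET_XMIT_CN to signal congestion.
 */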
static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	const unsigned int max_priority = SKBPRIO_MAX_PRIORITY - 1;
	struct skbprio_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *qdisc;
	struct sk_buff_head *lp_qdisc;
	struct sk_buff *to_drop;
	u16 prio, lp;

	/* Obtain the priority of @skb. */
	prio = min(skb->priority, max_priority);

	qdisc = &q->qdiscs[prio];
	if (sch->q.qlen < sch->limit) {
		__skb_queue_tail(qdisc, skb);
		qdisc_qstats_backlog_inc(sch, skb);
		q->qstats[prio].backlog += qdisc_pkt_len(skb);

		/* Check to update highest and lowest priorities. */
		if (prio > q->highest_prio)
			q->highest_prio = prio;

		if (prio < q->lowest_prio)
			q->lowest_prio = prio;

		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}

	/* If this packet has the lowest priority, drop it. */
	lp = q->lowest_prio;
	if (prio <= lp) {
		q->qstats[prio].drops++;
		q->qstats[prio].overlimits++;
		return qdisc_drop(skb, sch, to_free);
	}

	__skb_queue_tail(qdisc, skb);
	qdisc_qstats_backlog_inc(sch, skb);
	q->qstats[prio].backlog += qdisc_pkt_len(skb);

	/* Drop the packet at the tail of the lowest priority qdisc. */
	lp_qdisc = &q->qdiscs[lp];
	to_drop = __skb_dequeue_tail(lp_qdisc);
	BUG_ON(!to_drop);
	qdisc_qstats_backlog_dec(sch, to_drop);
	qdisc_drop(to_drop, sch, to_free);

	q->qstats[lp].backlog -= qdisc_pkt_len(to_drop);
	q->qstats[lp].drops++;
	q->qstats[lp].overlimits++;

	/* Check to update highest and lowest priorities. */
	if (skb_queue_empty(lp_qdisc)) {
		if (q->lowest_prio == q->highest_prio) {
			/* The incoming packet is the only packet in queue. */
			BUG_ON(sch->q.qlen != 1);
			q->lowest_prio = prio;
			q->highest_prio = prio;
		} else {
			q->lowest_prio = calc_new_low_prio(q);
		}
	}

	if (prio > q->highest_prio)
		q->highest_prio = prio;

	return NET_XMIT_CN;
}

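/* Dequeue from the head of the highest-priority non-empty FIFO and refresh
 * the cached highest/lowest priority bounds once that FIFO drains.
 */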
static struct sk_buff *skbprio_dequeue(struct Qdisc *sch)
{
	struct skbprio_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *hpq = &q->qdiscs[q->highest_prio];
	struct sk_buff *skb = __skb_dequeue(hpq);

	if (unlikely(!skb))
		return NULL;

	sch->q.qlen--;
	qdisc_qstats_backlog_dec(sch, skb);
	qdisc_bstats_update(sch, skb);

	q->qstats[q->highest_prio].backlog -= qdisc_pkt_len(skb);

	/* Update highest priority field. */
	if (skb_queue_empty(hpq)) {
		if (q->lowest_prio == q->highest_prio) {
			BUG_ON(sch->q.qlen);
			q->highest_prio = 0;
			q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1;
		} else {
			q->highest_prio = calc_new_high_prio(q);
		}
	}
	return skb;
}

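/* Apply a new packet limit supplied in the TCA_OPTIONS payload. */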
static int skbprio_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct tc_skbprio_qopt *ctl = nla_data(opt);

	/* Reject malformed or truncated option payloads before use. */
	if (opt->nla_len != nla_attr_size(sizeof(*ctl)))
		return -EINVAL;

	sch->limit = ctl->limit;
	return 0;
}

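/* Initialise one FIFO per priority level, clear the statistics and the
 * priority bounds, and set a default limit of 64 packets that any supplied
 * options may override.
 */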
static int skbprio_init(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct skbprio_sched_data *q = qdisc_priv(sch);
	int prio;

	/* Initialise all queues, one for each possible priority. */
	for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++)
		__skb_queue_head_init(&q->qdiscs[prio]);

	memset(&q->qstats, 0, sizeof(q->qstats));
	q->highest_prio = 0;
	q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1;
	sch->limit = 64;
	if (!opt)
		return 0;

	return skbprio_change(sch, opt, extack);
}

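/* Report the configured packet limit back to userspace. */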
static int skbprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tc_skbprio_qopt opt;

	opt.limit = sch->limit;

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		return -1;

	return skb->len;
}

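/* Purge every per-priority FIFO and restore the default bookkeeping. */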
static void skbprio_reset(struct Qdisc *sch)
{
	struct skbprio_sched_data *q = qdisc_priv(sch);
	int prio;

	sch->qstats.backlog = 0;
	sch->q.qlen = 0;

	for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++)
		__skb_queue_purge(&q->qdiscs[prio]);

	memset(&q->qstats, 0, sizeof(q->qstats));
	q->highest_prio = 0;
	q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1;
}

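/* Free any packets still held in the per-priority FIFOs. */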
static void skbprio_destroy(struct Qdisc *sch)
{
	struct skbprio_sched_data *q = qdisc_priv(sch);
	int prio;

	for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++)
		__skb_queue_purge(&q->qdiscs[prio]);
}

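/* Skbprio is not configurable as a classful qdisc: the "classes" below are
 * read-only handles onto the per-priority FIFOs, exposed chiefly so their
 * statistics can be inspected.
 */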
static struct Qdisc *skbprio_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long skbprio_find(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static int skbprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

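/* Class handle @cl maps to priority @cl - 1, so the per-priority queue
 * statistics come from qstats[cl - 1].
 */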
static int skbprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
{
	struct skbprio_sched_data *q = qdisc_priv(sch);

	if (gnet_stats_copy_queue(d, NULL, &q->qstats[cl - 1],
		q->qstats[cl - 1].qlen) < 0)
		return -1;
	return 0;
}

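/* Walk the SKBPRIO_MAX_PRIORITY pseudo-classes, reporting handle prio + 1
 * so that 0 stays reserved as the invalid class id.
 */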
static void skbprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < SKBPRIO_MAX_PRIORITY; i++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops skbprio_class_ops = {
	.leaf		=	skbprio_leaf,
	.find		=	skbprio_find,
	.dump		=	skbprio_dump_class,
	.dump_stats	=	skbprio_dump_class_stats,
	.walk		=	skbprio_walk,
};

static struct Qdisc_ops skbprio_qdisc_ops __read_mostly = {
	.cl_ops		=	&skbprio_class_ops,
	.id		=	"skbprio",
	.priv_size	=	sizeof(struct skbprio_sched_data),
	.enqueue	=	skbprio_enqueue,
	.dequeue	=	skbprio_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	skbprio_init,
	.reset		=	skbprio_reset,
	.change		=	skbprio_change,
	.dump		=	skbprio_dump,
	.destroy	=	skbprio_destroy,
	.owner		=	THIS_MODULE,
};

static int __init skbprio_module_init(void)
{
	return register_qdisc(&skbprio_qdisc_ops);
}

static void __exit skbprio_module_exit(void)
{
	unregister_qdisc(&skbprio_qdisc_ops);
}

module_init(skbprio_module_init)
module_exit(skbprio_module_exit)

MODULE_LICENSE("GPL");