net/sched/sch_prio.c (v6.13.7)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_prio.c	Simple 3-band priority "scheduler".
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 * Fixes:       19990609: J Hadi Salim <hadi@nortelnetworks.com>:
 *              Init --  EINVAL when opt undefined
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

struct prio_sched_data {
	int bands;
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	u8  prio2band[TC_PRIO_MAX+1];
	struct Qdisc *queues[TCQ_PRIO_BANDS];
};


static struct Qdisc *
prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	u32 band = skb->priority;
	struct tcf_result res;
	struct tcf_proto *fl;
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	if (TC_H_MAJ(skb->priority) != sch->handle) {
		fl = rcu_dereference_bh(q->filter_list);
		err = tcf_classify(skb, NULL, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
		switch (err) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if (!fl || err < 0) {
			if (TC_H_MAJ(band))
				band = 0;
			return q->queues[q->prio2band[band & TC_PRIO_MAX]];
		}
		band = res.classid;
	}
	band = TC_H_MIN(band) - 1;
	if (band >= q->bands)
		return q->queues[q->prio2band[0]];

	return q->queues[band];
}

static int
prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);
	struct Qdisc *qdisc;
	int ret;

	qdisc = prio_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {

		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
#endif

	ret = qdisc_enqueue(skb, qdisc, to_free);
	if (ret == NET_XMIT_SUCCESS) {
		sch->qstats.backlog += len;
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	if (net_xmit_drop_count(ret))
		qdisc_qstats_drop(sch);
	return ret;
}

static struct sk_buff *prio_peek(struct Qdisc *sch)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;

	for (prio = 0; prio < q->bands; prio++) {
		struct Qdisc *qdisc = q->queues[prio];
		struct sk_buff *skb = qdisc->ops->peek(qdisc);
		if (skb)
			return skb;
	}
	return NULL;
}

static struct sk_buff *prio_dequeue(struct Qdisc *sch)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;

	for (prio = 0; prio < q->bands; prio++) {
		struct Qdisc *qdisc = q->queues[prio];
		struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
		if (skb) {
			qdisc_bstats_update(sch, skb);
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			return skb;
		}
	}
	return NULL;

}

static void
prio_reset(struct Qdisc *sch)
{
	int prio;
	struct prio_sched_data *q = qdisc_priv(sch);

	for (prio = 0; prio < q->bands; prio++)
		qdisc_reset(q->queues[prio]);
}

static int prio_offload(struct Qdisc *sch, struct tc_prio_qopt *qopt)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_prio_qopt_offload opt = {
		.handle = sch->handle,
		.parent = sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	if (qopt) {
		opt.command = TC_PRIO_REPLACE;
		opt.replace_params.bands = qopt->bands;
		memcpy(&opt.replace_params.priomap, qopt->priomap,
		       TC_PRIO_MAX + 1);
		opt.replace_params.qstats = &sch->qstats;
	} else {
		opt.command = TC_PRIO_DESTROY;
	}

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_PRIO, &opt);
}

static void
prio_destroy(struct Qdisc *sch)
{
	int prio;
	struct prio_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	prio_offload(sch, NULL);
	for (prio = 0; prio < q->bands; prio++)
		qdisc_put(q->queues[prio]);
}

static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct Qdisc *queues[TCQ_PRIO_BANDS];
	int oldbands = q->bands, i;
	struct tc_prio_qopt *qopt;

	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < TCQ_MIN_PRIO_BANDS)
		return -EINVAL;

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (qopt->priomap[i] >= qopt->bands)
			return -EINVAL;
	}

	/* Before commit, make sure we can allocate all new qdiscs */
	for (i = oldbands; i < qopt->bands; i++) {
		queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					      TC_H_MAKE(sch->handle, i + 1),
					      extack);
		if (!queues[i]) {
			while (i > oldbands)
				qdisc_put(queues[--i]);
			return -ENOMEM;
		}
	}

	prio_offload(sch, qopt);
	sch_tree_lock(sch);
	q->bands = qopt->bands;
	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);

	for (i = q->bands; i < oldbands; i++)
		qdisc_tree_flush_backlog(q->queues[i]);

	for (i = oldbands; i < q->bands; i++) {
		q->queues[i] = queues[i];
		if (q->queues[i] != &noop_qdisc)
			qdisc_hash_add(q->queues[i], true);
	}

	sch_tree_unlock(sch);

	for (i = q->bands; i < oldbands; i++)
		qdisc_put(q->queues[i]);
	return 0;
}

static int prio_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int err;

	if (!opt)
		return -EINVAL;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	return prio_tune(sch, opt, extack);
}

static int prio_dump_offload(struct Qdisc *sch)
{
	struct tc_prio_qopt_offload hw_stats = {
		.command = TC_PRIO_STATS,
		.handle = sch->handle,
		.parent = sch->parent,
		{
			.stats = {
				.bstats = &sch->bstats,
				.qstats = &sch->qstats,
			},
		},
	};

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_PRIO, &hw_stats);
}

static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_prio_qopt opt;
	int err;

	opt.bands = q->bands;
	memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);

	err = prio_dump_offload(sch);
	if (err)
		goto nla_put_failure;

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		      struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct tc_prio_qopt_offload graft_offload;
	unsigned long band = arg - 1;

	if (!new) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					TC_H_MAKE(sch->handle, arg), extack);
		if (!new)
			new = &noop_qdisc;
		else
			qdisc_hash_add(new, true);
	}

	*old = qdisc_replace(sch, new, &q->queues[band]);

	graft_offload.handle = sch->handle;
	graft_offload.parent = sch->parent;
	graft_offload.graft_params.band = band;
	graft_offload.graft_params.child_handle = new->handle;
	graft_offload.command = TC_PRIO_GRAFT;

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, *old,
				   TC_SETUP_QDISC_PRIO, &graft_offload,
				   extack);
	return 0;
}

static struct Qdisc *
prio_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	return q->queues[band];
}

static unsigned long prio_find(struct Qdisc *sch, u32 classid)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned long band = TC_H_MIN(classid);

	if (band - 1 >= q->bands)
		return 0;
	return band;
}

static unsigned long prio_bind(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	return prio_find(sch, classid);
}


static void prio_unbind(struct Qdisc *q, unsigned long cl)
{
}

static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb,
			   struct tcmsg *tcm)
{
	struct prio_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = q->queues[cl-1]->handle;
	return 0;
}

static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				 struct gnet_dump *d)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct Qdisc *cl_q;

	cl_q = q->queues[cl - 1];
	if (gnet_stats_copy_basic(d, cl_q->cpu_bstats,
				  &cl_q->bstats, true) < 0 ||
	    qdisc_qstats_copy(d, cl_q) < 0)
		return -1;

	return 0;
}

static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;

	if (arg->stop)
		return;

	for (prio = 0; prio < q->bands; prio++) {
		if (!tc_qdisc_stats_dump(sch, prio + 1, arg))
			break;
	}
}

static struct tcf_block *prio_tcf_block(struct Qdisc *sch, unsigned long cl,
					struct netlink_ext_ack *extack)
{
	struct prio_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static const struct Qdisc_class_ops prio_class_ops = {
	.graft		=	prio_graft,
	.leaf		=	prio_leaf,
	.find		=	prio_find,
	.walk		=	prio_walk,
	.tcf_block	=	prio_tcf_block,
	.bind_tcf	=	prio_bind,
	.unbind_tcf	=	prio_unbind,
	.dump		=	prio_dump_class,
	.dump_stats	=	prio_dump_class_stats,
};

static struct Qdisc_ops prio_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&prio_class_ops,
	.id		=	"prio",
	.priv_size	=	sizeof(struct prio_sched_data),
	.enqueue	=	prio_enqueue,
	.dequeue	=	prio_dequeue,
	.peek		=	prio_peek,
	.init		=	prio_init,
	.reset		=	prio_reset,
	.destroy	=	prio_destroy,
	.change		=	prio_tune,
	.dump		=	prio_dump,
	.owner		=	THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("prio");

static int __init prio_module_init(void)
{
	return register_qdisc(&prio_qdisc_ops);
}

static void __exit prio_module_exit(void)
{
	unregister_qdisc(&prio_qdisc_ops);
}

module_init(prio_module_init)
module_exit(prio_module_exit)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Simple 3-band priority qdisc");
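
For reference, the fallback branch of prio_classify() above (taken when no classifier is attached or classification fails) is just an array lookup: the low bits of skb->priority index the prio2band map configured via the priomap. Below is a minimal userspace sketch of that mapping; the helper names are hypothetical, and the priomap shown is the default documented for prio/pfifo_fast, included only as an illustrative assumption.

#include <stdio.h>

#define TC_PRIO_MAX 15

/* Default priority-to-band map used by prio/pfifo_fast (assumed here for
 * illustration); band 0 is dequeued first. */
static const unsigned char default_prio2band[TC_PRIO_MAX + 1] = {
	1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1
};

/* Mirrors the "no filter matched" branch of prio_classify(): mask the
 * priority down to TC_PRIO_MAX and index the map. */
static unsigned int classify_fallback(unsigned int skb_priority)
{
	return default_prio2band[skb_priority & TC_PRIO_MAX];
}

int main(void)
{
	unsigned int prio;

	for (prio = 0; prio <= TC_PRIO_MAX; prio++)
		printf("priority %2u -> band %u\n", prio, classify_fallback(prio));
	return 0;
}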
net/sched/sch_prio.c (v4.6)
 
/*
 * net/sched/sch_prio.c	Simple 3-band priority "scheduler".
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 * Fixes:       19990609: J Hadi Salim <hadi@nortelnetworks.com>:
 *              Init --  EINVAL when opt undefined
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>


struct prio_sched_data {
	int bands;
	struct tcf_proto __rcu *filter_list;
	u8  prio2band[TC_PRIO_MAX+1];
	struct Qdisc *queues[TCQ_PRIO_BANDS];
};


static struct Qdisc *
prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	u32 band = skb->priority;
	struct tcf_result res;
	struct tcf_proto *fl;
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	if (TC_H_MAJ(skb->priority) != sch->handle) {
		fl = rcu_dereference_bh(q->filter_list);
		err = tc_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
		switch (err) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if (!fl || err < 0) {
			if (TC_H_MAJ(band))
				band = 0;
			return q->queues[q->prio2band[band & TC_PRIO_MAX]];
		}
		band = res.classid;
	}
	band = TC_H_MIN(band) - 1;
	if (band >= q->bands)
		return q->queues[q->prio2band[0]];

	return q->queues[band];
}

static int
prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct Qdisc *qdisc;
	int ret;

	qdisc = prio_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {

		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		kfree_skb(skb);
		return ret;
	}
#endif

	ret = qdisc_enqueue(skb, qdisc);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	if (net_xmit_drop_count(ret))
		qdisc_qstats_drop(sch);
	return ret;
}

static struct sk_buff *prio_peek(struct Qdisc *sch)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;

	for (prio = 0; prio < q->bands; prio++) {
		struct Qdisc *qdisc = q->queues[prio];
		struct sk_buff *skb = qdisc->ops->peek(qdisc);
		if (skb)
			return skb;
	}
	return NULL;
}

static struct sk_buff *prio_dequeue(struct Qdisc *sch)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;

	for (prio = 0; prio < q->bands; prio++) {
		struct Qdisc *qdisc = q->queues[prio];
		struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
		if (skb) {
			qdisc_bstats_update(sch, skb);
			sch->q.qlen--;
			return skb;
		}
	}
	return NULL;

}

static unsigned int prio_drop(struct Qdisc *sch)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;
	unsigned int len;
	struct Qdisc *qdisc;

	for (prio = q->bands-1; prio >= 0; prio--) {
		qdisc = q->queues[prio];
		if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) {
			sch->q.qlen--;
			return len;
		}
	}
	return 0;
}


static void
prio_reset(struct Qdisc *sch)
{
	int prio;
	struct prio_sched_data *q = qdisc_priv(sch);

	for (prio = 0; prio < q->bands; prio++)
		qdisc_reset(q->queues[prio]);
	sch->q.qlen = 0;
}

static void
prio_destroy(struct Qdisc *sch)
{
	int prio;
	struct prio_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	for (prio = 0; prio < q->bands; prio++)
		qdisc_destroy(q->queues[prio]);
}

static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct tc_prio_qopt *qopt;
	int i;

	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
		return -EINVAL;

	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (qopt->priomap[i] >= qopt->bands)
			return -EINVAL;
	}

	sch_tree_lock(sch);
	q->bands = qopt->bands;
	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);

	for (i = q->bands; i < TCQ_PRIO_BANDS; i++) {
		struct Qdisc *child = q->queues[i];
		q->queues[i] = &noop_qdisc;
		if (child != &noop_qdisc) {
			qdisc_tree_reduce_backlog(child, child->q.qlen, child->qstats.backlog);
			qdisc_destroy(child);
		}
	}
	sch_tree_unlock(sch);

	for (i = 0; i < q->bands; i++) {
		if (q->queues[i] == &noop_qdisc) {
			struct Qdisc *child, *old;

			child = qdisc_create_dflt(sch->dev_queue,
						  &pfifo_qdisc_ops,
						  TC_H_MAKE(sch->handle, i + 1));
			if (child) {
				sch_tree_lock(sch);
				old = q->queues[i];
				q->queues[i] = child;

				if (old != &noop_qdisc) {
					qdisc_tree_reduce_backlog(old,
								  old->q.qlen,
								  old->qstats.backlog);
					qdisc_destroy(old);
				}
				sch_tree_unlock(sch);
			}
		}
	}
	return 0;
}

static int prio_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int i;

	for (i = 0; i < TCQ_PRIO_BANDS; i++)
		q->queues[i] = &noop_qdisc;

	if (opt == NULL) {
		return -EINVAL;
	} else {
		int err;

		if ((err = prio_tune(sch, opt)) != 0)
			return err;
	}
	return 0;
}

static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_prio_qopt opt;

	opt.bands = q->bands;
	memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		      struct Qdisc **old)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->queues[band]);
	return 0;
}

static struct Qdisc *
prio_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	return q->queues[band];
}

static unsigned long prio_get(struct Qdisc *sch, u32 classid)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned long band = TC_H_MIN(classid);

	if (band - 1 >= q->bands)
		return 0;
	return band;
}

static unsigned long prio_bind(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	return prio_get(sch, classid);
}


static void prio_put(struct Qdisc *q, unsigned long cl)
{
}

static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb,
			   struct tcmsg *tcm)
{
	struct prio_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = q->queues[cl-1]->handle;
	return 0;
}

static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				 struct gnet_dump *d)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct Qdisc *cl_q;

	cl_q = q->queues[cl - 1];
	if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
		return -1;

	return 0;
}

static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;

	if (arg->stop)
		return;

	for (prio = 0; prio < q->bands; prio++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, prio + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static struct tcf_proto __rcu **prio_find_tcf(struct Qdisc *sch,
					      unsigned long cl)
{
	struct prio_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static const struct Qdisc_class_ops prio_class_ops = {
	.graft		=	prio_graft,
	.leaf		=	prio_leaf,
	.get		=	prio_get,
	.put		=	prio_put,
	.walk		=	prio_walk,
	.tcf_chain	=	prio_find_tcf,
	.bind_tcf	=	prio_bind,
	.unbind_tcf	=	prio_put,
	.dump		=	prio_dump_class,
	.dump_stats	=	prio_dump_class_stats,
};

static struct Qdisc_ops prio_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&prio_class_ops,
	.id		=	"prio",
	.priv_size	=	sizeof(struct prio_sched_data),
	.enqueue	=	prio_enqueue,
	.dequeue	=	prio_dequeue,
	.peek		=	prio_peek,
	.drop		=	prio_drop,
	.init		=	prio_init,
	.reset		=	prio_reset,
	.destroy	=	prio_destroy,
	.change		=	prio_tune,
	.dump		=	prio_dump,
	.owner		=	THIS_MODULE,
};

static int __init prio_module_init(void)
{
	return register_qdisc(&prio_qdisc_ops);
}

static void __exit prio_module_exit(void)
{
	unregister_qdisc(&prio_qdisc_ops);
}

module_init(prio_module_init)
module_exit(prio_module_exit)

MODULE_LICENSE("GPL");
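
Both versions share the same dequeue discipline: prio_dequeue() scans the bands in ascending order, so band 0 is always drained before band 1, and so on; the older v4.6 code additionally walks the bands in reverse in prio_drop() so that overlimit drops hit the lowest-priority band first. Below is a standalone sketch of the strict-priority scan, using toy per-band counters rather than real child qdiscs; all names here are illustrative, not kernel API.

#include <stdio.h>

#define BANDS 3

/* Toy stand-in for the per-band child qdiscs: just a packet count per
 * band (illustrative only, not a kernel structure). */
struct toy_prio {
	unsigned int qlen[BANDS];
};

/* Mirrors the shape of prio_dequeue(): take from the first non-empty band,
 * scanning from band 0 upward; returns -1 when every band is empty. */
static int toy_dequeue(struct toy_prio *q)
{
	int band;

	for (band = 0; band < BANDS; band++) {
		if (q->qlen[band] > 0) {
			q->qlen[band]--;
			return band;
		}
	}
	return -1;
}

int main(void)
{
	struct toy_prio q = { .qlen = { 1, 2, 1 } };
	int band;

	/* Prints band 0 once, then band 1 twice, then band 2 once. */
	while ((band = toy_dequeue(&q)) >= 0)
		printf("dequeued a packet from band %d\n", band);
	return 0;
}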