Linux Audio

Check our new training course

Loading...
v3.5.6
 
  1/*
  2 * net/sched/sch_prio.c	Simple 3-band priority "scheduler".
  3 *
  4 *		This program is free software; you can redistribute it and/or
  5 *		modify it under the terms of the GNU General Public License
  6 *		as published by the Free Software Foundation; either version
  7 *		2 of the License, or (at your option) any later version.
  8 *
  9 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 10 * Fixes:       19990609: J Hadi Salim <hadi@nortelnetworks.com>:
 11 *              Init --  EINVAL when opt undefined
 12 */
 13
 14#include <linux/module.h>
 15#include <linux/slab.h>
 16#include <linux/types.h>
 17#include <linux/kernel.h>
 18#include <linux/string.h>
 19#include <linux/errno.h>
 20#include <linux/skbuff.h>
 21#include <net/netlink.h>
 22#include <net/pkt_sched.h>
 23
 24
/* Per-qdisc private state for the prio scheduler. */
struct prio_sched_data {
	int bands;				/* number of active bands (2..TCQ_PRIO_BANDS) */
	struct tcf_proto *filter_list;		/* attached classifier chain (may be NULL) */
	u8  prio2band[TC_PRIO_MAX+1];		/* maps skb->priority (masked) to a band index */
	struct Qdisc *queues[TCQ_PRIO_BANDS];	/* one child qdisc per band; unused slots hold &noop_qdisc */
};
 31
 32
/*
 * Pick the child qdisc for @skb.
 *
 * If the skb's priority major number does not address this qdisc directly,
 * run the attached classifiers; otherwise the minor number selects the band.
 * Returns NULL (with *qerr set) only when a classifier action consumed or
 * shot the packet (CONFIG_NET_CLS_ACT).
 */
static struct Qdisc *
prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	u32 band = skb->priority;
	struct tcf_result res;
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	if (TC_H_MAJ(skb->priority) != sch->handle) {
		err = tc_classify(skb, q->filter_list, &res);
#ifdef CONFIG_NET_CLS_ACT
		switch (err) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			/* packet was taken by the action; not a drop for stats */
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		/* No filters or no match: fall back to the priority map. */
		if (!q->filter_list || err < 0) {
			if (TC_H_MAJ(band))
				band = 0;
			return q->queues[q->prio2band[band & TC_PRIO_MAX]];
		}
		band = res.classid;
	}
	/* band is u32: classid minor 0 wraps to a huge value and is caught below. */
	band = TC_H_MIN(band) - 1;
	if (band >= q->bands)
		return q->queues[q->prio2band[0]];

	return q->queues[band];
}
 66
/*
 * Enqueue entry point: classify to a band, hand the skb to that band's
 * child qdisc, and account queue length / drop stats on this qdisc.
 */
static int
prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct Qdisc *qdisc;
	int ret;

	qdisc = prio_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {
		/* classifier stole/shot the packet; count a drop only on bypass */
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
#endif

	ret = qdisc_enqueue(skb, qdisc);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	/* child refused the packet; mirror its drop accounting here */
	if (net_xmit_drop_count(ret))
		sch->qstats.drops++;
	return ret;
}
 93
 94static struct sk_buff *prio_peek(struct Qdisc *sch)
 95{
 96	struct prio_sched_data *q = qdisc_priv(sch);
 97	int prio;
 98
 99	for (prio = 0; prio < q->bands; prio++) {
100		struct Qdisc *qdisc = q->queues[prio];
101		struct sk_buff *skb = qdisc->ops->peek(qdisc);
102		if (skb)
103			return skb;
104	}
105	return NULL;
106}
107
/*
 * Strict-priority dequeue: always drain the lowest-numbered (highest
 * priority) non-empty band first.
 */
static struct sk_buff *prio_dequeue(struct Qdisc *sch)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;

	for (prio = 0; prio < q->bands; prio++) {
		struct Qdisc *qdisc = q->queues[prio];
		struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
		if (skb) {
			qdisc_bstats_update(sch, skb);
			sch->q.qlen--;
			return skb;
		}
	}
	return NULL;

}
125
/*
 * Drop one packet to relieve pressure. Scans bands in REVERSE order so
 * the lowest-priority traffic is sacrificed first. Returns the dropped
 * packet's length, or 0 if nothing could be dropped.
 */
static unsigned int prio_drop(struct Qdisc *sch)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;
	unsigned int len;
	struct Qdisc *qdisc;

	for (prio = q->bands-1; prio >= 0; prio--) {
		qdisc = q->queues[prio];
		/* assignment inside condition is deliberate: try each child's drop op */
		if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) {
			sch->q.qlen--;
			return len;
		}
	}
	return 0;
}
142
143
144static void
145prio_reset(struct Qdisc *sch)
146{
147	int prio;
148	struct prio_sched_data *q = qdisc_priv(sch);
149
150	for (prio = 0; prio < q->bands; prio++)
151		qdisc_reset(q->queues[prio]);
152	sch->q.qlen = 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
153}
154
/*
 * Teardown: destroy the classifier chain first (so no new packets can be
 * steered), then every active band's child. Slots beyond q->bands hold
 * the static &noop_qdisc and need no destruction.
 */
static void
prio_destroy(struct Qdisc *sch)
{
	int prio;
	struct prio_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	for (prio = 0; prio < q->bands; prio++)
		qdisc_destroy(q->queues[prio]);
}
165
/*
 * Apply a TCA_OPTIONS configuration (band count + priomap).
 *
 * Validates the netlink payload, then under the qdisc tree lock installs
 * the new band count/priomap and retires children of bands that no longer
 * exist. New bands are populated with pfifo children afterwards; a failed
 * allocation leaves that band at &noop_qdisc (packets to it are dropped)
 * rather than failing the whole change.
 */
static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct tc_prio_qopt *qopt;
	int i;

	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
		return -EINVAL;

	/* every priomap entry must point at an existing band */
	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (qopt->priomap[i] >= qopt->bands)
			return -EINVAL;
	}

	sch_tree_lock(sch);
	q->bands = qopt->bands;
	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);

	/* retire children of bands that were removed by the new config */
	for (i = q->bands; i < TCQ_PRIO_BANDS; i++) {
		struct Qdisc *child = q->queues[i];
		q->queues[i] = &noop_qdisc;
		if (child != &noop_qdisc) {
			qdisc_tree_decrease_qlen(child, child->q.qlen);
			qdisc_destroy(child);
		}
	}
	sch_tree_unlock(sch);

	/* allocate real children for any band still parked on noop_qdisc;
	 * allocation happens outside the tree lock, the swap inside it */
	for (i = 0; i < q->bands; i++) {
		if (q->queues[i] == &noop_qdisc) {
			struct Qdisc *child, *old;

			child = qdisc_create_dflt(sch->dev_queue,
						  &pfifo_qdisc_ops,
						  TC_H_MAKE(sch->handle, i + 1));
			if (child) {
				sch_tree_lock(sch);
				old = q->queues[i];
				q->queues[i] = child;

				if (old != &noop_qdisc) {
					qdisc_tree_decrease_qlen(old,
								 old->q.qlen);
					qdisc_destroy(old);
				}
				sch_tree_unlock(sch);
			}
		}
	}
	return 0;
}
221
222static int prio_init(struct Qdisc *sch, struct nlattr *opt)
 
223{
224	struct prio_sched_data *q = qdisc_priv(sch);
225	int i;
226
227	for (i = 0; i < TCQ_PRIO_BANDS; i++)
228		q->queues[i] = &noop_qdisc;
229
230	if (opt == NULL) {
231		return -EINVAL;
232	} else {
233		int err;
234
235		if ((err = prio_tune(sch, opt)) != 0)
236			return err;
237	}
238	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
239}
240
/*
 * Dump our configuration (bands + priomap) as a TCA_OPTIONS attribute.
 * Returns the new skb length, or -1 after trimming on overflow.
 */
static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_prio_qopt opt;

	opt.bands = q->bands;
	memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	/* roll the skb back to where we started */
	nlmsg_trim(skb, b);
	return -1;
}
259
/*
 * Replace the child qdisc of band (arg - 1) with @new, handing the old
 * child back via *old. arg was validated by prio_get(), so the index is
 * in range. A NULL @new installs &noop_qdisc (drops everything).
 */
static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		      struct Qdisc **old)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = q->queues[band];
	q->queues[band] = new;
	/* the old child's packets vanish from this tree's accounting */
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}
278
279static struct Qdisc *
280prio_leaf(struct Qdisc *sch, unsigned long arg)
281{
282	struct prio_sched_data *q = qdisc_priv(sch);
283	unsigned long band = arg - 1;
284
285	return q->queues[band];
286}
287
/*
 * Map a classid to an internal class handle (1-based band number), or 0
 * if the minor number does not name an existing band.
 */
static unsigned long prio_get(struct Qdisc *sch, u32 classid)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned long band = TC_H_MIN(classid);

	/* band is unsigned: minor 0 wraps to ULONG_MAX and is rejected too */
	if (band - 1 >= q->bands)
		return 0;
	return band;
}
297
/* Filter bind hook: classes need no refcounting, so just validate the id. */
static unsigned long prio_bind(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	return prio_get(sch, classid);
}
302
303
/* Class put hook: intentionally empty — bands are not reference counted. */
static void prio_put(struct Qdisc *q, unsigned long cl)
{
}
307
/* Fill a tcmsg for class @cl (1-based band): handle minor + child handle. */
static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb,
			   struct tcmsg *tcm)
{
	struct prio_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = q->queues[cl-1]->handle;
	return 0;
}
317
/* Copy the band's byte/packet and queue statistics into the dump. */
static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				 struct gnet_dump *d)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct Qdisc *cl_q;

	cl_q = q->queues[cl - 1];
	/* qstats.qlen is only refreshed on demand; sync it before copying */
	cl_q->qstats.qlen = cl_q->q.qlen;
	if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 ||
	    gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
		return -1;

	return 0;
}
332
/*
 * Iterate over all classes (bands), honouring the walker's skip/count/stop
 * protocol. Class handles passed to arg->fn are 1-based.
 */
static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;

	if (arg->stop)
		return;

	for (prio = 0; prio < q->bands; prio++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, prio + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}
353
354static struct tcf_proto **prio_find_tcf(struct Qdisc *sch, unsigned long cl)
 
355{
356	struct prio_sched_data *q = qdisc_priv(sch);
357
358	if (cl)
359		return NULL;
360	return &q->filter_list;
361}
362
/* Class-level operations: each band is exposed as one class. */
static const struct Qdisc_class_ops prio_class_ops = {
	.graft		=	prio_graft,
	.leaf		=	prio_leaf,
	.get		=	prio_get,
	.put		=	prio_put,
	.walk		=	prio_walk,
	.tcf_chain	=	prio_find_tcf,
	.bind_tcf	=	prio_bind,
	.unbind_tcf	=	prio_put,
	.dump		=	prio_dump_class,
	.dump_stats	=	prio_dump_class_stats,
};
375
/* Qdisc registration table for "prio". */
static struct Qdisc_ops prio_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&prio_class_ops,
	.id		=	"prio",
	.priv_size	=	sizeof(struct prio_sched_data),
	.enqueue	=	prio_enqueue,
	.dequeue	=	prio_dequeue,
	.peek		=	prio_peek,
	.drop		=	prio_drop,
	.init		=	prio_init,
	.reset		=	prio_reset,
	.destroy	=	prio_destroy,
	.change		=	prio_tune,
	.dump		=	prio_dump,
	.owner		=	THIS_MODULE,
};
 
392
/* Module entry point: register the "prio" qdisc with the tc core. */
static int __init prio_module_init(void)
{
	return register_qdisc(&prio_qdisc_ops);
}
397
/* Module exit point: unregister the "prio" qdisc. */
static void __exit prio_module_exit(void)
{
	unregister_qdisc(&prio_qdisc_ops);
}
402
/* Module boilerplate. */
module_init(prio_module_init)
module_exit(prio_module_exit)

MODULE_LICENSE("GPL");
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * net/sched/sch_prio.c	Simple 3-band priority "scheduler".
  4 *
 
 
 
 
 
  5 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  6 * Fixes:       19990609: J Hadi Salim <hadi@nortelnetworks.com>:
  7 *              Init --  EINVAL when opt undefined
  8 */
  9
 10#include <linux/module.h>
 11#include <linux/slab.h>
 12#include <linux/types.h>
 13#include <linux/kernel.h>
 14#include <linux/string.h>
 15#include <linux/errno.h>
 16#include <linux/skbuff.h>
 17#include <net/netlink.h>
 18#include <net/pkt_sched.h>
 19#include <net/pkt_cls.h>
 20
/* Per-qdisc private state for the prio scheduler (modern, block-based tcf). */
struct prio_sched_data {
	int bands;				/* number of active bands */
	struct tcf_proto __rcu *filter_list;	/* RCU-protected classifier chain */
	struct tcf_block *block;		/* shared filter block (tcf_block_get) */
	u8  prio2band[TC_PRIO_MAX+1];		/* skb->priority (masked) -> band index */
	struct Qdisc *queues[TCQ_PRIO_BANDS];	/* per-band child qdiscs */
};
 28
 29
/*
 * Pick the child qdisc for @skb: direct minor-number addressing when the
 * skb's priority major matches our handle, otherwise run the RCU-protected
 * classifier chain, falling back to the priomap. Returns NULL (with *qerr
 * set) only when a classifier action consumed/shot the packet.
 */
static struct Qdisc *
prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	u32 band = skb->priority;
	struct tcf_result res;
	struct tcf_proto *fl;
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	if (TC_H_MAJ(skb->priority) != sch->handle) {
		/* _bh: enqueue runs in softirq context under RCU-bh */
		fl = rcu_dereference_bh(q->filter_list);
		err = tcf_classify(skb, NULL, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
		switch (err) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		/* no filters or no match: fall back to the priority map */
		if (!fl || err < 0) {
			if (TC_H_MAJ(band))
				band = 0;
			return q->queues[q->prio2band[band & TC_PRIO_MAX]];
		}
		band = res.classid;
	}
	/* band is u32: classid minor 0 wraps to a huge value, caught below */
	band = TC_H_MIN(band) - 1;
	if (band >= q->bands)
		return q->queues[q->prio2band[0]];

	return q->queues[band];
}
 67
/*
 * Enqueue entry point. The packet length is captured BEFORE handing the
 * skb to the child, because the child may consume or modify it; backlog
 * accounting must use the pre-enqueue length.
 */
static int
prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);
	struct Qdisc *qdisc;
	int ret;

	qdisc = prio_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {
		/* classifier stole/shot the packet; drop stat only on bypass */
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
#endif

	ret = qdisc_enqueue(skb, qdisc, to_free);
	if (ret == NET_XMIT_SUCCESS) {
		sch->qstats.backlog += len;
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	if (net_xmit_drop_count(ret))
		qdisc_qstats_drop(sch);
	return ret;
}
 96
 97static struct sk_buff *prio_peek(struct Qdisc *sch)
 98{
 99	struct prio_sched_data *q = qdisc_priv(sch);
100	int prio;
101
102	for (prio = 0; prio < q->bands; prio++) {
103		struct Qdisc *qdisc = q->queues[prio];
104		struct sk_buff *skb = qdisc->ops->peek(qdisc);
105		if (skb)
106			return skb;
107	}
108	return NULL;
109}
110
/*
 * Strict-priority dequeue: drain the lowest-numbered non-empty band first,
 * keeping our own qlen/backlog/bstats in sync with what leaves the tree.
 */
static struct sk_buff *prio_dequeue(struct Qdisc *sch)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;

	for (prio = 0; prio < q->bands; prio++) {
		struct Qdisc *qdisc = q->queues[prio];
		struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
		if (skb) {
			qdisc_bstats_update(sch, skb);
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			return skb;
		}
	}
	return NULL;

}
129
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
130static void
131prio_reset(struct Qdisc *sch)
132{
133	int prio;
134	struct prio_sched_data *q = qdisc_priv(sch);
135
136	for (prio = 0; prio < q->bands; prio++)
137		qdisc_reset(q->queues[prio]);
138}
139
/*
 * Mirror a configuration change to hardware via ndo_setup_tc. A non-NULL
 * @qopt issues TC_PRIO_REPLACE with the new bands/priomap; NULL issues
 * TC_PRIO_DESTROY. Returns -EOPNOTSUPP if the device cannot offload.
 */
static int prio_offload(struct Qdisc *sch, struct tc_prio_qopt *qopt)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_prio_qopt_offload opt = {
		.handle = sch->handle,
		.parent = sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	if (qopt) {
		opt.command = TC_PRIO_REPLACE;
		opt.replace_params.bands = qopt->bands;
		memcpy(&opt.replace_params.priomap, qopt->priomap,
		       TC_PRIO_MAX + 1);
		opt.replace_params.qstats = &sch->qstats;
	} else {
		opt.command = TC_PRIO_DESTROY;
	}

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_PRIO, &opt);
}
163
/*
 * Teardown: release the filter block, tell hardware to destroy its
 * offloaded state, then drop our reference on each band's child.
 */
static void
prio_destroy(struct Qdisc *sch)
{
	int prio;
	struct prio_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	prio_offload(sch, NULL);
	for (prio = 0; prio < q->bands; prio++)
		qdisc_put(q->queues[prio]);
}
175
/*
 * Apply a TCA_OPTIONS configuration (band count + priomap).
 *
 * Two-phase commit: all new pfifo children are allocated up front so the
 * change can fail cleanly with -ENOMEM before anything is modified; the
 * swap itself then happens atomically under the qdisc tree lock. Children
 * of removed bands have their backlog flushed under the lock and are
 * released outside it.
 */
static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct Qdisc *queues[TCQ_PRIO_BANDS];
	int oldbands = q->bands, i;
	struct tc_prio_qopt *qopt;

	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < TCQ_MIN_PRIO_BANDS)
		return -EINVAL;

	/* every priomap entry must point at an existing band */
	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (qopt->priomap[i] >= qopt->bands)
			return -EINVAL;
	}

	/* Before commit, make sure we can allocate all new qdiscs */
	for (i = oldbands; i < qopt->bands; i++) {
		queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					      TC_H_MAKE(sch->handle, i + 1),
					      extack);
		if (!queues[i]) {
			/* unwind the children allocated so far */
			while (i > oldbands)
				qdisc_put(queues[--i]);
			return -ENOMEM;
		}
	}

	prio_offload(sch, qopt);
	sch_tree_lock(sch);
	q->bands = qopt->bands;
	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);

	/* removed bands: purge their backlog from the tree's accounting */
	for (i = q->bands; i < oldbands; i++)
		qdisc_tree_flush_backlog(q->queues[i]);

	for (i = oldbands; i < q->bands; i++) {
		q->queues[i] = queues[i];
		if (q->queues[i] != &noop_qdisc)
			qdisc_hash_add(q->queues[i], true);
	}

	sch_tree_unlock(sch);

	/* qdisc_put() may sleep/free; must happen outside the tree lock */
	for (i = q->bands; i < oldbands; i++)
		qdisc_put(q->queues[i]);
	return 0;
}
228
/*
 * Qdisc init: configuration is mandatory (EINVAL when absent). Acquires
 * the shared filter block before delegating the actual band setup to
 * prio_tune().
 */
static int prio_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int err;

	if (!opt)
		return -EINVAL;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	return prio_tune(sch, opt, extack);
}
244
/*
 * Pull hardware statistics into sch->bstats/qstats before a dump, via the
 * TC_PRIO_STATS offload command.
 */
static int prio_dump_offload(struct Qdisc *sch)
{
	struct tc_prio_qopt_offload hw_stats = {
		.command = TC_PRIO_STATS,
		.handle = sch->handle,
		.parent = sch->parent,
		{
			.stats = {
				.bstats = &sch->bstats,
				.qstats = &sch->qstats,
			},
		},
	};

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_PRIO, &hw_stats);
}
261
/*
 * Dump our configuration (bands + priomap) as a TCA_OPTIONS attribute,
 * refreshing offloaded hardware stats first. Returns the new skb length,
 * or -1 after trimming on failure.
 */
static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_prio_qopt opt;
	int err;

	opt.bands = q->bands;
	memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);

	err = prio_dump_offload(sch);
	if (err)
		goto nla_put_failure;

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	/* roll the skb back to where we started */
	nlmsg_trim(skb, b);
	return -1;
}
285
/*
 * Replace the child qdisc of band (arg - 1) with @new, handing the old
 * child back via *old. A NULL @new gets a fresh default pfifo (falling
 * back to &noop_qdisc if allocation fails). The change is also mirrored
 * to hardware via the TC_PRIO_GRAFT offload command.
 */
static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		      struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct tc_prio_qopt_offload graft_offload;
	unsigned long band = arg - 1;

	if (!new) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					TC_H_MAKE(sch->handle, arg), extack);
		if (!new)
			new = &noop_qdisc;
		else
			qdisc_hash_add(new, true);
	}

	/* swaps under the tree lock and fixes up qlen/backlog accounting */
	*old = qdisc_replace(sch, new, &q->queues[band]);

	graft_offload.handle = sch->handle;
	graft_offload.parent = sch->parent;
	graft_offload.graft_params.band = band;
	graft_offload.graft_params.child_handle = new->handle;
	graft_offload.command = TC_PRIO_GRAFT;

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, *old,
				   TC_SETUP_QDISC_PRIO, &graft_offload,
				   extack);
	return 0;
}
315
316static struct Qdisc *
317prio_leaf(struct Qdisc *sch, unsigned long arg)
318{
319	struct prio_sched_data *q = qdisc_priv(sch);
320	unsigned long band = arg - 1;
321
322	return q->queues[band];
323}
324
/*
 * Map a classid to an internal class handle (1-based band number), or 0
 * if the minor number does not name an existing band.
 */
static unsigned long prio_find(struct Qdisc *sch, u32 classid)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned long band = TC_H_MIN(classid);

	/* band is unsigned: minor 0 wraps to ULONG_MAX and is rejected too */
	if (band - 1 >= q->bands)
		return 0;
	return band;
}
334
/* Filter bind hook: classes need no refcounting, so just validate the id. */
static unsigned long prio_bind(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	return prio_find(sch, classid);
}
339
340
/* Filter unbind hook: intentionally empty — bands are not refcounted. */
static void prio_unbind(struct Qdisc *q, unsigned long cl)
{
}
344
/* Fill a tcmsg for class @cl (1-based band): handle minor + child handle. */
static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb,
			   struct tcmsg *tcm)
{
	struct prio_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = q->queues[cl-1]->handle;
	return 0;
}
354
/* Copy the band's basic (possibly per-CPU) and queue stats into the dump. */
static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				 struct gnet_dump *d)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct Qdisc *cl_q;

	cl_q = q->queues[cl - 1];
	if (gnet_stats_copy_basic(d, cl_q->cpu_bstats,
				  &cl_q->bstats, true) < 0 ||
	    qdisc_qstats_copy(d, cl_q) < 0)
		return -1;

	return 0;
}
369
/*
 * Iterate over all classes (bands) via the tc_qdisc_stats_dump() helper,
 * which handles the walker's skip/count/stop protocol. Class handles are
 * 1-based.
 */
static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;

	if (arg->stop)
		return;

	for (prio = 0; prio < q->bands; prio++) {
		if (!tc_qdisc_stats_dump(sch, prio + 1, arg))
			break;
	}
}
383
384static struct tcf_block *prio_tcf_block(struct Qdisc *sch, unsigned long cl,
385					struct netlink_ext_ack *extack)
386{
387	struct prio_sched_data *q = qdisc_priv(sch);
388
389	if (cl)
390		return NULL;
391	return q->block;
392}
393
/* Class-level operations: each band is exposed as one class. */
static const struct Qdisc_class_ops prio_class_ops = {
	.graft		=	prio_graft,
	.leaf		=	prio_leaf,
	.find		=	prio_find,
	.walk		=	prio_walk,
	.tcf_block	=	prio_tcf_block,
	.bind_tcf	=	prio_bind,
	.unbind_tcf	=	prio_unbind,
	.dump		=	prio_dump_class,
	.dump_stats	=	prio_dump_class_stats,
};
405
/* Qdisc registration table for "prio". */
static struct Qdisc_ops prio_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&prio_class_ops,
	.id		=	"prio",
	.priv_size	=	sizeof(struct prio_sched_data),
	.enqueue	=	prio_enqueue,
	.dequeue	=	prio_dequeue,
	.peek		=	prio_peek,
	.init		=	prio_init,
	.reset		=	prio_reset,
	.destroy	=	prio_destroy,
	.change		=	prio_tune,
	.dump		=	prio_dump,
	.owner		=	THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("prio");
422
/* Module entry point: register the "prio" qdisc with the tc core. */
static int __init prio_module_init(void)
{
	return register_qdisc(&prio_qdisc_ops);
}
427
/* Module exit point: unregister the "prio" qdisc. */
static void __exit prio_module_exit(void)
{
	unregister_qdisc(&prio_qdisc_ops);
}
432
/* Module boilerplate. */
module_init(prio_module_init)
module_exit(prio_module_exit)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Simple 3-band priority qdisc");