Linux Audio

Check our new training course

Open-source upstreaming

Need help getting support for your hardware into upstream Linux?
Loading...
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_prio.c	Simple 3-band priority "scheduler".
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 * Fixes:       19990609: J Hadi Salim <hadi@nortelnetworks.com>:
 *              Init --  EINVAL when opt undefined
 */
  9
 10#include <linux/module.h>
 11#include <linux/slab.h>
 12#include <linux/types.h>
 13#include <linux/kernel.h>
 14#include <linux/string.h>
 15#include <linux/errno.h>
 16#include <linux/skbuff.h>
 17#include <net/netlink.h>
 18#include <net/pkt_sched.h>
 19#include <net/pkt_cls.h>
 20
/* Per-qdisc private state for the PRIO scheduler. */
struct prio_sched_data {
	int bands;				/* number of active bands */
	struct tcf_proto __rcu *filter_list;	/* classifier chain (RCU) */
	struct tcf_block *block;		/* filter block backing filter_list */
	u8  prio2band[TC_PRIO_MAX+1];		/* skb->priority -> band map */
	struct Qdisc *queues[TCQ_PRIO_BANDS];	/* one child qdisc per band */
};
 28
 29
/* Pick the child qdisc for a packet.
 *
 * If skb->priority already addresses this qdisc (major handle matches),
 * its minor number selects the band directly.  Otherwise run the
 * attached classifiers; with no filters or no match, fall back to the
 * prio2band map indexed by skb->priority.
 *
 * Returns the chosen child, or NULL when a TC action consumed/dropped
 * the packet (*qerr then carries the enqueue return code).
 */
static struct Qdisc *
prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	u32 band = skb->priority;
	struct tcf_result res;
	struct tcf_proto *fl;
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	if (TC_H_MAJ(skb->priority) != sch->handle) {
		fl = rcu_dereference_bh(q->filter_list);
		err = tcf_classify(skb, NULL, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
		switch (err) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if (!fl || err < 0) {
			/* No usable classification: map the priority value
			 * (only if its major part is clear) via prio2band.
			 */
			if (TC_H_MAJ(band))
				band = 0;
			return q->queues[q->prio2band[band & TC_PRIO_MAX]];
		}
		band = res.classid;
	}
	band = TC_H_MIN(band) - 1;	/* class minors are 1-based */
	if (band >= q->bands)
		return q->queues[q->prio2band[0]];

	return q->queues[band];
}
 67
/* Enqueue @skb on the band chosen by prio_classify() and, on success,
 * account for it in this qdisc's backlog and queue length.
 */
static int
prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
	/* Sample the length before handing the skb to the child. */
	unsigned int len = qdisc_pkt_len(skb);
	struct Qdisc *qdisc;
	int ret;

	qdisc = prio_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {

		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
#endif

	ret = qdisc_enqueue(skb, qdisc, to_free);
	if (ret == NET_XMIT_SUCCESS) {
		sch->qstats.backlog += len;
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	/* Child refused the packet; count a drop unless it was a bypass. */
	if (net_xmit_drop_count(ret))
		qdisc_qstats_drop(sch);
	return ret;
}
 96
 97static struct sk_buff *prio_peek(struct Qdisc *sch)
 98{
 99	struct prio_sched_data *q = qdisc_priv(sch);
100	int prio;
101
102	for (prio = 0; prio < q->bands; prio++) {
103		struct Qdisc *qdisc = q->queues[prio];
104		struct sk_buff *skb = qdisc->ops->peek(qdisc);
105		if (skb)
106			return skb;
107	}
108	return NULL;
109}
110
111static struct sk_buff *prio_dequeue(struct Qdisc *sch)
112{
113	struct prio_sched_data *q = qdisc_priv(sch);
114	int prio;
115
116	for (prio = 0; prio < q->bands; prio++) {
117		struct Qdisc *qdisc = q->queues[prio];
118		struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
119		if (skb) {
120			qdisc_bstats_update(sch, skb);
121			qdisc_qstats_backlog_dec(sch, skb);
122			sch->q.qlen--;
123			return skb;
124		}
125	}
126	return NULL;
127
128}
129
130static void
131prio_reset(struct Qdisc *sch)
132{
133	int prio;
134	struct prio_sched_data *q = qdisc_priv(sch);
135
136	for (prio = 0; prio < q->bands; prio++)
137		qdisc_reset(q->queues[prio]);
 
 
138}
139
/* Mirror a configuration change (qopt != NULL) or a teardown
 * (qopt == NULL) to hardware via ndo_setup_tc, when the device
 * supports TC offload.  Returns -EOPNOTSUPP when it does not.
 */
static int prio_offload(struct Qdisc *sch, struct tc_prio_qopt *qopt)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_prio_qopt_offload opt = {
		.handle = sch->handle,
		.parent = sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	if (qopt) {
		opt.command = TC_PRIO_REPLACE;
		opt.replace_params.bands = qopt->bands;
		memcpy(&opt.replace_params.priomap, qopt->priomap,
		       TC_PRIO_MAX + 1);
		opt.replace_params.qstats = &sch->qstats;
	} else {
		opt.command = TC_PRIO_DESTROY;
	}

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_PRIO, &opt);
}
163
/* Tear down the qdisc: release the filter block, undo any hardware
 * offload, then drop the reference on every child qdisc.
 */
static void
prio_destroy(struct Qdisc *sch)
{
	int prio;
	struct prio_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	prio_offload(sch, NULL);
	for (prio = 0; prio < q->bands; prio++)
		qdisc_put(q->queues[prio]);
}
175
/* Parse and apply a PRIO configuration (used for both init and change).
 *
 * Validates the band count and priomap, pre-allocates any new child
 * qdiscs before committing, then swaps the configuration in under the
 * qdisc tree lock.  Children of removed bands have their backlog
 * flushed under the lock and are released after it is dropped.
 */
static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct Qdisc *queues[TCQ_PRIO_BANDS];
	int oldbands = q->bands, i;
	struct tc_prio_qopt *qopt;

	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < TCQ_MIN_PRIO_BANDS)
		return -EINVAL;

	/* Every priority value must map to an existing band. */
	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (qopt->priomap[i] >= qopt->bands)
			return -EINVAL;
	}

	/* Before commit, make sure we can allocate all new qdiscs */
	for (i = oldbands; i < qopt->bands; i++) {
		queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					      TC_H_MAKE(sch->handle, i + 1),
					      extack);
		if (!queues[i]) {
			/* Unwind the qdiscs allocated so far. */
			while (i > oldbands)
				qdisc_put(queues[--i]);
			return -ENOMEM;
		}
	}

	prio_offload(sch, qopt);
	sch_tree_lock(sch);
	q->bands = qopt->bands;
	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);

	/* Shrinking: flush bands that no longer exist (under the lock). */
	for (i = q->bands; i < oldbands; i++)
		qdisc_tree_flush_backlog(q->queues[i]);

	/* Growing: install the pre-allocated children. */
	for (i = oldbands; i < q->bands; i++) {
		q->queues[i] = queues[i];
		if (q->queues[i] != &noop_qdisc)
			qdisc_hash_add(q->queues[i], true);
	}

	sch_tree_unlock(sch);

	/* Release removed children outside the tree lock. */
	for (i = q->bands; i < oldbands; i++)
		qdisc_put(q->queues[i]);
	return 0;
}
228
229static int prio_init(struct Qdisc *sch, struct nlattr *opt,
230		     struct netlink_ext_ack *extack)
231{
232	struct prio_sched_data *q = qdisc_priv(sch);
233	int err;
234
235	if (!opt)
236		return -EINVAL;
237
238	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
239	if (err)
240		return err;
241
242	return prio_tune(sch, opt, extack);
243}
244
/* Refresh sch->bstats/qstats from the offloading driver (if any) via
 * the generic offload dump helper.
 */
static int prio_dump_offload(struct Qdisc *sch)
{
	struct tc_prio_qopt_offload hw_stats = {
		.command = TC_PRIO_STATS,
		.handle = sch->handle,
		.parent = sch->parent,
		{
			.stats = {
				.bstats = &sch->bstats,
				.qstats = &sch->qstats,
			},
		},
	};

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_PRIO, &hw_stats);
}
261
/* Dump the current configuration (band count + priomap) as TCA_OPTIONS,
 * after refreshing stats from hardware when offloaded.
 */
static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_prio_qopt opt;
	int err;

	opt.bands = q->bands;
	memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);

	err = prio_dump_offload(sch);
	if (err)
		goto nla_put_failure;

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);	/* roll back any partially-written attribute */
	return -1;
}
285
/* Attach @new as the child of band (arg - 1), returning the previous
 * child in *@old.  A NULL @new is replaced by a fresh default pfifo
 * (or noop_qdisc if that allocation fails).  The graft is then mirrored
 * to offloading hardware via the generic helper.
 */
static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		      struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct tc_prio_qopt_offload graft_offload;
	unsigned long band = arg - 1;

	if (!new) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					TC_H_MAKE(sch->handle, arg), extack);
		if (!new)
			new = &noop_qdisc;
		else
			qdisc_hash_add(new, true);
	}

	*old = qdisc_replace(sch, new, &q->queues[band]);

	graft_offload.handle = sch->handle;
	graft_offload.parent = sch->parent;
	graft_offload.graft_params.band = band;
	graft_offload.graft_params.child_handle = new->handle;
	graft_offload.command = TC_PRIO_GRAFT;

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, *old,
				   TC_SETUP_QDISC_PRIO, &graft_offload,
				   extack);
	return 0;
}
315
316static struct Qdisc *
317prio_leaf(struct Qdisc *sch, unsigned long arg)
318{
319	struct prio_sched_data *q = qdisc_priv(sch);
320	unsigned long band = arg - 1;
321
322	return q->queues[band];
323}
324
/* Map a classid to a class handle (its 1-based band number), or 0 when
 * the minor number does not name a valid band.
 */
static unsigned long prio_find(struct Qdisc *sch, u32 classid)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned long band = TC_H_MIN(classid);

	/* band == 0 wraps to ULONG_MAX here, so it is rejected as well. */
	if (band - 1 >= q->bands)
		return 0;
	return band;
}
334
/* Binding a filter to a class is just a lookup; no refcounting needed. */
static unsigned long prio_bind(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	return prio_find(sch, classid);
}
339
340
/* Nothing to release on unbind; see prio_bind(). */
static void prio_unbind(struct Qdisc *q, unsigned long cl)
{
}
344
/* Fill the class dump message for band @cl (1-based). */
static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb,
			   struct tcmsg *tcm)
{
	struct prio_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = q->queues[cl-1]->handle;
	return 0;
}
354
/* Copy the per-band child qdisc's basic and queue statistics into @d. */
static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				 struct gnet_dump *d)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct Qdisc *cl_q;

	cl_q = q->queues[cl - 1];
	if (gnet_stats_copy_basic(d, cl_q->cpu_bstats,
				  &cl_q->bstats, true) < 0 ||
	    qdisc_qstats_copy(d, cl_q) < 0)
		return -1;

	return 0;
}
369
/* Iterate over all classes (bands), invoking the walker callback via
 * the generic stats-dump helper.
 */
static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;

	if (arg->stop)
		return;

	for (prio = 0; prio < q->bands; prio++) {
		if (!tc_qdisc_stats_dump(sch, prio + 1, arg))
			break;
	}
}
383
384static struct tcf_block *prio_tcf_block(struct Qdisc *sch, unsigned long cl,
385					struct netlink_ext_ack *extack)
386{
387	struct prio_sched_data *q = qdisc_priv(sch);
388
389	if (cl)
390		return NULL;
391	return q->block;
392}
393
/* Class operations: each band is exposed as a class with minor 1..bands. */
static const struct Qdisc_class_ops prio_class_ops = {
	.graft		=	prio_graft,
	.leaf		=	prio_leaf,
	.find		=	prio_find,
	.walk		=	prio_walk,
	.tcf_block	=	prio_tcf_block,
	.bind_tcf	=	prio_bind,
	.unbind_tcf	=	prio_unbind,
	.dump		=	prio_dump_class,
	.dump_stats	=	prio_dump_class_stats,
};
405
/* Qdisc operations table registered with the TC core. */
static struct Qdisc_ops prio_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&prio_class_ops,
	.id		=	"prio",
	.priv_size	=	sizeof(struct prio_sched_data),
	.enqueue	=	prio_enqueue,
	.dequeue	=	prio_dequeue,
	.peek		=	prio_peek,
	.init		=	prio_init,
	.reset		=	prio_reset,
	.destroy	=	prio_destroy,
	.change		=	prio_tune,
	.dump		=	prio_dump,
	.owner		=	THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("prio");
422
/* Module entry point: register the "prio" qdisc with the TC core. */
static int __init prio_module_init(void)
{
	return register_qdisc(&prio_qdisc_ops);
}
427
/* Module exit point: unregister the "prio" qdisc. */
static void __exit prio_module_exit(void)
{
	unregister_qdisc(&prio_qdisc_ops);
}
432
module_init(prio_module_init)
module_exit(prio_module_exit)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Simple 3-band priority qdisc");
v4.17
 
  1/*
  2 * net/sched/sch_prio.c	Simple 3-band priority "scheduler".
  3 *
  4 *		This program is free software; you can redistribute it and/or
  5 *		modify it under the terms of the GNU General Public License
  6 *		as published by the Free Software Foundation; either version
  7 *		2 of the License, or (at your option) any later version.
  8 *
  9 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 10 * Fixes:       19990609: J Hadi Salim <hadi@nortelnetworks.com>:
 11 *              Init --  EINVAL when opt undefined
 12 */
 13
 14#include <linux/module.h>
 15#include <linux/slab.h>
 16#include <linux/types.h>
 17#include <linux/kernel.h>
 18#include <linux/string.h>
 19#include <linux/errno.h>
 20#include <linux/skbuff.h>
 21#include <net/netlink.h>
 22#include <net/pkt_sched.h>
 23#include <net/pkt_cls.h>
 24
/* Per-qdisc private state for the PRIO scheduler. */
struct prio_sched_data {
	int bands;				/* number of active bands */
	struct tcf_proto __rcu *filter_list;	/* classifier chain (RCU) */
	struct tcf_block *block;		/* filter block backing filter_list */
	u8  prio2band[TC_PRIO_MAX+1];		/* skb->priority -> band map */
	struct Qdisc *queues[TCQ_PRIO_BANDS];	/* one child qdisc per band */
};
 32
 33
/* Pick the child qdisc for a packet.
 *
 * If skb->priority already addresses this qdisc (major handle matches),
 * its minor number selects the band directly.  Otherwise run the
 * attached classifiers; with no filters or no match, fall back to the
 * prio2band map indexed by skb->priority.
 *
 * Returns the chosen child, or NULL when a TC action consumed/dropped
 * the packet (*qerr then carries the enqueue return code).
 */
static struct Qdisc *
prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	u32 band = skb->priority;
	struct tcf_result res;
	struct tcf_proto *fl;
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	if (TC_H_MAJ(skb->priority) != sch->handle) {
		fl = rcu_dereference_bh(q->filter_list);
		err = tcf_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
		switch (err) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if (!fl || err < 0) {
			/* No usable classification: map the priority value
			 * (only if its major part is clear) via prio2band.
			 */
			if (TC_H_MAJ(band))
				band = 0;
			return q->queues[q->prio2band[band & TC_PRIO_MAX]];
		}
		band = res.classid;
	}
	band = TC_H_MIN(band) - 1;	/* class minors are 1-based */
	if (band >= q->bands)
		return q->queues[q->prio2band[0]];

	return q->queues[band];
}
 71
/* Enqueue @skb on the band chosen by prio_classify() and, on success,
 * account for it in this qdisc's backlog and queue length.
 */
static int
prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
	struct Qdisc *qdisc;
	int ret;

	qdisc = prio_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {

		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
#endif

	ret = qdisc_enqueue(skb, qdisc, to_free);
	if (ret == NET_XMIT_SUCCESS) {
		qdisc_qstats_backlog_inc(sch, skb);
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	/* Child refused the packet; count a drop unless it was a bypass. */
	if (net_xmit_drop_count(ret))
		qdisc_qstats_drop(sch);
	return ret;
}
 99
100static struct sk_buff *prio_peek(struct Qdisc *sch)
101{
102	struct prio_sched_data *q = qdisc_priv(sch);
103	int prio;
104
105	for (prio = 0; prio < q->bands; prio++) {
106		struct Qdisc *qdisc = q->queues[prio];
107		struct sk_buff *skb = qdisc->ops->peek(qdisc);
108		if (skb)
109			return skb;
110	}
111	return NULL;
112}
113
114static struct sk_buff *prio_dequeue(struct Qdisc *sch)
115{
116	struct prio_sched_data *q = qdisc_priv(sch);
117	int prio;
118
119	for (prio = 0; prio < q->bands; prio++) {
120		struct Qdisc *qdisc = q->queues[prio];
121		struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
122		if (skb) {
123			qdisc_bstats_update(sch, skb);
124			qdisc_qstats_backlog_dec(sch, skb);
125			sch->q.qlen--;
126			return skb;
127		}
128	}
129	return NULL;
130
131}
132
/* Drop all queued packets and zero this qdisc's own counters. */
static void
prio_reset(struct Qdisc *sch)
{
	int prio;
	struct prio_sched_data *q = qdisc_priv(sch);

	for (prio = 0; prio < q->bands; prio++)
		qdisc_reset(q->queues[prio]);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}
144
/* Mirror a configuration change (qopt != NULL) or a teardown
 * (qopt == NULL) to hardware via ndo_setup_tc, when the device
 * supports TC offload.  Returns -EOPNOTSUPP when it does not.
 */
static int prio_offload(struct Qdisc *sch, struct tc_prio_qopt *qopt)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_prio_qopt_offload opt = {
		.handle = sch->handle,
		.parent = sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	if (qopt) {
		opt.command = TC_PRIO_REPLACE;
		opt.replace_params.bands = qopt->bands;
		memcpy(&opt.replace_params.priomap, qopt->priomap,
		       TC_PRIO_MAX + 1);
		opt.replace_params.qstats = &sch->qstats;
	} else {
		opt.command = TC_PRIO_DESTROY;
	}

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_PRIO, &opt);
}
168
/* Tear down the qdisc: release the filter block, undo any hardware
 * offload, then destroy every child qdisc.
 */
static void
prio_destroy(struct Qdisc *sch)
{
	int prio;
	struct prio_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	prio_offload(sch, NULL);
	for (prio = 0; prio < q->bands; prio++)
		qdisc_destroy(q->queues[prio]);
}
180
/* Parse and apply a PRIO configuration (used for both init and change).
 *
 * Validates the band count and priomap, pre-allocates any new child
 * qdiscs before committing, then swaps the configuration in under the
 * qdisc tree lock.  Children of removed bands are flushed and destroyed
 * while the lock is held.
 */
static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct Qdisc *queues[TCQ_PRIO_BANDS];
	int oldbands = q->bands, i;
	struct tc_prio_qopt *qopt;

	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
		return -EINVAL;

	/* Every priority value must map to an existing band. */
	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (qopt->priomap[i] >= qopt->bands)
			return -EINVAL;
	}

	/* Before commit, make sure we can allocate all new qdiscs */
	for (i = oldbands; i < qopt->bands; i++) {
		queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					      TC_H_MAKE(sch->handle, i + 1),
					      extack);
		if (!queues[i]) {
			/* Unwind the qdiscs allocated so far. */
			while (i > oldbands)
				qdisc_destroy(queues[--i]);
			return -ENOMEM;
		}
	}

	prio_offload(sch, qopt);
	sch_tree_lock(sch);
	q->bands = qopt->bands;
	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);

	/* Shrinking: flush and destroy bands that no longer exist. */
	for (i = q->bands; i < oldbands; i++) {
		struct Qdisc *child = q->queues[i];

		qdisc_tree_reduce_backlog(child, child->q.qlen,
					  child->qstats.backlog);
		qdisc_destroy(child);
	}

	/* Growing: install the pre-allocated children. */
	for (i = oldbands; i < q->bands; i++) {
		q->queues[i] = queues[i];
		if (q->queues[i] != &noop_qdisc)
			qdisc_hash_add(q->queues[i], true);
	}

	sch_tree_unlock(sch);
	return 0;
}
235
/* Qdisc init: options are mandatory; acquire the filter block, then
 * delegate parameter parsing and band setup to prio_tune().
 */
static int prio_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int err;

	if (!opt)
		return -EINVAL;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	return prio_tune(sch, opt, extack);
}
251
/* Query the driver for updated stats.  Clears TCQ_F_OFFLOADED first and
 * sets it again only if the driver handled TC_PRIO_STATS; -EOPNOTSUPP
 * from the driver means "not offloaded", not an error.
 */
static int prio_dump_offload(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_prio_qopt_offload hw_stats = {
		.command = TC_PRIO_STATS,
		.handle = sch->handle,
		.parent = sch->parent,
		{
			.stats = {
				.bstats = &sch->bstats,
				.qstats = &sch->qstats,
			},
		},
	};
	int err;

	sch->flags &= ~TCQ_F_OFFLOADED;
	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return 0;

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_PRIO,
					    &hw_stats);
	if (err == -EOPNOTSUPP)
		return 0;

	if (!err)
		sch->flags |= TCQ_F_OFFLOADED;

	return err;
}
282
/* Dump the current configuration (band count + priomap) as TCA_OPTIONS,
 * after refreshing stats from hardware when offloaded.
 */
static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_prio_qopt opt;
	int err;

	opt.bands = q->bands;
	memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);

	err = prio_dump_offload(sch);
	if (err)
		goto nla_put_failure;

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);	/* roll back any partially-written attribute */
	return -1;
}
306
/* Attach @new as the child of band (arg - 1), returning the previous
 * child in *@old.  A NULL @new becomes noop_qdisc (the destroy path).
 * The graft is then offered to the device; a failure is reported via
 * extack only when at least one involved qdisc is actually offloaded.
 */
static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		      struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct tc_prio_qopt_offload graft_offload;
	struct net_device *dev = qdisc_dev(sch);
	unsigned long band = arg - 1;
	bool any_qdisc_is_offloaded;
	int err;

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->queues[band]);

	if (!tc_can_offload(dev))
		return 0;

	graft_offload.handle = sch->handle;
	graft_offload.parent = sch->parent;
	graft_offload.graft_params.band = band;
	graft_offload.graft_params.child_handle = new->handle;
	graft_offload.command = TC_PRIO_GRAFT;

	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_PRIO,
					    &graft_offload);

	/* Don't report error if the graft is part of destroy operation. */
	if (err && new != &noop_qdisc) {
		/* Don't report error if the parent, the old child and the new
		 * one are not offloaded.
		 */
		any_qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
		any_qdisc_is_offloaded |= new->flags & TCQ_F_OFFLOADED;
		if (*old)
			any_qdisc_is_offloaded |= (*old)->flags &
						   TCQ_F_OFFLOADED;

		if (any_qdisc_is_offloaded)
			NL_SET_ERR_MSG(extack, "Offloading graft operation failed.");
	}

	return 0;
}
351
352static struct Qdisc *
353prio_leaf(struct Qdisc *sch, unsigned long arg)
354{
355	struct prio_sched_data *q = qdisc_priv(sch);
356	unsigned long band = arg - 1;
357
358	return q->queues[band];
359}
360
/* Map a classid to a class handle (its 1-based band number), or 0 when
 * the minor number does not name a valid band.
 */
static unsigned long prio_find(struct Qdisc *sch, u32 classid)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned long band = TC_H_MIN(classid);

	/* band == 0 wraps to ULONG_MAX here, so it is rejected as well. */
	if (band - 1 >= q->bands)
		return 0;
	return band;
}
370
/* Binding a filter to a class is just a lookup; no refcounting needed. */
static unsigned long prio_bind(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	return prio_find(sch, classid);
}
375
376
377static void prio_unbind(struct Qdisc *q, unsigned long cl)
378{
379}
380
/* Fill the class dump message for band @cl (1-based). */
static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb,
			   struct tcmsg *tcm)
{
	struct prio_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = q->queues[cl-1]->handle;
	return 0;
}
390
/* Copy the per-band child qdisc's basic and queue statistics into @d. */
static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				 struct gnet_dump *d)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct Qdisc *cl_q;

	cl_q = q->queues[cl - 1];
	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl_q->bstats) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
		return -1;

	return 0;
}
405
/* Walk all bands, honouring the walker's skip/count/stop protocol. */
static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;

	if (arg->stop)
		return;

	for (prio = 0; prio < q->bands; prio++) {
		/* Skip the first arg->skip classes. */
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, prio + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}
426
427static struct tcf_block *prio_tcf_block(struct Qdisc *sch, unsigned long cl,
428					struct netlink_ext_ack *extack)
429{
430	struct prio_sched_data *q = qdisc_priv(sch);
431
432	if (cl)
433		return NULL;
434	return q->block;
435}
436
/* Class operations: each band is exposed as a class with minor 1..bands. */
static const struct Qdisc_class_ops prio_class_ops = {
	.graft		=	prio_graft,
	.leaf		=	prio_leaf,
	.find		=	prio_find,
	.walk		=	prio_walk,
	.tcf_block	=	prio_tcf_block,
	.bind_tcf	=	prio_bind,
	.unbind_tcf	=	prio_unbind,
	.dump		=	prio_dump_class,
	.dump_stats	=	prio_dump_class_stats,
};
448
/* Qdisc operations table registered with the TC core. */
static struct Qdisc_ops prio_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&prio_class_ops,
	.id		=	"prio",
	.priv_size	=	sizeof(struct prio_sched_data),
	.enqueue	=	prio_enqueue,
	.dequeue	=	prio_dequeue,
	.peek		=	prio_peek,
	.init		=	prio_init,
	.reset		=	prio_reset,
	.destroy	=	prio_destroy,
	.change		=	prio_tune,
	.dump		=	prio_dump,
	.owner		=	THIS_MODULE,
};
 
464
/* Module entry point: register the "prio" qdisc with the TC core. */
static int __init prio_module_init(void)
{
	return register_qdisc(&prio_qdisc_ops);
}
469
/* Module exit point: unregister the "prio" qdisc. */
static void __exit prio_module_exit(void)
{
	unregister_qdisc(&prio_qdisc_ops);
}
474
module_init(prio_module_init)
module_exit(prio_module_exit)

MODULE_LICENSE("GPL");