/*
 * net/sched/sch_mqprio.c
 *
 * Copyright (c) 2010 John Fastabend <john.r.fastabend@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */
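
/*
 * Multiqueue Priority Qdisc (mqprio): a classful root qdisc that maps
 * skb priorities to traffic classes and each traffic class to a
 * contiguous range of transmit queues.  The mapping can be supplied
 * from user space or, with the "hw" flag, delegated to the driver via
 * ndo_setup_tc().
 *
 * Illustrative invocation (not taken from this file; syntax per the
 * iproute2 mqprio helper):
 *
 *	tc qdisc add dev eth0 root handle 1: mqprio num_tc 4 \
 *		map 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 \
 *		queues 2@0 2@2 2@4 2@6 hw 0
 */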

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>

struct mqprio_sched {
	struct Qdisc	**qdiscs;	/* per-tx-queue children, handed off in attach */
	int		hw_owned;	/* tc-to-queue mapping programmed by the driver */
};

static void mqprio_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (priv->qdiscs) {
		for (ntx = 0;
		     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
		     ntx++)
			qdisc_destroy(priv->qdiscs[ntx]);
		kfree(priv->qdiscs);
	}

	if (priv->hw_owned && dev->netdev_ops->ndo_setup_tc)
		dev->netdev_ops->ndo_setup_tc(dev, 0);
	else
		netdev_set_num_tc(dev, 0);
}

static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
{
	int i, j;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i < TC_BITMASK + 1; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc)
			return -EINVAL;
	}

	/* net_device does not support requested operation */
	if (qopt->hw && !dev->netdev_ops->ndo_setup_tc)
		return -EINVAL;

	/* If hardware owned, qcount and qoffset are taken from the LLD,
	 * so there is no reason to verify them here.
	 */
	if (qopt->hw)
		return 0;

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue count is in the tx range; being equal to
		 * real_num_tx_queues indicates the last queue is in use.
		 */
		if (qopt->offset[i] >= dev->real_num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues)
			return -EINVAL;

		/* Verify that the offsets and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j])
				return -EINVAL;
		}
	}

	return 0;
}

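/*
 * Worked example of the checks above (illustrative values, not from the
 * original source): on a device with real_num_tx_queues == 8, num_tc = 3
 * with count = {2, 2, 4} and offset = {0, 2, 4} is accepted, because each
 * range ends within the tx queue range and no range overlaps a later
 * offset.  Changing offset[1] to 1 would make range 0 ([0,2)) overlap
 * range 1, and mqprio_parse_opt() would return -EINVAL.
 */
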
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	int i, err = -EOPNOTSUPP;
	struct tc_mqprio_qopt *qopt = NULL;

	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
	BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	if (!opt || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);
	if (mqprio_parse_opt(dev, qopt))
		return -EINVAL;

	/* Pre-allocate the per-queue qdiscs so attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (priv->qdiscs == NULL) {
		err = -ENOMEM;
		goto err;
	}

	for (i = 0; i < dev->num_tx_queues; i++) {
		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)));
		if (qdisc == NULL) {
			err = -ENOMEM;
			goto err;
		}
		priv->qdiscs[i] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE;
	}

	/* If the mqprio options indicate that hardware should own the
	 * queue mapping, then run ndo_setup_tc; otherwise use the supplied
	 * and verified mapping.
	 */
	if (qopt->hw) {
		priv->hw_owned = 1;
		err = dev->netdev_ops->ndo_setup_tc(dev, qopt->num_tc);
		if (err)
			goto err;
	} else {
		netdev_set_num_tc(dev, qopt->num_tc);
		for (i = 0; i < qopt->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    qopt->count[i], qopt->offset[i]);
	}

	/* Always use supplied priority mappings */
	for (i = 0; i < TC_BITMASK + 1; i++)
		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

	sch->flags |= TCQ_F_MQROOT;
	return 0;

err:
	mqprio_destroy(sch);
	return err;
}

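/*
 * Note on the hardware-owned path above: when qopt->hw is set, the count
 * and offset of each traffic class are chosen by the driver rather than
 * by this qdisc; ndo_setup_tc(dev, num_tc) is expected to configure the
 * device and call netdev_set_num_tc()/netdev_set_tc_queue() itself (an
 * assumption based on how contemporary offloading drivers behave, not a
 * contract stated in this file).
 */
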
static void mqprio_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	/* Attach underlying qdisc */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_destroy(old);
		if (ntx < dev->real_num_tx_queues)
			qdisc_list_add(qdisc);
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}

static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1 - netdev_get_num_tc(dev);

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

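/*
 * Class numbering used by the class ops below: class IDs 1..num_tc
 * address the traffic classes themselves, while class IDs
 * num_tc + 1 .. num_tc + num_tx_queues address individual tx queues.
 * mqprio_queue_get() converts a queue class ID back into a tx queue and
 * returns NULL for IDs in the traffic-class range.
 */
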
static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
			struct Qdisc **old)
{
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}

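/*
 * mqprio_dump() rebuilds the root qdisc's queue length and byte/packet
 * counters from scratch on every dump by summing the per-tx-queue
 * children under each child's qdisc lock, then reports the currently
 * active tc-to-queue mapping straight from the net_device.
 */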
static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_mqprio_qopt opt = { 0 };
	struct Qdisc *qdisc;
	unsigned int i;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	for (i = 0; i < dev->num_tx_queues; i++) {
		qdisc = netdev_get_tx_queue(dev, i)->qdisc;
		spin_lock_bh(qdisc_lock(qdisc));
		sch->q.qlen += qdisc->q.qlen;
		sch->bstats.bytes += qdisc->bstats.bytes;
		sch->bstats.packets += qdisc->bstats.packets;
		sch->qstats.qlen += qdisc->qstats.qlen;
		sch->qstats.backlog += qdisc->qstats.backlog;
		sch->qstats.drops += qdisc->qstats.drops;
		sch->qstats.requeues += qdisc->qstats.requeues;
		sch->qstats.overlimits += qdisc->qstats.overlimits;
		spin_unlock_bh(qdisc_lock(qdisc));
	}

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
	opt.hw = priv->hw_owned;

	for (i = 0; i < netdev_get_num_tc(dev); i++) {
		opt.count[i] = dev->tc_to_txq[i].count;
		opt.offset[i] = dev->tc_to_txq[i].offset;
	}

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return dev_queue->qdisc_sleeping;
}

static unsigned long mqprio_get(struct Qdisc *sch, u32 classid)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx = TC_H_MIN(classid);

	if (ntx > dev->num_tx_queues + netdev_get_num_tc(dev))
		return 0;
	return ntx;
}

static void mqprio_put(struct Qdisc *sch, unsigned long cl)
{
}

static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct net_device *dev = qdisc_dev(sch);

	if (cl <= netdev_get_num_tc(dev)) {
		tcm->tcm_parent = TC_H_ROOT;
		tcm->tcm_info = 0;
	} else {
		int i;
		struct netdev_queue *dev_queue;

		dev_queue = mqprio_queue_get(sch, cl);
		tcm->tcm_parent = 0;
		for (i = 0; i < netdev_get_num_tc(dev); i++) {
			struct netdev_tc_txq tc = dev->tc_to_txq[i];
			int q_idx = cl - netdev_get_num_tc(dev);

			if (q_idx > tc.offset &&
			    q_idx <= tc.offset + tc.count) {
				tcm->tcm_parent =
					TC_H_MAKE(TC_H_MAJ(sch->handle),
						  TC_H_MIN(i + 1));
				break;
			}
		}
		tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	}
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	struct net_device *dev = qdisc_dev(sch);

	if (cl <= netdev_get_num_tc(dev)) {
		int i;
		struct Qdisc *qdisc;
		struct gnet_stats_queue qstats = {0};
		struct gnet_stats_basic_packed bstats = {0};
		struct netdev_tc_txq tc = dev->tc_to_txq[cl - 1];

		/* Drop the lock here; it will be reclaimed before touching
		 * statistics.  This is required because the d->lock we hold
		 * here is the lock on dev_queue->qdisc_sleeping, which is
		 * also acquired below.
		 */
		spin_unlock_bh(d->lock);

		for (i = tc.offset; i < tc.offset + tc.count; i++) {
			qdisc = netdev_get_tx_queue(dev, i)->qdisc;
			spin_lock_bh(qdisc_lock(qdisc));
			bstats.bytes += qdisc->bstats.bytes;
			bstats.packets += qdisc->bstats.packets;
			qstats.qlen += qdisc->qstats.qlen;
			qstats.backlog += qdisc->qstats.backlog;
			qstats.drops += qdisc->qstats.drops;
			qstats.requeues += qdisc->qstats.requeues;
			qstats.overlimits += qdisc->qstats.overlimits;
			spin_unlock_bh(qdisc_lock(qdisc));
		}
		/* Reclaim the root sleeping lock before completing stats */
		spin_lock_bh(d->lock);
		if (gnet_stats_copy_basic(d, &bstats) < 0 ||
		    gnet_stats_copy_queue(d, &qstats) < 0)
			return -1;
	} else {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

		sch = dev_queue->qdisc_sleeping;
		sch->qstats.qlen = sch->q.qlen;
		if (gnet_stats_copy_basic(d, &sch->bstats) < 0 ||
		    gnet_stats_copy_queue(d, &sch->qstats) < 0)
			return -1;
	}
	return 0;
}

static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	/* Walk hierarchy with a virtual class per tc */
	arg->count = arg->skip;
	for (ntx = arg->skip;
	     ntx < dev->num_tx_queues + netdev_get_num_tc(dev);
	     ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops mqprio_class_ops = {
	.graft		= mqprio_graft,
	.leaf		= mqprio_leaf,
	.get		= mqprio_get,
	.put		= mqprio_put,
	.walk		= mqprio_walk,
	.dump		= mqprio_dump_class,
	.dump_stats	= mqprio_dump_class_stats,
};

static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
	.cl_ops		= &mqprio_class_ops,
	.id		= "mqprio",
	.priv_size	= sizeof(struct mqprio_sched),
	.init		= mqprio_init,
	.destroy	= mqprio_destroy,
	.attach		= mqprio_attach,
	.dump		= mqprio_dump,
	.owner		= THIS_MODULE,
};

static int __init mqprio_module_init(void)
{
	return register_qdisc(&mqprio_qdisc_ops);
}

static void __exit mqprio_module_exit(void)
{
	unregister_qdisc(&mqprio_qdisc_ops);
}

module_init(mqprio_module_init);
module_exit(mqprio_module_exit);

MODULE_LICENSE("GPL");