/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>


struct multiq_sched_data {
	u16 bands;
	u16 max_bands;
	u16 curband;
	struct tcf_proto __rcu *filter_list;
	struct Qdisc **queues;
};

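/* Pick the band (and therefore the child qdisc) for an skb.  The band is
 * taken from skb_get_queue_mapping(), falling back to band 0 if the
 * mapping is out of range; the tc filter chain is consulted only so that
 * actions (stolen/queued/shot) can be honoured.
 */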
static struct Qdisc *
multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	u32 band;
	struct tcf_result res;
	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	err = tc_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
	switch (err) {
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		/* fall through */
	case TC_ACT_SHOT:
		return NULL;
	}
#endif
	band = skb_get_queue_mapping(skb);

	if (band >= q->bands)
		return q->queues[0];

	return q->queues[band];
}

static int
multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct Qdisc *qdisc;
	int ret;

	qdisc = multiq_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {

		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		kfree_skb(skb);
		return ret;
	}
#endif

	ret = qdisc_enqueue(skb, qdisc);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	if (net_xmit_drop_count(ret))
		qdisc_qstats_drop(sch);
	return ret;
}

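/* Round-robin over the bands, starting after the band served last time.
 * Bands whose underlying hardware queue is stopped are skipped so that a
 * stalled queue cannot cause head-of-line blocking for the others.
 */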
static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		q->curband++;
		if (q->curband >= q->bands)
			q->curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!netif_xmit_stopped(
		    netdev_get_tx_queue(qdisc_dev(sch), q->curband))) {
			qdisc = q->queues[q->curband];
			skb = qdisc->dequeue(qdisc);
			if (skb) {
				qdisc_bstats_update(sch, skb);
				sch->q.qlen--;
				return skb;
			}
		}
	}
	return NULL;

}

static struct sk_buff *multiq_peek(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned int curband = q->curband;
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		curband++;
		if (curband >= q->bands)
			curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!netif_xmit_stopped(
		    netdev_get_tx_queue(qdisc_dev(sch), curband))) {
			qdisc = q->queues[curband];
			skb = qdisc->ops->peek(qdisc);
			if (skb)
				return skb;
		}
	}
	return NULL;

}

static unsigned int multiq_drop(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int band;
	unsigned int len;
	struct Qdisc *qdisc;

	for (band = q->bands - 1; band >= 0; band--) {
		qdisc = q->queues[band];
		if (qdisc->ops->drop) {
			len = qdisc->ops->drop(qdisc);
			if (len != 0) {
				sch->q.qlen--;
				return len;
			}
		}
	}
	return 0;
}


static void
multiq_reset(struct Qdisc *sch)
{
	u16 band;
	struct multiq_sched_data *q = qdisc_priv(sch);

	for (band = 0; band < q->bands; band++)
		qdisc_reset(q->queues[band]);
	sch->q.qlen = 0;
	q->curband = 0;
}

static void
multiq_destroy(struct Qdisc *sch)
{
	int band;
	struct multiq_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	for (band = 0; band < q->bands; band++)
		qdisc_destroy(q->queues[band]);

	kfree(q->queues);
}

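/* (Re)configure the qdisc.  The number of bands is always forced to the
 * device's real_num_tx_queues; bands beyond that are swapped back to
 * noop_qdisc and destroyed, and any band still pointing at noop_qdisc
 * gets a default pfifo child.
 */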
static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct tc_multiq_qopt *qopt;
	int i;

	if (!netif_is_multiqueue(qdisc_dev(sch)))
		return -EOPNOTSUPP;
	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);

	qopt->bands = qdisc_dev(sch)->real_num_tx_queues;

	sch_tree_lock(sch);
	q->bands = qopt->bands;
	for (i = q->bands; i < q->max_bands; i++) {
		if (q->queues[i] != &noop_qdisc) {
			struct Qdisc *child = q->queues[i];
			q->queues[i] = &noop_qdisc;
			qdisc_tree_reduce_backlog(child, child->q.qlen,
						  child->qstats.backlog);
			qdisc_destroy(child);
		}
	}

	sch_tree_unlock(sch);

	for (i = 0; i < q->bands; i++) {
		if (q->queues[i] == &noop_qdisc) {
			struct Qdisc *child, *old;
			child = qdisc_create_dflt(sch->dev_queue,
						  &pfifo_qdisc_ops,
						  TC_H_MAKE(sch->handle,
							    i + 1));
			if (child) {
				sch_tree_lock(sch);
				old = q->queues[i];
				q->queues[i] = child;

				if (old != &noop_qdisc) {
					qdisc_tree_reduce_backlog(old,
								  old->q.qlen,
								  old->qstats.backlog);
					qdisc_destroy(old);
				}
				sch_tree_unlock(sch);
			}
		}
	}
	return 0;
}

static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int i, err;

	q->queues = NULL;

	if (opt == NULL)
		return -EINVAL;

	q->max_bands = qdisc_dev(sch)->num_tx_queues;

	q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL);
	if (!q->queues)
		return -ENOBUFS;
	for (i = 0; i < q->max_bands; i++)
		q->queues[i] = &noop_qdisc;

	err = multiq_tune(sch, opt);

	if (err)
		kfree(q->queues);

	return err;
}

static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_multiq_qopt opt;

	opt.bands = q->bands;
	opt.max_bands = q->max_bands;

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
			struct Qdisc **old)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->queues[band]);
	return 0;
}

static struct Qdisc *
multiq_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	return q->queues[band];
}

static unsigned long multiq_get(struct Qdisc *sch, u32 classid)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = TC_H_MIN(classid);

	if (band - 1 >= q->bands)
		return 0;
	return band;
}

static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent,
				 u32 classid)
{
	return multiq_get(sch, classid);
}


static void multiq_put(struct Qdisc *q, unsigned long cl)
{
}

static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct multiq_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = q->queues[cl - 1]->handle;
	return 0;
}

static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *cl_q;

	cl_q = q->queues[cl - 1];
	if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
		return -1;

	return 0;
}

static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int band;

	if (arg->stop)
		return;

	for (band = 0; band < q->bands; band++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, band + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static struct tcf_proto __rcu **multiq_find_tcf(struct Qdisc *sch,
						unsigned long cl)
{
	struct multiq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static const struct Qdisc_class_ops multiq_class_ops = {
	.graft		= multiq_graft,
	.leaf		= multiq_leaf,
	.get		= multiq_get,
	.put		= multiq_put,
	.walk		= multiq_walk,
	.tcf_chain	= multiq_find_tcf,
	.bind_tcf	= multiq_bind,
	.unbind_tcf	= multiq_put,
	.dump		= multiq_dump_class,
	.dump_stats	= multiq_dump_class_stats,
};

static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
	.next		= NULL,
	.cl_ops		= &multiq_class_ops,
	.id		= "multiq",
	.priv_size	= sizeof(struct multiq_sched_data),
	.enqueue	= multiq_enqueue,
	.dequeue	= multiq_dequeue,
	.peek		= multiq_peek,
	.drop		= multiq_drop,
	.init		= multiq_init,
	.reset		= multiq_reset,
	.destroy	= multiq_destroy,
	.change		= multiq_tune,
	.dump		= multiq_dump,
	.owner		= THIS_MODULE,
};

static int __init multiq_module_init(void)
{
	return register_qdisc(&multiq_qdisc_ops);
}

static void __exit multiq_module_exit(void)
{
	unregister_qdisc(&multiq_qdisc_ops);
}

module_init(multiq_module_init)
module_exit(multiq_module_exit)

MODULE_LICENSE("GPL");

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * Author: Alexander Duyck <alexander.h.duyck@intel.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

struct multiq_sched_data {
	u16 bands;
	u16 max_bands;
	u16 curband;
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	struct Qdisc **queues;
};


static struct Qdisc *
multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	u32 band;
	struct tcf_result res;
	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	err = tcf_classify(skb, NULL, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
	switch (err) {
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		fallthrough;
	case TC_ACT_SHOT:
		return NULL;
	}
#endif
	band = skb_get_queue_mapping(skb);

	if (band >= q->bands)
		return q->queues[0];

	return q->queues[band];
}

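/* Enqueue to the child qdisc selected by multiq_classify().  Packets that
 * are dropped here are placed on the to_free list so the caller can free
 * them in a batch.
 */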
static int
multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
	       struct sk_buff **to_free)
{
	struct Qdisc *qdisc;
	int ret;

	qdisc = multiq_classify(skb, sch, &ret);
#ifdef CONFIG_NET_CLS_ACT
	if (qdisc == NULL) {

		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
#endif

	ret = qdisc_enqueue(skb, qdisc, to_free);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	if (net_xmit_drop_count(ret))
		qdisc_qstats_drop(sch);
	return ret;
}

static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		q->curband++;
		if (q->curband >= q->bands)
			q->curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!netif_xmit_stopped(
		    netdev_get_tx_queue(qdisc_dev(sch), q->curband))) {
			qdisc = q->queues[q->curband];
			skb = qdisc->dequeue(qdisc);
			if (skb) {
				qdisc_bstats_update(sch, skb);
				sch->q.qlen--;
				return skb;
			}
		}
	}
	return NULL;

}

static struct sk_buff *multiq_peek(struct Qdisc *sch)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned int curband = q->curband;
	struct Qdisc *qdisc;
	struct sk_buff *skb;
	int band;

	for (band = 0; band < q->bands; band++) {
		/* cycle through bands to ensure fairness */
		curband++;
		if (curband >= q->bands)
			curband = 0;

		/* Check that target subqueue is available before
		 * pulling an skb to avoid head-of-line blocking.
		 */
		if (!netif_xmit_stopped(
		    netdev_get_tx_queue(qdisc_dev(sch), curband))) {
			qdisc = q->queues[curband];
			skb = qdisc->ops->peek(qdisc);
			if (skb)
				return skb;
		}
	}
	return NULL;

}

static void
multiq_reset(struct Qdisc *sch)
{
	u16 band;
	struct multiq_sched_data *q = qdisc_priv(sch);

	for (band = 0; band < q->bands; band++)
		qdisc_reset(q->queues[band]);
	q->curband = 0;
}

static void
multiq_destroy(struct Qdisc *sch)
{
	int band;
	struct multiq_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	for (band = 0; band < q->bands; band++)
		qdisc_put(q->queues[band]);

	kfree(q->queues);
}

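/* (Re)configure the qdisc.  Bands beyond the device's real_num_tx_queues
 * are detached and purged under the qdisc tree lock, but the qdisc_put()
 * calls for them are deferred until the lock has been released.
 */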
static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct tc_multiq_qopt *qopt;
	struct Qdisc **removed;
	int i, n_removed = 0;

	if (!netif_is_multiqueue(qdisc_dev(sch)))
		return -EOPNOTSUPP;
	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);

	qopt->bands = qdisc_dev(sch)->real_num_tx_queues;

	removed = kmalloc(sizeof(*removed) * (q->max_bands - qopt->bands),
			  GFP_KERNEL);
	if (!removed)
		return -ENOMEM;

	sch_tree_lock(sch);
	q->bands = qopt->bands;
	for (i = q->bands; i < q->max_bands; i++) {
		if (q->queues[i] != &noop_qdisc) {
			struct Qdisc *child = q->queues[i];

			q->queues[i] = &noop_qdisc;
			qdisc_purge_queue(child);
			removed[n_removed++] = child;
		}
	}

	sch_tree_unlock(sch);

	for (i = 0; i < n_removed; i++)
		qdisc_put(removed[i]);
	kfree(removed);

	for (i = 0; i < q->bands; i++) {
		if (q->queues[i] == &noop_qdisc) {
			struct Qdisc *child, *old;
			child = qdisc_create_dflt(sch->dev_queue,
						  &pfifo_qdisc_ops,
						  TC_H_MAKE(sch->handle,
							    i + 1), extack);
			if (child) {
				sch_tree_lock(sch);
				old = q->queues[i];
				q->queues[i] = child;
				if (child != &noop_qdisc)
					qdisc_hash_add(child, true);

				if (old != &noop_qdisc)
					qdisc_purge_queue(old);
				sch_tree_unlock(sch);
				qdisc_put(old);
			}
		}
	}
	return 0;
}

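/* Set up the classifier block and one noop_qdisc placeholder per possible
 * hardware queue, then apply the initial configuration via multiq_tune().
 */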
static int multiq_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int i, err;

	q->queues = NULL;

	if (!opt)
		return -EINVAL;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	q->max_bands = qdisc_dev(sch)->num_tx_queues;

	q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL);
	if (!q->queues)
		return -ENOBUFS;
	for (i = 0; i < q->max_bands; i++)
		q->queues[i] = &noop_qdisc;

	return multiq_tune(sch, opt, extack);
}

static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_multiq_qopt opt;

	opt.bands = q->bands;
	opt.max_bands = q->max_bands;

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
			struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->queues[band]);
	return 0;
}

static struct Qdisc *
multiq_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	return q->queues[band];
}

static unsigned long multiq_find(struct Qdisc *sch, u32 classid)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	unsigned long band = TC_H_MIN(classid);

	if (band - 1 >= q->bands)
		return 0;
	return band;
}

static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent,
				 u32 classid)
{
	return multiq_find(sch, classid);
}


static void multiq_unbind(struct Qdisc *q, unsigned long cl)
{
}

static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct multiq_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = q->queues[cl - 1]->handle;
	return 0;
}

static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	struct Qdisc *cl_q;

	cl_q = q->queues[cl - 1];
	if (gnet_stats_copy_basic(d, cl_q->cpu_bstats, &cl_q->bstats, true) < 0 ||
	    qdisc_qstats_copy(d, cl_q) < 0)
		return -1;

	return 0;
}

static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct multiq_sched_data *q = qdisc_priv(sch);
	int band;

	if (arg->stop)
		return;

	for (band = 0; band < q->bands; band++) {
		if (!tc_qdisc_stats_dump(sch, band + 1, arg))
			break;
	}
}

static struct tcf_block *multiq_tcf_block(struct Qdisc *sch, unsigned long cl,
					  struct netlink_ext_ack *extack)
{
	struct multiq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static const struct Qdisc_class_ops multiq_class_ops = {
	.graft		= multiq_graft,
	.leaf		= multiq_leaf,
	.find		= multiq_find,
	.walk		= multiq_walk,
	.tcf_block	= multiq_tcf_block,
	.bind_tcf	= multiq_bind,
	.unbind_tcf	= multiq_unbind,
	.dump		= multiq_dump_class,
	.dump_stats	= multiq_dump_class_stats,
};

static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
	.next		= NULL,
	.cl_ops		= &multiq_class_ops,
	.id		= "multiq",
	.priv_size	= sizeof(struct multiq_sched_data),
	.enqueue	= multiq_enqueue,
	.dequeue	= multiq_dequeue,
	.peek		= multiq_peek,
	.init		= multiq_init,
	.reset		= multiq_reset,
	.destroy	= multiq_destroy,
	.change		= multiq_tune,
	.dump		= multiq_dump,
	.owner		= THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("multiq");

static int __init multiq_module_init(void)
{
	return register_qdisc(&multiq_qdisc_ops);
}

static void __exit multiq_module_exit(void)
{
	unregister_qdisc(&multiq_qdisc_ops);
}

module_init(multiq_module_init)
module_exit(multiq_module_exit)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multi queue to hardware queue mapping qdisc");