// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_mq.c	Classful multiqueue dummy scheduler
 *
 * Copyright (c) 2009 Patrick McHardy <kaber@trash.net>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>

struct mq_sched {
	struct Qdisc	**qdiscs;
};
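
/* Forward an mq command (TC_MQ_CREATE/TC_MQ_DESTROY) to the device driver
 * so that any hardware offload of the root qdisc stays in sync.
 */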
static int mq_offload(struct Qdisc *sch, enum tc_mq_command cmd)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_mq_qopt_offload opt = {
		.command = cmd,
		.handle = sch->handle,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_MQ, &opt);
}
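
/* Ask the driver for hardware counters; the driver adds them to
 * sch->bstats and sch->qstats through the pointers in opt.stats.
 */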
static int mq_offload_stats(struct Qdisc *sch)
{
	struct tc_mq_qopt_offload opt = {
		.command = TC_MQ_STATS,
		.handle = sch->handle,
		.stats = {
			.bstats = &sch->bstats,
			.qstats = &sch->qstats,
		},
	};

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_MQ, &opt);
}
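
/* Notify the offload, then release any child qdiscs still held in the
 * private array (i.e. allocated by mq_init() but never attached).
 */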
static void mq_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	mq_offload(sch, TC_MQ_DESTROY);

	if (!priv->qdiscs)
		return;
	for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
		qdisc_put(priv->qdiscs[ntx]);
	kfree(priv->qdiscs);
}
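
/* Create one default child qdisc per hardware TX queue. The children are
 * only pre-allocated here and grafted in mq_attach(), which must not fail.
 */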
static int mq_init(struct Qdisc *sch, struct nlattr *opt,
		   struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	unsigned int ntx;

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdiscs, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		dev_queue = netdev_get_tx_queue(dev, ntx);
		qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(ntx + 1)),
					  extack);
		if (!qdisc)
			return -ENOMEM;
		priv->qdiscs[ntx] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	sch->flags |= TCQ_F_MQROOT;

	mq_offload(sch, TC_MQ_CREATE);
	return 0;
}
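
/* Graft the pre-allocated children onto their TX queues and release the
 * temporary array; the children are then reachable via the queues only.
 */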
static void mq_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_put(old);
#ifdef CONFIG_NET_SCHED
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
#endif
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}
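
/* Sum queue lengths and basic/queue stats of all children into the root's
 * counters, then let the offload add its hardware stats.
 */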
static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct Qdisc *qdisc;
	unsigned int ntx;
	__u32 qlen = 0;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	/* MQ supports lockless qdiscs. However, statistics accounting needs
	 * to account for all, none, or a mix of locked and unlocked child
	 * qdiscs. Percpu stats are added to counters in-band and locking
	 * qdisc totals are added at end.
	 */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
		spin_lock_bh(qdisc_lock(qdisc));

		if (qdisc_is_percpu_stats(qdisc)) {
			qlen = qdisc_qlen_sum(qdisc);
			__gnet_stats_copy_basic(NULL, &sch->bstats,
						qdisc->cpu_bstats,
						&qdisc->bstats);
			__gnet_stats_copy_queue(&sch->qstats,
						qdisc->cpu_qstats,
						&qdisc->qstats, qlen);
		} else {
			sch->q.qlen += qdisc->q.qlen;
			sch->bstats.bytes += qdisc->bstats.bytes;
			sch->bstats.packets += qdisc->bstats.packets;
			sch->qstats.qlen += qdisc->qstats.qlen;
			sch->qstats.backlog += qdisc->qstats.backlog;
			sch->qstats.drops += qdisc->qstats.drops;
			sch->qstats.requeues += qdisc->qstats.requeues;
			sch->qstats.overlimits += qdisc->qstats.overlimits;
		}

		spin_unlock_bh(qdisc_lock(qdisc));
	}

	return mq_offload_stats(sch);
}
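
/* Map a class ID (TX queue index + 1) to its netdev_queue; NULL if out of
 * range.
 */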
static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}
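
/* Choose the TX queue a new child should be grafted on, taken from the
 * parent class ID in the netlink request.
 */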
static struct netdev_queue *mq_select_queue(struct Qdisc *sch,
					    struct tcmsg *tcm)
{
	return mq_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}
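
/* Replace the qdisc on one TX queue. The device is quiesced around the
 * graft, and the driver is told about the new child via the offload
 * graft helper.
 */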
static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
		    struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
	struct tc_mq_qopt_offload graft_offload;
	struct net_device *dev = qdisc_dev(sch);

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);
	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	if (dev->flags & IFF_UP)
		dev_activate(dev);

	graft_offload.handle = sch->handle;
	graft_offload.graft_params.queue = cl - 1;
	graft_offload.graft_params.child_handle = new ? new->handle : 0;
	graft_offload.command = TC_MQ_GRAFT;

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, *old,
				   TC_SETUP_QDISC_MQ, &graft_offload, extack);
	return 0;
}
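
/* Return the leaf qdisc currently attached to a class's TX queue. */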
static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	return dev_queue->qdisc_sleeping;
}
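
/* Look up a class by ID. mq classes are fixed (one per TX queue), so this
 * only range-checks the queue number encoded in the minor ID.
 */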
static unsigned long mq_find(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!mq_queue_get(sch, ntx))
		return 0;
	return ntx;
}
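
/* Describe one class to userspace: parent (the root), class ID and the
 * handle of the attached leaf qdisc.
 */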
static int mq_dump_class(struct Qdisc *sch, unsigned long cl,
			 struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	return 0;
}
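
/* Per-class statistics are simply the stats of the leaf qdisc attached to
 * the corresponding TX queue.
 */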
static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
			       struct gnet_dump *d)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	sch = dev_queue->qdisc_sleeping;
	if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
	    qdisc_qstats_copy(d, sch) < 0)
		return -1;
	return 0;
}
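
/* Walk all classes (one per TX queue), honouring the walker's skip/stop
 * bookkeeping.
 */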
static void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops mq_class_ops = {
	.select_queue	= mq_select_queue,
	.graft		= mq_graft,
	.leaf		= mq_leaf,
	.find		= mq_find,
	.walk		= mq_walk,
	.dump		= mq_dump_class,
	.dump_stats	= mq_dump_class_stats,
};

struct Qdisc_ops mq_qdisc_ops __read_mostly = {
	.cl_ops		= &mq_class_ops,
	.id		= "mq",
	.priv_size	= sizeof(struct mq_sched),
	.init		= mq_init,
	.destroy	= mq_destroy,
	.attach		= mq_attach,
	.dump		= mq_dump,
	.owner		= THIS_MODULE,
};