v4.17
/*
 * net/sched/sch_mq.c		Classful multiqueue dummy scheduler
 *
 * Copyright (c) 2009 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>

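/* mq is a dummy root qdisc for multiqueue devices: it queues nothing
 * itself but exposes each hardware TX queue as a class, so the per-queue
 * child qdiscs can be inspected and replaced individually. Class N maps
 * to TX queue N - 1 (class minor 0 is reserved).
 */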
struct mq_sched {
	struct Qdisc		**qdiscs;
};

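/* Tear down any child qdiscs allocated by mq_init() but not yet handed
 * over to the device by mq_attach() (after attach, priv->qdiscs is NULL).
 */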
static void mq_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (!priv->qdiscs)
		return;
	for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
		qdisc_destroy(priv->qdiscs[ntx]);
	kfree(priv->qdiscs);
}

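/* Allocate one default child qdisc per TX queue. Only valid as the root
 * qdisc of a multiqueue device; child ntx gets classid <handle>:<ntx+1>.
 */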
static int mq_init(struct Qdisc *sch, struct nlattr *opt,
		   struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	unsigned int ntx;

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdiscs, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		dev_queue = netdev_get_tx_queue(dev, ntx);
		qdisc = qdisc_create_dflt(dev_queue, get_default_qdisc_ops(dev, ntx),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(ntx + 1)),
					  extack);
		if (!qdisc)
			return -ENOMEM;
		priv->qdiscs[ntx] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	sch->flags |= TCQ_F_MQROOT;
	return 0;
}

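/* Graft the pre-allocated children onto their queues and drop the private
 * array; from here on the children are reachable only via the device.
 */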
static void mq_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_destroy(old);
#ifdef CONFIG_NET_SCHED
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
#endif

	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}

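/* Refresh the root's aggregate statistics from all children before
 * dumping.
 */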
static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct Qdisc *qdisc;
	unsigned int ntx;
	__u32 qlen = 0;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	/* MQ supports lockless qdiscs. However, statistics accounting needs
	 * to account for all, none, or a mix of locked and unlocked child
	 * qdiscs. Percpu stats are added to counters in-band and locking
	 * qdisc totals are added at end.
	 */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
		spin_lock_bh(qdisc_lock(qdisc));

		if (qdisc_is_percpu_stats(qdisc)) {
			qlen = qdisc_qlen_sum(qdisc);
			__gnet_stats_copy_basic(NULL, &sch->bstats,
						qdisc->cpu_bstats,
						&qdisc->bstats);
			__gnet_stats_copy_queue(&sch->qstats,
						qdisc->cpu_qstats,
						&qdisc->qstats, qlen);
		} else {
			sch->q.qlen		+= qdisc->q.qlen;
			sch->bstats.bytes	+= qdisc->bstats.bytes;
			sch->bstats.packets	+= qdisc->bstats.packets;
			sch->qstats.backlog	+= qdisc->qstats.backlog;
			sch->qstats.drops	+= qdisc->qstats.drops;
			sch->qstats.requeues	+= qdisc->qstats.requeues;
			sch->qstats.overlimits	+= qdisc->qstats.overlimits;
		}

		spin_unlock_bh(qdisc_lock(qdisc));
	}

	return 0;
}

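/* Map a class handle back to its TX queue; minor 0 and out-of-range
 * minors return NULL.
 */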
static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

static struct netdev_queue *mq_select_queue(struct Qdisc *sch,
					    struct tcmsg *tcm)
{
	return mq_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

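/* Replace the child qdisc of one TX queue. The device is deactivated
 * around the graft so the datapath never sees a half-installed qdisc.
 */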
static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
		    struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
	struct net_device *dev = qdisc_dev(sch);

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);
	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	if (dev->flags & IFF_UP)
		dev_activate(dev);
	return 0;
}

static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	return dev_queue->qdisc_sleeping;
}

static unsigned long mq_find(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!mq_queue_get(sch, ntx))
		return 0;
	return ntx;
}

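/* Report one class: parented at the root, minor number identifying the
 * TX queue, and the handle of the currently attached child qdisc.
 */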
static int mq_dump_class(struct Qdisc *sch, unsigned long cl,
			 struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	return 0;
}

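/* Class statistics are simply those of the attached child qdisc. */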
static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
			       struct gnet_dump *d)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	sch = dev_queue->qdisc_sleeping;
	if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
		return -1;
	return 0;
}

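/* Iterate over all classes (one per TX queue) for dumps and filters. */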
static void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops mq_class_ops = {
	.select_queue	= mq_select_queue,
	.graft		= mq_graft,
	.leaf		= mq_leaf,
	.find		= mq_find,
	.walk		= mq_walk,
	.dump		= mq_dump_class,
	.dump_stats	= mq_dump_class_stats,
};

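/* Note the absence of .enqueue/.dequeue: mq never sits in the datapath.
 * TX queue selection picks the per-queue child qdisc directly.
 */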
struct Qdisc_ops mq_qdisc_ops __read_mostly = {
	.cl_ops		= &mq_class_ops,
	.id		= "mq",
	.priv_size	= sizeof(struct mq_sched),
	.init		= mq_init,
	.destroy	= mq_destroy,
	.attach		= mq_attach,
	.dump		= mq_dump,
	.owner		= THIS_MODULE,
};
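
To make the class numbering concrete: mq_init() derives each child's classid with TC_H_MAKE(TC_H_MAJ(sch->handle), TC_H_MIN(ntx + 1)). Below is a small hypothetical userspace sketch, not part of this file, that prints the mapping, assuming a root handle of "1:" and four TX queues; the TC_H_* macros are copied from include/uapi/linux/pkt_sched.h.

#include <stdio.h>

/* Handle helpers as defined in include/uapi/linux/pkt_sched.h. */
#define TC_H_MAJ(h)		((h) & 0xFFFF0000U)
#define TC_H_MIN(h)		((h) & 0x0000FFFFU)
#define TC_H_MAKE(maj, min)	(((maj) & 0xFFFF0000U) | ((min) & 0x0000FFFFU))

int main(void)
{
	unsigned int handle = 0x00010000;	/* root qdisc handle "1:" */
	unsigned int ntx;

	for (ntx = 0; ntx < 4; ntx++) {		/* assume 4 TX queues */
		unsigned int classid = TC_H_MAKE(TC_H_MAJ(handle),
						 TC_H_MIN(ntx + 1));
		printf("TX queue %u -> class %x:%x\n", ntx,
		       TC_H_MAJ(classid) >> 16, TC_H_MIN(classid));
	}
	return 0;
}

Running it prints "TX queue 0 -> class 1:1" through "TX queue 3 -> class 1:4", the same cl - 1 mapping that mq_queue_get() inverts.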
v3.1
/*
 * net/sched/sch_mq.c		Classful multiqueue dummy scheduler
 *
 * Copyright (c) 2009 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>

struct mq_sched {
	struct Qdisc		**qdiscs;
};

static void mq_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (!priv->qdiscs)
		return;
	for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
		qdisc_destroy(priv->qdiscs[ntx]);
	kfree(priv->qdiscs);
}

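/* In this older version the default child is always pfifo_fast and
 * allocation failures unwind explicitly via the err: label.
 */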
static int mq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	unsigned int ntx;

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* pre-allocate qdiscs, attachment can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (priv->qdiscs == NULL)
		return -ENOMEM;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		dev_queue = netdev_get_tx_queue(dev, ntx);
		qdisc = qdisc_create_dflt(dev_queue, &pfifo_fast_ops,
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(ntx + 1)));
		if (qdisc == NULL)
			goto err;
		priv->qdiscs[ntx] = qdisc;
	}

	sch->flags |= TCQ_F_MQROOT;
	return 0;

err:
	mq_destroy(sch);
	return -ENOMEM;
}

static void mq_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mq_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc;
	unsigned int ntx;

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (qdisc)
			qdisc_destroy(qdisc);
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}

static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct Qdisc *qdisc;
	unsigned int ntx;

	sch->q.qlen = 0;
	memset(&sch->bstats, 0, sizeof(sch->bstats));
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
		spin_lock_bh(qdisc_lock(qdisc));
		sch->q.qlen		+= qdisc->q.qlen;
		sch->bstats.bytes	+= qdisc->bstats.bytes;
		sch->bstats.packets	+= qdisc->bstats.packets;
		sch->qstats.qlen	+= qdisc->qstats.qlen;
		sch->qstats.backlog	+= qdisc->qstats.backlog;
		sch->qstats.drops	+= qdisc->qstats.drops;
		sch->qstats.requeues	+= qdisc->qstats.requeues;
		sch->qstats.overlimits	+= qdisc->qstats.overlimits;
		spin_unlock_bh(qdisc_lock(qdisc));
	}
	return 0;
}

static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

static struct netdev_queue *mq_select_queue(struct Qdisc *sch,
					    struct tcmsg *tcm)
{
	unsigned int ntx = TC_H_MIN(tcm->tcm_parent);
	struct netdev_queue *dev_queue = mq_queue_get(sch, ntx);

	if (!dev_queue) {
		struct net_device *dev = qdisc_dev(sch);

		return netdev_get_tx_queue(dev, 0);
	}
	return dev_queue;
}

static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
		    struct Qdisc **old)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
	struct net_device *dev = qdisc_dev(sch);

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);

	if (dev->flags & IFF_UP)
		dev_activate(dev);
	return 0;
}

static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	return dev_queue->qdisc_sleeping;
}

static unsigned long mq_get(struct Qdisc *sch, u32 classid)
{
	unsigned int ntx = TC_H_MIN(classid);

	if (!mq_queue_get(sch, ntx))
		return 0;
	return ntx;
}

static void mq_put(struct Qdisc *sch, unsigned long cl)
{
}

static int mq_dump_class(struct Qdisc *sch, unsigned long cl,
			 struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle |= TC_H_MIN(cl);
	tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	return 0;
}

static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
			       struct gnet_dump *d)
{
	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);

	sch = dev_queue->qdisc_sleeping;
	sch->qstats.qlen = sch->q.qlen;
	if (gnet_stats_copy_basic(d, &sch->bstats) < 0 ||
	    gnet_stats_copy_queue(d, &sch->qstats) < 0)
		return -1;
	return 0;
}

static void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx;

	if (arg->stop)
		return;

	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops mq_class_ops = {
	.select_queue	= mq_select_queue,
	.graft		= mq_graft,
	.leaf		= mq_leaf,
	.get		= mq_get,
	.put		= mq_put,
	.walk		= mq_walk,
	.dump		= mq_dump_class,
	.dump_stats	= mq_dump_class_stats,
};

struct Qdisc_ops mq_qdisc_ops __read_mostly = {
	.cl_ops		= &mq_class_ops,
	.id		= "mq",
	.priv_size	= sizeof(struct mq_sched),
	.init		= mq_init,
	.destroy	= mq_destroy,
	.attach		= mq_attach,
	.dump		= mq_dump,
	.owner		= THIS_MODULE,
};