v4.6
 
  1/* net/sched/sch_ingress.c - Ingress and clsact qdisc
  2 *
  3 *              This program is free software; you can redistribute it and/or
  4 *              modify it under the terms of the GNU General Public License
  5 *              as published by the Free Software Foundation; either version
  6 *              2 of the License, or (at your option) any later version.
  7 *
  8 * Authors:     Jamal Hadi Salim 1999
  9 */
 10
 11#include <linux/module.h>
 12#include <linux/types.h>
 13#include <linux/list.h>
 14#include <linux/skbuff.h>
 15#include <linux/rtnetlink.h>
 16
 17#include <net/netlink.h>
 18#include <net/pkt_sched.h>
 19
 20static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg)
 21{
 22	return NULL;
 23}
 24
 25static unsigned long ingress_get(struct Qdisc *sch, u32 classid)
 26{
 27	return TC_H_MIN(classid) + 1;
 28}
 29
 30static unsigned long ingress_bind_filter(struct Qdisc *sch,
 31					 unsigned long parent, u32 classid)
 32{
 33	return ingress_get(sch, classid);
 34}
 35
 36static void ingress_put(struct Qdisc *sch, unsigned long cl)
 37{
 38}
 39
 40static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
 41{
 42}
 43
 44static struct tcf_proto __rcu **ingress_find_tcf(struct Qdisc *sch,
 45						 unsigned long cl)
 46{
 47	struct net_device *dev = qdisc_dev(sch);
 48
 49	return &dev->ingress_cl_list;
 50}
 51
 52static int ingress_init(struct Qdisc *sch, struct nlattr *opt)
 53{
 54	net_inc_ingress_queue();
 55	sch->flags |= TCQ_F_CPUSTATS;
 56
 57	return 0;
 58}
 59
 60static void ingress_destroy(struct Qdisc *sch)
 61{
 62	struct net_device *dev = qdisc_dev(sch);
 63
 64	tcf_destroy_chain(&dev->ingress_cl_list);
 65	net_dec_ingress_queue();
 66}
 67
 68static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb)
 69{
 70	struct nlattr *nest;
 71
 72	nest = nla_nest_start(skb, TCA_OPTIONS);
 73	if (nest == NULL)
 74		goto nla_put_failure;
 75
 76	return nla_nest_end(skb, nest);
 77
 78nla_put_failure:
 79	nla_nest_cancel(skb, nest);
 80	return -1;
 81}
 82
 83static const struct Qdisc_class_ops ingress_class_ops = {
 84	.leaf		=	ingress_leaf,
 85	.get		=	ingress_get,
 86	.put		=	ingress_put,
 87	.walk		=	ingress_walk,
 88	.tcf_chain	=	ingress_find_tcf,
 89	.bind_tcf	=	ingress_bind_filter,
 90	.unbind_tcf	=	ingress_put,
 91};
 92
 93static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
 94	.cl_ops		=	&ingress_class_ops,
 95	.id		=	"ingress",
 96	.init		=	ingress_init,
 97	.destroy	=	ingress_destroy,
 98	.dump		=	ingress_dump,
 99	.owner		=	THIS_MODULE,
100};
101
102static unsigned long clsact_get(struct Qdisc *sch, u32 classid)
103{
104	switch (TC_H_MIN(classid)) {
105	case TC_H_MIN(TC_H_MIN_INGRESS):
106	case TC_H_MIN(TC_H_MIN_EGRESS):
107		return TC_H_MIN(classid);
108	default:
109		return 0;
110	}
111}
112
113static unsigned long clsact_bind_filter(struct Qdisc *sch,
114					unsigned long parent, u32 classid)
115{
116	return clsact_get(sch, classid);
117}
118
119static struct tcf_proto __rcu **clsact_find_tcf(struct Qdisc *sch,
120						unsigned long cl)
121{
122	struct net_device *dev = qdisc_dev(sch);
123
124	switch (cl) {
125	case TC_H_MIN(TC_H_MIN_INGRESS):
126		return &dev->ingress_cl_list;
127	case TC_H_MIN(TC_H_MIN_EGRESS):
128		return &dev->egress_cl_list;
129	default:
130		return NULL;
131	}
132}
133
134static int clsact_init(struct Qdisc *sch, struct nlattr *opt)
135{
136	net_inc_ingress_queue();
137	net_inc_egress_queue();
138
139	sch->flags |= TCQ_F_CPUSTATS;
140
141	return 0;
142}
143
144static void clsact_destroy(struct Qdisc *sch)
145{
146	struct net_device *dev = qdisc_dev(sch);
147
148	tcf_destroy_chain(&dev->ingress_cl_list);
149	tcf_destroy_chain(&dev->egress_cl_list);
150
151	net_dec_ingress_queue();
152	net_dec_egress_queue();
153}
154
155static const struct Qdisc_class_ops clsact_class_ops = {
156	.leaf		=	ingress_leaf,
157	.get		=	clsact_get,
158	.put		=	ingress_put,
159	.walk		=	ingress_walk,
160	.tcf_chain	=	clsact_find_tcf,
161	.bind_tcf	=	clsact_bind_filter,
162	.unbind_tcf	=	ingress_put,
163};
164
165static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
166	.cl_ops		=	&clsact_class_ops,
167	.id		=	"clsact",
168	.init		=	clsact_init,
169	.destroy	=	clsact_destroy,
170	.dump		=	ingress_dump,
171	.owner		=	THIS_MODULE,
172};
173
174static int __init ingress_module_init(void)
175{
176	int ret;
177
178	ret = register_qdisc(&ingress_qdisc_ops);
179	if (!ret) {
180		ret = register_qdisc(&clsact_qdisc_ops);
181		if (ret)
182			unregister_qdisc(&ingress_qdisc_ops);
183	}
184
185	return ret;
186}
187
188static void __exit ingress_module_exit(void)
189{
190	unregister_qdisc(&ingress_qdisc_ops);
191	unregister_qdisc(&clsact_qdisc_ops);
192}
193
194module_init(ingress_module_init);
195module_exit(ingress_module_exit);
196
197MODULE_ALIAS("sch_clsact");
198MODULE_LICENSE("GPL");
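
Both qdiscs in the v4.6 listing above are pure classifier attachment points: neither defines an enqueue or dequeue operation, and the class ops only translate a tc class handle into a cookie for filter binding. ingress_get() returns TC_H_MIN(classid) + 1 so that every minor number maps to a non-zero "found" value, clsact_get() accepts exactly the two pseudo-classes ffff:fff2 (ingress) and ffff:fff3 (egress), and the net_inc_ingress_queue()/net_inc_egress_queue() calls in the init functions bump the static keys that enable the classification hooks in the core packet paths. The handle arithmetic behind clsact_get() can be checked with a small userspace sketch; the macro values below are assumptions copied from include/uapi/linux/pkt_sched.h rather than defined in this file:

#include <stdio.h>
#include <stdint.h>

/* Assumed values, mirroring include/uapi/linux/pkt_sched.h. */
#define TC_H_MAJ_MASK    0xFFFF0000U
#define TC_H_MIN_MASK    0x0000FFFFU
#define TC_H_MIN(h)      ((h) & TC_H_MIN_MASK)
#define TC_H_MAKE(maj, min) (((maj) & TC_H_MAJ_MASK) | ((min) & TC_H_MIN_MASK))

#define TC_H_CLSACT      0xFFFFFFF1U   /* same major as TC_H_INGRESS */
#define TC_H_MIN_INGRESS 0xFFF2U
#define TC_H_MIN_EGRESS  0xFFF3U

/* Userspace mirror of clsact_get(): only the two pseudo-classes exist. */
static unsigned long clsact_lookup(uint32_t classid)
{
        switch (TC_H_MIN(classid)) {
        case TC_H_MIN(TC_H_MIN_INGRESS):
        case TC_H_MIN(TC_H_MIN_EGRESS):
                return TC_H_MIN(classid);
        default:
                return 0;
        }
}

int main(void)
{
        uint32_t ing   = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS); /* ffff:fff2 */
        uint32_t eg    = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_EGRESS);  /* ffff:fff3 */
        uint32_t other = TC_H_MAKE(TC_H_CLSACT, 0x0001);

        printf("ingress %#x -> %#lx\n", (unsigned)ing,   clsact_lookup(ing));
        printf("egress  %#x -> %#lx\n", (unsigned)eg,    clsact_lookup(eg));
        printf("other   %#x -> %#lx\n", (unsigned)other, clsact_lookup(other));
        return 0;
}

The sketch prints 0xfff2 and 0xfff3 for the two recognised classes and 0 otherwise, which is the value clsact_bind_filter() hands back to the filter code when a classifier is attached to one of the pseudo-classes.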
v5.4
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/* net/sched/sch_ingress.c - Ingress and clsact qdisc
  3 *
  4 * Authors:     Jamal Hadi Salim 1999
  5 */
  6
  7#include <linux/module.h>
  8#include <linux/types.h>
  9#include <linux/list.h>
 10#include <linux/skbuff.h>
 11#include <linux/rtnetlink.h>
 12
 13#include <net/netlink.h>
 14#include <net/pkt_sched.h>
 15#include <net/pkt_cls.h>
 16
 17struct ingress_sched_data {
 18	struct tcf_block *block;
 19	struct tcf_block_ext_info block_info;
 20	struct mini_Qdisc_pair miniqp;
 21};
 22
 23static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg)
 24{
 25	return NULL;
 26}
 27
 28static unsigned long ingress_find(struct Qdisc *sch, u32 classid)
 29{
 30	return TC_H_MIN(classid) + 1;
 31}
 32
 33static unsigned long ingress_bind_filter(struct Qdisc *sch,
 34					 unsigned long parent, u32 classid)
 35{
 36	return ingress_find(sch, classid);
 37}
 38
 39static void ingress_unbind_filter(struct Qdisc *sch, unsigned long cl)
 40{
 41}
 42
 43static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
 44{
 45}
 46
 47static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl,
 48					   struct netlink_ext_ack *extack)
 49{
 50	struct ingress_sched_data *q = qdisc_priv(sch);
 51
 52	return q->block;
 53}
 54
 55static void clsact_chain_head_change(struct tcf_proto *tp_head, void *priv)
 56{
 57	struct mini_Qdisc_pair *miniqp = priv;
 58
 59	mini_qdisc_pair_swap(miniqp, tp_head);
 60};
 61
 62static void ingress_ingress_block_set(struct Qdisc *sch, u32 block_index)
 63{
 64	struct ingress_sched_data *q = qdisc_priv(sch);
 65
 66	q->block_info.block_index = block_index;
 67}
 68
 69static u32 ingress_ingress_block_get(struct Qdisc *sch)
 70{
 71	struct ingress_sched_data *q = qdisc_priv(sch);
 72
 73	return q->block_info.block_index;
 74}
 75
 76static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
 77			struct netlink_ext_ack *extack)
 78{
 79	struct ingress_sched_data *q = qdisc_priv(sch);
 80	struct net_device *dev = qdisc_dev(sch);
 81
 82	net_inc_ingress_queue();
 83
 84	mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress);
 85
 86	q->block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
 87	q->block_info.chain_head_change = clsact_chain_head_change;
 88	q->block_info.chain_head_change_priv = &q->miniqp;
 89
 90	return tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
 91}
 92
 93static void ingress_destroy(struct Qdisc *sch)
 94{
 95	struct ingress_sched_data *q = qdisc_priv(sch);
 96
 97	tcf_block_put_ext(q->block, sch, &q->block_info);
 98	net_dec_ingress_queue();
 99}
100
101static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb)
102{
103	struct nlattr *nest;
104
105	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
106	if (nest == NULL)
107		goto nla_put_failure;
108
109	return nla_nest_end(skb, nest);
110
111nla_put_failure:
112	nla_nest_cancel(skb, nest);
113	return -1;
114}
115
116static const struct Qdisc_class_ops ingress_class_ops = {
117	.flags		=	QDISC_CLASS_OPS_DOIT_UNLOCKED,
118	.leaf		=	ingress_leaf,
119	.find		=	ingress_find,
120	.walk		=	ingress_walk,
121	.tcf_block	=	ingress_tcf_block,
122	.bind_tcf	=	ingress_bind_filter,
123	.unbind_tcf	=	ingress_unbind_filter,
124};
125
126static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
127	.cl_ops			=	&ingress_class_ops,
128	.id			=	"ingress",
129	.priv_size		=	sizeof(struct ingress_sched_data),
130	.static_flags		=	TCQ_F_CPUSTATS,
131	.init			=	ingress_init,
132	.destroy		=	ingress_destroy,
133	.dump			=	ingress_dump,
134	.ingress_block_set	=	ingress_ingress_block_set,
135	.ingress_block_get	=	ingress_ingress_block_get,
136	.owner			=	THIS_MODULE,
137};
138
139struct clsact_sched_data {
140	struct tcf_block *ingress_block;
141	struct tcf_block *egress_block;
142	struct tcf_block_ext_info ingress_block_info;
143	struct tcf_block_ext_info egress_block_info;
144	struct mini_Qdisc_pair miniqp_ingress;
145	struct mini_Qdisc_pair miniqp_egress;
146};
147
148static unsigned long clsact_find(struct Qdisc *sch, u32 classid)
149{
150	switch (TC_H_MIN(classid)) {
151	case TC_H_MIN(TC_H_MIN_INGRESS):
152	case TC_H_MIN(TC_H_MIN_EGRESS):
153		return TC_H_MIN(classid);
154	default:
155		return 0;
156	}
157}
158
159static unsigned long clsact_bind_filter(struct Qdisc *sch,
160					unsigned long parent, u32 classid)
161{
162	return clsact_find(sch, classid);
163}
164
165static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl,
166					  struct netlink_ext_ack *extack)
167{
168	struct clsact_sched_data *q = qdisc_priv(sch);
169
170	switch (cl) {
171	case TC_H_MIN(TC_H_MIN_INGRESS):
172		return q->ingress_block;
173	case TC_H_MIN(TC_H_MIN_EGRESS):
174		return q->egress_block;
175	default:
176		return NULL;
177	}
178}
179
180static void clsact_ingress_block_set(struct Qdisc *sch, u32 block_index)
181{
182	struct clsact_sched_data *q = qdisc_priv(sch);
183
184	q->ingress_block_info.block_index = block_index;
185}
186
187static void clsact_egress_block_set(struct Qdisc *sch, u32 block_index)
188{
189	struct clsact_sched_data *q = qdisc_priv(sch);
190
191	q->egress_block_info.block_index = block_index;
192}
193
194static u32 clsact_ingress_block_get(struct Qdisc *sch)
195{
196	struct clsact_sched_data *q = qdisc_priv(sch);
197
198	return q->ingress_block_info.block_index;
199}
200
201static u32 clsact_egress_block_get(struct Qdisc *sch)
202{
203	struct clsact_sched_data *q = qdisc_priv(sch);
204
205	return q->egress_block_info.block_index;
206}
207
208static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
209		       struct netlink_ext_ack *extack)
210{
211	struct clsact_sched_data *q = qdisc_priv(sch);
212	struct net_device *dev = qdisc_dev(sch);
213	int err;
214
215	net_inc_ingress_queue();
216	net_inc_egress_queue();
217
218	mini_qdisc_pair_init(&q->miniqp_ingress, sch, &dev->miniq_ingress);
219
220	q->ingress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
221	q->ingress_block_info.chain_head_change = clsact_chain_head_change;
222	q->ingress_block_info.chain_head_change_priv = &q->miniqp_ingress;
223
224	err = tcf_block_get_ext(&q->ingress_block, sch, &q->ingress_block_info,
225				extack);
226	if (err)
227		return err;
228
229	mini_qdisc_pair_init(&q->miniqp_egress, sch, &dev->miniq_egress);
230
231	q->egress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS;
232	q->egress_block_info.chain_head_change = clsact_chain_head_change;
233	q->egress_block_info.chain_head_change_priv = &q->miniqp_egress;
234
235	return tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info, extack);
236}
237
238static void clsact_destroy(struct Qdisc *sch)
239{
240	struct clsact_sched_data *q = qdisc_priv(sch);
241
242	tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info);
243	tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info);
244
245	net_dec_ingress_queue();
246	net_dec_egress_queue();
247}
248
249static const struct Qdisc_class_ops clsact_class_ops = {
250	.flags		=	QDISC_CLASS_OPS_DOIT_UNLOCKED,
251	.leaf		=	ingress_leaf,
252	.find		=	clsact_find,
253	.walk		=	ingress_walk,
254	.tcf_block	=	clsact_tcf_block,
255	.bind_tcf	=	clsact_bind_filter,
256	.unbind_tcf	=	ingress_unbind_filter,
257};
258
259static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
260	.cl_ops			=	&clsact_class_ops,
261	.id			=	"clsact",
262	.priv_size		=	sizeof(struct clsact_sched_data),
263	.static_flags		=	TCQ_F_CPUSTATS,
264	.init			=	clsact_init,
265	.destroy		=	clsact_destroy,
266	.dump			=	ingress_dump,
267	.ingress_block_set	=	clsact_ingress_block_set,
268	.egress_block_set	=	clsact_egress_block_set,
269	.ingress_block_get	=	clsact_ingress_block_get,
270	.egress_block_get	=	clsact_egress_block_get,
271	.owner			=	THIS_MODULE,
272};
273
274static int __init ingress_module_init(void)
275{
276	int ret;
277
278	ret = register_qdisc(&ingress_qdisc_ops);
279	if (!ret) {
280		ret = register_qdisc(&clsact_qdisc_ops);
281		if (ret)
282			unregister_qdisc(&ingress_qdisc_ops);
283	}
284
285	return ret;
286}
287
288static void __exit ingress_module_exit(void)
289{
290	unregister_qdisc(&ingress_qdisc_ops);
291	unregister_qdisc(&clsact_qdisc_ops);
292}
293
294module_init(ingress_module_init);
295module_exit(ingress_module_exit);
296
297MODULE_ALIAS("sch_clsact");
298MODULE_LICENSE("GPL");
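
Relative to v4.6, the v5.4 version keeps the same module skeleton but reworks filter attachment. The per-device lists dev->ingress_cl_list and dev->egress_cl_list are gone: each qdisc now carries private data (ingress_sched_data / clsact_sched_data) holding one or two tcf_block objects obtained via tcf_block_get_ext(), and the .tcf_chain class op has become .tcf_block. In the class ops, .get is renamed .find, the no-op .put slot disappears, and QDISC_CLASS_OPS_DOIT_UNLOCKED is set; TCQ_F_CPUSTATS moves from the init functions into .static_flags, and the ingress_block_set/get and egress_block_set/get callbacks let a block be addressed by index so it can be shared. Finally, clsact_chain_head_change() feeds every change of the filter-chain head into a mini_Qdisc_pair, so the hot path dereferences a single pointer (dev->miniq_ingress or dev->miniq_egress) instead of walking qdisc internals. The double-buffering idea behind mini_qdisc_pair_swap() can be modelled roughly as below; this is a simplified userspace sketch using a C11 atomic pointer in place of RCU, not the kernel's mini_Qdisc implementation:

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel structures. */
struct filter_chain { const char *name; };      /* plays the role of the tcf_proto head */

struct mini_qdisc {
        struct filter_chain *filter_list;       /* what the fast path reads */
};

struct mini_qdisc_pair {
        struct mini_qdisc miniq1;
        struct mini_qdisc miniq2;
        _Atomic(struct mini_qdisc *) *p_miniq;  /* published pointer, e.g. dev->miniq_ingress */
};

/* Analogue of mini_qdisc_pair_swap(): fill the inactive slot, then republish. */
static void pair_swap(struct mini_qdisc_pair *miniqp, struct filter_chain *head)
{
        struct mini_qdisc *active = atomic_load(miniqp->p_miniq);
        struct mini_qdisc *next = (active == &miniqp->miniq1) ? &miniqp->miniq2
                                                              : &miniqp->miniq1;

        next->filter_list = head;
        /* Readers only ever observe a fully initialised slot, or NULL once
         * the last filter is gone. */
        atomic_store(miniqp->p_miniq, head ? next : NULL);
}

int main(void)
{
        _Atomic(struct mini_qdisc *) dev_miniq_ingress = NULL;
        struct mini_qdisc_pair pair = { .p_miniq = &dev_miniq_ingress };
        struct filter_chain chain = { "first classifier chain" };

        pair_swap(&pair, &chain);               /* first filter attached */
        struct mini_qdisc *mq = atomic_load(&dev_miniq_ingress);
        printf("active chain: %s\n", mq ? mq->filter_list->name : "(none)");

        pair_swap(&pair, NULL);                 /* last filter removed */
        mq = atomic_load(&dev_miniq_ingress);
        printf("active chain: %s\n", mq ? mq->filter_list->name : "(none)");
        return 0;
}

The kernel version additionally synchronises with RCU readers before a slot is reused and keeps per-CPU statistics pointers in the mini qdisc; the sketch only shows the swap-and-publish shape that clsact_chain_head_change() relies on.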