// SPDX-License-Identifier: GPL-2.0-or-later
/* net/sched/sch_ingress.c - Ingress and clsact qdisc
 *
 * Authors: Jamal Hadi Salim 1999
 */

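/* Both qdiscs only provide attachment points for tc filters and actions;
 * they never own packets.  Example iproute2 usage (device name and filters
 * are illustrative only):
 *
 *	tc qdisc add dev eth0 ingress
 *	tc filter add dev eth0 parent ffff: matchall action drop
 *
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress matchall action pass
 *	tc filter add dev eth0 egress matchall action pass
 */
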
#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

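/* Per-qdisc private data: the tcf filter block plus the mini_Qdisc pair
 * that publishes the current filter chain head to the RX fast path.
 */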
struct ingress_sched_data {
	struct tcf_block *block;
	struct tcf_block_ext_info block_info;
	struct mini_Qdisc_pair miniqp;
};

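/* ingress is classless: there are no child qdiscs to hand out. */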
static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

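/* Map a classid onto a non-zero pseudo class handle so filters can be
 * bound to the qdisc; there is no real class behind it.
 */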
static unsigned long ingress_find(struct Qdisc *sch, u32 classid)
{
	return TC_H_MIN(classid) + 1;
}

static unsigned long ingress_bind_filter(struct Qdisc *sch,
					 unsigned long parent, u32 classid)
{
	return ingress_find(sch, classid);
}

static void ingress_unbind_filter(struct Qdisc *sch, unsigned long cl)
{
}

static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
}

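/* All filters attached to the qdisc live in a single tcf block. */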
static struct tcf_block *ingress_tcf_block(struct Qdisc *sch, unsigned long cl,
					   struct netlink_ext_ack *extack)
{
	struct ingress_sched_data *q = qdisc_priv(sch);

	return q->block;
}

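/* Called whenever the head of the filter chain changes: swap the active
 * mini_Qdisc so the RCU-protected fast path sees the new chain head
 * without taking the qdisc lock.
 */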
static void clsact_chain_head_change(struct tcf_proto *tp_head, void *priv)
{
	struct mini_Qdisc_pair *miniqp = priv;

	mini_qdisc_pair_swap(miniqp, tp_head);
};

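/* Accessors for the block index used when the filter block is shared
 * between qdiscs (set via the TCA_INGRESS_BLOCK attribute).
 */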
static void ingress_ingress_block_set(struct Qdisc *sch, u32 block_index)
{
	struct ingress_sched_data *q = qdisc_priv(sch);

	q->block_info.block_index = block_index;
}

static u32 ingress_ingress_block_get(struct Qdisc *sch)
{
	struct ingress_sched_data *q = qdisc_priv(sch);

	return q->block_info.block_index;
}

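/* Attach the qdisc: bump the static key that enables the ingress hook,
 * initialize the mini_Qdisc pair for the device's ingress path, and
 * acquire the filter block.
 */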
static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct ingress_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);

	net_inc_ingress_queue();

	mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress);

	q->block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	q->block_info.chain_head_change = clsact_chain_head_change;
	q->block_info.chain_head_change_priv = &q->miniqp;

	return tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
}

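/* Tear down in reverse: release the filter block and drop the static key. */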
static void ingress_destroy(struct Qdisc *sch)
{
	struct ingress_sched_data *q = qdisc_priv(sch);

	tcf_block_put_ext(q->block, sch, &q->block_info);
	net_dec_ingress_queue();
}

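/* Dump an empty TCA_OPTIONS nest; the qdisc has no parameters of its own. */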
static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static const struct Qdisc_class_ops ingress_class_ops = {
	.flags = QDISC_CLASS_OPS_DOIT_UNLOCKED,
	.leaf = ingress_leaf,
	.find = ingress_find,
	.walk = ingress_walk,
	.tcf_block = ingress_tcf_block,
	.bind_tcf = ingress_bind_filter,
	.unbind_tcf = ingress_unbind_filter,
};

static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
	.cl_ops = &ingress_class_ops,
	.id = "ingress",
	.priv_size = sizeof(struct ingress_sched_data),
	.static_flags = TCQ_F_CPUSTATS,
	.init = ingress_init,
	.destroy = ingress_destroy,
	.dump = ingress_dump,
	.ingress_block_set = ingress_ingress_block_set,
	.ingress_block_get = ingress_ingress_block_get,
	.owner = THIS_MODULE,
};

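/* clsact keeps separate filter blocks and mini_Qdisc pairs for the ingress
 * and egress directions.
 */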
struct clsact_sched_data {
	struct tcf_block *ingress_block;
	struct tcf_block *egress_block;
	struct tcf_block_ext_info ingress_block_info;
	struct tcf_block_ext_info egress_block_info;
	struct mini_Qdisc_pair miniqp_ingress;
	struct mini_Qdisc_pair miniqp_egress;
};

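/* clsact exposes exactly two pseudo classes, one per direction; anything
 * else is rejected.
 */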
static unsigned long clsact_find(struct Qdisc *sch, u32 classid)
{
	switch (TC_H_MIN(classid)) {
	case TC_H_MIN(TC_H_MIN_INGRESS):
	case TC_H_MIN(TC_H_MIN_EGRESS):
		return TC_H_MIN(classid);
	default:
		return 0;
	}
}

static unsigned long clsact_bind_filter(struct Qdisc *sch,
					unsigned long parent, u32 classid)
{
	return clsact_find(sch, classid);
}

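/* Pick the ingress or egress filter block based on the pseudo class. */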
static struct tcf_block *clsact_tcf_block(struct Qdisc *sch, unsigned long cl,
					  struct netlink_ext_ack *extack)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	switch (cl) {
	case TC_H_MIN(TC_H_MIN_INGRESS):
		return q->ingress_block;
	case TC_H_MIN(TC_H_MIN_EGRESS):
		return q->egress_block;
	default:
		return NULL;
	}
}

static void clsact_ingress_block_set(struct Qdisc *sch, u32 block_index)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	q->ingress_block_info.block_index = block_index;
}

static void clsact_egress_block_set(struct Qdisc *sch, u32 block_index)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	q->egress_block_info.block_index = block_index;
}

static u32 clsact_ingress_block_get(struct Qdisc *sch)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	return q->ingress_block_info.block_index;
}

static u32 clsact_egress_block_get(struct Qdisc *sch)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	return q->egress_block_info.block_index;
}

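/* Like ingress_init(), but for both directions: enable the ingress and
 * egress hooks and acquire one filter block per direction.
 */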
static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct clsact_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int err;

	net_inc_ingress_queue();
	net_inc_egress_queue();

	mini_qdisc_pair_init(&q->miniqp_ingress, sch, &dev->miniq_ingress);

	q->ingress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	q->ingress_block_info.chain_head_change = clsact_chain_head_change;
	q->ingress_block_info.chain_head_change_priv = &q->miniqp_ingress;

	err = tcf_block_get_ext(&q->ingress_block, sch, &q->ingress_block_info,
				extack);
	if (err)
		return err;

	mini_qdisc_pair_init(&q->miniqp_egress, sch, &dev->miniq_egress);

	q->egress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS;
	q->egress_block_info.chain_head_change = clsact_chain_head_change;
	q->egress_block_info.chain_head_change_priv = &q->miniqp_egress;

	return tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info, extack);
}

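/* Release both filter blocks and drop both static keys. */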
static void clsact_destroy(struct Qdisc *sch)
{
	struct clsact_sched_data *q = qdisc_priv(sch);

	tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info);
	tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info);

	net_dec_ingress_queue();
	net_dec_egress_queue();
}

static const struct Qdisc_class_ops clsact_class_ops = {
	.flags = QDISC_CLASS_OPS_DOIT_UNLOCKED,
	.leaf = ingress_leaf,
	.find = clsact_find,
	.walk = ingress_walk,
	.tcf_block = clsact_tcf_block,
	.bind_tcf = clsact_bind_filter,
	.unbind_tcf = ingress_unbind_filter,
};

static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
	.cl_ops = &clsact_class_ops,
	.id = "clsact",
	.priv_size = sizeof(struct clsact_sched_data),
	.static_flags = TCQ_F_CPUSTATS,
	.init = clsact_init,
	.destroy = clsact_destroy,
	.dump = ingress_dump,
	.ingress_block_set = clsact_ingress_block_set,
	.egress_block_set = clsact_egress_block_set,
	.ingress_block_get = clsact_ingress_block_get,
	.egress_block_get = clsact_egress_block_get,
	.owner = THIS_MODULE,
};

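/* Both qdiscs live in this module; if registering clsact fails, back out
 * the already registered ingress qdisc.
 */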
static int __init ingress_module_init(void)
{
	int ret;

	ret = register_qdisc(&ingress_qdisc_ops);
	if (!ret) {
		ret = register_qdisc(&clsact_qdisc_ops);
		if (ret)
			unregister_qdisc(&ingress_qdisc_ops);
	}

	return ret;
}

static void __exit ingress_module_exit(void)
{
	unregister_qdisc(&ingress_qdisc_ops);
	unregister_qdisc(&clsact_qdisc_ops);
}

module_init(ingress_module_init);
module_exit(ingress_module_exit);

MODULE_ALIAS("sch_clsact");
MODULE_LICENSE("GPL");