// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_matchall.c		Match-all classifier
 *
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 */

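/*
 * Matchall matches every packet seen by the qdisc it is attached to and
 * hands it to its actions. Typical usage (iproute2); device names are
 * illustrative:
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress matchall \
 *           action mirred egress mirror dev eth1
 */
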
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_wrapper.h>

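/*
 * A tcf_proto of kind matchall carries at most one filter instance;
 * tp->root points at it (RCU-protected). 'pf' holds per-CPU software
 * hit counters, 'in_hw_count' tracks how many hardware blocks accepted
 * the rule, and 'deleting' hides the head from walks once it has been
 * logically removed.
 */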
struct cls_mall_head {
	struct tcf_exts exts;
	struct tcf_result res;
	u32 handle;
	u32 flags;
	unsigned int in_hw_count;
	struct tc_matchall_pcnt __percpu *pf;
	struct rcu_work rwork;
	bool deleting;
};

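/*
 * Classification is trivial: if a head is installed and software
 * processing is not skipped, every skb matches. Bump the per-CPU hit
 * counter and run the attached actions.
 */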
TC_INDIRECT_SCOPE int mall_classify(struct sk_buff *skb,
				    const struct tcf_proto *tp,
				    struct tcf_result *res)
{
	struct cls_mall_head *head = rcu_dereference_bh(tp->root);

	if (unlikely(!head))
		return -1;

	if (tc_skip_sw(head->flags))
		return -1;

	*res = head->res;
	__this_cpu_inc(head->pf->rhit);
	return tcf_exts_exec(skb, &head->exts, res);
}

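/* Nothing to allocate up front; the single head is created by mall_change(). */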
static int mall_init(struct tcf_proto *tp)
{
	return 0;
}

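/*
 * Final teardown of a head: release the attached actions, drop the
 * netns reference and free the per-CPU hit counters.
 */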
static void __mall_destroy(struct cls_mall_head *head)
{
	tcf_exts_destroy(&head->exts);
	tcf_exts_put_net(&head->exts);
	free_percpu(head->pf);
	kfree(head);
}

static void mall_destroy_work(struct work_struct *work)
{
	struct cls_mall_head *head = container_of(to_rcu_work(work),
						  struct cls_mall_head,
						  rwork);
	rtnl_lock();
	__mall_destroy(head);
	rtnl_unlock();
}

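/* Ask all offload-capable drivers bound to this block to remove the rule. */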
static void mall_destroy_hw_filter(struct tcf_proto *tp,
				   struct cls_mall_head *head,
				   unsigned long cookie,
				   struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_DESTROY;
	cls_mall.cookie = cookie;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall, false,
			    &head->flags, &head->in_hw_count, true);
}

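/*
 * Offer the rule to hardware. A hardware failure is fatal only when the
 * user asked for skip_sw: with no software fallback, a rule that no
 * device accepted can never match, so the filter is rejected rather
 * than installed as a no-op.
 */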
static int mall_replace_hw_filter(struct tcf_proto *tp,
				  struct cls_mall_head *head,
				  unsigned long cookie,
				  struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;
	bool skip_sw = tc_skip_sw(head->flags);
	int err;

	cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
	if (!cls_mall.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_REPLACE;
	cls_mall.cookie = cookie;

	err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts,
				      cls_mall.common.extack);
	if (err) {
		kfree(cls_mall.rule);
		mall_destroy_hw_filter(tp, head, cookie, NULL);

		return skip_sw ? err : 0;
	}

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall,
			      skip_sw, &head->flags, &head->in_hw_count, true);
	tc_cleanup_offload_action(&cls_mall.rule->action);
	kfree(cls_mall.rule);

	if (err) {
		mall_destroy_hw_filter(tp, head, cookie, NULL);
		return err;
	}

	if (skip_sw && !(head->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

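/*
 * Tear down the single filter: unbind it from its class, remove any
 * hardware rule and, if the netns is still alive, defer the final free
 * past an RCU grace period via the rwork.
 */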
static void mall_destroy(struct tcf_proto *tp, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (!head)
		return;

	tcf_unbind_filter(tp, &head->res);

	if (!tc_skip_hw(head->flags))
		mall_destroy_hw_filter(tp, head, (unsigned long) head, extack);

	if (tcf_exts_get_net(&head->exts))
		tcf_queue_work(&head->rwork, mall_destroy_work);
	else
		__mall_destroy(head);
}

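/* At most one filter exists, so lookup reduces to a handle comparison. */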
static void *mall_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (head && head->handle == handle)
		return head;

	return NULL;
}

static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
	[TCA_MATCHALL_UNSPEC]	= { .type = NLA_UNSPEC },
	[TCA_MATCHALL_CLASSID]	= { .type = NLA_U32 },
	[TCA_MATCHALL_FLAGS]	= { .type = NLA_U32 },
};

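/*
 * Create the single matchall instance for this tcf_proto. Adding a
 * second filter fails with -EEXIST. On success the fully set up head is
 * published to the RCU-protected fast path via rcu_assign_pointer().
 */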
static int mall_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       void **arg, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct nlattr *tb[TCA_MATCHALL_MAX + 1];
	bool bound_to_filter = false;
	struct cls_mall_head *new;
	u32 userflags = 0;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head)
		return -EEXIST;

	err = nla_parse_nested_deprecated(tb, TCA_MATCHALL_MAX,
					  tca[TCA_OPTIONS], mall_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_FLAGS]) {
		userflags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
		if (!tc_flags_valid(userflags))
			return -EINVAL;
	}

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOBUFS;

	err = tcf_exts_init(&new->exts, net, TCA_MATCHALL_ACT, 0);
	if (err)
		goto err_exts_init;

	if (!handle)
		handle = 1;
	new->handle = handle;
	new->flags = userflags;
	new->pf = alloc_percpu(struct tc_matchall_pcnt);
	if (!new->pf) {
		err = -ENOMEM;
		goto err_alloc_percpu;
	}

	err = tcf_exts_validate_ex(net, tp, tb, tca[TCA_RATE],
				   &new->exts, flags, new->flags, extack);
	if (err < 0)
		goto err_set_parms;

	if (tb[TCA_MATCHALL_CLASSID]) {
		new->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
		tcf_bind_filter(tp, &new->res, base);
		bound_to_filter = true;
	}

	if (!tc_skip_hw(new->flags)) {
		err = mall_replace_hw_filter(tp, new, (unsigned long)new,
					     extack);
		if (err)
			goto err_replace_hw_filter;
	}

	if (!tc_in_hw(new->flags))
		new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	tcf_proto_update_usesw(tp, new->flags);

	*arg = head;
	rcu_assign_pointer(tp->root, new);
	return 0;

err_replace_hw_filter:
	if (bound_to_filter)
		tcf_unbind_filter(tp, &new->res);
err_set_parms:
	free_percpu(new->pf);
err_alloc_percpu:
	tcf_exts_destroy(&new->exts);
err_exts_init:
	kfree(new);
	return err;
}

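/*
 * A head cannot be deleted on its own; mark it as going away and report
 * 'last' so the caller tears down the whole tcf_proto, which frees the
 * head through mall_destroy().
 */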
static int mall_delete(struct tcf_proto *tp, void *arg, bool *last,
		       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	head->deleting = true;
	*last = true;
	return 0;
}

static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		      bool rtnl_held)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (arg->count < arg->skip)
		goto skip;

	if (!head || head->deleting)
		return;
	if (arg->fn(tp, head, arg) < 0)
		arg->stop = 1;
skip:
	arg->count++;
}

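/*
 * Replay the rule towards one block callback, e.g. when an
 * offload-capable device is bound to or unbound from the block after
 * the filter was installed.
 */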
static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;
	int err;

	if (tc_skip_hw(head->flags))
		return 0;

	cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
	if (!cls_mall.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = add ?
		TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY;
	cls_mall.cookie = (unsigned long)head;

	err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts,
				      cls_mall.common.extack);
	if (err) {
		kfree(cls_mall.rule);

		return add && tc_skip_sw(head->flags) ? err : 0;
	}

	err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSMATCHALL,
				    &cls_mall, cb_priv, &head->flags,
				    &head->in_hw_count);
	tc_cleanup_offload_action(&cls_mall.rule->action);
	kfree(cls_mall.rule);

	return err;
}

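/* Pull hardware counters from the drivers and fold them into the action stats. */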
static void mall_stats_hw_filter(struct tcf_proto *tp,
				 struct cls_mall_head *head,
				 unsigned long cookie)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, NULL);
	cls_mall.command = TC_CLSMATCHALL_STATS;
	cls_mall.cookie = cookie;

	tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false, true);

	tcf_exts_hw_stats_update(&head->exts, &cls_mall.stats, cls_mall.use_act_stats);
}

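/*
 * Dump the filter as netlink attributes. Software hits are summed over
 * all possible CPUs into TCA_MATCHALL_PCNT; hardware stats are
 * refreshed first unless the filter is skip_hw.
 */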
static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tc_matchall_pcnt gpf = {};
	struct cls_mall_head *head = fh;
	struct nlattr *nest;
	int cpu;

	if (!head)
		return skb->len;

	if (!tc_skip_hw(head->flags))
		mall_stats_hw_filter(tp, head, (unsigned long)head);

	t->tcm_handle = head->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (head->res.classid &&
	    nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
		goto nla_put_failure;

	if (head->flags && nla_put_u32(skb, TCA_MATCHALL_FLAGS, head->flags))
		goto nla_put_failure;

	for_each_possible_cpu(cpu) {
		struct tc_matchall_pcnt *pf = per_cpu_ptr(head->pf, cpu);

		gpf.rhit += pf->rhit;
	}

	if (nla_put_64bit(skb, TCA_MATCHALL_PCNT,
			  sizeof(struct tc_matchall_pcnt),
			  &gpf, TCA_MATCHALL_PAD))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void mall_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			    unsigned long base)
{
	struct cls_mall_head *head = fh;

	tc_cls_bind_class(classid, cl, q, &head->res, base);
}

static struct tcf_proto_ops cls_mall_ops __read_mostly = {
	.kind		= "matchall",
	.classify	= mall_classify,
	.init		= mall_init,
	.destroy	= mall_destroy,
	.get		= mall_get,
	.change		= mall_change,
	.delete		= mall_delete,
	.walk		= mall_walk,
	.reoffload	= mall_reoffload,
	.dump		= mall_dump,
	.bind_class	= mall_bind_class,
	.owner		= THIS_MODULE,
};
MODULE_ALIAS_NET_CLS("matchall");

static int __init cls_mall_init(void)
{
	return register_tcf_proto_ops(&cls_mall_ops);
}

static void __exit cls_mall_exit(void)
{
	unregister_tcf_proto_ops(&cls_mall_ops);
}

module_init(cls_mall_init);
module_exit(cls_mall_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Match-all classifier");
MODULE_LICENSE("GPL v2");