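net/sched/cls_matchall.c — the tc "matchall" classifier — as shipped in Linux v5.4 and v5.9.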
v5.4
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_matchall.c		Match-all classifier
 *
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>

struct cls_mall_head {
	struct tcf_exts exts;
	struct tcf_result res;
	u32 handle;
	u32 flags;
	unsigned int in_hw_count;
	struct tc_matchall_pcnt __percpu *pf;
	struct rcu_work rwork;
	bool deleting;
};

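/*
 * Matchall has no keys: every packet that reaches the classifier is a
 * hit.  The fast path below only copies the cached classification
 * result, bumps the per-CPU hit counter and runs the attached actions;
 * a filter created with skip_sw is never matched in software.
 */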
static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct cls_mall_head *head = rcu_dereference_bh(tp->root);

	if (unlikely(!head))
		return -1;

	if (tc_skip_sw(head->flags))
		return -1;

	*res = head->res;
	__this_cpu_inc(head->pf->rhit);
	return tcf_exts_exec(skb, &head->exts, res);
}

static int mall_init(struct tcf_proto *tp)
{
	return 0;
}

static void __mall_destroy(struct cls_mall_head *head)
{
	tcf_exts_destroy(&head->exts);
	tcf_exts_put_net(&head->exts);
	free_percpu(head->pf);
	kfree(head);
}

static void mall_destroy_work(struct work_struct *work)
{
	struct cls_mall_head *head = container_of(to_rcu_work(work),
						  struct cls_mall_head,
						  rwork);
	rtnl_lock();
	__mall_destroy(head);
	rtnl_unlock();
}

static void mall_destroy_hw_filter(struct tcf_proto *tp,
				   struct cls_mall_head *head,
				   unsigned long cookie,
				   struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_DESTROY;
	cls_mall.cookie = cookie;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall, false,
			    &head->flags, &head->in_hw_count, true);
}

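/*
 * Offload path: translate the filter's actions into a flow_rule and
 * offer it to the block's hardware callbacks.  With skip_sw the
 * offload is mandatory, so a failure, or no driver ever setting
 * TCA_CLS_FLAGS_IN_HW, is reported back to the caller.
 */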
static int mall_replace_hw_filter(struct tcf_proto *tp,
				  struct cls_mall_head *head,
				  unsigned long cookie,
				  struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;
	bool skip_sw = tc_skip_sw(head->flags);
	int err;

	cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
	if (!cls_mall.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_REPLACE;
	cls_mall.cookie = cookie;

	err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts, true);
	if (err) {
		kfree(cls_mall.rule);
		mall_destroy_hw_filter(tp, head, cookie, NULL);
		if (skip_sw)
			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
		else
			err = 0;

		return err;
	}

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall,
			      skip_sw, &head->flags, &head->in_hw_count, true);
	tc_cleanup_flow_action(&cls_mall.rule->action);
	kfree(cls_mall.rule);

	if (err) {
		mall_destroy_hw_filter(tp, head, cookie, NULL);
		return err;
	}

	if (skip_sw && !(head->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void mall_destroy(struct tcf_proto *tp, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (!head)
		return;

	tcf_unbind_filter(tp, &head->res);

	if (!tc_skip_hw(head->flags))
		mall_destroy_hw_filter(tp, head, (unsigned long) head, extack);

	if (tcf_exts_get_net(&head->exts))
		tcf_queue_work(&head->rwork, mall_destroy_work);
	else
		__mall_destroy(head);
}

static void *mall_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (head && head->handle == handle)
		return head;

	return NULL;
}

static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
	[TCA_MATCHALL_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_MATCHALL_CLASSID]		= { .type = NLA_U32 },
};

static int mall_set_parms(struct net *net, struct tcf_proto *tp,
			  struct cls_mall_head *head,
			  unsigned long base, struct nlattr **tb,
			  struct nlattr *est, bool ovr,
			  struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &head->exts, ovr, true,
				extack);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_CLASSID]) {
		head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
		tcf_bind_filter(tp, &head->res, base);
	}
	return 0;
}

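/*
 * A matchall instance holds at most one filter, so a change request is
 * rejected with -EEXIST once a head is installed.  On success the new
 * head is published with rcu_assign_pointer() so that readers in
 * mall_classify() see a fully initialised filter.
 */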
static int mall_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       void **arg, bool ovr, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct nlattr *tb[TCA_MATCHALL_MAX + 1];
	struct cls_mall_head *new;
	u32 flags = 0;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head)
		return -EEXIST;

	err = nla_parse_nested_deprecated(tb, TCA_MATCHALL_MAX,
					  tca[TCA_OPTIONS], mall_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_FLAGS]) {
		flags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
		if (!tc_flags_valid(flags))
			return -EINVAL;
	}

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOBUFS;

	err = tcf_exts_init(&new->exts, net, TCA_MATCHALL_ACT, 0);
	if (err)
		goto err_exts_init;

	if (!handle)
		handle = 1;
	new->handle = handle;
	new->flags = flags;
	new->pf = alloc_percpu(struct tc_matchall_pcnt);
	if (!new->pf) {
		err = -ENOMEM;
		goto err_alloc_percpu;
	}

	err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr,
			     extack);
	if (err)
		goto err_set_parms;

	if (!tc_skip_hw(new->flags)) {
		err = mall_replace_hw_filter(tp, new, (unsigned long)new,
					     extack);
		if (err)
			goto err_replace_hw_filter;
	}

	if (!tc_in_hw(new->flags))
		new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	*arg = head;
	rcu_assign_pointer(tp->root, new);
	return 0;

err_replace_hw_filter:
err_set_parms:
	free_percpu(new->pf);
err_alloc_percpu:
	tcf_exts_destroy(&new->exts);
err_exts_init:
	kfree(new);
	return err;
}

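/*
 * Deletion only marks the single filter as dying and reports that it
 * was the last one; the actual teardown happens in mall_destroy() when
 * the tcf_proto itself is destroyed.
 */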
static int mall_delete(struct tcf_proto *tp, void *arg, bool *last,
		       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	head->deleting = true;
	*last = true;
	return 0;
}

static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		      bool rtnl_held)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (arg->count < arg->skip)
		goto skip;

	if (!head || head->deleting)
		return;
	if (arg->fn(tp, head, arg) < 0)
		arg->stop = 1;
skip:
	arg->count++;
}

static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;
	int err;

	if (tc_skip_hw(head->flags))
		return 0;

	cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
	if (!cls_mall.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = add ?
		TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY;
	cls_mall.cookie = (unsigned long)head;

	err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts, true);
	if (err) {
		kfree(cls_mall.rule);
		if (add && tc_skip_sw(head->flags)) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
			return err;
		}
		return 0;
	}

	err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSMATCHALL,
				    &cls_mall, cb_priv, &head->flags,
				    &head->in_hw_count);
	tc_cleanup_flow_action(&cls_mall.rule->action);
	kfree(cls_mall.rule);

	if (err)
		return err;

	return 0;
}

static void mall_stats_hw_filter(struct tcf_proto *tp,
				 struct cls_mall_head *head,
				 unsigned long cookie)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, NULL);
	cls_mall.command = TC_CLSMATCHALL_STATS;
	cls_mall.cookie = cookie;

	tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false, true);

	tcf_exts_stats_update(&head->exts, cls_mall.stats.bytes,
			      cls_mall.stats.pkts, cls_mall.stats.lastused);
}

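/*
 * Dumping first refreshes hardware counters via TC_CLSMATCHALL_STATS
 * (mall_stats_hw_filter() above), then folds the per-CPU software hit
 * counters into one tc_matchall_pcnt exported as TCA_MATCHALL_PCNT.
 */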
static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tc_matchall_pcnt gpf = {};
	struct cls_mall_head *head = fh;
	struct nlattr *nest;
	int cpu;

	if (!head)
		return skb->len;

	if (!tc_skip_hw(head->flags))
		mall_stats_hw_filter(tp, head, (unsigned long)head);

	t->tcm_handle = head->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (head->res.classid &&
	    nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
		goto nla_put_failure;

	if (head->flags && nla_put_u32(skb, TCA_MATCHALL_FLAGS, head->flags))
		goto nla_put_failure;

	for_each_possible_cpu(cpu) {
		struct tc_matchall_pcnt *pf = per_cpu_ptr(head->pf, cpu);

		gpf.rhit += pf->rhit;
	}

	if (nla_put_64bit(skb, TCA_MATCHALL_PCNT,
			  sizeof(struct tc_matchall_pcnt),
			  &gpf, TCA_MATCHALL_PAD))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void mall_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_mall_head *head = fh;

	if (head && head->res.classid == classid)
		head->res.class = cl;
}

static struct tcf_proto_ops cls_mall_ops __read_mostly = {
	.kind		= "matchall",
	.classify	= mall_classify,
	.init		= mall_init,
	.destroy	= mall_destroy,
	.get		= mall_get,
	.change		= mall_change,
	.delete		= mall_delete,
	.walk		= mall_walk,
	.reoffload	= mall_reoffload,
	.dump		= mall_dump,
	.bind_class	= mall_bind_class,
	.owner		= THIS_MODULE,
};

static int __init cls_mall_init(void)
{
	return register_tcf_proto_ops(&cls_mall_ops);
}

static void __exit cls_mall_exit(void)
{
	unregister_tcf_proto_ops(&cls_mall_ops);
}

module_init(cls_mall_init);
module_exit(cls_mall_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Match-all classifier");
MODULE_LICENSE("GPL v2");
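Between v5.4 and v5.9 the classifier itself is structurally unchanged; the visible differences are confined to the kernel APIs it sits on. In the v5.9 listing below, tc_setup_flow_action() no longer takes a third rtnl_held argument, mall_policy gains an explicit TCA_MATCHALL_FLAGS entry, tcf_exts_stats_update() grows drops, used_hw_stats and used_hw_stats_valid parameters, and mall_bind_class() now receives the qdisc and base so it can call __tcf_bind_filter()/__tcf_unbind_filter() instead of writing res.class directly.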
v5.9
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_matchall.c		Match-all classifier
 *
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>

struct cls_mall_head {
	struct tcf_exts exts;
	struct tcf_result res;
	u32 handle;
	u32 flags;
	unsigned int in_hw_count;
	struct tc_matchall_pcnt __percpu *pf;
	struct rcu_work rwork;
	bool deleting;
};

static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct cls_mall_head *head = rcu_dereference_bh(tp->root);

	if (unlikely(!head))
		return -1;

	if (tc_skip_sw(head->flags))
		return -1;

	*res = head->res;
	__this_cpu_inc(head->pf->rhit);
	return tcf_exts_exec(skb, &head->exts, res);
}

static int mall_init(struct tcf_proto *tp)
{
	return 0;
}

static void __mall_destroy(struct cls_mall_head *head)
{
	tcf_exts_destroy(&head->exts);
	tcf_exts_put_net(&head->exts);
	free_percpu(head->pf);
	kfree(head);
}

static void mall_destroy_work(struct work_struct *work)
{
	struct cls_mall_head *head = container_of(to_rcu_work(work),
						  struct cls_mall_head,
						  rwork);
	rtnl_lock();
	__mall_destroy(head);
	rtnl_unlock();
}

static void mall_destroy_hw_filter(struct tcf_proto *tp,
				   struct cls_mall_head *head,
				   unsigned long cookie,
				   struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_DESTROY;
	cls_mall.cookie = cookie;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall, false,
			    &head->flags, &head->in_hw_count, true);
}

static int mall_replace_hw_filter(struct tcf_proto *tp,
				  struct cls_mall_head *head,
				  unsigned long cookie,
				  struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;
	bool skip_sw = tc_skip_sw(head->flags);
	int err;

	cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
	if (!cls_mall.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_REPLACE;
	cls_mall.cookie = cookie;

	err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts);
	if (err) {
		kfree(cls_mall.rule);
		mall_destroy_hw_filter(tp, head, cookie, NULL);
		if (skip_sw)
			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
		else
			err = 0;

		return err;
	}

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall,
			      skip_sw, &head->flags, &head->in_hw_count, true);
	tc_cleanup_flow_action(&cls_mall.rule->action);
	kfree(cls_mall.rule);

	if (err) {
		mall_destroy_hw_filter(tp, head, cookie, NULL);
		return err;
	}

	if (skip_sw && !(head->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void mall_destroy(struct tcf_proto *tp, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (!head)
		return;

	tcf_unbind_filter(tp, &head->res);

	if (!tc_skip_hw(head->flags))
		mall_destroy_hw_filter(tp, head, (unsigned long) head, extack);

	if (tcf_exts_get_net(&head->exts))
		tcf_queue_work(&head->rwork, mall_destroy_work);
	else
		__mall_destroy(head);
}

static void *mall_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (head && head->handle == handle)
		return head;

	return NULL;
}

static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
	[TCA_MATCHALL_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_MATCHALL_CLASSID]		= { .type = NLA_U32 },
	[TCA_MATCHALL_FLAGS]		= { .type = NLA_U32 },
};

static int mall_set_parms(struct net *net, struct tcf_proto *tp,
			  struct cls_mall_head *head,
			  unsigned long base, struct nlattr **tb,
			  struct nlattr *est, bool ovr,
			  struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &head->exts, ovr, true,
				extack);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_CLASSID]) {
		head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
		tcf_bind_filter(tp, &head->res, base);
	}
	return 0;
}

static int mall_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       void **arg, bool ovr, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct nlattr *tb[TCA_MATCHALL_MAX + 1];
	struct cls_mall_head *new;
	u32 flags = 0;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head)
		return -EEXIST;

	err = nla_parse_nested_deprecated(tb, TCA_MATCHALL_MAX,
					  tca[TCA_OPTIONS], mall_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_FLAGS]) {
		flags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
		if (!tc_flags_valid(flags))
			return -EINVAL;
	}

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOBUFS;

	err = tcf_exts_init(&new->exts, net, TCA_MATCHALL_ACT, 0);
	if (err)
		goto err_exts_init;

	if (!handle)
		handle = 1;
	new->handle = handle;
	new->flags = flags;
	new->pf = alloc_percpu(struct tc_matchall_pcnt);
	if (!new->pf) {
		err = -ENOMEM;
		goto err_alloc_percpu;
	}

	err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr,
			     extack);
	if (err)
		goto err_set_parms;

	if (!tc_skip_hw(new->flags)) {
		err = mall_replace_hw_filter(tp, new, (unsigned long)new,
					     extack);
		if (err)
			goto err_replace_hw_filter;
	}

	if (!tc_in_hw(new->flags))
		new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	*arg = head;
	rcu_assign_pointer(tp->root, new);
	return 0;

err_replace_hw_filter:
err_set_parms:
	free_percpu(new->pf);
err_alloc_percpu:
	tcf_exts_destroy(&new->exts);
err_exts_init:
	kfree(new);
	return err;
}

static int mall_delete(struct tcf_proto *tp, void *arg, bool *last,
		       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	head->deleting = true;
	*last = true;
	return 0;
}

static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		      bool rtnl_held)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (arg->count < arg->skip)
		goto skip;

	if (!head || head->deleting)
		return;
	if (arg->fn(tp, head, arg) < 0)
		arg->stop = 1;
skip:
	arg->count++;
}

static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;
	int err;

	if (tc_skip_hw(head->flags))
		return 0;

	cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
	if (!cls_mall.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = add ?
		TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY;
	cls_mall.cookie = (unsigned long)head;

	err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts);
	if (err) {
		kfree(cls_mall.rule);
		if (add && tc_skip_sw(head->flags)) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
			return err;
		}
		return 0;
	}

	err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSMATCHALL,
				    &cls_mall, cb_priv, &head->flags,
				    &head->in_hw_count);
	tc_cleanup_flow_action(&cls_mall.rule->action);
	kfree(cls_mall.rule);

	if (err)
		return err;

	return 0;
}

static void mall_stats_hw_filter(struct tcf_proto *tp,
				 struct cls_mall_head *head,
				 unsigned long cookie)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, NULL);
	cls_mall.command = TC_CLSMATCHALL_STATS;
	cls_mall.cookie = cookie;

	tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false, true);

	tcf_exts_stats_update(&head->exts, cls_mall.stats.bytes,
			      cls_mall.stats.pkts, cls_mall.stats.drops,
			      cls_mall.stats.lastused,
			      cls_mall.stats.used_hw_stats,
			      cls_mall.stats.used_hw_stats_valid);
}

static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tc_matchall_pcnt gpf = {};
	struct cls_mall_head *head = fh;
	struct nlattr *nest;
	int cpu;

	if (!head)
		return skb->len;

	if (!tc_skip_hw(head->flags))
		mall_stats_hw_filter(tp, head, (unsigned long)head);

	t->tcm_handle = head->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (head->res.classid &&
	    nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
		goto nla_put_failure;

	if (head->flags && nla_put_u32(skb, TCA_MATCHALL_FLAGS, head->flags))
		goto nla_put_failure;

	for_each_possible_cpu(cpu) {
		struct tc_matchall_pcnt *pf = per_cpu_ptr(head->pf, cpu);

		gpf.rhit += pf->rhit;
	}

	if (nla_put_64bit(skb, TCA_MATCHALL_PCNT,
			  sizeof(struct tc_matchall_pcnt),
			  &gpf, TCA_MATCHALL_PAD))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void mall_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			    unsigned long base)
{
	struct cls_mall_head *head = fh;

	if (head && head->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &head->res, base);
		else
			__tcf_unbind_filter(q, &head->res);
	}
}

static struct tcf_proto_ops cls_mall_ops __read_mostly = {
	.kind		= "matchall",
	.classify	= mall_classify,
	.init		= mall_init,
	.destroy	= mall_destroy,
	.get		= mall_get,
	.change		= mall_change,
	.delete		= mall_delete,
	.walk		= mall_walk,
	.reoffload	= mall_reoffload,
	.dump		= mall_dump,
	.bind_class	= mall_bind_class,
	.owner		= THIS_MODULE,
};

static int __init cls_mall_init(void)
{
	return register_tcf_proto_ops(&cls_mall_ops);
}

static void __exit cls_mall_exit(void)
{
	unregister_tcf_proto_ops(&cls_mall_ops);
}

module_init(cls_mall_init);
module_exit(cls_mall_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Match-all classifier");
MODULE_LICENSE("GPL v2");
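For orientation, here is roughly what sits on the other side of the tc_setup_cb_add()/tc_setup_cb_call() calls above: a driver registers a flow block callback and receives the same struct tc_cls_matchall_offload that this classifier fills in. The sketch below is illustrative only; the mydrv_* names are hypothetical helpers, while the flow_setup_cb_t signature, the TC_CLSMATCHALL_* commands and the offload fields (cookie, rule, stats) are the ones used in the listings.

/* Minimal sketch of a driver-side matchall handler; mydrv_* is hypothetical. */
static int mydrv_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				   void *cb_priv)
{
	struct tc_cls_matchall_offload *f = type_data;
	struct mydrv_port *port = cb_priv;

	if (type != TC_SETUP_CLSMATCHALL)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		/* Program f->rule->action into hardware, keyed by f->cookie. */
		return mydrv_mall_install(port, f->cookie, f->rule);
	case TC_CLSMATCHALL_DESTROY:
		/* Remove whatever was installed under this cookie. */
		mydrv_mall_remove(port, f->cookie);
		return 0;
	case TC_CLSMATCHALL_STATS:
		/* Fill f->stats so mall_stats_hw_filter() can fold the
		 * hardware counters back into the software filter.
		 */
		return mydrv_mall_stats(port, f->cookie, &f->stats);
	default:
		return -EOPNOTSUPP;
	}
}

A callback of this shape is what tc_setup_cb_add() invokes with skip_sw deciding whether a driver failure is fatal, and what mall_reoffload() replays when a block is bound to or unbound from an indirect device.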