// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_matchall.c		Match-all classifier
 *
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>

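/*
 * matchall keeps at most one filter per tcf_proto instance; all of its
 * state lives in a single head published via RCU on tp->root:
 *
 * @exts:	actions attached to the filter
 * @res:	classification result (classid) returned by mall_classify()
 * @handle:	filter handle; only one exists per instance
 * @flags:	TCA_CLS_FLAGS_* (skip_hw, skip_sw, in_hw, ...)
 * @in_hw_count: number of hardware callbacks the filter is offloaded to
 * @pf:		per-CPU software hit counters, summed at dump time
 * @rwork:	deferred destruction through the RCU workqueue helpers
 * @deleting:	set by mall_delete() so walks skip a dying instance
 */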
struct cls_mall_head {
	struct tcf_exts exts;
	struct tcf_result res;
	u32 handle;
	u32 flags;
	unsigned int in_hw_count;
	struct tc_matchall_pcnt __percpu *pf;
	struct rcu_work rwork;
	bool deleting;
};

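/*
 * Fast path, called under RCU BH from the qdisc layer: every packet
 * matches, so classification just hands back the configured result and
 * runs the attached actions.  Filters installed with skip_sw never match
 * in software by design.
 */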
static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct cls_mall_head *head = rcu_dereference_bh(tp->root);

	if (unlikely(!head))
		return -1;

	if (tc_skip_sw(head->flags))
		return -1;

	*res = head->res;
	__this_cpu_inc(head->pf->rhit);
	return tcf_exts_exec(skb, &head->exts, res);
}

static int mall_init(struct tcf_proto *tp)
{
	return 0;
}

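/*
 * Destruction is deferred through tcf_queue_work() while the netns is
 * still alive, so concurrent readers in mall_classify() drain before the
 * extensions and per-CPU counters are freed.  The work item retakes RTNL
 * because destroying the actions may require it.
 */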
static void __mall_destroy(struct cls_mall_head *head)
{
	tcf_exts_destroy(&head->exts);
	tcf_exts_put_net(&head->exts);
	free_percpu(head->pf);
	kfree(head);
}

static void mall_destroy_work(struct work_struct *work)
{
	struct cls_mall_head *head = container_of(to_rcu_work(work),
						  struct cls_mall_head,
						  rwork);
	rtnl_lock();
	__mall_destroy(head);
	rtnl_unlock();
}

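/*
 * Hardware offload: the filter is pushed to the block's callbacks as a
 * TC_SETUP_CLSMATCHALL command, with the head pointer doubling as the
 * offload cookie.  For skip_sw filters a hardware failure is fatal (the
 * filter could never match anywhere); otherwise offload is best-effort
 * and errors from flow action setup are swallowed.
 */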
static void mall_destroy_hw_filter(struct tcf_proto *tp,
				   struct cls_mall_head *head,
				   unsigned long cookie,
				   struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_DESTROY;
	cls_mall.cookie = cookie;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall, false,
			    &head->flags, &head->in_hw_count, true);
}

static int mall_replace_hw_filter(struct tcf_proto *tp,
				  struct cls_mall_head *head,
				  unsigned long cookie,
				  struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;
	bool skip_sw = tc_skip_sw(head->flags);
	int err;

	cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
	if (!cls_mall.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_REPLACE;
	cls_mall.cookie = cookie;

	err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts, true);
	if (err) {
		kfree(cls_mall.rule);
		mall_destroy_hw_filter(tp, head, cookie, NULL);
		if (skip_sw)
			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
		else
			err = 0;

		return err;
	}

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall,
			      skip_sw, &head->flags, &head->in_hw_count, true);
	tc_cleanup_flow_action(&cls_mall.rule->action);
	kfree(cls_mall.rule);

	if (err) {
		mall_destroy_hw_filter(tp, head, cookie, NULL);
		return err;
	}

	if (skip_sw && !(head->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void mall_destroy(struct tcf_proto *tp, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (!head)
		return;

	tcf_unbind_filter(tp, &head->res);

	if (!tc_skip_hw(head->flags))
		mall_destroy_hw_filter(tp, head, (unsigned long) head, extack);

	if (tcf_exts_get_net(&head->exts))
		tcf_queue_work(&head->rwork, mall_destroy_work);
	else
		__mall_destroy(head);
}

static void *mall_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (head && head->handle == handle)
		return head;

	return NULL;
}

static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
	[TCA_MATCHALL_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_MATCHALL_CLASSID]		= { .type = NLA_U32 },
	/* mall_change() reads this attribute, so it must be typed here too */
	[TCA_MATCHALL_FLAGS]		= { .type = NLA_U32 },
};

static int mall_set_parms(struct net *net, struct tcf_proto *tp,
			  struct cls_mall_head *head,
			  unsigned long base, struct nlattr **tb,
			  struct nlattr *est, bool ovr,
			  struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &head->exts, ovr, true,
				extack);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_CLASSID]) {
		head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
		tcf_bind_filter(tp, &head->res, base);
	}
	return 0;
}

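/*
 * The classifier stores a single head on tp->root, so a change request on
 * a populated instance fails with -EEXIST instead of replacing the head.
 * A fully constructed new head is published atomically with
 * rcu_assign_pointer() only after software and, unless skip_hw, hardware
 * setup have both succeeded.
 */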
static int mall_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       void **arg, bool ovr, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct nlattr *tb[TCA_MATCHALL_MAX + 1];
	struct cls_mall_head *new;
	u32 flags = 0;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head)
		return -EEXIST;

	err = nla_parse_nested_deprecated(tb, TCA_MATCHALL_MAX,
					  tca[TCA_OPTIONS], mall_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_FLAGS]) {
		flags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
		if (!tc_flags_valid(flags))
			return -EINVAL;
	}

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOBUFS;

	err = tcf_exts_init(&new->exts, net, TCA_MATCHALL_ACT, 0);
	if (err)
		goto err_exts_init;

	if (!handle)
		handle = 1;
	new->handle = handle;
	new->flags = flags;
	new->pf = alloc_percpu(struct tc_matchall_pcnt);
	if (!new->pf) {
		err = -ENOMEM;
		goto err_alloc_percpu;
	}

	err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr,
			     extack);
	if (err)
		goto err_set_parms;

	if (!tc_skip_hw(new->flags)) {
		err = mall_replace_hw_filter(tp, new, (unsigned long)new,
					     extack);
		if (err)
			goto err_replace_hw_filter;
	}

	if (!tc_in_hw(new->flags))
		new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	*arg = head;
	rcu_assign_pointer(tp->root, new);
	return 0;

err_replace_hw_filter:
err_set_parms:
	free_percpu(new->pf);
err_alloc_percpu:
	tcf_exts_destroy(&new->exts);
err_exts_init:
	kfree(new);
	return err;
}

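/*
 * Deleting the single filter leaves nothing for the instance to do, so
 * mall_delete() only marks the head as dying and reports *last = true;
 * the core then tears down the whole tcf_proto and the head is freed via
 * mall_destroy().
 */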
static int mall_delete(struct tcf_proto *tp, void *arg, bool *last,
		       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	head->deleting = true;
	*last = true;
	return 0;
}

static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		      bool rtnl_held)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (arg->count < arg->skip)
		goto skip;

	if (!head || head->deleting)
		return;
	if (arg->fn(tp, head, arg) < 0)
		arg->stop = 1;
skip:
	arg->count++;
}

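/*
 * Replay the (single) filter towards one block callback when a device is
 * bound to or unbound from the block, rebuilding the flow rule from the
 * current extensions.  The skip_sw rules mirror the replace path: a setup
 * failure only matters when the filter cannot fall back to software.
 */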
static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;
	int err;

	/* The instance may not hold a filter yet; nothing to replay then. */
	if (!head || tc_skip_hw(head->flags))
		return 0;

	cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts));
	if (!cls_mall.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = add ?
		TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY;
	cls_mall.cookie = (unsigned long)head;

	err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts, true);
	if (err) {
		kfree(cls_mall.rule);
		if (add && tc_skip_sw(head->flags)) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
			return err;
		}
		return 0;
	}

	err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSMATCHALL,
				    &cls_mall, cb_priv, &head->flags,
				    &head->in_hw_count);
	tc_cleanup_flow_action(&cls_mall.rule->action);
	kfree(cls_mall.rule);

	return err;
}

static void mall_stats_hw_filter(struct tcf_proto *tp,
				 struct cls_mall_head *head,
				 unsigned long cookie)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, NULL);
	cls_mall.command = TC_CLSMATCHALL_STATS;
	cls_mall.cookie = cookie;

	tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false, true);

	tcf_exts_stats_update(&head->exts, cls_mall.stats.bytes,
			      cls_mall.stats.pkts, cls_mall.stats.lastused);
}

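/*
 * Dumping pulls fresh hardware stats into the extensions first, then
 * folds the per-CPU software hit counters into a single tc_matchall_pcnt
 * so user space sees one TCA_MATCHALL_PCNT total next to the classid,
 * flags and attached actions.
 */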
static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tc_matchall_pcnt gpf = {};
	struct cls_mall_head *head = fh;
	struct nlattr *nest;
	int cpu;

	if (!head)
		return skb->len;

	if (!tc_skip_hw(head->flags))
		mall_stats_hw_filter(tp, head, (unsigned long)head);

	t->tcm_handle = head->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (head->res.classid &&
	    nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
		goto nla_put_failure;

	if (head->flags && nla_put_u32(skb, TCA_MATCHALL_FLAGS, head->flags))
		goto nla_put_failure;

	for_each_possible_cpu(cpu) {
		struct tc_matchall_pcnt *pf = per_cpu_ptr(head->pf, cpu);

		gpf.rhit += pf->rhit;
	}

	if (nla_put_64bit(skb, TCA_MATCHALL_PCNT,
			  sizeof(struct tc_matchall_pcnt),
			  &gpf, TCA_MATCHALL_PAD))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void mall_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_mall_head *head = fh;

	if (head && head->res.classid == classid)
		head->res.class = cl;
}

static struct tcf_proto_ops cls_mall_ops __read_mostly = {
	.kind		= "matchall",
	.classify	= mall_classify,
	.init		= mall_init,
	.destroy	= mall_destroy,
	.get		= mall_get,
	.change		= mall_change,
	.delete		= mall_delete,
	.walk		= mall_walk,
	.reoffload	= mall_reoffload,
	.dump		= mall_dump,
	.bind_class	= mall_bind_class,
	.owner		= THIS_MODULE,
};

static int __init cls_mall_init(void)
{
	return register_tcf_proto_ops(&cls_mall_ops);
}

static void __exit cls_mall_exit(void)
{
	unregister_tcf_proto_ops(&cls_mall_ops);
}

module_init(cls_mall_init);
module_exit(cls_mall_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Match-all classifier");
MODULE_LICENSE("GPL v2");
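
/*
 * Typical usage from user space, as a sketch (exact iproute2 syntax can
 * vary between versions):
 *
 *	# mirror all ingress traffic of eth0 to eth1, offloaded if the
 *	# driver supports it, software otherwise
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress matchall \
 *		action mirred egress mirror dev eth1
 *
 * Adding skip_sw forces the hardware path (insertion fails if offload is
 * impossible); skip_hw keeps the filter in software only, where hits are
 * counted in the per-CPU rhit counters.
 */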