v4.17
 
/*
 * net/sched/cls_matchall.c		Match-all classifier
 *
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>

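/*
 * matchall keeps exactly one filter instance per tcf_proto: all per-filter
 * state lives in this single, RCU-managed head.
 */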
struct cls_mall_head {
	struct tcf_exts exts;
	struct tcf_result res;
	u32 handle;
	u32 flags;
	union {
		struct work_struct work;
		struct rcu_head	rcu;
	};
};

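/*
 * Every packet matches. Note there is no NULL check on the head here; later
 * kernels add one (see the v5.9 version below), since a packet can arrive
 * between tp creation and the first filter being installed.
 */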
static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct cls_mall_head *head = rcu_dereference_bh(tp->root);

	if (tc_skip_sw(head->flags))
		return -1;

	*res = head->res;
	return tcf_exts_exec(skb, &head->exts, res);
}

static int mall_init(struct tcf_proto *tp)
{
	return 0;
}

static void __mall_destroy(struct cls_mall_head *head)
{
	tcf_exts_destroy(&head->exts);
	tcf_exts_put_net(&head->exts);
	kfree(head);
}

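/*
 * Teardown is split in two: tcf_exts_destroy() needs the RTNL lock, which
 * must not be taken from RCU (softirq) callback context, so the RCU callback
 * below only bounces the final cleanup into a workqueue.
 */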
static void mall_destroy_work(struct work_struct *work)
{
	struct cls_mall_head *head = container_of(work, struct cls_mall_head,
						  work);
	rtnl_lock();
	__mall_destroy(head);
	rtnl_unlock();
}

static void mall_destroy_rcu(struct rcu_head *rcu)
{
	struct cls_mall_head *head = container_of(rcu, struct cls_mall_head,
						  rcu);

	INIT_WORK(&head->work, mall_destroy_work);
	tcf_queue_work(&head->work);
}

static void mall_destroy_hw_filter(struct tcf_proto *tp,
				   struct cls_mall_head *head,
				   unsigned long cookie,
				   struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_DESTROY;
	cls_mall.cookie = cookie;

	tc_setup_cb_call(block, NULL, TC_SETUP_CLSMATCHALL, &cls_mall, false);
	tcf_block_offload_dec(block, &head->flags);
}

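/*
 * Pushes the filter to all hardware offload callbacks registered on the
 * block. A positive return from tc_setup_cb_call() counts how many callbacks
 * accepted it; with skip_sw, at least one device must take the filter or the
 * insert fails.
 */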
static int mall_replace_hw_filter(struct tcf_proto *tp,
				  struct cls_mall_head *head,
				  unsigned long cookie,
				  struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;
	bool skip_sw = tc_skip_sw(head->flags);
	int err;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_REPLACE;
	cls_mall.exts = &head->exts;
	cls_mall.cookie = cookie;

	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSMATCHALL,
			       &cls_mall, skip_sw);
	if (err < 0) {
		mall_destroy_hw_filter(tp, head, cookie, NULL);
		return err;
	} else if (err > 0) {
		tcf_block_offload_inc(block, &head->flags);
	}

	if (skip_sw && !(head->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void mall_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (!head)
		return;

	if (!tc_skip_hw(head->flags))
		mall_destroy_hw_filter(tp, head, (unsigned long) head, extack);

	if (tcf_exts_get_net(&head->exts))
		call_rcu(&head->rcu, mall_destroy_rcu);
	else
		__mall_destroy(head);
}

static void *mall_get(struct tcf_proto *tp, u32 handle)
{
	return NULL;
}

static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
	[TCA_MATCHALL_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_MATCHALL_CLASSID]		= { .type = NLA_U32 },
};

static int mall_set_parms(struct net *net, struct tcf_proto *tp,
			  struct cls_mall_head *head,
			  unsigned long base, struct nlattr **tb,
			  struct nlattr *est, bool ovr,
			  struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &head->exts, ovr, extack);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_CLASSID]) {
		head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
		tcf_bind_filter(tp, &head->res, base);
	}
	return 0;
}

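/*
 * Only one matchall filter may exist per tcf_proto: creating a second one
 * fails with -EEXIST, and an unspecified handle defaults to 1.
 */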
static int mall_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       void **arg, bool ovr, struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct nlattr *tb[TCA_MATCHALL_MAX + 1];
	struct cls_mall_head *new;
	u32 flags = 0;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head)
		return -EEXIST;

	err = nla_parse_nested(tb, TCA_MATCHALL_MAX, tca[TCA_OPTIONS],
			       mall_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_FLAGS]) {
		flags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
		if (!tc_flags_valid(flags))
			return -EINVAL;
	}

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOBUFS;

	err = tcf_exts_init(&new->exts, TCA_MATCHALL_ACT, 0);
	if (err)
		goto err_exts_init;

	if (!handle)
		handle = 1;
	new->handle = handle;
	new->flags = flags;

	err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr,
			     extack);
	if (err)
		goto err_set_parms;

	if (!tc_skip_hw(new->flags)) {
		err = mall_replace_hw_filter(tp, new, (unsigned long)new,
					     extack);
		if (err)
			goto err_replace_hw_filter;
	}

	if (!tc_in_hw(new->flags))
		new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	*arg = head;
	rcu_assign_pointer(tp->root, new);
	return 0;

err_replace_hw_filter:
err_set_parms:
	tcf_exts_destroy(&new->exts);
err_exts_init:
	kfree(new);
	return err;
}

static int mall_delete(struct tcf_proto *tp, void *arg, bool *last,
		       struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (arg->count < arg->skip)
		goto skip;
	if (arg->fn(tp, head, arg) < 0)
		arg->stop = 1;
skip:
	arg->count++;
}

static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_mall_head *head = fh;
	struct nlattr *nest;

	if (!head)
		return skb->len;

	t->tcm_handle = head->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (head->res.classid &&
	    nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
		goto nla_put_failure;

	if (head->flags && nla_put_u32(skb, TCA_MATCHALL_FLAGS, head->flags))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void mall_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_mall_head *head = fh;

	if (head && head->res.classid == classid)
		head->res.class = cl;
}

static struct tcf_proto_ops cls_mall_ops __read_mostly = {
	.kind		= "matchall",
	.classify	= mall_classify,
	.init		= mall_init,
	.destroy	= mall_destroy,
	.get		= mall_get,
	.change		= mall_change,
	.delete		= mall_delete,
	.walk		= mall_walk,
	.dump		= mall_dump,
	.bind_class	= mall_bind_class,
	.owner		= THIS_MODULE,
};

static int __init cls_mall_init(void)
{
	return register_tcf_proto_ops(&cls_mall_ops);
}

static void __exit cls_mall_exit(void)
{
	unregister_tcf_proto_ops(&cls_mall_ops);
}

module_init(cls_mall_init);
module_exit(cls_mall_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Match-all classifier");
MODULE_LICENSE("GPL v2");
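
For reference, a distilled, self-contained sketch of the two-stage teardown that mall_destroy_rcu()/mall_destroy_work() implement above. The foo object and its helpers are hypothetical, and schedule_work() stands in for tcf_queue_work(); call_rcu(), INIT_WORK() and container_of() are the standard kernel APIs:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo {
	/* ... payload read by RCU readers ... */
	union {			/* the two members are never live at once */
		struct work_struct work;
		struct rcu_head rcu;
	};
};

static void foo_destroy_work(struct work_struct *work)
{
	struct foo *f = container_of(work, struct foo, work);

	/* process context: may sleep, may take locks such as RTNL */
	kfree(f);
}

static void foo_destroy_rcu(struct rcu_head *rcu)
{
	struct foo *f = container_of(rcu, struct foo, rcu);

	/* softirq context: cannot sleep, so defer the real cleanup */
	INIT_WORK(&f->work, foo_destroy_work);
	schedule_work(&f->work);
}

/* caller: free f once all RCU readers are done:
 *	call_rcu(&f->rcu, foo_destroy_rcu);
 */

The union is safe because the rcu_head is fully consumed before the work_struct is initialized, so the object never needs both at the same time.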
v5.9
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_matchall.c		Match-all classifier
 *
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>

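/*
 * Compared with v4.17, the head gained a per-CPU software hit counter (pf),
 * an offload reference count (in_hw_count), an rcu_work replacing the
 * open-coded rcu_head/work_struct union, and a 'deleting' flag so the single
 * matchall instance can now be removed via mall_delete().
 */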
struct cls_mall_head {
	struct tcf_exts exts;
	struct tcf_result res;
	u32 handle;
	u32 flags;
	unsigned int in_hw_count;
	struct tc_matchall_pcnt __percpu *pf;
	struct rcu_work rwork;
	bool deleting;
};

static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct cls_mall_head *head = rcu_dereference_bh(tp->root);

	if (unlikely(!head))
		return -1;

	if (tc_skip_sw(head->flags))
		return -1;

	*res = head->res;
	__this_cpu_inc(head->pf->rhit);
	return tcf_exts_exec(skb, &head->exts, res);
}

static int mall_init(struct tcf_proto *tp)
{
	return 0;
}

static void __mall_destroy(struct cls_mall_head *head)
{
	tcf_exts_destroy(&head->exts);
	tcf_exts_put_net(&head->exts);
	free_percpu(head->pf);
	kfree(head);
}

static void mall_destroy_work(struct work_struct *work)
{
	struct cls_mall_head *head = container_of(to_rcu_work(work),
						  struct cls_mall_head,
						  rwork);
	rtnl_lock();
	__mall_destroy(head);
	rtnl_unlock();
}

static void mall_destroy_hw_filter(struct tcf_proto *tp,
				   struct cls_mall_head *head,
				   unsigned long cookie,
				   struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_DESTROY;
	cls_mall.cookie = cookie;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall, false,
			    &head->flags, &head->in_hw_count, true);
}

static int mall_replace_hw_filter(struct tcf_proto *tp,
				  struct cls_mall_head *head,
				  unsigned long cookie,
				  struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;
	bool skip_sw = tc_skip_sw(head->flags);
	int err;

	cls_mall.rule =	flow_rule_alloc(tcf_exts_num_actions(&head->exts));
	if (!cls_mall.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_REPLACE;
	cls_mall.cookie = cookie;

	err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts);
	if (err) {
		kfree(cls_mall.rule);
		mall_destroy_hw_filter(tp, head, cookie, NULL);
		if (skip_sw)
			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
		else
			err = 0;

		return err;
	}

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall,
			      skip_sw, &head->flags, &head->in_hw_count, true);
	tc_cleanup_flow_action(&cls_mall.rule->action);
	kfree(cls_mall.rule);

	if (err) {
		mall_destroy_hw_filter(tp, head, cookie, NULL);
		return err;
	}

	if (skip_sw && !(head->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void mall_destroy(struct tcf_proto *tp, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (!head)
		return;

	tcf_unbind_filter(tp, &head->res);

	if (!tc_skip_hw(head->flags))
		mall_destroy_hw_filter(tp, head, (unsigned long) head, extack);

	if (tcf_exts_get_net(&head->exts))
		tcf_queue_work(&head->rwork, mall_destroy_work);
	else
		__mall_destroy(head);
}

static void *mall_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (head && head->handle == handle)
		return head;

	return NULL;
}

static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
	[TCA_MATCHALL_UNSPEC]		= { .type = NLA_UNSPEC },
	[TCA_MATCHALL_CLASSID]		= { .type = NLA_U32 },
	[TCA_MATCHALL_FLAGS]		= { .type = NLA_U32 },
};

static int mall_set_parms(struct net *net, struct tcf_proto *tp,
			  struct cls_mall_head *head,
			  unsigned long base, struct nlattr **tb,
			  struct nlattr *est, bool ovr,
			  struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &head->exts, ovr, true,
				extack);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_CLASSID]) {
		head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
		tcf_bind_filter(tp, &head->res, base);
	}
	return 0;
}

static int mall_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       void **arg, bool ovr, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct nlattr *tb[TCA_MATCHALL_MAX + 1];
	struct cls_mall_head *new;
	u32 flags = 0;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head)
		return -EEXIST;

	err = nla_parse_nested_deprecated(tb, TCA_MATCHALL_MAX,
					  tca[TCA_OPTIONS], mall_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_FLAGS]) {
		flags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
		if (!tc_flags_valid(flags))
			return -EINVAL;
	}

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOBUFS;

	err = tcf_exts_init(&new->exts, net, TCA_MATCHALL_ACT, 0);
	if (err)
		goto err_exts_init;

	if (!handle)
		handle = 1;
	new->handle = handle;
	new->flags = flags;
	new->pf = alloc_percpu(struct tc_matchall_pcnt);
	if (!new->pf) {
		err = -ENOMEM;
		goto err_alloc_percpu;
	}

	err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr,
			     extack);
	if (err)
		goto err_set_parms;

	if (!tc_skip_hw(new->flags)) {
		err = mall_replace_hw_filter(tp, new, (unsigned long)new,
					     extack);
		if (err)
			goto err_replace_hw_filter;
	}

	if (!tc_in_hw(new->flags))
		new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	*arg = head;
	rcu_assign_pointer(tp->root, new);
	return 0;

err_replace_hw_filter:
err_set_parms:
	free_percpu(new->pf);
err_alloc_percpu:
	tcf_exts_destroy(&new->exts);
err_exts_init:
	kfree(new);
	return err;
}

static int mall_delete(struct tcf_proto *tp, void *arg, bool *last,
		       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	head->deleting = true;
	*last = true;
	return 0;
}

static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		      bool rtnl_held)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (arg->count < arg->skip)
		goto skip;

	if (!head || head->deleting)
		return;
	if (arg->fn(tp, head, arg) < 0)
		arg->stop = 1;
skip:
	arg->count++;
}

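/*
 * reoffload (new since v4.17) replays the installed filter to a single
 * hardware callback, e.g. when a block is later bound to or unbound from a
 * device, keeping head->flags and in_hw_count consistent.
 */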
static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;
	int err;

	if (tc_skip_hw(head->flags))
		return 0;

	cls_mall.rule =	flow_rule_alloc(tcf_exts_num_actions(&head->exts));
	if (!cls_mall.rule)
		return -ENOMEM;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = add ?
		TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY;
	cls_mall.cookie = (unsigned long)head;

	err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts);
	if (err) {
		kfree(cls_mall.rule);
		if (add && tc_skip_sw(head->flags)) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
			return err;
		}
		return 0;
	}

	err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSMATCHALL,
				    &cls_mall, cb_priv, &head->flags,
				    &head->in_hw_count);
	tc_cleanup_flow_action(&cls_mall.rule->action);
	kfree(cls_mall.rule);

	if (err)
		return err;

	return 0;
}

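/*
 * Pulls byte/packet counters from the driver into the software action
 * stats, so 'tc -s filter show' reflects hardware-processed traffic too.
 */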
static void mall_stats_hw_filter(struct tcf_proto *tp,
				 struct cls_mall_head *head,
				 unsigned long cookie)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, NULL);
	cls_mall.command = TC_CLSMATCHALL_STATS;
	cls_mall.cookie = cookie;

	tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false, true);

	tcf_exts_stats_update(&head->exts, cls_mall.stats.bytes,
			      cls_mall.stats.pkts, cls_mall.stats.drops,
			      cls_mall.stats.lastused,
			      cls_mall.stats.used_hw_stats,
			      cls_mall.stats.used_hw_stats_valid);
}

static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tc_matchall_pcnt gpf = {};
	struct cls_mall_head *head = fh;
	struct nlattr *nest;
	int cpu;

	if (!head)
		return skb->len;

	if (!tc_skip_hw(head->flags))
		mall_stats_hw_filter(tp, head, (unsigned long)head);

	t->tcm_handle = head->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (head->res.classid &&
	    nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
		goto nla_put_failure;

	if (head->flags && nla_put_u32(skb, TCA_MATCHALL_FLAGS, head->flags))
		goto nla_put_failure;

	for_each_possible_cpu(cpu) {
		struct tc_matchall_pcnt *pf = per_cpu_ptr(head->pf, cpu);

		gpf.rhit += pf->rhit;
	}

	if (nla_put_64bit(skb, TCA_MATCHALL_PCNT,
			  sizeof(struct tc_matchall_pcnt),
			  &gpf, TCA_MATCHALL_PAD))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void mall_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			    unsigned long base)
{
	struct cls_mall_head *head = fh;

	if (head && head->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &head->res, base);
		else
			__tcf_unbind_filter(q, &head->res);
	}
}

static struct tcf_proto_ops cls_mall_ops __read_mostly = {
	.kind		= "matchall",
	.classify	= mall_classify,
	.init		= mall_init,
	.destroy	= mall_destroy,
	.get		= mall_get,
	.change		= mall_change,
	.delete		= mall_delete,
	.walk		= mall_walk,
	.reoffload	= mall_reoffload,
	.dump		= mall_dump,
	.bind_class	= mall_bind_class,
	.owner		= THIS_MODULE,
};

static int __init cls_mall_init(void)
{
	return register_tcf_proto_ops(&cls_mall_ops);
}

static void __exit cls_mall_exit(void)
{
	unregister_tcf_proto_ops(&cls_mall_ops);
}

module_init(cls_mall_init);
module_exit(cls_mall_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Match-all classifier");
MODULE_LICENSE("GPL v2");
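
And a distilled, self-contained sketch of the per-CPU counter pattern that v5.9 introduces with tc_matchall_pcnt. The pkt_counter type and counter_* helpers are hypothetical; alloc_percpu(), __this_cpu_inc(), per_cpu_ptr() and for_each_possible_cpu() are the same kernel primitives the listing uses:

#include <linux/percpu.h>
#include <linux/types.h>

struct pkt_counter {
	u64 hits;
};

static struct pkt_counter __percpu *pc;

static int counter_init(void)
{
	/* one zeroed slot per possible CPU */
	pc = alloc_percpu(struct pkt_counter);
	return pc ? 0 : -ENOMEM;
}

/* fast path, e.g. once per packet: no lock, no atomic, no cache bouncing */
static void counter_bump(void)
{
	__this_cpu_inc(pc->hits);
}

/* slow path, e.g. at dump time: aggregate across all possible CPUs */
static u64 counter_read(void)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(pc, cpu)->hits;
	return sum;
}

The trade-off is the same one mall_classify()/mall_dump() make: increments stay cheap and contention-free on the packet path, while readers pay the cost of summing every CPU's slot.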