/*
 * Scraped page header (site navigation) removed.
 * The first copy below is net/sched/cls_basic.c as of Linux v5.4.
 */
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * net/sched/cls_basic.c	Basic Packet Classifier.
  4 *
  5 * Authors:	Thomas Graf <tgraf@suug.ch>
  6 */
  7
  8#include <linux/module.h>
  9#include <linux/slab.h>
 10#include <linux/types.h>
 11#include <linux/kernel.h>
 12#include <linux/string.h>
 13#include <linux/errno.h>
 14#include <linux/rtnetlink.h>
 15#include <linux/skbuff.h>
 16#include <linux/idr.h>
 17#include <linux/percpu.h>
 18#include <net/netlink.h>
 19#include <net/act_api.h>
 20#include <net/pkt_cls.h>
 
 21
/* Per-tcf_proto state: the filter list plus an IDR that allocates and
 * tracks filter handles.  Freed via kfree_rcu() using @rcu.
 */
struct basic_head {
	struct list_head	flist;		/* all filters; RCU-protected list */
	struct idr		handle_idr;	/* handle -> struct basic_filter */
	struct rcu_head		rcu;		/* deferred free of this head */
};
 27
/* One "basic" classifier instance: matches purely on an ematch tree. */
struct basic_filter {
	u32			handle;		/* user-visible filter id */
	struct tcf_exts		exts;		/* attached actions */
	struct tcf_ematch_tree	ematches;	/* match tree run per packet */
	struct tcf_result	res;		/* classification result (classid) */
	struct tcf_proto	*tp;		/* back-pointer to owning proto */
	struct list_head	link;		/* node in basic_head::flist */
	struct tc_basic_pcnt __percpu *pf;	/* per-cpu seen/hit counters */
	struct rcu_work		rwork;		/* deferred (RCU) destruction */
};
 38
/* Packet fast path: walk all filters under RCU-bh.  Every filter tried
 * bumps its per-cpu rcnt; a filter whose ematch tree matches bumps rhit
 * and has its actions executed.  Returns the action verdict, or -1 if
 * no filter matched.
 */
static int basic_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			  struct tcf_result *res)
{
	int r;
	struct basic_head *head = rcu_dereference_bh(tp->root);
	struct basic_filter *f;

	list_for_each_entry_rcu(f, &head->flist, link) {
		__this_cpu_inc(f->pf->rcnt);
		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
			continue;
		__this_cpu_inc(f->pf->rhit);
		*res = f->res;
		r = tcf_exts_exec(skb, &f->exts, res);
		if (r < 0)	/* negative: actions want the walk to continue */
			continue;
		return r;
	}
	return -1;
}
 59
 60static void *basic_get(struct tcf_proto *tp, u32 handle)
 61{
 62	struct basic_head *head = rtnl_dereference(tp->root);
 63	struct basic_filter *f;
 64
 65	list_for_each_entry(f, &head->flist, link) {
 66		if (f->handle == handle) {
 67			return f;
 68		}
 69	}
 70
 71	return NULL;
 72}
 73
 74static int basic_init(struct tcf_proto *tp)
 75{
 76	struct basic_head *head;
 77
 78	head = kzalloc(sizeof(*head), GFP_KERNEL);
 79	if (head == NULL)
 80		return -ENOBUFS;
 81	INIT_LIST_HEAD(&head->flist);
 82	idr_init(&head->handle_idr);
 83	rcu_assign_pointer(tp->root, head);
 84	return 0;
 85}
 86
/* Final teardown of a filter: release actions, ematch tree, the netns
 * reference, per-cpu counters, then the filter itself.  Called with
 * RTNL held (directly, or via basic_delete_filter_work).
 */
static void __basic_delete_filter(struct basic_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_em_tree_destroy(&f->ematches);
	tcf_exts_put_net(&f->exts);
	free_percpu(f->pf);
	kfree(f);
}
 95
/* Deferred destruction: runs from the workqueue after an RCU grace
 * period, taking RTNL before tearing the filter down.
 */
static void basic_delete_filter_work(struct work_struct *work)
{
	struct basic_filter *f = container_of(to_rcu_work(work),
					      struct basic_filter,
					      rwork);
	rtnl_lock();
	__basic_delete_filter(f);
	rtnl_unlock();
}
105
/* Destroy the whole classifier.  Each filter is unlinked (RCU) and
 * unbound first; filters still holding a netns reference are freed via
 * the deferred work path, others immediately.  The head itself is
 * freed after a grace period.
 */
static void basic_destroy(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	struct basic_head *head = rtnl_dereference(tp->root);
	struct basic_filter *f, *n;

	list_for_each_entry_safe(f, n, &head->flist, link) {
		list_del_rcu(&f->link);
		tcf_unbind_filter(tp, &f->res);
		idr_remove(&head->handle_idr, f->handle);
		if (tcf_exts_get_net(&f->exts))
			tcf_queue_work(&f->rwork, basic_delete_filter_work);
		else
			__basic_delete_filter(f);
	}
	idr_destroy(&head->handle_idr);
	kfree_rcu(head, rcu);
}
124
/* Delete a single filter (@arg).  It is unlinked and unbound at once
 * and freed after an RCU grace period; *last reports whether the list
 * is now empty so the caller can retire the whole tcf_proto.
 */
static int basic_delete(struct tcf_proto *tp, void *arg, bool *last,
			bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct basic_head *head = rtnl_dereference(tp->root);
	struct basic_filter *f = arg;

	list_del_rcu(&f->link);
	tcf_unbind_filter(tp, &f->res);
	idr_remove(&head->handle_idr, f->handle);
	tcf_exts_get_net(&f->exts);
	tcf_queue_work(&f->rwork, basic_delete_filter_work);
	*last = list_empty(&head->flist);
	return 0;
}
139
/* Netlink attribute policy for the TCA_BASIC_* options nest. */
static const struct nla_policy basic_policy[TCA_BASIC_MAX + 1] = {
	[TCA_BASIC_CLASSID]	= { .type = NLA_U32 },
	[TCA_BASIC_EMATCHES]	= { .type = NLA_NESTED },
};
144
/* Apply parsed netlink attributes to filter @f: validate/attach the
 * actions, build the ematch tree, and bind the classid if supplied.
 * Returns 0 or a negative errno.
 */
static int basic_set_parms(struct net *net, struct tcf_proto *tp,
			   struct basic_filter *f, unsigned long base,
			   struct nlattr **tb,
			   struct nlattr *est, bool ovr,
			   struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, true, extack);
	if (err < 0)
		return err;

	err = tcf_em_tree_validate(tp, tb[TCA_BASIC_EMATCHES], &f->ematches);
	if (err < 0)
		return err;

	if (tb[TCA_BASIC_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_BASIC_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	f->tp = tp;
	return 0;
}
169
170static int basic_change(struct net *net, struct sk_buff *in_skb,
171			struct tcf_proto *tp, unsigned long base, u32 handle,
172			struct nlattr **tca, void **arg, bool ovr,
173			bool rtnl_held, struct netlink_ext_ack *extack)
174{
175	int err;
176	struct basic_head *head = rtnl_dereference(tp->root);
177	struct nlattr *tb[TCA_BASIC_MAX + 1];
178	struct basic_filter *fold = (struct basic_filter *) *arg;
179	struct basic_filter *fnew;
180
181	if (tca[TCA_OPTIONS] == NULL)
182		return -EINVAL;
183
184	err = nla_parse_nested_deprecated(tb, TCA_BASIC_MAX, tca[TCA_OPTIONS],
185					  basic_policy, NULL);
186	if (err < 0)
187		return err;
188
189	if (fold != NULL) {
190		if (handle && fold->handle != handle)
191			return -EINVAL;
192	}
193
194	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
195	if (!fnew)
196		return -ENOBUFS;
197
198	err = tcf_exts_init(&fnew->exts, net, TCA_BASIC_ACT, TCA_BASIC_POLICE);
199	if (err < 0)
200		goto errout;
201
202	if (!handle) {
203		handle = 1;
204		err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
205				    INT_MAX, GFP_KERNEL);
206	} else if (!fold) {
207		err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
208				    handle, GFP_KERNEL);
209	}
210	if (err)
211		goto errout;
212	fnew->handle = handle;
213	fnew->pf = alloc_percpu(struct tc_basic_pcnt);
214	if (!fnew->pf) {
215		err = -ENOMEM;
216		goto errout;
217	}
218
219	err = basic_set_parms(net, tp, fnew, base, tb, tca[TCA_RATE], ovr,
220			      extack);
221	if (err < 0) {
222		if (!fold)
223			idr_remove(&head->handle_idr, fnew->handle);
224		goto errout;
225	}
226
227	*arg = fnew;
228
229	if (fold) {
230		idr_replace(&head->handle_idr, fnew, fnew->handle);
231		list_replace_rcu(&fold->link, &fnew->link);
232		tcf_unbind_filter(tp, &fold->res);
233		tcf_exts_get_net(&fold->exts);
234		tcf_queue_work(&fold->rwork, basic_delete_filter_work);
235	} else {
236		list_add_rcu(&fnew->link, &head->flist);
237	}
238
239	return 0;
240errout:
241	free_percpu(fnew->pf);
242	tcf_exts_destroy(&fnew->exts);
243	kfree(fnew);
244	return err;
245}
246
247static void basic_walk(struct tcf_proto *tp, struct tcf_walker *arg,
248		       bool rtnl_held)
249{
250	struct basic_head *head = rtnl_dereference(tp->root);
251	struct basic_filter *f;
252
253	list_for_each_entry(f, &head->flist, link) {
254		if (arg->count < arg->skip)
255			goto skip;
256
257		if (arg->fn(tp, f, arg) < 0) {
258			arg->stop = 1;
259			break;
260		}
261skip:
262		arg->count++;
263	}
264}
265
266static void basic_bind_class(void *fh, u32 classid, unsigned long cl)
 
267{
268	struct basic_filter *f = fh;
269
270	if (f && f->res.classid == classid)
271		f->res.class = cl;
272}
273
/* Dump one filter (@fh) into a netlink message: classid, the per-cpu
 * counters summed over all possible CPUs, actions and ematches.
 * Returns skb->len on success, -1 on overflow (the partially written
 * nest is cancelled).
 */
static int basic_dump(struct net *net, struct tcf_proto *tp, void *fh,
		      struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tc_basic_pcnt gpf = {};
	struct basic_filter *f = fh;
	struct nlattr *nest;
	int cpu;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_BASIC_CLASSID, f->res.classid))
		goto nla_put_failure;

	/* Fold the per-cpu seen/hit counters into one global snapshot. */
	for_each_possible_cpu(cpu) {
		struct tc_basic_pcnt *pf = per_cpu_ptr(f->pf, cpu);

		gpf.rcnt += pf->rcnt;
		gpf.rhit += pf->rhit;
	}

	if (nla_put_64bit(skb, TCA_BASIC_PCNT,
			  sizeof(struct tc_basic_pcnt),
			  &gpf, TCA_BASIC_PAD))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0 ||
	    tcf_em_tree_dump(skb, &f->ematches, TCA_BASIC_EMATCHES) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	/* Action statistics live outside the options nest. */
	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
322
/* tcf_proto_ops vtable wiring the callbacks above into the TC core. */
static struct tcf_proto_ops cls_basic_ops __read_mostly = {
	.kind		=	"basic",
	.classify	=	basic_classify,
	.init		=	basic_init,
	.destroy	=	basic_destroy,
	.get		=	basic_get,
	.change		=	basic_change,
	.delete		=	basic_delete,
	.walk		=	basic_walk,
	.dump		=	basic_dump,
	.bind_class	=	basic_bind_class,
	.owner		=	THIS_MODULE,
};
336
/* Module entry: register the "basic" classifier with the TC core. */
static int __init init_basic(void)
{
	return register_tcf_proto_ops(&cls_basic_ops);
}
341
/* Module exit: unregister the "basic" classifier. */
static void __exit exit_basic(void)
{
	unregister_tcf_proto_ops(&cls_basic_ops);
}
346
347module_init(init_basic)
348module_exit(exit_basic)
 
349MODULE_LICENSE("GPL");
/* The second copy below is net/sched/cls_basic.c as of Linux v6.8. */
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * net/sched/cls_basic.c	Basic Packet Classifier.
  4 *
  5 * Authors:	Thomas Graf <tgraf@suug.ch>
  6 */
  7
  8#include <linux/module.h>
  9#include <linux/slab.h>
 10#include <linux/types.h>
 11#include <linux/kernel.h>
 12#include <linux/string.h>
 13#include <linux/errno.h>
 14#include <linux/rtnetlink.h>
 15#include <linux/skbuff.h>
 16#include <linux/idr.h>
 17#include <linux/percpu.h>
 18#include <net/netlink.h>
 19#include <net/act_api.h>
 20#include <net/pkt_cls.h>
 21#include <net/tc_wrapper.h>
 22
/* Per-tcf_proto state: the filter list plus an IDR that allocates and
 * tracks filter handles.  Freed via kfree_rcu() using @rcu.
 */
struct basic_head {
	struct list_head	flist;		/* all filters; RCU-protected list */
	struct idr		handle_idr;	/* handle -> struct basic_filter */
	struct rcu_head		rcu;		/* deferred free of this head */
};
 28
/* One "basic" classifier instance: matches purely on an ematch tree. */
struct basic_filter {
	u32			handle;		/* user-visible filter id */
	struct tcf_exts		exts;		/* attached actions */
	struct tcf_ematch_tree	ematches;	/* match tree run per packet */
	struct tcf_result	res;		/* classification result (classid) */
	struct tcf_proto	*tp;		/* back-pointer to owning proto */
	struct list_head	link;		/* node in basic_head::flist */
	struct tc_basic_pcnt __percpu *pf;	/* per-cpu seen/hit counters */
	struct rcu_work		rwork;		/* deferred (RCU) destruction */
};
 39
/* Packet fast path: walk all filters under RCU-bh.  Every filter tried
 * bumps its per-cpu rcnt; a filter whose ematch tree matches bumps rhit
 * and has its actions executed.  Returns the action verdict, or -1 if
 * no filter matched.  TC_INDIRECT_SCOPE exposes the symbol for the
 * indirect-call wrapper machinery.
 */
TC_INDIRECT_SCOPE int basic_classify(struct sk_buff *skb,
				     const struct tcf_proto *tp,
				     struct tcf_result *res)
{
	int r;
	struct basic_head *head = rcu_dereference_bh(tp->root);
	struct basic_filter *f;

	list_for_each_entry_rcu(f, &head->flist, link) {
		__this_cpu_inc(f->pf->rcnt);
		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
			continue;
		__this_cpu_inc(f->pf->rhit);
		*res = f->res;
		r = tcf_exts_exec(skb, &f->exts, res);
		if (r < 0)	/* negative: actions want the walk to continue */
			continue;
		return r;
	}
	return -1;
}
 61
 62static void *basic_get(struct tcf_proto *tp, u32 handle)
 63{
 64	struct basic_head *head = rtnl_dereference(tp->root);
 65	struct basic_filter *f;
 66
 67	list_for_each_entry(f, &head->flist, link) {
 68		if (f->handle == handle) {
 69			return f;
 70		}
 71	}
 72
 73	return NULL;
 74}
 75
 76static int basic_init(struct tcf_proto *tp)
 77{
 78	struct basic_head *head;
 79
 80	head = kzalloc(sizeof(*head), GFP_KERNEL);
 81	if (head == NULL)
 82		return -ENOBUFS;
 83	INIT_LIST_HEAD(&head->flist);
 84	idr_init(&head->handle_idr);
 85	rcu_assign_pointer(tp->root, head);
 86	return 0;
 87}
 88
/* Final teardown of a filter: release actions, ematch tree, the netns
 * reference, per-cpu counters, then the filter itself.  Called with
 * RTNL held (directly, or via basic_delete_filter_work).
 */
static void __basic_delete_filter(struct basic_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_em_tree_destroy(&f->ematches);
	tcf_exts_put_net(&f->exts);
	free_percpu(f->pf);
	kfree(f);
}
 97
/* Deferred destruction: runs from the workqueue after an RCU grace
 * period, taking RTNL before tearing the filter down.
 */
static void basic_delete_filter_work(struct work_struct *work)
{
	struct basic_filter *f = container_of(to_rcu_work(work),
					      struct basic_filter,
					      rwork);
	rtnl_lock();
	__basic_delete_filter(f);
	rtnl_unlock();
}
107
/* Destroy the whole classifier.  Each filter is unlinked (RCU) and
 * unbound first; filters still holding a netns reference are freed via
 * the deferred work path, others immediately.  The head itself is
 * freed after a grace period.
 */
static void basic_destroy(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	struct basic_head *head = rtnl_dereference(tp->root);
	struct basic_filter *f, *n;

	list_for_each_entry_safe(f, n, &head->flist, link) {
		list_del_rcu(&f->link);
		tcf_unbind_filter(tp, &f->res);
		idr_remove(&head->handle_idr, f->handle);
		if (tcf_exts_get_net(&f->exts))
			tcf_queue_work(&f->rwork, basic_delete_filter_work);
		else
			__basic_delete_filter(f);
	}
	idr_destroy(&head->handle_idr);
	kfree_rcu(head, rcu);
}
126
/* Delete a single filter (@arg).  It is unlinked and unbound at once
 * and freed after an RCU grace period; *last reports whether the list
 * is now empty so the caller can retire the whole tcf_proto.
 */
static int basic_delete(struct tcf_proto *tp, void *arg, bool *last,
			bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct basic_head *head = rtnl_dereference(tp->root);
	struct basic_filter *f = arg;

	list_del_rcu(&f->link);
	tcf_unbind_filter(tp, &f->res);
	idr_remove(&head->handle_idr, f->handle);
	tcf_exts_get_net(&f->exts);
	tcf_queue_work(&f->rwork, basic_delete_filter_work);
	*last = list_empty(&head->flist);
	return 0;
}
141
/* Netlink attribute policy for the TCA_BASIC_* options nest. */
static const struct nla_policy basic_policy[TCA_BASIC_MAX + 1] = {
	[TCA_BASIC_CLASSID]	= { .type = NLA_U32 },
	[TCA_BASIC_EMATCHES]	= { .type = NLA_NESTED },
};
146
/* Apply parsed netlink attributes to filter @f: validate/attach the
 * actions, build the ematch tree, and bind the classid if supplied.
 * Returns 0 or a negative errno.
 */
static int basic_set_parms(struct net *net, struct tcf_proto *tp,
			   struct basic_filter *f, unsigned long base,
			   struct nlattr **tb,
			   struct nlattr *est, u32 flags,
			   struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags, extack);
	if (err < 0)
		return err;

	err = tcf_em_tree_validate(tp, tb[TCA_BASIC_EMATCHES], &f->ematches);
	if (err < 0)
		return err;

	if (tb[TCA_BASIC_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_BASIC_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	f->tp = tp;
	return 0;
}
171
172static int basic_change(struct net *net, struct sk_buff *in_skb,
173			struct tcf_proto *tp, unsigned long base, u32 handle,
174			struct nlattr **tca, void **arg,
175			u32 flags, struct netlink_ext_ack *extack)
176{
177	int err;
178	struct basic_head *head = rtnl_dereference(tp->root);
179	struct nlattr *tb[TCA_BASIC_MAX + 1];
180	struct basic_filter *fold = (struct basic_filter *) *arg;
181	struct basic_filter *fnew;
182
183	if (tca[TCA_OPTIONS] == NULL)
184		return -EINVAL;
185
186	err = nla_parse_nested_deprecated(tb, TCA_BASIC_MAX, tca[TCA_OPTIONS],
187					  basic_policy, NULL);
188	if (err < 0)
189		return err;
190
191	if (fold != NULL) {
192		if (handle && fold->handle != handle)
193			return -EINVAL;
194	}
195
196	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
197	if (!fnew)
198		return -ENOBUFS;
199
200	err = tcf_exts_init(&fnew->exts, net, TCA_BASIC_ACT, TCA_BASIC_POLICE);
201	if (err < 0)
202		goto errout;
203
204	if (!handle) {
205		handle = 1;
206		err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
207				    INT_MAX, GFP_KERNEL);
208	} else if (!fold) {
209		err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
210				    handle, GFP_KERNEL);
211	}
212	if (err)
213		goto errout;
214	fnew->handle = handle;
215	fnew->pf = alloc_percpu(struct tc_basic_pcnt);
216	if (!fnew->pf) {
217		err = -ENOMEM;
218		goto errout;
219	}
220
221	err = basic_set_parms(net, tp, fnew, base, tb, tca[TCA_RATE], flags,
222			      extack);
223	if (err < 0) {
224		if (!fold)
225			idr_remove(&head->handle_idr, fnew->handle);
226		goto errout;
227	}
228
229	*arg = fnew;
230
231	if (fold) {
232		idr_replace(&head->handle_idr, fnew, fnew->handle);
233		list_replace_rcu(&fold->link, &fnew->link);
234		tcf_unbind_filter(tp, &fold->res);
235		tcf_exts_get_net(&fold->exts);
236		tcf_queue_work(&fold->rwork, basic_delete_filter_work);
237	} else {
238		list_add_rcu(&fnew->link, &head->flist);
239	}
240
241	return 0;
242errout:
243	free_percpu(fnew->pf);
244	tcf_exts_destroy(&fnew->exts);
245	kfree(fnew);
246	return err;
247}
248
249static void basic_walk(struct tcf_proto *tp, struct tcf_walker *arg,
250		       bool rtnl_held)
251{
252	struct basic_head *head = rtnl_dereference(tp->root);
253	struct basic_filter *f;
254
255	list_for_each_entry(f, &head->flist, link) {
256		if (!tc_cls_stats_dump(tp, arg, f))
 
 
 
 
257			break;
 
 
 
258	}
259}
260
/* Re-bind the cached class pointer when a class is replaced/removed;
 * the match-and-update logic lives in the tc_cls_bind_class() helper.
 */
static void basic_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			     unsigned long base)
{
	struct basic_filter *f = fh;

	tc_cls_bind_class(classid, cl, q, &f->res, base);
}
268
/* Dump one filter (@fh) into a netlink message: classid, the per-cpu
 * counters summed over all possible CPUs, actions and ematches.
 * Returns skb->len on success, -1 on overflow (the partially written
 * nest is cancelled).
 */
static int basic_dump(struct net *net, struct tcf_proto *tp, void *fh,
		      struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tc_basic_pcnt gpf = {};
	struct basic_filter *f = fh;
	struct nlattr *nest;
	int cpu;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_BASIC_CLASSID, f->res.classid))
		goto nla_put_failure;

	/* Fold the per-cpu seen/hit counters into one global snapshot. */
	for_each_possible_cpu(cpu) {
		struct tc_basic_pcnt *pf = per_cpu_ptr(f->pf, cpu);

		gpf.rcnt += pf->rcnt;
		gpf.rhit += pf->rhit;
	}

	if (nla_put_64bit(skb, TCA_BASIC_PCNT,
			  sizeof(struct tc_basic_pcnt),
			  &gpf, TCA_BASIC_PAD))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0 ||
	    tcf_em_tree_dump(skb, &f->ematches, TCA_BASIC_EMATCHES) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	/* Action statistics live outside the options nest. */
	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
317
/* tcf_proto_ops vtable wiring the callbacks above into the TC core. */
static struct tcf_proto_ops cls_basic_ops __read_mostly = {
	.kind		=	"basic",
	.classify	=	basic_classify,
	.init		=	basic_init,
	.destroy	=	basic_destroy,
	.get		=	basic_get,
	.change		=	basic_change,
	.delete		=	basic_delete,
	.walk		=	basic_walk,
	.dump		=	basic_dump,
	.bind_class	=	basic_bind_class,
	.owner		=	THIS_MODULE,
};
331
/* Module entry: register the "basic" classifier with the TC core. */
static int __init init_basic(void)
{
	return register_tcf_proto_ops(&cls_basic_ops);
}
336
/* Module exit: unregister the "basic" classifier. */
static void __exit exit_basic(void)
{
	unregister_tcf_proto_ops(&cls_basic_ops);
}
341
/* Module registration boilerplate. */
module_init(init_basic)
module_exit(exit_basic)
MODULE_DESCRIPTION("TC basic classifier");
MODULE_LICENSE("GPL");