net/sched/cls_basic.c (Linux v5.9)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_basic.c	Basic Packet Classifier.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/idr.h>
#include <linux/percpu.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

struct basic_head {
	struct list_head	flist;
	struct idr		handle_idr;
	struct rcu_head		rcu;
};

struct basic_filter {
	u32			handle;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
	struct tcf_result	res;
	struct tcf_proto	*tp;
	struct list_head	link;
	struct tc_basic_pcnt __percpu *pf;
	struct rcu_work		rwork;
};

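/*
 * Datapath: walk the RCU-protected filter list.  Every filter examined
 * bumps its per-CPU "seen" counter (rcnt); if the ematch tree matches,
 * the per-CPU hit counter (rhit) is bumped, the classification result is
 * copied out and the attached actions run.  A negative return from
 * tcf_exts_exec() means "keep looking at the next filter".
 */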
static int basic_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			  struct tcf_result *res)
{
	int r;
	struct basic_head *head = rcu_dereference_bh(tp->root);
	struct basic_filter *f;

	list_for_each_entry_rcu(f, &head->flist, link) {
		__this_cpu_inc(f->pf->rcnt);
		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
			continue;
		__this_cpu_inc(f->pf->rhit);
		*res = f->res;
		r = tcf_exts_exec(skb, &f->exts, res);
		if (r < 0)
			continue;
		return r;
	}
	return -1;
}

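/*
 * Control-path helpers.  basic_get() is a linear lookup by handle under
 * RTNL; basic_init() allocates the per-tp head, initialises the filter
 * list and the handle IDR, and publishes the head via RCU.
 */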
static void *basic_get(struct tcf_proto *tp, u32 handle)
{
	struct basic_head *head = rtnl_dereference(tp->root);
	struct basic_filter *f;

	list_for_each_entry(f, &head->flist, link) {
		if (f->handle == handle) {
			return f;
		}
	}

	return NULL;
}

static int basic_init(struct tcf_proto *tp)
{
	struct basic_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;
	INIT_LIST_HEAD(&head->flist);
	idr_init(&head->handle_idr);
	rcu_assign_pointer(tp->root, head);
	return 0;
}

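/*
 * Filter teardown.  __basic_delete_filter() releases the extensions, the
 * ematch tree and the per-CPU counters.  When a filter is unlinked while
 * RCU readers may still hold references, the free is deferred through
 * tcf_queue_work() and runs in basic_delete_filter_work() under RTNL
 * after an RCU grace period.
 */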
static void __basic_delete_filter(struct basic_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_em_tree_destroy(&f->ematches);
	tcf_exts_put_net(&f->exts);
	free_percpu(f->pf);
	kfree(f);
}

static void basic_delete_filter_work(struct work_struct *work)
{
	struct basic_filter *f = container_of(to_rcu_work(work),
					      struct basic_filter,
					      rwork);
	rtnl_lock();
	__basic_delete_filter(f);
	rtnl_unlock();
}

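/*
 * basic_destroy() tears down the whole classifier instance: each filter
 * is unlinked, unbound from its class and removed from the handle IDR,
 * then freed (deferred when a netns reference could be taken).
 * basic_delete() removes a single filter and reports via *last whether
 * the list is now empty so the caller can destroy the tcf_proto.
 */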
static void basic_destroy(struct tcf_proto *tp, bool rtnl_held,
			  struct netlink_ext_ack *extack)
{
	struct basic_head *head = rtnl_dereference(tp->root);
	struct basic_filter *f, *n;

	list_for_each_entry_safe(f, n, &head->flist, link) {
		list_del_rcu(&f->link);
		tcf_unbind_filter(tp, &f->res);
		idr_remove(&head->handle_idr, f->handle);
		if (tcf_exts_get_net(&f->exts))
			tcf_queue_work(&f->rwork, basic_delete_filter_work);
		else
			__basic_delete_filter(f);
	}
	idr_destroy(&head->handle_idr);
	kfree_rcu(head, rcu);
}

static int basic_delete(struct tcf_proto *tp, void *arg, bool *last,
			bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct basic_head *head = rtnl_dereference(tp->root);
	struct basic_filter *f = arg;

	list_del_rcu(&f->link);
	tcf_unbind_filter(tp, &f->res);
	idr_remove(&head->handle_idr, f->handle);
	tcf_exts_get_net(&f->exts);
	tcf_queue_work(&f->rwork, basic_delete_filter_work);
	*last = list_empty(&head->flist);
	return 0;
}

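/*
 * Netlink attribute policy and common configuration.  basic_set_parms()
 * validates the actions via tcf_exts, builds the ematch tree from
 * TCA_BASIC_EMATCHES and, if TCA_BASIC_CLASSID is present, binds the
 * filter to that class.
 */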
static const struct nla_policy basic_policy[TCA_BASIC_MAX + 1] = {
	[TCA_BASIC_CLASSID]	= { .type = NLA_U32 },
	[TCA_BASIC_EMATCHES]	= { .type = NLA_NESTED },
};

static int basic_set_parms(struct net *net, struct tcf_proto *tp,
			   struct basic_filter *f, unsigned long base,
			   struct nlattr **tb,
			   struct nlattr *est, bool ovr,
			   struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, true, extack);
	if (err < 0)
		return err;

	err = tcf_em_tree_validate(tp, tb[TCA_BASIC_EMATCHES], &f->ematches);
	if (err < 0)
		return err;

	if (tb[TCA_BASIC_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_BASIC_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	f->tp = tp;
	return 0;
}

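/*
 * Create or replace a filter.  A new filter is always allocated; its
 * handle is either taken from the request or auto-allocated from the IDR
 * (starting at 1).  On success the new filter either replaces the old one
 * in both the IDR and the RCU list (the old one is freed after a grace
 * period) or is linked into the list.
 */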
static int basic_change(struct net *net, struct sk_buff *in_skb,
			struct tcf_proto *tp, unsigned long base, u32 handle,
			struct nlattr **tca, void **arg, bool ovr,
			bool rtnl_held, struct netlink_ext_ack *extack)
{
	int err;
	struct basic_head *head = rtnl_dereference(tp->root);
	struct nlattr *tb[TCA_BASIC_MAX + 1];
	struct basic_filter *fold = (struct basic_filter *) *arg;
	struct basic_filter *fnew;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_BASIC_MAX, tca[TCA_OPTIONS],
					  basic_policy, NULL);
	if (err < 0)
		return err;

	if (fold != NULL) {
		if (handle && fold->handle != handle)
			return -EINVAL;
	}

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	err = tcf_exts_init(&fnew->exts, net, TCA_BASIC_ACT, TCA_BASIC_POLICE);
	if (err < 0)
		goto errout;

	if (!handle) {
		handle = 1;
		err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
				    INT_MAX, GFP_KERNEL);
	} else if (!fold) {
		err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
				    handle, GFP_KERNEL);
	}
	if (err)
		goto errout;
	fnew->handle = handle;
	fnew->pf = alloc_percpu(struct tc_basic_pcnt);
	if (!fnew->pf) {
		err = -ENOMEM;
		goto errout;
	}

	err = basic_set_parms(net, tp, fnew, base, tb, tca[TCA_RATE], ovr,
			      extack);
	if (err < 0) {
		if (!fold)
			idr_remove(&head->handle_idr, fnew->handle);
		goto errout;
	}

	*arg = fnew;

	if (fold) {
		idr_replace(&head->handle_idr, fnew, fnew->handle);
		list_replace_rcu(&fold->link, &fnew->link);
		tcf_unbind_filter(tp, &fold->res);
		tcf_exts_get_net(&fold->exts);
		tcf_queue_work(&fold->rwork, basic_delete_filter_work);
	} else {
		list_add_rcu(&fnew->link, &head->flist);
	}

	return 0;
errout:
	free_percpu(fnew->pf);
	tcf_exts_destroy(&fnew->exts);
	kfree(fnew);
	return err;
}

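/*
 * Iterate over all filters for dumps, honouring the walker's skip/count
 * bookkeeping and stopping when the callback asks to.
 */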
static void basic_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		       bool rtnl_held)
{
	struct basic_head *head = rtnl_dereference(tp->root);
	struct basic_filter *f;

	list_for_each_entry(f, &head->flist, link) {
		if (arg->count < arg->skip)
			goto skip;

		if (arg->fn(tp, f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

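/*
 * Re-bind (or unbind) a filter's cached class reference when the qdisc
 * class it points to changes; cl == 0 means the class went away.
 */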
static void basic_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			     unsigned long base)
{
	struct basic_filter *f = fh;

	if (f && f->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &f->res, base);
		else
			__tcf_unbind_filter(q, &f->res);
	}
}

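/*
 * Dump one filter to netlink: the classid, the per-CPU seen/hit counters
 * summed over all possible CPUs (TCA_BASIC_PCNT), the ematch tree and the
 * attached actions, followed by action statistics outside the options nest.
 */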
static int basic_dump(struct net *net, struct tcf_proto *tp, void *fh,
		      struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tc_basic_pcnt gpf = {};
	struct basic_filter *f = fh;
	struct nlattr *nest;
	int cpu;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_BASIC_CLASSID, f->res.classid))
		goto nla_put_failure;

	for_each_possible_cpu(cpu) {
		struct tc_basic_pcnt *pf = per_cpu_ptr(f->pf, cpu);

		gpf.rcnt += pf->rcnt;
		gpf.rhit += pf->rhit;
	}

	if (nla_put_64bit(skb, TCA_BASIC_PCNT,
			  sizeof(struct tc_basic_pcnt),
			  &gpf, TCA_BASIC_PAD))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0 ||
	    tcf_em_tree_dump(skb, &f->ematches, TCA_BASIC_EMATCHES) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

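/*
 * Classifier registration.  The "basic" classifier matches purely on its
 * ematch tree (an empty tree matches every packet), so a minimal use from
 * userspace looks roughly like the following (illustration only; device
 * and class ids are placeholders):
 *
 *   tc filter add dev eth0 parent 1: protocol ip basic classid 1:10
 */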
static struct tcf_proto_ops cls_basic_ops __read_mostly = {
	.kind		=	"basic",
	.classify	=	basic_classify,
	.init		=	basic_init,
	.destroy	=	basic_destroy,
	.get		=	basic_get,
	.change		=	basic_change,
	.delete		=	basic_delete,
	.walk		=	basic_walk,
	.dump		=	basic_dump,
	.bind_class	=	basic_bind_class,
	.owner		=	THIS_MODULE,
};

static int __init init_basic(void)
{
	return register_tcf_proto_ops(&cls_basic_ops);
}

static void __exit exit_basic(void)
{
	unregister_tcf_proto_ops(&cls_basic_ops);
}

module_init(init_basic)
module_exit(exit_basic)
MODULE_LICENSE("GPL");
net/sched/cls_basic.c (Linux v5.4)
(The v5.4 version of this file is identical to the v5.9 listing above except for basic_bind_class(), shown below.)

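/*
 * In v5.4 the bind_class callback only updates the cached class pointer
 * directly; the q/base arguments and the __tcf_bind_filter() /
 * __tcf_unbind_filter() helpers used in the v5.9 version above were
 * introduced later.
 */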
static void basic_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct basic_filter *f = fh;

	if (f && f->res.classid == classid)
		f->res.class = cl;
}