v3.1
/*
 * net/sched/cls_cgroup.c	Control Group Classifier
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/cgroup.h>
#include <linux/rcupdate.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/cls_cgroup.h>

static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
					       struct cgroup *cgrp);
static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp);
static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp);

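/* The net_cls cgroup subsystem defined below exposes a single control file,
 * net_cls.classid, whose value is what this classifier matches on. */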
struct cgroup_subsys net_cls_subsys = {
	.name		= "net_cls",
	.create		= cgrp_create,
	.destroy	= cgrp_destroy,
	.populate	= cgrp_populate,
#ifdef CONFIG_NET_CLS_CGROUP
	.subsys_id	= net_cls_subsys_id,
#endif
	.module		= THIS_MODULE,
};


static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp)
{
	return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id),
			    struct cgroup_cls_state, css);
}

static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
{
	return container_of(task_subsys_state(p, net_cls_subsys_id),
			    struct cgroup_cls_state, css);
}

static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
						 struct cgroup *cgrp)
{
	struct cgroup_cls_state *cs;

	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return ERR_PTR(-ENOMEM);

	if (cgrp->parent)
		cs->classid = cgrp_cls_state(cgrp->parent)->classid;

	return &cs->css;
}

static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	kfree(cgrp_cls_state(cgrp));
}

static u64 read_classid(struct cgroup *cgrp, struct cftype *cft)
{
	return cgrp_cls_state(cgrp)->classid;
}

static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
{
	cgrp_cls_state(cgrp)->classid = (u32) value;
	return 0;
}

static struct cftype ss_files[] = {
	{
		.name = "classid",
		.read_u64 = read_classid,
		.write_u64 = write_classid,
	},
};

static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
}

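/* Per-tcf_proto state: cls_cgroup keeps a single head per filter chain,
 * holding the handle plus the configured extensions and ematches. */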
struct cls_cgroup_head {
	u32			handle;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
};

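/* Classify by the classid of the current task's net_cls cgroup; when called
 * from softirq context, fall back to the classid cached on the socket
 * (skb->sk->sk_classid) instead of touching `current'. */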
static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res)
{
	struct cls_cgroup_head *head = tp->root;
	u32 classid;

	rcu_read_lock();
	classid = task_cls_state(current)->classid;
	rcu_read_unlock();

	/*
	 * Due to the nature of the classifier it is required to ignore all
	 * packets originating from softirq context as accessing `current'
	 * would lead to false results.
	 *
	 * This test assumes that all callers of dev_queue_xmit() explicitly
	 * disable bh. Knowing this, it is possible to detect softirq based
	 * calls by looking at the number of nested bh disable calls because
	 * softirqs always disable bh.
	 */
	if (in_serving_softirq()) {
		/* If there is an sk_classid we'll use that. */
		if (!skb->sk)
			return -1;
		classid = skb->sk->sk_classid;
	}

	if (!classid)
		return -1;

	if (!tcf_em_tree_match(skb, &head->ematches, NULL))
		return -1;

	res->classid = classid;
	res->class = 0;
	return tcf_exts_exec(skb, &head->exts, res);
}

static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle)
{
	return 0UL;
}

static void cls_cgroup_put(struct tcf_proto *tp, unsigned long f)
{
}

static int cls_cgroup_init(struct tcf_proto *tp)
{
	return 0;
}

static const struct tcf_ext_map cgroup_ext_map = {
	.action = TCA_CGROUP_ACT,
	.police = TCA_CGROUP_POLICE,
};

static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
	[TCA_CGROUP_EMATCHES]	= { .type = NLA_NESTED },
};

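/* There is at most one cls_cgroup filter per chain: the head is allocated
 * on the first change() call and later calls must use the same handle. */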
static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base,
			     u32 handle, struct nlattr **tca,
			     unsigned long *arg)
{
	struct nlattr *tb[TCA_CGROUP_MAX + 1];
	struct cls_cgroup_head *head = tp->root;
	struct tcf_ematch_tree t;
	struct tcf_exts e;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head == NULL) {
		if (!handle)
			return -EINVAL;

		head = kzalloc(sizeof(*head), GFP_KERNEL);
		if (head == NULL)
			return -ENOBUFS;

		head->handle = handle;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	if (handle != head->handle)
		return -ENOENT;

	err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS],
			       cgroup_policy);
	if (err < 0)
		return err;

	err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &cgroup_ext_map);
	if (err < 0)
		return err;

	err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t);
	if (err < 0)
		return err;

	tcf_exts_change(tp, &head->exts, &e);
	tcf_em_tree_change(tp, &head->ematches, &t);

	return 0;
}

static void cls_cgroup_destroy(struct tcf_proto *tp)
{
	struct cls_cgroup_head *head = tp->root;

	if (head) {
		tcf_exts_destroy(tp, &head->exts);
		tcf_em_tree_destroy(tp, &head->ematches);
		kfree(head);
	}
}

static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg)
{
	return -EOPNOTSUPP;
}

static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_cgroup_head *head = tp->root;

	if (arg->count < arg->skip)
		goto skip;

	if (arg->fn(tp, (unsigned long) head, arg) < 0) {
		arg->stop = 1;
		return;
	}
skip:
	arg->count++;
}

static int cls_cgroup_dump(struct tcf_proto *tp, unsigned long fh,
			   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_cgroup_head *head = tp->root;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	t->tcm_handle = head->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts, &cgroup_ext_map) < 0 ||
	    tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts, &cgroup_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
	.kind		=	"cgroup",
	.init		=	cls_cgroup_init,
	.change		=	cls_cgroup_change,
	.classify	=	cls_cgroup_classify,
	.destroy	=	cls_cgroup_destroy,
	.get		=	cls_cgroup_get,
	.put		=	cls_cgroup_put,
	.delete		=	cls_cgroup_delete,
	.walk		=	cls_cgroup_walk,
	.dump		=	cls_cgroup_dump,
	.owner		=	THIS_MODULE,
};

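/* When built as a module the net_cls cgroup subsystem is loaded here and its
 * runtime-assigned subsys_id is published; the smp_wmb() at load and the
 * synchronize_rcu() at unload order that id against readers. */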
static int __init init_cgroup_cls(void)
{
	int ret;

	ret = cgroup_load_subsys(&net_cls_subsys);
	if (ret)
		goto out;

#ifndef CONFIG_NET_CLS_CGROUP
	/* We can't use rcu_assign_pointer because this is an int. */
	smp_wmb();
	net_cls_subsys_id = net_cls_subsys.subsys_id;
#endif

	ret = register_tcf_proto_ops(&cls_cgroup_ops);
	if (ret)
		cgroup_unload_subsys(&net_cls_subsys);

out:
	return ret;
}

static void __exit exit_cgroup_cls(void)
{
	unregister_tcf_proto_ops(&cls_cgroup_ops);

#ifndef CONFIG_NET_CLS_CGROUP
	net_cls_subsys_id = -1;
	synchronize_rcu();
#endif

	cgroup_unload_subsys(&net_cls_subsys);
}

module_init(init_cgroup_cls);
module_exit(exit_cgroup_cls);
MODULE_LICENSE("GPL");
v4.6
/*
 * net/sched/cls_cgroup.c	Control Group Classifier
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rcupdate.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/cls_cgroup.h>

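/* By v4.6 the net_cls cgroup subsystem itself has moved out of this file
 * (into net/core/netclassid_cgroup.c), and tp->root is managed with RCU:
 * the head carries an rcu_head so it can be replaced atomically and freed
 * after a grace period. */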
struct cls_cgroup_head {
	u32			handle;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
	struct tcf_proto	*tp;
	struct rcu_head		rcu;
};

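/* task_get_classid() now encapsulates the old inline logic: it reads the
 * current task's classid and, in softirq context, falls back to the classid
 * recorded on the socket rather than touching `current'. */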
static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res)
{
	struct cls_cgroup_head *head = rcu_dereference_bh(tp->root);
	u32 classid = task_get_classid(skb);

	if (!classid)
		return -1;
	if (!tcf_em_tree_match(skb, &head->ematches, NULL))
		return -1;

	res->classid = classid;
	res->class = 0;

	return tcf_exts_exec(skb, &head->exts, res);
}

static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle)
{
	return 0UL;
}

static int cls_cgroup_init(struct tcf_proto *tp)
{
	return 0;
}

static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
	[TCA_CGROUP_EMATCHES]	= { .type = NLA_NESTED },
};

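/* Old heads are released from an RCU callback so that concurrent classify()
 * calls still dereferencing them remain safe. */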
static void cls_cgroup_destroy_rcu(struct rcu_head *root)
{
	struct cls_cgroup_head *head = container_of(root,
						    struct cls_cgroup_head,
						    rcu);

	tcf_exts_destroy(&head->exts);
	tcf_em_tree_destroy(&head->ematches);
	kfree(head);
}

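/* change() is now copy-and-replace: a fully initialised new head is built,
 * published with rcu_assign_pointer(), and any previous head is freed via
 * call_rcu() instead of being modified in place under a tree lock. */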
static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
			     struct tcf_proto *tp, unsigned long base,
			     u32 handle, struct nlattr **tca,
			     unsigned long *arg, bool ovr)
{
	struct nlattr *tb[TCA_CGROUP_MAX + 1];
	struct cls_cgroup_head *head = rtnl_dereference(tp->root);
	struct cls_cgroup_head *new;
	struct tcf_ematch_tree t;
	struct tcf_exts e;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (!head && !handle)
		return -EINVAL;

	if (head && handle != head->handle)
		return -ENOENT;

	new = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!new)
		return -ENOBUFS;

	tcf_exts_init(&new->exts, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
	new->handle = handle;
	new->tp = tp;
	err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS],
			       cgroup_policy);
	if (err < 0)
		goto errout;

	tcf_exts_init(&e, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
	if (err < 0)
		goto errout;

	err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t);
	if (err < 0) {
		tcf_exts_destroy(&e);
		goto errout;
	}

	tcf_exts_change(tp, &new->exts, &e);
	tcf_em_tree_change(tp, &new->ematches, &t);

	rcu_assign_pointer(tp->root, new);
	if (head)
		call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
	return 0;
errout:
	kfree(new);
	return err;
}

static bool cls_cgroup_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_cgroup_head *head = rtnl_dereference(tp->root);

	if (!force)
		return false;

	if (head) {
		RCU_INIT_POINTER(tp->root, NULL);
		call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
	}
	return true;
}

static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg)
{
	return -EOPNOTSUPP;
}

static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_cgroup_head *head = rtnl_dereference(tp->root);

	if (arg->count < arg->skip)
		goto skip;

	if (arg->fn(tp, (unsigned long) head, arg) < 0) {
		arg->stop = 1;
		return;
	}
skip:
	arg->count++;
}

static int cls_cgroup_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
			   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_cgroup_head *head = rtnl_dereference(tp->root);
	struct nlattr *nest;

	t->tcm_handle = head->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts) < 0 ||
	    tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
	.kind		=	"cgroup",
	.init		=	cls_cgroup_init,
	.change		=	cls_cgroup_change,
	.classify	=	cls_cgroup_classify,
	.destroy	=	cls_cgroup_destroy,
	.get		=	cls_cgroup_get,
	.delete		=	cls_cgroup_delete,
	.walk		=	cls_cgroup_walk,
	.dump		=	cls_cgroup_dump,
	.owner		=	THIS_MODULE,
};

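/* Module init/exit only register and unregister the classifier ops; the
 * cgroup subsystem is no longer loaded or unloaded from here. */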
static int __init init_cgroup_cls(void)
{
	return register_tcf_proto_ops(&cls_cgroup_ops);
}

static void __exit exit_cgroup_cls(void)
{
	unregister_tcf_proto_ops(&cls_cgroup_ops);
}

module_init(init_cgroup_cls);
module_exit(exit_cgroup_cls);
MODULE_LICENSE("GPL");