/*
 * net/sched/cls_cgroup.c	Control Group Classifier
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rcupdate.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/cls_cgroup.h>

struct cls_cgroup_head {
	u32			handle;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
};

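/*
 * Classification is keyed on the net_cls cgroup classid of the task that
 * generated the packet: the classid is looked up via task_cls_state(current)
 * and, when non-zero, is reported back through res->classid.
 */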
static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res)
{
	struct cls_cgroup_head *head = tp->root;
	u32 classid;

	rcu_read_lock();
	classid = task_cls_state(current)->classid;
	rcu_read_unlock();

	/*
	 * Due to the nature of the classifier it is required to ignore all
	 * packets originating from softirq context as accessing `current'
	 * would lead to false results.
	 *
	 * This test assumes that all callers of dev_queue_xmit() explicitly
	 * disable bh. Knowing this, it is possible to detect softirq based
	 * calls by looking at the number of nested bh disable calls because
	 * softirqs always disable bh.
	 */
	if (in_serving_softirq()) {
		/* If there is an sk_classid we'll use that. */
		if (!skb->sk)
			return -1;
		classid = skb->sk->sk_classid;
	}

	if (!classid)
		return -1;

	if (!tcf_em_tree_match(skb, &head->ematches, NULL))
		return -1;

	res->classid = classid;
	res->class = 0;
	return tcf_exts_exec(skb, &head->exts, res);
}

static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle)
{
	return 0UL;
}

static void cls_cgroup_put(struct tcf_proto *tp, unsigned long f)
{
}

static int cls_cgroup_init(struct tcf_proto *tp)
{
	return 0;
}

static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
	[TCA_CGROUP_EMATCHES]	= { .type = NLA_NESTED },
};

static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
			     struct tcf_proto *tp, unsigned long base,
			     u32 handle, struct nlattr **tca,
			     unsigned long *arg)
{
	struct nlattr *tb[TCA_CGROUP_MAX + 1];
	struct cls_cgroup_head *head = tp->root;
	struct tcf_ematch_tree t;
	struct tcf_exts e;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head == NULL) {
		if (!handle)
			return -EINVAL;

		head = kzalloc(sizeof(*head), GFP_KERNEL);
		if (head == NULL)
			return -ENOBUFS;

		tcf_exts_init(&head->exts, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
		head->handle = handle;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	if (handle != head->handle)
		return -ENOENT;

	err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS],
			       cgroup_policy);
	if (err < 0)
		return err;

	tcf_exts_init(&e, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e);
	if (err < 0)
		return err;

	err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t);
	if (err < 0)
		return err;

	tcf_exts_change(tp, &head->exts, &e);
	tcf_em_tree_change(tp, &head->ematches, &t);

	return 0;
}

static void cls_cgroup_destroy(struct tcf_proto *tp)
{
	struct cls_cgroup_head *head = tp->root;

	if (head) {
		tcf_exts_destroy(tp, &head->exts);
		tcf_em_tree_destroy(tp, &head->ematches);
		kfree(head);
	}
}

static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg)
{
	return -EOPNOTSUPP;
}

static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_cgroup_head *head = tp->root;

	if (arg->count < arg->skip)
		goto skip;

	if (arg->fn(tp, (unsigned long) head, arg) < 0) {
		arg->stop = 1;
		return;
	}
skip:
	arg->count++;
}

static int cls_cgroup_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
			   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_cgroup_head *head = tp->root;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	t->tcm_handle = head->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts) < 0 ||
	    tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
	.kind		= "cgroup",
	.init		= cls_cgroup_init,
	.change		= cls_cgroup_change,
	.classify	= cls_cgroup_classify,
	.destroy	= cls_cgroup_destroy,
	.get		= cls_cgroup_get,
	.put		= cls_cgroup_put,
	.delete		= cls_cgroup_delete,
	.walk		= cls_cgroup_walk,
	.dump		= cls_cgroup_dump,
	.owner		= THIS_MODULE,
};

static int __init init_cgroup_cls(void)
{
	return register_tcf_proto_ops(&cls_cgroup_ops);
}

static void __exit exit_cgroup_cls(void)
{
	unregister_tcf_proto_ops(&cls_cgroup_ops);
}

module_init(init_cgroup_cls);
module_exit(exit_cgroup_cls);
MODULE_LICENSE("GPL");
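
/*
 * A later revision of the same classifier follows. The filter head is now
 * published to the datapath with RCU, a change builds a complete new head
 * and swaps it in with rcu_assign_pointer(), and the old head is freed
 * through deferred work once concurrent readers can no longer see it.
 */
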
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_cgroup.c	Control Group Classifier
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rcupdate.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/cls_cgroup.h>
#include <net/tc_wrapper.h>

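/*
 * Per-filter state. Compared with the earlier version above, the head also
 * carries a back-pointer to its tcf_proto and an rcu_work item so that a
 * replaced head can be torn down from process context (under RTNL) once an
 * RCU grace period has passed.
 */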
struct cls_cgroup_head {
	u32			handle;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
	struct tcf_proto	*tp;
	struct rcu_work		rwork;
};

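/*
 * Datapath lookup: the head is read under the BH-disabled RCU read side, and
 * the classid comes from task_get_classid() (net/cls_cgroup.h), which now
 * carries the softirq handling that used to live directly in this function,
 * falling back to the classid recorded on the socket when `current' cannot
 * be trusted.
 */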
TC_INDIRECT_SCOPE int cls_cgroup_classify(struct sk_buff *skb,
					  const struct tcf_proto *tp,
					  struct tcf_result *res)
{
	struct cls_cgroup_head *head = rcu_dereference_bh(tp->root);
	u32 classid = task_get_classid(skb);

	if (unlikely(!head))
		return -1;
	if (!classid)
		return -1;
	if (!tcf_em_tree_match(skb, &head->ematches, NULL))
		return -1;

	res->classid = classid;
	res->class = 0;

	return tcf_exts_exec(skb, &head->exts, res);
}

static void *cls_cgroup_get(struct tcf_proto *tp, u32 handle)
{
	return NULL;
}

static int cls_cgroup_init(struct tcf_proto *tp)
{
	return 0;
}

static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
	[TCA_CGROUP_EMATCHES]	= { .type = NLA_NESTED },
};

static void __cls_cgroup_destroy(struct cls_cgroup_head *head)
{
	tcf_exts_destroy(&head->exts);
	tcf_em_tree_destroy(&head->ematches);
	tcf_exts_put_net(&head->exts);
	kfree(head);
}

static void cls_cgroup_destroy_work(struct work_struct *work)
{
	struct cls_cgroup_head *head = container_of(to_rcu_work(work),
						    struct cls_cgroup_head,
						    rwork);
	rtnl_lock();
	__cls_cgroup_destroy(head);
	rtnl_unlock();
}

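/*
 * Changes are applied copy-on-write: a new head is allocated and validated
 * while the old one remains visible to readers, then the new head is
 * published with rcu_assign_pointer() and the old head (if any) is queued
 * for deferred destruction.
 */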
static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
			     struct tcf_proto *tp, unsigned long base,
			     u32 handle, struct nlattr **tca,
			     void **arg, u32 flags,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_CGROUP_MAX + 1];
	struct cls_cgroup_head *head = rtnl_dereference(tp->root);
	struct cls_cgroup_head *new;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (!head && !handle)
		return -EINVAL;

	if (head && handle != head->handle)
		return -ENOENT;

	new = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!new)
		return -ENOBUFS;

	err = tcf_exts_init(&new->exts, net, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
	if (err < 0)
		goto errout;
	new->handle = handle;
	new->tp = tp;
	err = nla_parse_nested_deprecated(tb, TCA_CGROUP_MAX,
					  tca[TCA_OPTIONS], cgroup_policy,
					  NULL);
	if (err < 0)
		goto errout;

	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &new->exts, flags,
				extack);
	if (err < 0)
		goto errout;

	err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &new->ematches);
	if (err < 0)
		goto errout;

	rcu_assign_pointer(tp->root, new);
	if (head) {
		tcf_exts_get_net(&head->exts);
		tcf_queue_work(&head->rwork, cls_cgroup_destroy_work);
	}
	return 0;
errout:
	tcf_exts_destroy(&new->exts);
	kfree(new);
	return err;
}

static void cls_cgroup_destroy(struct tcf_proto *tp, bool rtnl_held,
			       struct netlink_ext_ack *extack)
{
	struct cls_cgroup_head *head = rtnl_dereference(tp->root);

	/* Head can still be NULL due to cls_cgroup_init(). */
	if (head) {
		if (tcf_exts_get_net(&head->exts))
			tcf_queue_work(&head->rwork, cls_cgroup_destroy_work);
		else
			__cls_cgroup_destroy(head);
	}
}

static int cls_cgroup_delete(struct tcf_proto *tp, void *arg, bool *last,
			     bool rtnl_held, struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg,
			    bool rtnl_held)
{
	struct cls_cgroup_head *head = rtnl_dereference(tp->root);

	if (arg->count < arg->skip)
		goto skip;

	if (!head)
		return;
	if (arg->fn(tp, head, arg) < 0) {
		arg->stop = 1;
		return;
	}
skip:
	arg->count++;
}

static int cls_cgroup_dump(struct net *net, struct tcf_proto *tp, void *fh,
			   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct cls_cgroup_head *head = rtnl_dereference(tp->root);
	struct nlattr *nest;

	t->tcm_handle = head->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts) < 0 ||
	    tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
	.kind		= "cgroup",
	.init		= cls_cgroup_init,
	.change		= cls_cgroup_change,
	.classify	= cls_cgroup_classify,
	.destroy	= cls_cgroup_destroy,
	.get		= cls_cgroup_get,
	.delete		= cls_cgroup_delete,
	.walk		= cls_cgroup_walk,
	.dump		= cls_cgroup_dump,
	.owner		= THIS_MODULE,
};

static int __init init_cgroup_cls(void)
{
	return register_tcf_proto_ops(&cls_cgroup_ops);
}

static void __exit exit_cgroup_cls(void)
{
	unregister_tcf_proto_ops(&cls_cgroup_ops);
}

module_init(init_cgroup_cls);
module_exit(exit_cgroup_cls);
MODULE_DESCRIPTION("TC cgroup classifier");
MODULE_LICENSE("GPL");
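
Packets only match this classifier once the sending task has a non-zero net_cls
classid. Below is a minimal userspace sketch of how that classid is typically
assigned through the cgroup v1 net_cls controller; the mount point and group
name ("bulk") are assumptions, and the value written is the tc class handle
major:minor packed as (major << 16) | minor, so class 10:1 becomes 0x00100001.
With the classid set and the task's PID added to the group's cgroup.procs, a
filter along the lines of "tc filter add dev eth0 parent 10: handle 1: cgroup"
would steer the group's traffic into class 10:1.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical v1 net_cls group created beforehand, e.g. with
	 * mkdir /sys/fs/cgroup/net_cls/bulk; path and name are assumptions. */
	const char *path = "/sys/fs/cgroup/net_cls/bulk/net_cls.classid";
	/* tc class 10:1, packed as (major << 16) | minor. */
	unsigned long classid = (10UL << 16) | 1UL;
	char buf[32];
	int fd, len;

	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror("open net_cls.classid");
		return 1;
	}

	/* The kernel parses the written string as an integer classid. */
	len = snprintf(buf, sizeof(buf), "%lu", classid);
	if (write(fd, buf, len) != len) {
		perror("write classid");
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}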