/*
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#include <linux/tc_act/tc_bpf.h>
#include <net/tc_act/tc_bpf.h>

#define BPF_TAB_MASK		15
#define ACT_BPF_NAME_LEN	256

struct tcf_bpf_cfg {
	struct bpf_prog *filter;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	u32 bpf_fd;
	u16 bpf_num_ops;
	bool is_ebpf;
};
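
/* A tcf_bpf_cfg is filled from exactly one of two netlink layouts:
 * classic BPF passed inline as an opcode array (TCA_ACT_BPF_OPS_LEN plus
 * TCA_ACT_BPF_OPS), or an eBPF program referenced by file descriptor
 * (TCA_ACT_BPF_FD, optionally named via TCA_ACT_BPF_NAME). From iproute2
 * the two paths roughly correspond to the following invocations (a
 * sketch; the exact keywords depend on the iproute2 version):
 *
 *   # classic BPF, opcodes given inline ("return -1" program)
 *   tc actions add action bpf bytecode '1,6 0 0 4294967295'
 *
 *   # eBPF, program loaded from an ELF object and handed over as an fd
 *   tc actions add action bpf object-file prog.o section action
 */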

static int bpf_net_id;

static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
		   struct tcf_result *res)
{
	struct tcf_bpf *prog = act->priv;
	struct bpf_prog *filter;
	int action, filter_res;
	bool at_ingress = G_TC_AT(skb->tc_verd) & AT_INGRESS;

	if (unlikely(!skb_mac_header_was_set(skb)))
		return TC_ACT_UNSPEC;

	tcf_lastuse_update(&prog->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(prog->common.cpu_bstats), skb);

	rcu_read_lock();
	filter = rcu_dereference(prog->filter);
	if (at_ingress) {
		__skb_push(skb, skb->mac_len);
		filter_res = BPF_PROG_RUN(filter, skb);
		__skb_pull(skb, skb->mac_len);
	} else {
		filter_res = BPF_PROG_RUN(filter, skb);
	}
	rcu_read_unlock();

	/* A BPF program may overwrite the default action opcode.
	 * As in cls_bpf, if filter_res == -1 we use the default
	 * action specified from tc.
	 *
	 * In case a different well-known TC_ACT opcode has been
	 * returned, it will overwrite the default one.
	 *
	 * For everything else that is unknown, TC_ACT_UNSPEC is
	 * returned.
	 */
	switch (filter_res) {
	case TC_ACT_PIPE:
	case TC_ACT_RECLASSIFY:
	case TC_ACT_OK:
	case TC_ACT_REDIRECT:
		action = filter_res;
		break;
	case TC_ACT_SHOT:
		action = filter_res;
		qstats_drop_inc(this_cpu_ptr(prog->common.cpu_qstats));
		break;
	case TC_ACT_UNSPEC:
		action = prog->tcf_action;
		break;
	default:
		action = TC_ACT_UNSPEC;
		break;
	}

	return action;
}
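
/* A sketch (not part of this file) of the smallest useful eBPF program
 * this hook could run; it assumes the usual clang/libbpf build flow, and
 * its return value feeds the switch statement above:
 *
 *   #include <linux/bpf.h>
 *   #include <linux/pkt_cls.h>
 *
 *   __attribute__((section("action"), used))
 *   int drop_all(struct __sk_buff *skb)
 *   {
 *           return TC_ACT_SHOT;  // counted via qstats_drop_inc() above
 *   }
 *
 * Returning TC_ACT_UNSPEC (-1) instead would fall back to the default
 * action configured from tc, as described in the comment above.
 */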

static bool tcf_bpf_is_ebpf(const struct tcf_bpf *prog)
{
	return !prog->bpf_ops;
}

static int tcf_bpf_dump_bpf_info(const struct tcf_bpf *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_ACT_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_ACT_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf *prog,
				  struct sk_buff *skb)
{
	if (nla_put_u32(skb, TCA_ACT_BPF_FD, prog->bpf_fd))
		return -EMSGSIZE;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	return 0;
}

static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
			int bind, int ref)
{
	unsigned char *tp = skb_tail_pointer(skb);
	struct tcf_bpf *prog = act->priv;
	struct tc_act_bpf opt = {
		.index = prog->tcf_index,
		.refcnt = prog->tcf_refcnt - ref,
		.bindcnt = prog->tcf_bindcnt - bind,
		.action = prog->tcf_action,
	};
	struct tcf_t tm;
	int ret;

	if (nla_put(skb, TCA_ACT_BPF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (tcf_bpf_is_ebpf(prog))
		ret = tcf_bpf_dump_ebpf_info(prog, skb);
	else
		ret = tcf_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	tm.install = jiffies_to_clock_t(jiffies - prog->tcf_tm.install);
	tm.lastuse = jiffies_to_clock_t(jiffies - prog->tcf_tm.lastuse);
	tm.expires = jiffies_to_clock_t(prog->tcf_tm.expires);

	if (nla_put(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, tp);
	return -1;
}

static const struct nla_policy act_bpf_policy[TCA_ACT_BPF_MAX + 1] = {
	[TCA_ACT_BPF_PARMS]	= { .len = sizeof(struct tc_act_bpf) },
	[TCA_ACT_BPF_FD]	= { .type = NLA_U32 },
	[TCA_ACT_BPF_NAME]	= { .type = NLA_NUL_STRING, .len = ACT_BPF_NAME_LEN },
	[TCA_ACT_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_ACT_BPF_OPS]	= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_ACT_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_ACT_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_ACT_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	cfg->bpf_ops = bpf_ops;
	cfg->bpf_num_ops = bpf_num_ops;
	cfg->filter = fp;
	cfg->is_ebpf = false;

	return 0;
}
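
/* For reference, the smallest valid TCA_ACT_BPF_OPS payload is a single
 * "return constant" instruction; a hypothetical userspace encoder could
 * build it as:
 *
 *   struct sock_filter ops[] = {
 *           BPF_STMT(BPF_RET | BPF_K, TC_ACT_OK),
 *   };
 *   // bpf_num_ops = 1, so bpf_size = 1 * sizeof(struct sock_filter)
 *
 * bpf_prog_create() above then validates and JITs it like any other
 * classic BPF filter.
 */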

static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_ACT_BPF_FD]);

	fp = bpf_prog_get(bpf_fd);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (fp->type != BPF_PROG_TYPE_SCHED_ACT) {
		bpf_prog_put(fp);
		return -EINVAL;
	}

	if (tb[TCA_ACT_BPF_NAME]) {
		name = kmemdup(nla_data(tb[TCA_ACT_BPF_NAME]),
			       nla_len(tb[TCA_ACT_BPF_NAME]),
			       GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	cfg->bpf_fd = bpf_fd;
	cfg->bpf_name = name;
	cfg->filter = fp;
	cfg->is_ebpf = true;

	return 0;
}
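
/* The fd carried in TCA_ACT_BPF_FD is expected to come from a prior
 * bpf(BPF_PROG_LOAD) call in userspace; a rough sketch (instruction
 * setup and error handling omitted, ptr_to_u64() standing in for the
 * usual cast helper):
 *
 *   union bpf_attr attr = {
 *           .prog_type = BPF_PROG_TYPE_SCHED_ACT,
 *           .insns     = ptr_to_u64(insns),
 *           .insn_cnt  = insn_cnt,
 *           .license   = ptr_to_u64("GPL"),
 *   };
 *   int bpf_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *
 * An fd referring to any other program type is rejected above with
 * -EINVAL.
 */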

static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
{
	if (cfg->is_ebpf)
		bpf_prog_put(cfg->filter);
	else
		bpf_prog_destroy(cfg->filter);

	kfree(cfg->bpf_ops);
	kfree(cfg->bpf_name);
}

static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
				  struct tcf_bpf_cfg *cfg)
{
	cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
	/* updates to prog->filter are prevented, since it's called either
	 * with rtnl lock or during final cleanup in rcu callback
	 */
	cfg->filter = rcu_dereference_protected(prog->filter, 1);

	cfg->bpf_ops = prog->bpf_ops;
	cfg->bpf_name = prog->bpf_name;
}

static int tcf_bpf_init(struct net *net, struct nlattr *nla,
			struct nlattr *est, struct tc_action *act,
			int replace, int bind)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);
	struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
	struct tcf_bpf_cfg cfg, old;
	struct tc_act_bpf *parm;
	struct tcf_bpf *prog;
	bool is_bpf, is_ebpf;
	int ret, res = 0;

	if (!nla)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_ACT_BPF_MAX, nla, act_bpf_policy);
	if (ret < 0)
		return ret;

	if (!tb[TCA_ACT_BPF_PARMS])
		return -EINVAL;

	parm = nla_data(tb[TCA_ACT_BPF_PARMS]);

	if (!tcf_hash_check(tn, parm->index, act, bind)) {
		ret = tcf_hash_create(tn, parm->index, est, act,
				      sizeof(*prog), bind, true);
		if (ret < 0)
			return ret;

		res = ACT_P_CREATED;
	} else {
		/* Don't override defaults. */
		if (bind)
			return 0;

		tcf_hash_release(act, bind);
		if (!replace)
			return -EEXIST;
	}

	is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
	is_ebpf = tb[TCA_ACT_BPF_FD];

	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
		ret = -EINVAL;
		goto out;
	}

	memset(&cfg, 0, sizeof(cfg));

	ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
		       tcf_bpf_init_from_efd(tb, &cfg);
	if (ret < 0)
		goto out;

	prog = to_bpf(act);
	ASSERT_RTNL();

	if (res != ACT_P_CREATED)
		tcf_bpf_prog_fill_cfg(prog, &old);

	prog->bpf_ops = cfg.bpf_ops;
	prog->bpf_name = cfg.bpf_name;

	if (cfg.bpf_num_ops)
		prog->bpf_num_ops = cfg.bpf_num_ops;
	if (cfg.bpf_fd)
		prog->bpf_fd = cfg.bpf_fd;

	prog->tcf_action = parm->action;
	rcu_assign_pointer(prog->filter, cfg.filter);

	if (res == ACT_P_CREATED) {
		tcf_hash_insert(tn, act);
	} else {
		/* make sure the program being replaced is no longer executing */
		synchronize_rcu();
		tcf_bpf_cfg_cleanup(&old);
	}

	return res;
out:
	if (res == ACT_P_CREATED)
		tcf_hash_cleanup(act, est);

	return ret;
}

static void tcf_bpf_cleanup(struct tc_action *act, int bind)
{
	struct tcf_bpf_cfg tmp;

	tcf_bpf_prog_fill_cfg(act->priv, &tmp);
	tcf_bpf_cfg_cleanup(&tmp);
}

static int tcf_bpf_walker(struct net *net, struct sk_buff *skb,
			  struct netlink_callback *cb, int type,
			  struct tc_action *a)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);

	return tcf_generic_walker(tn, skb, cb, type, a);
}

static int tcf_bpf_search(struct net *net, struct tc_action *a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);

	return tcf_hash_search(tn, a, index);
}

static struct tc_action_ops act_bpf_ops __read_mostly = {
	.kind = "bpf",
	.type = TCA_ACT_BPF,
	.owner = THIS_MODULE,
	.act = tcf_bpf,
	.dump = tcf_bpf_dump,
	.cleanup = tcf_bpf_cleanup,
	.init = tcf_bpf_init,
	.walk = tcf_bpf_walker,
	.lookup = tcf_bpf_search,
};

static __net_init int bpf_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);

	return tc_action_net_init(tn, &act_bpf_ops, BPF_TAB_MASK);
}

static void __net_exit bpf_exit_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, bpf_net_id);

	tc_action_net_exit(tn);
}

static struct pernet_operations bpf_net_ops = {
	.init = bpf_init_net,
	.exit = bpf_exit_net,
	.id = &bpf_net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init bpf_init_module(void)
{
	return tcf_register_action(&act_bpf_ops, &bpf_net_ops);
}

static void __exit bpf_cleanup_module(void)
{
	tcf_unregister_action(&act_bpf_ops, &bpf_net_ops);
}

module_init(bpf_init_module);
module_exit(bpf_cleanup_module);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("TC BPF based action");
MODULE_LICENSE("GPL v2");
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/netlink.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_bpf.h>
#include <net/tc_act/tc_bpf.h>
#include <net/tc_wrapper.h>

#define ACT_BPF_NAME_LEN	256

struct tcf_bpf_cfg {
	struct bpf_prog *filter;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	u16 bpf_num_ops;
	bool is_ebpf;
};

static struct tc_action_ops act_bpf_ops;

TC_INDIRECT_SCOPE int tcf_bpf_act(struct sk_buff *skb,
				  const struct tc_action *act,
				  struct tcf_result *res)
{
	bool at_ingress = skb_at_tc_ingress(skb);
	struct tcf_bpf *prog = to_bpf(act);
	struct bpf_prog *filter;
	int action, filter_res;

	tcf_lastuse_update(&prog->tcf_tm);
	bstats_update(this_cpu_ptr(prog->common.cpu_bstats), skb);

	filter = rcu_dereference(prog->filter);
	if (at_ingress) {
		__skb_push(skb, skb->mac_len);
		bpf_compute_data_pointers(skb);
		filter_res = bpf_prog_run(filter, skb);
		__skb_pull(skb, skb->mac_len);
	} else {
		bpf_compute_data_pointers(skb);
		filter_res = bpf_prog_run(filter, skb);
	}
	if (unlikely(!skb->tstamp && skb->tstamp_type))
		skb->tstamp_type = SKB_CLOCK_REALTIME;
	if (skb_sk_is_prefetched(skb) && filter_res != TC_ACT_OK)
		skb_orphan(skb);

	/* A BPF program may overwrite the default action opcode.
	 * As in cls_bpf, if filter_res == -1 we use the default
	 * action specified from tc.
	 *
	 * In case a different well-known TC_ACT opcode has been
	 * returned, it will overwrite the default one.
	 *
	 * For everything else that is unknown, TC_ACT_UNSPEC is
	 * returned.
	 */
	switch (filter_res) {
	case TC_ACT_PIPE:
	case TC_ACT_RECLASSIFY:
	case TC_ACT_OK:
	case TC_ACT_REDIRECT:
		action = filter_res;
		break;
	case TC_ACT_SHOT:
		action = filter_res;
		qstats_drop_inc(this_cpu_ptr(prog->common.cpu_qstats));
		break;
	case TC_ACT_UNSPEC:
		action = prog->tcf_action;
		break;
	default:
		action = TC_ACT_UNSPEC;
		break;
	}

	return action;
}
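
/* bpf_compute_data_pointers() above is what makes direct packet access
 * through skb->data / skb->data_end valid inside the program. A sketch
 * of a program relying on it (illustrative only; assumes libbpf's SEC()
 * and bpf_htons() helpers):
 *
 *   SEC("action")
 *   int pass_ipv4_only(struct __sk_buff *skb)
 *   {
 *           void *data = (void *)(long)skb->data;
 *           void *data_end = (void *)(long)skb->data_end;
 *           struct ethhdr *eth = data;
 *
 *           if (data + sizeof(*eth) > data_end)
 *                   return TC_ACT_SHOT;
 *           return eth->h_proto == bpf_htons(ETH_P_IP) ?
 *                  TC_ACT_OK : TC_ACT_SHOT;
 *   }
 */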

static bool tcf_bpf_is_ebpf(const struct tcf_bpf *prog)
{
	return !prog->bpf_ops;
}

static int tcf_bpf_dump_bpf_info(const struct tcf_bpf *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_ACT_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_ACT_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_ACT_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_ACT_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}

static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
			int bind, int ref)
{
	unsigned char *tp = skb_tail_pointer(skb);
	struct tcf_bpf *prog = to_bpf(act);
	struct tc_act_bpf opt = {
		.index = prog->tcf_index,
		.refcnt = refcount_read(&prog->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&prog->tcf_bindcnt) - bind,
	};
	struct tcf_t tm;
	int ret;

	spin_lock_bh(&prog->tcf_lock);
	opt.action = prog->tcf_action;
	if (nla_put(skb, TCA_ACT_BPF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (tcf_bpf_is_ebpf(prog))
		ret = tcf_bpf_dump_ebpf_info(prog, skb);
	else
		ret = tcf_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	tcf_tm_dump(&tm, &prog->tcf_tm);
	if (nla_put_64bit(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm,
			  TCA_ACT_BPF_PAD))
		goto nla_put_failure;

	spin_unlock_bh(&prog->tcf_lock);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(&prog->tcf_lock);
	nlmsg_trim(skb, tp);
	return -1;
}

static const struct nla_policy act_bpf_policy[TCA_ACT_BPF_MAX + 1] = {
	[TCA_ACT_BPF_PARMS]	= { .len = sizeof(struct tc_act_bpf) },
	[TCA_ACT_BPF_FD]	= { .type = NLA_U32 },
	[TCA_ACT_BPF_NAME]	= { .type = NLA_NUL_STRING,
				    .len = ACT_BPF_NAME_LEN },
	[TCA_ACT_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_ACT_BPF_OPS]	= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_ACT_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_ACT_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kmemdup(nla_data(tb[TCA_ACT_BPF_OPS]), bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	cfg->bpf_ops = bpf_ops;
	cfg->bpf_num_ops = bpf_num_ops;
	cfg->filter = fp;
	cfg->is_ebpf = false;

	return 0;
}

static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
{
	struct bpf_prog *fp;
	char *name = NULL;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_ACT_BPF_FD]);

	fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_ACT);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_ACT_BPF_NAME]) {
		name = nla_memdup(tb[TCA_ACT_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	cfg->bpf_name = name;
	cfg->filter = fp;
	cfg->is_ebpf = true;

	return 0;
}

static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
{
	struct bpf_prog *filter = cfg->filter;

	if (filter) {
		if (cfg->is_ebpf)
			bpf_prog_put(filter);
		else
			bpf_prog_destroy(filter);
	}

	kfree(cfg->bpf_ops);
	kfree(cfg->bpf_name);
}

static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
				  struct tcf_bpf_cfg *cfg)
{
	cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
	/* updates to prog->filter are prevented, since it's called either
	 * with tcf lock or during final cleanup in rcu callback
	 */
	cfg->filter = rcu_dereference_protected(prog->filter, 1);

	cfg->bpf_ops = prog->bpf_ops;
	cfg->bpf_name = prog->bpf_name;
}

static int tcf_bpf_init(struct net *net, struct nlattr *nla,
			struct nlattr *est, struct tc_action **act,
			struct tcf_proto *tp, u32 flags,
			struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_bpf_ops.net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tcf_bpf_cfg cfg, old;
	struct tc_act_bpf *parm;
	struct tcf_bpf *prog;
	bool is_bpf, is_ebpf;
	int ret, res = 0;
	u32 index;

	if (!nla)
		return -EINVAL;

	ret = nla_parse_nested_deprecated(tb, TCA_ACT_BPF_MAX, nla,
					  act_bpf_policy, NULL);
	if (ret < 0)
		return ret;

	if (!tb[TCA_ACT_BPF_PARMS])
		return -EINVAL;

	parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
	index = parm->index;
	ret = tcf_idr_check_alloc(tn, &index, act, bind);
	if (!ret) {
		ret = tcf_idr_create(tn, index, est, act,
				     &act_bpf_ops, bind, true, flags);
		if (ret < 0) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}

		res = ACT_P_CREATED;
	} else if (ret > 0) {
		/* Don't override defaults. */
		if (bind)
			return ACT_P_BOUND;

		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
			tcf_idr_release(*act, bind);
			return -EEXIST;
		}
	} else {
		return ret;
	}

	ret = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (ret < 0)
		goto release_idr;

	is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
	is_ebpf = tb[TCA_ACT_BPF_FD];

	if (is_bpf == is_ebpf) {
		ret = -EINVAL;
		goto put_chain;
	}

	memset(&cfg, 0, sizeof(cfg));

	ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
		       tcf_bpf_init_from_efd(tb, &cfg);
	if (ret < 0)
		goto put_chain;

	prog = to_bpf(*act);

	spin_lock_bh(&prog->tcf_lock);
	if (res != ACT_P_CREATED)
		tcf_bpf_prog_fill_cfg(prog, &old);

	prog->bpf_ops = cfg.bpf_ops;
	prog->bpf_name = cfg.bpf_name;

	if (cfg.bpf_num_ops)
		prog->bpf_num_ops = cfg.bpf_num_ops;

	goto_ch = tcf_action_set_ctrlact(*act, parm->action, goto_ch);
	rcu_assign_pointer(prog->filter, cfg.filter);
	spin_unlock_bh(&prog->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	if (res != ACT_P_CREATED) {
		/* make sure the program being replaced is no longer executing */
		synchronize_rcu();
		tcf_bpf_cfg_cleanup(&old);
	}

	return res;

put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

release_idr:
	tcf_idr_release(*act, bind);
	return ret;
}

static void tcf_bpf_cleanup(struct tc_action *act)
{
	struct tcf_bpf_cfg tmp;

	tcf_bpf_prog_fill_cfg(to_bpf(act), &tmp);
	tcf_bpf_cfg_cleanup(&tmp);
}

static struct tc_action_ops act_bpf_ops __read_mostly = {
	.kind = "bpf",
	.id = TCA_ID_BPF,
	.owner = THIS_MODULE,
	.act = tcf_bpf_act,
	.dump = tcf_bpf_dump,
	.cleanup = tcf_bpf_cleanup,
	.init = tcf_bpf_init,
	.size = sizeof(struct tcf_bpf),
};
MODULE_ALIAS_NET_ACT("bpf");

static __net_init int bpf_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, act_bpf_ops.net_id);

	return tc_action_net_init(net, tn, &act_bpf_ops);
}

static void __net_exit bpf_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_bpf_ops.net_id);
}

static struct pernet_operations bpf_net_ops = {
	.init = bpf_init_net,
	.exit_batch = bpf_exit_net,
	.id = &act_bpf_ops.net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init bpf_init_module(void)
{
	return tcf_register_action(&act_bpf_ops, &bpf_net_ops);
}

static void __exit bpf_cleanup_module(void)
{
	tcf_unregister_action(&act_bpf_ops, &bpf_net_ops);
}

module_init(bpf_init_module);
module_exit(bpf_cleanup_module);

MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_DESCRIPTION("TC BPF based action");
MODULE_LICENSE("GPL v2");