// SPDX-License-Identifier: GPL-2.0-only
/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Can be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/idr.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/tc_wrapper.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

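/* Example user-space usage (an illustrative sketch only; the exact syntax
 * depends on the installed iproute2 version, and "prog.o"/"classifier" are
 * placeholder names):
 *
 *   # eBPF object attached in direct-action mode at ingress
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress bpf direct-action obj prog.o sec classifier
 *
 *   # classic BPF filter built from raw opcodes
 *   tc filter add dev eth0 parent ffff: bpf bytecode '...' classid 1:1
 */
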
#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

struct cls_bpf_head {
	struct list_head plist;
	struct idr handle_idr;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	u32 gen_flags;
	unsigned int in_hw_count;
	struct tcf_exts exts;
	u32 handle;
	u16 bpf_num_ops;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_work rwork;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

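/* In direct-action (exts_integrated) mode the program's return value is
 * interpreted directly as a TC action verdict. Anything outside the known
 * set maps to TC_ACT_UNSPEC, so classification continues with the next
 * filter on the list.
 */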
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

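/* Main classification path, run in softirq context under the RCU (BH) read
 * lock. Programs on the list execute in order. At ingress the skb data
 * starts at the network header, so the MAC header is temporarily pushed
 * back so programs see the same layout as at egress. A non-integrated
 * program returns 0 (no match, keep going), -1 (use the classid configured
 * via TCA_BPF_CLASSID) or a positive classid of its own.
 */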
TC_INDIRECT_SCOPE int cls_bpf_classify(struct sk_buff *skb,
				       const struct tcf_proto *tp,
				       struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_pointers(skb);
			filter_res = bpf_prog_run(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_pointers(skb);
			filter_res = bpf_prog_run(prog->filter, skb);
		}
		if (unlikely(!skb->tstamp && skb->mono_delivery_time))
			skb->mono_delivery_time = 0;

		if (prog->exts_integrated) {
			res->class = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}

	return ret;
}

static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

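/* Issue a single offload request to the drivers bound to this block:
 * add (prog only), replace (both prog and oldprog) or destroy (oldprog
 * only). If an add/replace fails, the call is replayed with the arguments
 * swapped to roll the hardware back to the previous state. With SKIP_SW
 * set, failing to install in hardware is fatal for the whole change.
 */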
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       struct cls_bpf_prog *oldprog,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *obj;
	bool skip_sw;
	int err;

	skip_sw = prog && tc_skip_sw(prog->gen_flags);
	obj = prog ?: oldprog;

	tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags, extack);
	cls_bpf.command = TC_CLSBPF_OFFLOAD;
	cls_bpf.exts = &obj->exts;
	cls_bpf.prog = prog ? prog->filter : NULL;
	cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
	cls_bpf.name = obj->bpf_name;
	cls_bpf.exts_integrated = obj->exts_integrated;

	if (oldprog && prog)
		err = tc_setup_cb_replace(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
					  skip_sw, &oldprog->gen_flags,
					  &oldprog->in_hw_count,
					  &prog->gen_flags, &prog->in_hw_count,
					  true);
	else if (prog)
		err = tc_setup_cb_add(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
				      skip_sw, &prog->gen_flags,
				      &prog->in_hw_count, true);
	else
		err = tc_setup_cb_destroy(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
					  skip_sw, &oldprog->gen_flags,
					  &oldprog->in_hw_count, true);

	if (prog && err) {
		cls_bpf_offload_cmd(tp, oldprog, prog, extack);
		return err;
	}

	if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static u32 cls_bpf_flags(u32 flags)
{
	return flags & CLS_BPF_SUPPORTED_GEN_FLAGS;
}

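/* Decide whether the hardware needs to see this change at all: SKIP_HW
 * filters never reach the driver, and the SKIP_* flags may not change
 * across a replace of an existing filter.
 */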
static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog,
			   struct netlink_ext_ack *extack)
{
	if (prog && oldprog &&
	    cls_bpf_flags(prog->gen_flags) !=
	    cls_bpf_flags(oldprog->gen_flags))
		return -EINVAL;

	if (prog && tc_skip_hw(prog->gen_flags))
		prog = NULL;
	if (oldprog && tc_skip_hw(oldprog->gen_flags))
		oldprog = NULL;
	if (!prog && !oldprog)
		return 0;

	return cls_bpf_offload_cmd(tp, prog, oldprog, extack);
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = cls_bpf_offload_cmd(tp, NULL, prog, extack);
	if (err)
		pr_err("Stopping hardware offload failed: %d\n", err);
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};

	tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL);
	cls_bpf.command = TC_CLSBPF_STATS;
	cls_bpf.exts = &prog->exts;
	cls_bpf.prog = prog->filter;
	cls_bpf.name = prog->bpf_name;
	cls_bpf.exts_integrated = prog->exts_integrated;

	tc_setup_cb_call(block, TC_SETUP_CLSBPF, &cls_bpf, false, true);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	idr_init(&head->handle_idr);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
{
	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
}

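/* Filters are freed in two steps: unlinking happens under RTNL, while the
 * final free is deferred through tcf_queue_work() so that readers still
 * walking the RCU list never see freed memory. The worker retakes RTNL
 * before tearing down the attached extensions/actions.
 */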
static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);
	tcf_exts_put_net(&prog->exts);

	cls_bpf_free_parms(prog);
	kfree(prog);
}

static void cls_bpf_delete_prog_work(struct work_struct *work)
{
	struct cls_bpf_prog *prog = container_of(to_rcu_work(work),
						 struct cls_bpf_prog,
						 rwork);
	rtnl_lock();
	__cls_bpf_delete_prog(prog);
	rtnl_unlock();
}

static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			     struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	idr_remove(&head->handle_idr, prog->handle);
	cls_bpf_stop_offload(tp, prog, extack);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	if (tcf_exts_get_net(&prog->exts))
		tcf_queue_work(&prog->rwork, cls_bpf_delete_prog_work);
	else
		__cls_bpf_delete_prog(prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	__cls_bpf_delete(tp, arg, extack);
	*last = list_empty(&head->plist);
	return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog, extack);

	idr_destroy(&head->handle_idr);
	kfree_rcu(head, rcu);
}

static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle)
			return prog;
	}

	return NULL;
}

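/* Classic BPF case: raw sock_filter opcodes arrive via TCA_BPF_OPS and are
 * handed to bpf_prog_create(), which validates them and transparently
 * converts (and possibly JITs) the result into an internal eBPF program.
 */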
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kmemdup(nla_data(tb[TCA_BPF_OPS]), bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

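/* eBPF case: the program was loaded beforehand via bpf(2) and is referenced
 * by file descriptor; only BPF_PROG_TYPE_SCHED_CLS programs are accepted.
 * For SKIP_SW filters the fd may refer to a device-bound (offloaded)
 * program.
 */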
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 u32 gen_flags, const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	bool skip_sw;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
	skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;

	fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed)
		tcf_block_netif_keep_dst(tp->chain->block);

	return 0;
}

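/* Shared parameter validation for create and replace: exactly one of the
 * classic opcode attributes or an eBPF fd must be present, the TC flags
 * are sanity checked, and an optional default classid is bound.
 */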
static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
			     struct cls_bpf_prog *prog, unsigned long base,
			     struct nlattr **tb, struct nlattr *est, u32 flags,
			     struct netlink_ext_ack *extack)
{
	bool is_bpf, is_ebpf, have_exts = false;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, flags,
				extack);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
			return -EINVAL;

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags))
			return -EINVAL;
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	return 0;
}

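/* Create or replace a filter. Handles are managed through the IDR: a new
 * filter either claims the user-supplied handle or is assigned the next
 * free one. On replace, the new program is spliced into the list via
 * list_replace_rcu() and the old one is freed after a grace period, so
 * classification never observes an intermediate state.
 */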
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  void **arg, u32 flags,
			  struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested_deprecated(tb, TCA_BPF_MAX, tca[TCA_OPTIONS],
					  bpf_policy, NULL);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, net, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0) {
		handle = 1;
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    INT_MAX, GFP_KERNEL);
	} else if (!oldprog) {
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    handle, GFP_KERNEL);
	}

	if (ret)
		goto errout;
	prog->handle = handle;

	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], flags,
				extack);
	if (ret < 0)
		goto errout_idr;

	ret = cls_bpf_offload(tp, prog, oldprog, extack);
	if (ret)
		goto errout_parms;

	if (!tc_in_hw(prog->gen_flags))
		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (oldprog) {
		idr_replace(&head->handle_idr, prog, handle);
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		tcf_exts_get_net(&oldprog->exts);
		tcf_queue_work(&oldprog->rwork, cls_bpf_delete_prog_work);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = prog;
	return 0;

errout_parms:
	cls_bpf_free_parms(prog);
errout_idr:
	if (!oldprog)
		idr_remove(&head->handle_idr, prog->handle);
errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}

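/* Dump a single filter back to user space; hardware counters are synced
 * into the software stats first so that tcf_exts_dump_stats() reports
 * up-to-date numbers.
 */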
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *tm, bool rtnl_held)
{
	struct cls_bpf_prog *prog = fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl,
			       void *q, unsigned long base)
{
	struct cls_bpf_prog *prog = fh;

	tc_cls_bind_class(classid, cl, q, &prog->res, base);
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg,
			 bool rtnl_held)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (!tc_cls_stats_dump(tp, arg, prog))
			break;
	}
}

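/* Replay all filters of this classifier to a single driver callback, used
 * when a callback is (un)registered on the block so that the hardware
 * state can be brought in sync with the software tables.
 */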
static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			     void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *prog;
	int err;

	list_for_each_entry(prog, &head->plist, link) {
		if (tc_skip_hw(prog->gen_flags))
			continue;

		tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags,
					   extack);
		cls_bpf.command = TC_CLSBPF_OFFLOAD;
		cls_bpf.exts = &prog->exts;
		cls_bpf.prog = add ? prog->filter : NULL;
		cls_bpf.oldprog = add ? NULL : prog->filter;
		cls_bpf.name = prog->bpf_name;
		cls_bpf.exts_integrated = prog->exts_integrated;

		err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSBPF,
					    &cls_bpf, cb_priv, &prog->gen_flags,
					    &prog->in_hw_count);
		if (err)
			return err;
	}

	return 0;
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.reoffload	=	cls_bpf_reoffload,
	.dump		=	cls_bpf_dump,
	.bind_class	=	cls_bpf_bind_class,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);