// SPDX-License-Identifier: GPL-2.0-only
/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 */
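
/*
 * Example usage (illustrative): with an eBPF classifier compiled into
 * prog.o, a filter can be attached via tc, e.g.:
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress bpf da obj prog.o sec cls
 *
 * "da" (direct-action) corresponds to TCA_BPF_FLAG_ACT_DIRECT below and
 * lets the program return TC_ACT_* verdicts directly.
 */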

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/idr.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/tc_wrapper.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN        256
#define CLS_BPF_SUPPORTED_GEN_FLAGS \
        (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

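/*
 * Per-classifier state: the RCU-protected list of attached programs and
 * an IDR tracking the handles allocated to them.
 */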
struct cls_bpf_head {
        struct list_head plist;
        struct idr handle_idr;
        struct rcu_head rcu;
};

struct cls_bpf_prog {
        struct bpf_prog *filter;
        struct list_head link;
        struct tcf_result res;
        bool exts_integrated;
        u32 gen_flags;
        unsigned int in_hw_count;
        struct tcf_exts exts;
        u32 handle;
        u16 bpf_num_ops;
        struct sock_filter *bpf_ops;
        const char *bpf_name;
        struct tcf_proto *tp;
        struct rcu_work rwork;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
        [TCA_BPF_CLASSID]       = { .type = NLA_U32 },
        [TCA_BPF_FLAGS]         = { .type = NLA_U32 },
        [TCA_BPF_FLAGS_GEN]     = { .type = NLA_U32 },
        [TCA_BPF_FD]            = { .type = NLA_U32 },
        [TCA_BPF_NAME]          = { .type = NLA_NUL_STRING,
                                    .len = CLS_BPF_NAME_LEN },
        [TCA_BPF_OPS_LEN]       = { .type = NLA_U16 },
        [TCA_BPF_OPS]           = { .type = NLA_BINARY,
                                    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

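/*
 * Sanitize the verdict of a direct-action program: any value other than
 * a known TC_ACT_* code is mapped to TC_ACT_UNSPEC, which makes the
 * classify loop below fall through to the next program on the list.
 */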
static int cls_bpf_exec_opcode(int code)
{
        switch (code) {
        case TC_ACT_OK:
        case TC_ACT_SHOT:
        case TC_ACT_STOLEN:
        case TC_ACT_TRAP:
        case TC_ACT_REDIRECT:
        case TC_ACT_UNSPEC:
                return code;
        default:
                return TC_ACT_UNSPEC;
        }
}

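/*
 * Main classification path. Returns -1 if no program matched, otherwise
 * the verdict of the first matching program. At ingress the MAC header
 * is pushed before the program runs, so programs see the packet from
 * the link-layer header onwards, matching what they see at egress.
 */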
TC_INDIRECT_SCOPE int cls_bpf_classify(struct sk_buff *skb,
                                       const struct tcf_proto *tp,
                                       struct tcf_result *res)
{
        struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
        bool at_ingress = skb_at_tc_ingress(skb);
        struct cls_bpf_prog *prog;
        int ret = -1;

        list_for_each_entry_rcu(prog, &head->plist, link) {
                int filter_res;

                qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

                if (tc_skip_sw(prog->gen_flags)) {
                        filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
                } else if (at_ingress) {
                        /* It is safe to push/pull even if skb_shared() */
                        __skb_push(skb, skb->mac_len);
                        bpf_compute_data_pointers(skb);
                        filter_res = bpf_prog_run(prog->filter, skb);
                        __skb_pull(skb, skb->mac_len);
                } else {
                        bpf_compute_data_pointers(skb);
                        filter_res = bpf_prog_run(prog->filter, skb);
                }
                if (unlikely(!skb->tstamp && skb->tstamp_type))
                        skb->tstamp_type = SKB_CLOCK_REALTIME;

                if (prog->exts_integrated) {
                        res->class   = 0;
                        res->classid = TC_H_MAJ(prog->res.classid) |
                                       qdisc_skb_cb(skb)->tc_classid;

                        ret = cls_bpf_exec_opcode(filter_res);
                        if (ret == TC_ACT_UNSPEC)
                                continue;
                        break;
                }

                if (filter_res == 0)
                        continue;
                if (filter_res != -1) {
                        res->class   = 0;
                        res->classid = filter_res;
                } else {
                        *res = prog->res;
                }

                ret = tcf_exts_exec(skb, &prog->exts, res);
                if (ret < 0)
                        continue;

                break;
        }

        return ret;
}

static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
        return !prog->bpf_ops;
}

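/*
 * Issue a hardware offload request for this filter: an add if only
 * @prog is set, a replace if both @prog and @oldprog are set, and a
 * destroy if only @oldprog is set. A failed add/replace is rolled back
 * by issuing the inverse command.
 */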
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
                               struct cls_bpf_prog *oldprog,
                               struct netlink_ext_ack *extack)
{
        struct tcf_block *block = tp->chain->block;
        struct tc_cls_bpf_offload cls_bpf = {};
        struct cls_bpf_prog *obj;
        bool skip_sw;
        int err;

        skip_sw = prog && tc_skip_sw(prog->gen_flags);
        obj = prog ?: oldprog;

        tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags, extack);
        cls_bpf.command = TC_CLSBPF_OFFLOAD;
        cls_bpf.exts = &obj->exts;
        cls_bpf.prog = prog ? prog->filter : NULL;
        cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
        cls_bpf.name = obj->bpf_name;
        cls_bpf.exts_integrated = obj->exts_integrated;

        if (oldprog && prog)
                err = tc_setup_cb_replace(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
                                          skip_sw, &oldprog->gen_flags,
                                          &oldprog->in_hw_count,
                                          &prog->gen_flags, &prog->in_hw_count,
                                          true);
        else if (prog)
                err = tc_setup_cb_add(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
                                      skip_sw, &prog->gen_flags,
                                      &prog->in_hw_count, true);
        else
                err = tc_setup_cb_destroy(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
                                          skip_sw, &oldprog->gen_flags,
                                          &oldprog->in_hw_count, true);

        if (prog && err) {
                cls_bpf_offload_cmd(tp, oldprog, prog, extack);
                return err;
        }

        if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
                return -EINVAL;

        return 0;
}

static u32 cls_bpf_flags(u32 flags)
{
        return flags & CLS_BPF_SUPPORTED_GEN_FLAGS;
}

static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
                           struct cls_bpf_prog *oldprog,
                           struct netlink_ext_ack *extack)
{
        if (prog && oldprog &&
            cls_bpf_flags(prog->gen_flags) !=
            cls_bpf_flags(oldprog->gen_flags))
                return -EINVAL;

        if (prog && tc_skip_hw(prog->gen_flags))
                prog = NULL;
        if (oldprog && tc_skip_hw(oldprog->gen_flags))
                oldprog = NULL;
        if (!prog && !oldprog)
                return 0;

        return cls_bpf_offload_cmd(tp, prog, oldprog, extack);
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
                                 struct cls_bpf_prog *prog,
                                 struct netlink_ext_ack *extack)
{
        int err;

        err = cls_bpf_offload_cmd(tp, NULL, prog, extack);
        if (err)
                pr_err("Stopping hardware offload failed: %d\n", err);
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
                                         struct cls_bpf_prog *prog)
{
        struct tcf_block *block = tp->chain->block;
        struct tc_cls_bpf_offload cls_bpf = {};

        tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL);
        cls_bpf.command = TC_CLSBPF_STATS;
        cls_bpf.exts = &prog->exts;
        cls_bpf.prog = prog->filter;
        cls_bpf.name = prog->bpf_name;
        cls_bpf.exts_integrated = prog->exts_integrated;

        tc_setup_cb_call(block, TC_SETUP_CLSBPF, &cls_bpf, false, true);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
        struct cls_bpf_head *head;

        head = kzalloc(sizeof(*head), GFP_KERNEL);
        if (head == NULL)
                return -ENOBUFS;

        INIT_LIST_HEAD_RCU(&head->plist);
        idr_init(&head->handle_idr);
        rcu_assign_pointer(tp->root, head);

        return 0;
}

static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
{
        if (cls_bpf_is_ebpf(prog))
                bpf_prog_put(prog->filter);
        else
                bpf_prog_destroy(prog->filter);

        kfree(prog->bpf_name);
        kfree(prog->bpf_ops);
}

static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
        tcf_exts_destroy(&prog->exts);
        tcf_exts_put_net(&prog->exts);

        cls_bpf_free_parms(prog);
        kfree(prog);
}

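/*
 * Filter teardown may be deferred past an RCU grace period, but
 * destroying the actions requires the RTNL lock, so the actual freeing
 * runs from a workqueue that can sleep and take rtnl here.
 */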
static void cls_bpf_delete_prog_work(struct work_struct *work)
{
        struct cls_bpf_prog *prog = container_of(to_rcu_work(work),
                                                 struct cls_bpf_prog,
                                                 rwork);
        rtnl_lock();
        __cls_bpf_delete_prog(prog);
        rtnl_unlock();
}

static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog,
                             struct netlink_ext_ack *extack)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);

        idr_remove(&head->handle_idr, prog->handle);
        cls_bpf_stop_offload(tp, prog, extack);
        list_del_rcu(&prog->link);
        tcf_unbind_filter(tp, &prog->res);
        if (tcf_exts_get_net(&prog->exts))
                tcf_queue_work(&prog->rwork, cls_bpf_delete_prog_work);
        else
                __cls_bpf_delete_prog(prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last,
                          bool rtnl_held, struct netlink_ext_ack *extack)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);

        __cls_bpf_delete(tp, arg, extack);
        *last = list_empty(&head->plist);
        return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp, bool rtnl_held,
                            struct netlink_ext_ack *extack)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog, *tmp;

        list_for_each_entry_safe(prog, tmp, &head->plist, link)
                __cls_bpf_delete(tp, prog, extack);

        idr_destroy(&head->handle_idr);
        kfree_rcu(head, rcu);
}

static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog;

        list_for_each_entry(prog, &head->plist, link) {
                if (prog->handle == handle)
                        return prog;
        }

        return NULL;
}

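/*
 * Classic BPF path: the program arrives as an array of struct
 * sock_filter instructions (TCA_BPF_OPS) and is converted into an
 * internal BPF program via bpf_prog_create().
 */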
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
        struct sock_filter *bpf_ops;
        struct sock_fprog_kern fprog_tmp;
        struct bpf_prog *fp;
        u16 bpf_size, bpf_num_ops;
        int ret;

        bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
        if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
                return -EINVAL;

        bpf_size = bpf_num_ops * sizeof(*bpf_ops);
        if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
                return -EINVAL;

        bpf_ops = kmemdup(nla_data(tb[TCA_BPF_OPS]), bpf_size, GFP_KERNEL);
        if (bpf_ops == NULL)
                return -ENOMEM;

        fprog_tmp.len = bpf_num_ops;
        fprog_tmp.filter = bpf_ops;

        ret = bpf_prog_create(&fp, &fprog_tmp);
        if (ret < 0) {
                kfree(bpf_ops);
                return ret;
        }

        prog->bpf_ops = bpf_ops;
        prog->bpf_num_ops = bpf_num_ops;
        prog->bpf_name = NULL;
        prog->filter = fp;

        return 0;
}

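/*
 * Extended BPF path: the program arrives as a file descriptor
 * (TCA_BPF_FD) referencing a BPF_PROG_TYPE_SCHED_CLS program that was
 * loaded beforehand through the bpf() syscall.
 */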
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
                                 u32 gen_flags, const struct tcf_proto *tp)
{
        struct bpf_prog *fp;
        char *name = NULL;
        bool skip_sw;
        u32 bpf_fd;

        bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
        skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;

        fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
        if (IS_ERR(fp))
                return PTR_ERR(fp);

        if (tb[TCA_BPF_NAME]) {
                name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
                if (!name) {
                        bpf_prog_put(fp);
                        return -ENOMEM;
                }
        }

        prog->bpf_ops = NULL;
        prog->bpf_name = name;
        prog->filter = fp;

        if (fp->dst_needed)
                tcf_block_netif_keep_dst(tp->chain->block);

        return 0;
}

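/*
 * Create or replace a filter from a netlink request: allocate a handle,
 * validate actions and flags, set up the classic or extended BPF
 * program, and attempt hardware offload before publishing the program
 * on the RCU-protected list.
 */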
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
                          struct tcf_proto *tp, unsigned long base,
                          u32 handle, struct nlattr **tca,
                          void **arg, u32 flags,
                          struct netlink_ext_ack *extack)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        bool is_bpf, is_ebpf, have_exts = false;
        struct cls_bpf_prog *oldprog = *arg;
        struct nlattr *tb[TCA_BPF_MAX + 1];
        bool bound_to_filter = false;
        struct cls_bpf_prog *prog;
        u32 gen_flags = 0;
        int ret;

        if (tca[TCA_OPTIONS] == NULL)
                return -EINVAL;

        ret = nla_parse_nested_deprecated(tb, TCA_BPF_MAX, tca[TCA_OPTIONS],
                                          bpf_policy, NULL);
        if (ret < 0)
                return ret;

        prog = kzalloc(sizeof(*prog), GFP_KERNEL);
        if (!prog)
                return -ENOBUFS;

        ret = tcf_exts_init(&prog->exts, net, TCA_BPF_ACT, TCA_BPF_POLICE);
        if (ret < 0)
                goto errout;

        if (oldprog) {
                if (handle && oldprog->handle != handle) {
                        ret = -EINVAL;
                        goto errout;
                }
        }

        if (handle == 0) {
                handle = 1;
                ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
                                    INT_MAX, GFP_KERNEL);
        } else if (!oldprog) {
                ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
                                    handle, GFP_KERNEL);
        }

        if (ret)
                goto errout;
        prog->handle = handle;

        is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
        is_ebpf = tb[TCA_BPF_FD];
        if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
                ret = -EINVAL;
                goto errout_idr;
        }

        ret = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &prog->exts,
                                flags, extack);
        if (ret < 0)
                goto errout_idr;

        if (tb[TCA_BPF_FLAGS]) {
                u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

                if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) {
                        ret = -EINVAL;
                        goto errout_idr;
                }

                have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
        }
        if (tb[TCA_BPF_FLAGS_GEN]) {
                gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
                if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
                    !tc_flags_valid(gen_flags)) {
                        ret = -EINVAL;
                        goto errout_idr;
                }
        }

        prog->exts_integrated = have_exts;
        prog->gen_flags = gen_flags;

        ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
                       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
        if (ret < 0)
                goto errout_idr;

        if (tb[TCA_BPF_CLASSID]) {
                prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
                tcf_bind_filter(tp, &prog->res, base);
                bound_to_filter = true;
        }

        ret = cls_bpf_offload(tp, prog, oldprog, extack);
        if (ret)
                goto errout_parms;

        if (!tc_in_hw(prog->gen_flags))
                prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

        tcf_proto_update_usesw(tp, prog->gen_flags);

        if (oldprog) {
                idr_replace(&head->handle_idr, prog, handle);
                list_replace_rcu(&oldprog->link, &prog->link);
                tcf_unbind_filter(tp, &oldprog->res);
                tcf_exts_get_net(&oldprog->exts);
                tcf_queue_work(&oldprog->rwork, cls_bpf_delete_prog_work);
        } else {
                list_add_rcu(&prog->link, &head->plist);
        }

        *arg = prog;
        return 0;

errout_parms:
        if (bound_to_filter)
                tcf_unbind_filter(tp, &prog->res);
        cls_bpf_free_parms(prog);
errout_idr:
        if (!oldprog)
                idr_remove(&head->handle_idr, prog->handle);
errout:
        tcf_exts_destroy(&prog->exts);
        kfree(prog);
        return ret;
}

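/* Dump the classic BPF instruction array as TCA_BPF_OPS{,_LEN}. */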
static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
                                 struct sk_buff *skb)
{
        struct nlattr *nla;

        if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
                return -EMSGSIZE;

        nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
                          sizeof(struct sock_filter));
        if (nla == NULL)
                return -EMSGSIZE;

        memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

        return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
                                  struct sk_buff *skb)
{
        struct nlattr *nla;

        if (prog->bpf_name &&
            nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
                return -EMSGSIZE;

        if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
                return -EMSGSIZE;

        nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
        if (nla == NULL)
                return -EMSGSIZE;

        memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

        return 0;
}

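/* Dump one filter, including refreshed offload stats, to user space. */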
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
                        struct sk_buff *skb, struct tcmsg *tm, bool rtnl_held)
{
        struct cls_bpf_prog *prog = fh;
        struct nlattr *nest;
        u32 bpf_flags = 0;
        int ret;

        if (prog == NULL)
                return skb->len;

        tm->tcm_handle = prog->handle;

        cls_bpf_offload_update_stats(tp, prog);

        nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        if (prog->res.classid &&
            nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
                goto nla_put_failure;

        if (cls_bpf_is_ebpf(prog))
                ret = cls_bpf_dump_ebpf_info(prog, skb);
        else
                ret = cls_bpf_dump_bpf_info(prog, skb);
        if (ret)
                goto nla_put_failure;

        if (tcf_exts_dump(skb, &prog->exts) < 0)
                goto nla_put_failure;

        if (prog->exts_integrated)
                bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
        if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
                goto nla_put_failure;
        if (prog->gen_flags &&
            nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
                goto nla_put_failure;

        nla_nest_end(skb, nest);

        if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl,
                               void *q, unsigned long base)
{
        struct cls_bpf_prog *prog = fh;

        tc_cls_bind_class(classid, cl, q, &prog->res, base);
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg,
                         bool rtnl_held)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog;

        list_for_each_entry(prog, &head->plist, link) {
                if (!tc_cls_stats_dump(tp, arg, prog))
                        break;
        }
}

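/*
 * Replay offload (or unoffload) requests for all filters against a
 * newly bound or unbound block callback, keeping hardware state in sync
 * with the software filter list.
 */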
static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
                             void *cb_priv, struct netlink_ext_ack *extack)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct tcf_block *block = tp->chain->block;
        struct tc_cls_bpf_offload cls_bpf = {};
        struct cls_bpf_prog *prog;
        int err;

        list_for_each_entry(prog, &head->plist, link) {
                if (tc_skip_hw(prog->gen_flags))
                        continue;

                tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags,
                                           extack);
                cls_bpf.command = TC_CLSBPF_OFFLOAD;
                cls_bpf.exts = &prog->exts;
                cls_bpf.prog = add ? prog->filter : NULL;
                cls_bpf.oldprog = add ? NULL : prog->filter;
                cls_bpf.name = prog->bpf_name;
                cls_bpf.exts_integrated = prog->exts_integrated;

                err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSBPF,
                                            &cls_bpf, cb_priv, &prog->gen_flags,
                                            &prog->in_hw_count);
                if (err)
                        return err;
        }

        return 0;
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
        .kind           = "bpf",
        .owner          = THIS_MODULE,
        .classify       = cls_bpf_classify,
        .init           = cls_bpf_init,
        .destroy        = cls_bpf_destroy,
        .get            = cls_bpf_get,
        .change         = cls_bpf_change,
        .delete         = cls_bpf_delete,
        .walk           = cls_bpf_walk,
        .reoffload      = cls_bpf_reoffload,
        .dump           = cls_bpf_dump,
        .bind_class     = cls_bpf_bind_class,
};
MODULE_ALIAS_NET_CLS("bpf");

static int __init cls_bpf_init_mod(void)
{
        return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
        unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);