// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/rcupdate_trace.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <linux/error-injection.h>
#include <linux/smp.h>
#include <linux/sock_diag.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

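/* Timer helpers shared by the test runners below: bpf_test_timer_enter()
 * pins the measurement (no preemption or no migration, per @mode) under
 * RCU, and bpf_test_timer_continue() accumulates per-iteration runtime
 * while honouring pending signals and reschedule requests.
 */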
struct bpf_test_timer {
	enum { NO_PREEMPT, NO_MIGRATE } mode;
	u32 i;
	u64 time_start, time_spent;
};

static void bpf_test_timer_enter(struct bpf_test_timer *t)
	__acquires(rcu)
{
	rcu_read_lock();
	if (t->mode == NO_PREEMPT)
		preempt_disable();
	else
		migrate_disable();

	t->time_start = ktime_get_ns();
}

static void bpf_test_timer_leave(struct bpf_test_timer *t)
	__releases(rcu)
{
	t->time_start = 0;

	if (t->mode == NO_PREEMPT)
		preempt_enable();
	else
		migrate_enable();
	rcu_read_unlock();
}

static bool bpf_test_timer_continue(struct bpf_test_timer *t, u32 repeat, int *err, u32 *duration)
	__must_hold(rcu)
{
	t->i++;
	if (t->i >= repeat) {
		/* We're done. */
		t->time_spent += ktime_get_ns() - t->time_start;
		do_div(t->time_spent, t->i);
		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
		*err = 0;
		goto reset;
	}

	if (signal_pending(current)) {
		/* During iteration: we've been cancelled, abort. */
		*err = -EINTR;
		goto reset;
	}

	if (need_resched()) {
		/* During iteration: we need to reschedule between runs. */
		t->time_spent += ktime_get_ns() - t->time_start;
		bpf_test_timer_leave(t);
		cond_resched();
		bpf_test_timer_enter(t);
	}

	/* Do another round. */
	return true;

reset:
	t->i = 0;
	return false;
}

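/* Run @prog @repeat times over @ctx with per-run cgroup storage set up,
 * reporting the average run time in @time and the last run's return value
 * in @retval. The @xdp flag selects the XDP dispatch path.
 */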
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
	struct bpf_test_timer t = { NO_MIGRATE };
	enum bpf_cgroup_storage_type stype;
	int ret;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	bpf_test_timer_enter(&t);
	do {
		ret = bpf_cgroup_storage_set(storage);
		if (ret)
			break;

		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = BPF_PROG_RUN(prog, ctx);

		bpf_cgroup_storage_unset();
	} while (bpf_test_timer_continue(&t, repeat, &ret, time));
	bpf_test_timer_leave(&t);

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return ret;
}

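/* Copy the output packet data, its size, the program's return value and
 * the measured duration back to the user's bpf_attr. Returns -ENOSPC if
 * the user-provided output buffer was too small for the full data.
 */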
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out && copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

/* Integer types of various sizes and pointer combinations cover a variety
 * of architecture-dependent calling conventions. Tests with seven or more
 * arguments can be supported in the future.
 */
__diag_push();
__diag_ignore(GCC, 8, "-Wmissing-prototypes",
	      "Global functions as their definitions will be in vmlinux BTF");
int noinline bpf_fentry_test1(int a)
{
	return a + 1;
}

int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}

struct bpf_fentry_test_t {
	struct bpf_fentry_test_t *a;
};

int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
{
	return (long)arg;
}

int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
{
	return (long)arg->a;
}

int noinline bpf_modify_return_test(int a, int *b)
{
	*b += 1;
	return a + *b;
}

u64 noinline bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}

int noinline bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

struct sock * noinline bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

__diag_pop();

ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);

BTF_SET_START(test_sk_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test1)
BTF_ID(func, bpf_kfunc_call_test2)
BTF_ID(func, bpf_kfunc_call_test3)
BTF_SET_END(test_sk_kfunc_ids)

bool bpf_prog_test_check_kfunc_call(u32 kfunc_id)
{
	return btf_id_set_contains(&test_sk_kfunc_ids, kfunc_id);
}

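/* Allocate a kernel buffer of @size plus @headroom and @tailroom and fill
 * it from the user-supplied test.data_in. Fails if the requested size is
 * shorter than an Ethernet header or would not fit in a single page.
 */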
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	u32 user_size = kattr->test.data_size_in;
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	if (user_size > size)
		return ERR_PTR(-EMSGSIZE);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, user_size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}

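/* Test runner for fentry/fexit and fmod_ret programs: the kernel calls the
 * bpf_fentry_test*()/bpf_modify_return_test() stubs above and checks their
 * results, so any attached program's effect becomes observable. The
 * returned value packs the fmod_ret side effect into the upper 16 bits and
 * the stub's return value into the lower 16 bits.
 */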
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	struct bpf_fentry_test_t arg = {};
	u16 side_effect = 0, ret = 0;
	int b = 2, err = -EFAULT;
	u32 retval = 0;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (bpf_fentry_test1(1) != 2 ||
		    bpf_fentry_test2(2, 3) != 5 ||
		    bpf_fentry_test3(4, 5, 6) != 15 ||
		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
		    bpf_fentry_test8(&arg) != 0)
			goto out;
		break;
	case BPF_MODIFY_RETURN:
		ret = bpf_modify_return_test(1, &b);
		if (b != 2)
			side_effect = 1;
		break;
	default:
		goto out;
	}

	retval = ((u32)side_effect << 16) | ret;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;

	err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

struct bpf_raw_tp_test_run_info {
	struct bpf_prog *prog;
	void *ctx;
	u32 retval;
};

static void
__bpf_prog_test_run_raw_tp(void *data)
{
	struct bpf_raw_tp_test_run_info *info = data;

	rcu_read_lock();
	info->retval = BPF_PROG_RUN(info->prog, info->ctx);
	rcu_read_unlock();
}

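/* Test runner for raw tracepoint programs. With BPF_F_TEST_RUN_ON_CPU set,
 * the single run is dispatched to the requested CPU via
 * smp_call_function_single(); otherwise it runs on the current CPU.
 */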
int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	struct bpf_raw_tp_test_run_info info;
	int cpu = kattr->test.cpu, err = 0;
	int current_cpu;

	/* doesn't support data_in/out, ctx_out, duration, or repeat */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
		return -EINVAL;

	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
		return -EINVAL;

	if (ctx_size_in) {
		info.ctx = kzalloc(ctx_size_in, GFP_USER);
		if (!info.ctx)
			return -ENOMEM;
		if (copy_from_user(info.ctx, ctx_in, ctx_size_in)) {
			err = -EFAULT;
			goto out;
		}
	} else {
		info.ctx = NULL;
	}

	info.prog = prog;

	current_cpu = get_cpu();
	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
	    cpu == current_cpu) {
		__bpf_prog_test_run_raw_tp(&info);
	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		/* smp_call_function_single() also checks cpu_online()
		 * after csd_lock(). However, since cpu is from user
		 * space, let's do an extra quick check to filter out
		 * invalid value before smp_call_function_single().
		 */
		err = -ENXIO;
	} else {
		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
					       &info, 1);
	}
	put_cpu();

	if (!err &&
	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
		err = -EFAULT;

out:
	kfree(info.ctx);
	return err;
}

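/* Allocate a zeroed context buffer of @max_size and fill it from the
 * user-supplied test.ctx_in, verifying that any user bytes beyond
 * @max_size are zero. Returns NULL if no context was requested at all.
 */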
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}

static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in the buf in the range [from, to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}

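/* Copy the user-visible fields of a struct __sk_buff context onto a real
 * sk_buff, rejecting any field the test runner does not support: every
 * range between supported fields must be zero.
 */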
static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, priority),
			   offsetof(struct __sk_buff, ifindex)))
		return -EINVAL;

	/* ifindex is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   offsetof(struct __sk_buff, gso_size)))
		return -EINVAL;

	/* gso_size is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		cb->pkt_len = skb->len;
	} else {
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
	skb_shinfo(skb)->gso_size = __skb->gso_size;

	return 0;
}

static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->ifindex = skb->dev->ifindex;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
}

static struct proto bpf_dummy_proto = {
	.name = "bpf_dummy",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct sock),
};

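/* Test runner for skb-based program types (TC, LWT, ...): builds a real
 * sk_buff around the user-supplied packet data, attaches a dummy socket,
 * applies the optional __sk_buff context, runs the program, and copies
 * the resulting packet and context back to user space.
 */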
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = net->loopback_dev;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		fallthrough;
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		sk_free(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	if (ctx && ctx->ifindex > 1) {
		dev = dev_get_by_index(net, ctx->ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto out;
		}
	}
	skb->protocol = eth_type_trans(skb, dev);
	skb_reset_network_header(skb);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		sk->sk_family = AF_INET;
		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
			sk->sk_daddr = ip_hdr(skb)->daddr;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		sk->sk_family = AF_INET6;
		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
		}
		break;
#endif
	default:
		break;
	}

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	if (dev && dev != net->loopback_dev)
		dev_put(dev);
	kfree_skb(skb);
	sk_free(sk);
	kfree(ctx);
	return ret;
}

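/* Test runner for XDP programs: lays the user packet out in an xdp_buff
 * backed by the loopback device's rx queue 0, runs the program through
 * bpf_test_run() with XDP dispatch, and reports the (possibly adjusted)
 * packet back to user space.
 */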
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	u32 headroom = XDP_PACKET_HEADROOM;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	u32 max_data_sz;
	void *data;
	int ret;

	if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
	    prog->expected_attach_type == BPF_XDP_CPUMAP)
		return -EINVAL;
	if (kattr->test.ctx_in || kattr->test.ctx_out)
		return -EINVAL;

	/* XDP has extra tailroom as (most) drivers use a full page */
	max_data_sz = 4096 - headroom - tailroom;

	data = bpf_test_init(kattr, max_data_sz, headroom, tailroom);
	if (IS_ERR(data))
		return PTR_ERR(data);

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp_init_buff(&xdp, headroom + max_data_sz + tailroom,
		      &rxqueue->xdp_rxq);
	xdp_prepare_buff(&xdp, data, headroom, size, true);

	bpf_prog_change_xdp(NULL, prog);
	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	if (ret)
		goto out;
	if (xdp.data != data + headroom || xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
out:
	bpf_prog_change_xdp(prog, NULL);
	kfree(data);
	return ret;
}

static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}

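/* Test runner for flow dissector programs: dissects the user packet with
 * bpf_flow_dissect() in a timed loop and returns the resulting
 * bpf_flow_keys as the output data.
 */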
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	bpf_test_timer_enter(&t);
	do {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);
	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}

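/* Test runner for sk_lookup programs: builds a bpf_sk_lookup_kern context
 * from the user-supplied bpf_sk_lookup, runs the program through a
 * one-entry prog array, and reports the selected socket's cookie back in
 * the output context.
 */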
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
				union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	struct bpf_prog_array *progs = NULL;
	struct bpf_sk_lookup_kern ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_sk_lookup *user_ctx;
	u32 retval, duration;
	int ret = -EINVAL;

	if (prog->type != BPF_PROG_TYPE_SK_LOOKUP)
		return -EINVAL;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
	    kattr->test.data_size_out)
		return -EINVAL;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
	if (IS_ERR(user_ctx))
		return PTR_ERR(user_ctx);

	if (!user_ctx)
		return -EINVAL;

	if (user_ctx->sk)
		goto out;

	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
		goto out;

	if (user_ctx->local_port > U16_MAX || user_ctx->remote_port > U16_MAX) {
		ret = -ERANGE;
		goto out;
	}

	ctx.family = (u16)user_ctx->family;
	ctx.protocol = (u16)user_ctx->protocol;
	ctx.dport = (u16)user_ctx->local_port;
	ctx.sport = (__force __be16)user_ctx->remote_port;

	switch (ctx.family) {
	case AF_INET:
		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
		break;
#endif

	default:
		ret = -EAFNOSUPPORT;
		goto out;
	}

	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
	if (!progs) {
		ret = -ENOMEM;
		goto out;
	}

	progs->items[0].prog = prog;

	bpf_test_timer_enter(&t);
	do {
		ctx.selected_sk = NULL;
		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, BPF_PROG_RUN);
	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	user_ctx->cookie = 0;
	if (ctx.selected_sk) {
		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
	}

	ret = bpf_test_finish(kattr, uattr, NULL, 0, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));

out:
	bpf_prog_array_free(progs);
	kfree(user_ctx);
	return ret;
}

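/* Test runner for BPF_PROG_TYPE_SYSCALL programs: runs the program once
 * over the user-supplied context under rcu_read_lock_trace() and copies
 * the possibly modified context back to user space.
 */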
int bpf_prog_test_run_syscall(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	void *ctx = NULL;
	u32 retval;
	int err = 0;

	/* doesn't support data_in/out, ctx_out, duration, repeat or flags */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat || kattr->test.flags)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > U16_MAX)
		return -EINVAL;

	if (ctx_size_in) {
		ctx = kzalloc(ctx_size_in, GFP_USER);
		if (!ctx)
			return -ENOMEM;
		if (copy_from_user(ctx, ctx_in, ctx_size_in)) {
			err = -EFAULT;
			goto out;
		}
	}

	rcu_read_lock_trace();
	retval = bpf_prog_run_pin_on_cpu(prog, ctx);
	rcu_read_unlock_trace();

	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
		err = -EFAULT;
		goto out;
	}
	if (ctx_size_in)
		if (copy_to_user(ctx_in, ctx, ctx_size_in))
			err = -EFAULT;
out:
	kfree(ctx);
	return err;
}