/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>

static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx)
{
	u32 ret;

	preempt_disable();
	rcu_read_lock();
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();
	preempt_enable();

	return ret;
}

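/*
 * Run @prog @repeat times (at least once), rescheduling between iterations
 * when needed.  The time spent inside the program is averaged over all
 * iterations and reported back in nanoseconds via @time.
 */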
static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
{
	u64 time_start, time_spent = 0;
	u32 ret = 0, i;

	if (!repeat)
		repeat = 1;
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		ret = bpf_test_run_one(prog, ctx);
		if (need_resched()) {
			if (signal_pending(current))
				break;
			time_spent += ktime_get_ns() - time_start;
			cond_resched();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	return ret;
}

static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;

	if (data_out && copy_to_user(data_out, data, size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	err = 0;
out:
	return err;
}

static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}
	return data;
}

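/*
 * BPF_PROG_TEST_RUN handler for skb-based program types.  The user buffer
 * is wrapped in a freshly built sk_buff on the loopback device, the program
 * is run @repeat times, and the resulting packet, return value and average
 * duration are copied back to user space.
 */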
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	u32 retval, duration;
	struct sk_buff *skb;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		return -ENOMEM;
	}

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	if (is_l2)
		__skb_push(skb, ETH_HLEN);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	retval = bpf_test_run(prog, skb, repeat, &duration);
	if (!is_l2)
		__skb_push(skb, ETH_HLEN);
	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	kfree_skb(skb);
	return ret;
}

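/*
 * BPF_PROG_TEST_RUN handler for XDP programs.  The user buffer is placed
 * behind XDP_PACKET_HEADROOM in an xdp_buff bound to queue 0 of the
 * loopback device; any change the program makes to the data pointers is
 * reflected in the size of the data copied back to user space.
 */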
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	xdp.data_hard_start = data;
	xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
	xdp.data_meta = xdp.data;
	xdp.data_end = xdp.data + size;

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp.rxq = &rxqueue->xdp_rxq;

	retval = bpf_test_run(prog, &xdp, repeat, &duration);
	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
	kfree(data);
	return ret;
}
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/error-injection.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

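/*
 * Run @prog @repeat times (at least once) under rcu_read_lock() with
 * migration disabled, yielding the CPU when rescheduling is needed.
 * Per-cgroup storage is allocated for the duration of the run, the
 * program's last return value is stored in @retval, and the average
 * run time in nanoseconds is stored in @time.  @xdp selects the XDP
 * run helper instead of the generic BPF_PROG_RUN().
 */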
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
	enum bpf_cgroup_storage_type stype;
	u64 time_start, time_spent = 0;
	int ret = 0;
	u32 i;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	rcu_read_lock();
	migrate_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		bpf_cgroup_storage_set(storage);

		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = BPF_PROG_RUN(prog, ctx);

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			migrate_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			migrate_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	migrate_enable();
	rcu_read_unlock();

	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return ret;
}

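/*
 * Copy the output data, return value and duration back to user space.
 * The full data size is always reported in data_size_out, but the copy
 * itself is clamped to the user's buffer; -ENOSPC is returned when the
 * output was truncated.
 */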
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp the copy if the user has provided a size hint, but copy the
	 * full buffer if not, to retain the old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out && copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

/* Integer types of various sizes and pointer combinations cover a variety
 * of architecture-dependent calling conventions. 7+ arguments can be
 * supported in the future.
 */
__diag_push();
__diag_ignore(GCC, 8, "-Wmissing-prototypes",
	      "Global functions as their definitions will be in vmlinux BTF");
int noinline bpf_fentry_test1(int a)
{
	return a + 1;
}

int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}

struct bpf_fentry_test_t {
	struct bpf_fentry_test_t *a;
};

int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
{
	return (long)arg;
}

int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
{
	return (long)arg->a;
}

int noinline bpf_modify_return_test(int a, int *b)
{
	*b += 1;
	return a + *b;
}
__diag_pop();

ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);

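/*
 * Allocate a zeroed buffer of @size plus the requested headroom and
 * tailroom and copy in the user-supplied packet.  data_size_in may be
 * smaller than @size, in which case the remainder stays zeroed.
 */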
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	u32 user_size = kattr->test.data_size_in;
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	if (user_size > size)
		return ERR_PTR(-EMSGSIZE);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, user_size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}

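/*
 * BPF_PROG_TEST_RUN handler for fentry/fexit and fmod_ret programs.  The
 * kernel calls the bpf_fentry_test*() / bpf_modify_return_test() targets
 * with known arguments and checks the expected results.  For fmod_ret the
 * reported retval packs the target's return value in the low 16 bits and,
 * in the upper 16 bits, whether the target's side effect was observed.
 */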
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	struct bpf_fentry_test_t arg = {};
	u16 side_effect = 0, ret = 0;
	int b = 2, err = -EFAULT;
	u32 retval = 0;

	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (bpf_fentry_test1(1) != 2 ||
		    bpf_fentry_test2(2, 3) != 5 ||
		    bpf_fentry_test3(4, 5, 6) != 15 ||
		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
		    bpf_fentry_test8(&arg) != 0)
			goto out;
		break;
	case BPF_MODIFY_RETURN:
		ret = bpf_modify_return_test(1, &b);
		if (b != 2)
			side_effect = 1;
		break;
	default:
		goto out;
	}

	retval = ((u32)side_effect << 16) | ret;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;

	err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

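/*
 * Copy the optional user-supplied context (ctx_in) into a zeroed kernel
 * buffer of @max_size bytes.  Returns NULL when no context was given, an
 * ERR_PTR on failure, and rejects non-zero bytes beyond the kernel's
 * notion of the context size.
 */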
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(data_in, max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}

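/*
 * Copy the context back to ctx_out.  As with bpf_test_finish(), the copy
 * is clamped to the user's ctx_size_out and -ENOSPC signals truncation.
 */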
static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in buf in the range [from, to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}

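/*
 * Apply the user-visible __sk_buff context to the real sk_buff before the
 * run.  Only a whitelisted set of fields (mark, priority, ifindex, cb,
 * tstamp, wire_len, gso_segs, gso_size) may be non-zero; anything else in
 * the struct must be zero or the run is rejected with -EINVAL.
 */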
static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, priority),
			   offsetof(struct __sk_buff, ifindex)))
		return -EINVAL;

	/* ifindex is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   offsetof(struct __sk_buff, gso_size)))
		return -EINVAL;

	/* gso_size is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		cb->pkt_len = skb->len;
	} else {
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
	skb_shinfo(skb)->gso_size = __skb->gso_size;

	return 0;
}

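/*
 * Reflect the state of the sk_buff back into the user-visible __sk_buff
 * context after the run so the caller can observe what the program changed.
 */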
static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->ifindex = skb->dev->ifindex;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
}

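/*
 * BPF_PROG_TEST_RUN handler for skb-based program types.  The packet is
 * wrapped in an sk_buff owned by a dummy socket in the caller's netns,
 * optionally bound to the device selected by ctx->ifindex (loopback by
 * default), run @repeat times, and the resulting packet, __sk_buff
 * context, return value and average duration are copied back.
 */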
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = net->loopback_dev;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		fallthrough;
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = kzalloc(sizeof(struct sock), GFP_USER);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_net_set(sk, net);
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	if (ctx && ctx->ifindex > 1) {
		dev = dev_get_by_index(net, ctx->ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto out;
		}
	}
	skb->protocol = eth_type_trans(skb, dev);
	skb_reset_network_header(skb);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		sk->sk_family = AF_INET;
		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
			sk->sk_daddr = ip_hdr(skb)->daddr;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		sk->sk_family = AF_INET6;
		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
		}
		break;
#endif
	default:
		break;
	}

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	if (dev && dev != net->loopback_dev)
		dev_put(dev);
	kfree_skb(skb);
	bpf_sk_storage_free(sk);
	kfree(sk);
	kfree(ctx);
	return ret;
}

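/*
 * BPF_PROG_TEST_RUN handler for XDP programs.  The packet is laid out in a
 * page-sized frame (XDP_PACKET_HEADROOM plus skb_shared_info tailroom)
 * bound to queue 0 of the loopback device; bpf_prog_change_xdp() brackets
 * the run, and the data copied back reflects any head or tail adjustments
 * the program made.
 */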
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	u32 headroom = XDP_PACKET_HEADROOM;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	u32 max_data_sz;
	void *data;
	int ret;

	if (kattr->test.ctx_in || kattr->test.ctx_out)
		return -EINVAL;

	/* XDP has extra tailroom as (most) drivers use a full page */
	max_data_sz = 4096 - headroom - tailroom;

	data = bpf_test_init(kattr, max_data_sz, headroom, tailroom);
	if (IS_ERR(data))
		return PTR_ERR(data);

	xdp.data_hard_start = data;
	xdp.data = data + headroom;
	xdp.data_meta = xdp.data;
	xdp.data_end = xdp.data + size;
	xdp.frame_sz = headroom + max_data_sz + tailroom;

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp.rxq = &rxqueue->xdp_rxq;
	bpf_prog_change_xdp(NULL, prog);
	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	if (ret)
		goto out;
	if (xdp.data != data + headroom || xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
out:
	bpf_prog_change_xdp(prog, NULL);
	kfree(data);
	return ret;
}

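/*
 * A user-supplied bpf_flow_keys context may only set the flags field;
 * every other byte of the struct must be zero.
 */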
static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}

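/*
 * BPF_PROG_TEST_RUN handler for flow-dissector programs.  The packet is
 * dissected in place via bpf_flow_dissect() @repeat times (the timing loop
 * is open-coded here rather than using bpf_test_run()), and the resulting
 * bpf_flow_keys, return value and average duration are copied back.
 */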
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	u64 time_start, time_spent = 0;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;
	u32 i;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	rcu_read_lock();
	preempt_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);

		if (signal_pending(current)) {
			preempt_enable();
			rcu_read_unlock();

			ret = -EINTR;
			goto out;
		}

		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			preempt_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			preempt_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	preempt_enable();
	rcu_read_unlock();

	do_div(time_spent, repeat);
	duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}
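
/*
 * Illustrative sketch (not part of this file): how user space might drive
 * the handlers above through the bpf(2) syscall.  Field names follow the
 * union bpf_attr test fields referenced throughout this file; prog_fd is
 * assumed to be a file descriptor for an already-loaded program.
 *
 *	union bpf_attr attr = {};
 *	__u8 in[64] = { ... packet bytes ... }, out[256];
 *
 *	attr.test.prog_fd	= prog_fd;
 *	attr.test.data_in	= (__u64)(unsigned long)in;
 *	attr.test.data_size_in	= sizeof(in);
 *	attr.test.data_out	= (__u64)(unsigned long)out;
 *	attr.test.data_size_out	= sizeof(out);
 *	attr.test.repeat	= 1000;
 *
 *	err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
 *	// on success: attr.test.retval holds the program's return code,
 *	// attr.test.duration the average nanoseconds per run, and
 *	// attr.test.data_size_out the size of the (possibly modified) packet.
 */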