// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/delay.h>
#include <linux/error-injection.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu-defs.h>
#include <linux/sysfs.h>
#include <linux/tracepoint.h>
#include "bpf_testmod.h"
#include "bpf_testmod_kfunc.h"

#define CREATE_TRACE_POINTS
#include "bpf_testmod-events.h"

typedef int (*func_proto_typedef)(long);
typedef int (*func_proto_typedef_nested1)(func_proto_typedef);
typedef int (*func_proto_typedef_nested2)(func_proto_typedef_nested1);

DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
long bpf_testmod_test_struct_arg_result;

struct bpf_testmod_struct_arg_1 {
	int a;
};
struct bpf_testmod_struct_arg_2 {
	long a;
	long b;
};

struct bpf_testmod_struct_arg_3 {
	int a;
	int b[];
};

struct bpf_testmod_struct_arg_4 {
	u64 a;
	int b;
};

__bpf_hook_start();
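
/* Functions between __bpf_hook_start() and __bpf_hook_end() are attach
 * targets for BPF selftests; most of them are driven from
 * bpf_testmod_test_read() below.
 */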

noinline int
bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) {
	bpf_testmod_test_struct_arg_result = a.a + a.b + b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_2(int a, struct bpf_testmod_struct_arg_2 b, int c) {
	bpf_testmod_test_struct_arg_result = a + b.a + b.b + c;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_3(int a, int b, struct bpf_testmod_struct_arg_2 c) {
	bpf_testmod_test_struct_arg_result = a + b + c.a + c.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_4(struct bpf_testmod_struct_arg_1 a, int b,
			      int c, int d, struct bpf_testmod_struct_arg_2 e) {
	bpf_testmod_test_struct_arg_result = a.a + b + c + d + e.a + e.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_5(void) {
	bpf_testmod_test_struct_arg_result = 1;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_6(struct bpf_testmod_struct_arg_3 *a) {
	bpf_testmod_test_struct_arg_result = a->b[0];
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_7(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
		(long)e + f.a + f.b;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e,
			      struct bpf_testmod_struct_arg_4 f, int g)
{
	bpf_testmod_test_struct_arg_result = a + (long)b + c + d +
		(long)e + f.a + f.b + g;
	return bpf_testmod_test_struct_arg_result;
}

noinline int
bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) {
	bpf_testmod_test_struct_arg_result = a->a;
	return bpf_testmod_test_struct_arg_result;
}

__bpf_kfunc void
bpf_testmod_test_mod_kfunc(int i)
{
	*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
}

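/* Open-coded iterator kfuncs: a BPF program creates the iterator with
 * bpf_iter_testmod_seq_new(), pulls values with _next() until it returns
 * NULL, and must always call _destroy() when done.
 */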
__bpf_kfunc int bpf_iter_testmod_seq_new(struct bpf_iter_testmod_seq *it, s64 value, int cnt)
{
	if (cnt < 0) {
		it->cnt = 0;
		return -EINVAL;
	}

	it->value = value;
	it->cnt = cnt;

	return 0;
}

__bpf_kfunc s64 *bpf_iter_testmod_seq_next(struct bpf_iter_testmod_seq *it)
{
	if (it->cnt <= 0)
		return NULL;

	it->cnt--;

	return &it->value;
}

__bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
{
	it->cnt = 0;
}

__bpf_kfunc void bpf_kfunc_common_test(void)
{
}

struct bpf_testmod_btf_type_tag_1 {
	int a;
};

struct bpf_testmod_btf_type_tag_2 {
	struct bpf_testmod_btf_type_tag_1 __user *p;
};

struct bpf_testmod_btf_type_tag_3 {
	struct bpf_testmod_btf_type_tag_1 __percpu *p;
};

noinline int
bpf_testmod_test_btf_type_tag_user_1(struct bpf_testmod_btf_type_tag_1 __user *arg) {
	BTF_TYPE_EMIT(func_proto_typedef);
	BTF_TYPE_EMIT(func_proto_typedef_nested1);
	BTF_TYPE_EMIT(func_proto_typedef_nested2);
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_user_2(struct bpf_testmod_btf_type_tag_2 *arg) {
	return arg->p->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_1(struct bpf_testmod_btf_type_tag_1 __percpu *arg) {
	return arg->a;
}

noinline int
bpf_testmod_test_btf_type_tag_percpu_2(struct bpf_testmod_btf_type_tag_3 *arg) {
	return arg->p->a;
}

noinline int bpf_testmod_loop_test(int n)
{
	/* Make sum volatile, so that smart compilers such as clang will not
	 * optimize away the loop.
	 */
	volatile int sum = 0;
	int i;

	/* The primary goal of this test is to exercise LBR. Create a lot of
	 * branches in the function, so they can be caught easily.
	 */
	for (i = 0; i < n; i++)
		sum += i;
	return sum;
}

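/* Return a mix of valid and invalid pointers so that tracing programs
 * dereferencing the result exercise the probe-read error paths, including
 * the exception table fixup.
 */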
__weak noinline struct file *bpf_testmod_return_ptr(int arg)
{
	static struct file f = {};

	switch (arg) {
	case 1: return (void *)EINVAL;		/* user addr */
	case 2: return (void *)0xcafe4a11;	/* user addr */
	case 3: return (void *)-EINVAL;		/* canonical, but invalid */
	case 4: return (void *)(1ull << 60);	/* non-canonical and invalid */
	case 5: return (void *)~(1ull << 30);	/* trigger extable */
	case 6: return &f;			/* valid addr */
	case 7: return (void *)((long)&f | 1);	/* kernel tricks */
#ifdef CONFIG_X86_64
	case 8: return (void *)VSYSCALL_ADDR;	/* vsyscall page address */
#endif
	default: return NULL;
	}
}

noinline int bpf_testmod_fentry_test1(int a)
{
	return a + 1;
}

noinline int bpf_testmod_fentry_test2(int a, u64 b)
{
	return a + b;
}

noinline int bpf_testmod_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

noinline int bpf_testmod_fentry_test7(u64 a, void *b, short c, int d,
				      void *e, char f, int g)
{
	return a + (long)b + c + d + (long)e + f + g;
}

noinline int bpf_testmod_fentry_test11(u64 a, void *b, short c, int d,
				       void *e, char f, int g,
				       unsigned int h, long i, __u64 j,
				       unsigned long k)
{
	return a + (long)b + c + d + (long)e + f + g + h + i + j + k;
}

int bpf_testmod_fentry_ok;

noinline ssize_t
bpf_testmod_test_read(struct file *file, struct kobject *kobj,
		      struct bin_attribute *bin_attr,
		      char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_read_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};
	struct bpf_testmod_struct_arg_1 struct_arg1 = {10}, struct_arg1_2 = {-1};
	struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3};
	struct bpf_testmod_struct_arg_3 *struct_arg3;
	struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22};
	int i = 1;

	while (bpf_testmod_return_ptr(i))
		i++;

	(void)bpf_testmod_test_struct_arg_1(struct_arg2, 1, 4);
	(void)bpf_testmod_test_struct_arg_2(1, struct_arg2, 4);
	(void)bpf_testmod_test_struct_arg_3(1, 4, struct_arg2);
	(void)bpf_testmod_test_struct_arg_4(struct_arg1, 1, 2, 3, struct_arg2);
	(void)bpf_testmod_test_struct_arg_5();
	(void)bpf_testmod_test_struct_arg_7(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4);
	(void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19,
					    (void *)20, struct_arg4, 23);

	(void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2);

	struct_arg3 = kmalloc((sizeof(struct bpf_testmod_struct_arg_3) +
			       sizeof(int)), GFP_KERNEL);
	if (struct_arg3 != NULL) {
		struct_arg3->b[0] = 1;
		(void)bpf_testmod_test_struct_arg_6(struct_arg3);
		kfree(struct_arg3);
	}

	/* This is always true. Use the check to make sure the compiler
	 * doesn't remove bpf_testmod_loop_test.
	 */
	if (bpf_testmod_loop_test(101) > 100)
		trace_bpf_testmod_test_read(current, &ctx);

	/* Magic number to enable writable tp */
	if (len == 64) {
		struct bpf_testmod_test_writable_ctx writable = {
			.val = 1024,
		};
		trace_bpf_testmod_test_writable_bare(&writable);
		if (writable.early_ret)
			return snprintf(buf, len, "%d\n", writable.val);
	}

	if (bpf_testmod_fentry_test1(1) != 2 ||
	    bpf_testmod_fentry_test2(2, 3) != 5 ||
	    bpf_testmod_fentry_test3(4, 5, 6) != 15 ||
	    bpf_testmod_fentry_test7(16, (void *)17, 18, 19, (void *)20,
				     21, 22) != 133 ||
	    bpf_testmod_fentry_test11(16, (void *)17, 18, 19, (void *)20,
				      21, 22, 23, 24, 25, 26) != 231)
		goto out;

	bpf_testmod_fentry_ok = 1;
out:
	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_read);
ALLOW_ERROR_INJECTION(bpf_testmod_test_read, ERRNO);

noinline ssize_t
bpf_testmod_test_write(struct file *file, struct kobject *kobj,
		       struct bin_attribute *bin_attr,
		       char *buf, loff_t off, size_t len)
{
	struct bpf_testmod_test_write_ctx ctx = {
		.buf = buf,
		.off = off,
		.len = len,
	};

	trace_bpf_testmod_test_write_bare(current, &ctx);

	return -EIO; /* always fail */
}
EXPORT_SYMBOL(bpf_testmod_test_write);
ALLOW_ERROR_INJECTION(bpf_testmod_test_write, ERRNO);

noinline int bpf_fentry_shadow_test(int a)
{
	return a + 2;
}
EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);

__bpf_hook_end();

static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
	.attr = { .name = "bpf_testmod", .mode = 0666, },
	.read = bpf_testmod_test_read,
	.write = bpf_testmod_test_write,
};

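/* kfuncs below are registered for BPF_PROG_TYPE_UNSPEC and are therefore
 * available to all program types; see bpf_testmod_init().
 */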
BTF_KFUNCS_START(bpf_testmod_common_kfunc_ids)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_kfunc_common_test)
BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids)

static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &bpf_testmod_common_kfunc_ids,
};

__bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}

__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

__bpf_kfunc long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
{
	/* Provoke the compiler to assume that the caller has sign-extended a,
	 * b and c on platforms where this is required (e.g. s390x).
	 */
	return (long)a + (long)b + (long)c + d;
}

static struct prog_test_ref_kfunc prog_test_struct = {
	.a = 42,
	.b = 108,
	.next = &prog_test_struct,
	.cnt = REFCOUNT_INIT(1),
};

__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
	refcount_inc(&prog_test_struct.cnt);
	return &prog_test_struct;
}

__bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p)
{
	WARN_ON_ONCE(1);
}

__bpf_kfunc struct prog_test_member *
bpf_kfunc_call_memb_acquire(void)
{
	WARN_ON_ONCE(1);
	return NULL;
}

__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
{
	WARN_ON_ONCE(1);
}

static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
{
	if (size > 2 * sizeof(int))
		return NULL;

	return (int *)p;
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
						  const int rdwr_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

/* The next two can't really be used for testing, except to ensure that
 * the verifier rejects the call. Acquire functions must return struct
 * pointers, so these fail.
 */
__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
{
	/* p != NULL, but p->cnt could be 0 */
}

__bpf_kfunc void bpf_kfunc_call_test_destructive(void)
{
}

__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
{
	return arg;
}

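/* kfuncs below are registered for the SCHED_CLS, TRACING and SYSCALL
 * program types; see bpf_testmod_init().
 */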
BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)

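/* A minimal struct_ops implementation exercised by the struct_ops
 * selftests.
 */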
static int bpf_testmod_ops_init(struct btf *btf)
{
	return 0;
}

static bool bpf_testmod_ops_is_valid_access(int off, int size,
					    enum bpf_access_type type,
					    const struct bpf_prog *prog,
					    struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}

static int bpf_testmod_ops_init_member(const struct btf_type *t,
				       const struct btf_member *member,
				       void *kdata, const void *udata)
{
	if (member->offset == offsetof(struct bpf_testmod_ops, data) * 8) {
		/* For the data field, this function has to copy the value
		 * and return 1 to signal that the field has been handled
		 * by the struct_ops type; otherwise the verifier rejects
		 * the map if the field's value is non-zero.
		 */
		((struct bpf_testmod_ops *)kdata)->data = ((struct bpf_testmod_ops *)udata)->data;
		return 1;
	}
	return 0;
}

static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &bpf_testmod_check_kfunc_ids,
};

static const struct bpf_verifier_ops bpf_testmod_verifier_ops = {
	.is_valid_access = bpf_testmod_ops_is_valid_access,
};

static int bpf_dummy_reg(void *kdata)
{
	struct bpf_testmod_ops *ops = kdata;

	if (ops->test_1)
		ops->test_1();
	/* Some test cases (e.g. struct_ops_maybe_null) may not have test_2
	 * initialized, so we need to check for NULL.
	 */
	if (ops->test_2)
		ops->test_2(4, ops->data);

	return 0;
}

static void bpf_dummy_unreg(void *kdata)
{
}

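/* Default implementations; they also serve as the CFI stubs referenced by
 * .cfi_stubs in the struct_ops definitions below.
 */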
static int bpf_testmod_test_1(void)
{
	return 0;
}

static void bpf_testmod_test_2(int a, int b)
{
}

static int bpf_testmod_ops__test_maybe_null(int dummy,
					    struct task_struct *task__nullable)
{
	return 0;
}

static struct bpf_testmod_ops __bpf_testmod_ops = {
	.test_1 = bpf_testmod_test_1,
	.test_2 = bpf_testmod_test_2,
	.test_maybe_null = bpf_testmod_ops__test_maybe_null,
};

struct bpf_struct_ops bpf_bpf_testmod_ops = {
	.verifier_ops = &bpf_testmod_verifier_ops,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = bpf_dummy_reg,
	.unreg = bpf_dummy_unreg,
	.cfi_stubs = &__bpf_testmod_ops,
	.name = "bpf_testmod_ops",
	.owner = THIS_MODULE,
};

static int bpf_dummy_reg2(void *kdata)
{
	struct bpf_testmod_ops2 *ops = kdata;

	ops->test_1();
	return 0;
}

static struct bpf_testmod_ops2 __bpf_testmod_ops2 = {
	.test_1 = bpf_testmod_test_1,
};

struct bpf_struct_ops bpf_testmod_ops2 = {
	.verifier_ops = &bpf_testmod_verifier_ops,
	.init = bpf_testmod_ops_init,
	.init_member = bpf_testmod_ops_init_member,
	.reg = bpf_dummy_reg2,
	.unreg = bpf_dummy_unreg,
	.cfi_stubs = &__bpf_testmod_ops2,
	.name = "bpf_testmod_ops2",
	.owner = THIS_MODULE,
};

extern int bpf_fentry_test1(int a);

static int bpf_testmod_init(void)
{
	int ret;

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
	ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops);
	ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2);
	if (ret < 0)
		return ret;
	if (bpf_fentry_test1(0) < 0)
		return -EINVAL;
	return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
}

static void bpf_testmod_exit(void)
{
	/* Wait for all references to be dropped because
	 * bpf_kfunc_call_test_release(), which currently resides in the
	 * kernel, can be called after bpf_testmod is unloaded. Once the
	 * release function is moved into the module, this wait can be
	 * removed.
	 */
	while (refcount_read(&prog_test_struct.cnt) > 1)
		msleep(20);

	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
}

module_init(bpf_testmod_init);
module_exit(bpf_testmod_exit);

MODULE_AUTHOR("Andrii Nakryiko");
MODULE_DESCRIPTION("BPF selftests module");
MODULE_LICENSE("Dual BSD/GPL");