// SPDX-License-Identifier: GPL-2.0
#include <bpf/btf.h>
#include <test_btf.h>
#include <linux/btf.h>
#include <test_progs.h>
#include <network_helpers.h>

#include "linked_list.skel.h"
#include "linked_list_fail.skel.h"

static char log_buf[1024 * 1024];

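/*
 * Table of expected-failure cases: each entry names a program in
 * linked_list_fail.skel.h and a substring that must appear in the
 * verifier log when that program is loaded.
 */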
static struct {
	const char *prog_name;
	const char *err_msg;
} linked_list_fail_tests[] = {
#define TEST(test, off) \
	{ #test "_missing_lock_push_front", \
	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
	{ #test "_missing_lock_push_back", \
	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
	{ #test "_missing_lock_pop_front", \
	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" }, \
	{ #test "_missing_lock_pop_back", \
	  "bpf_spin_lock at off=" #off " must be held for bpf_list_head" },
	TEST(kptr, 40)
	TEST(global, 16)
	TEST(map, 0)
	TEST(inner_map, 0)
#undef TEST
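/*
 * Same four list operations, but holding a bpf_spin_lock that lives in
 * a different allocation than the bpf_list_head being operated on.
 */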
#define TEST(test, op) \
	{ #test "_kptr_incorrect_lock_" #op, \
	  "held lock and object are not in the same allocation\n" \
	  "bpf_spin_lock at off=40 must be held for bpf_list_head" }, \
	{ #test "_global_incorrect_lock_" #op, \
	  "held lock and object are not in the same allocation\n" \
	  "bpf_spin_lock at off=16 must be held for bpf_list_head" }, \
	{ #test "_map_incorrect_lock_" #op, \
	  "held lock and object are not in the same allocation\n" \
	  "bpf_spin_lock at off=0 must be held for bpf_list_head" }, \
	{ #test "_inner_map_incorrect_lock_" #op, \
	  "held lock and object are not in the same allocation\n" \
	  "bpf_spin_lock at off=0 must be held for bpf_list_head" },
	TEST(kptr, push_front)
	TEST(kptr, push_back)
	TEST(kptr, pop_front)
	TEST(kptr, pop_back)
	TEST(global, push_front)
	TEST(global, push_back)
	TEST(global, pop_front)
	TEST(global, pop_back)
	TEST(map, push_front)
	TEST(map, push_back)
	TEST(map, pop_front)
	TEST(map, pop_back)
	TEST(inner_map, push_front)
	TEST(inner_map, push_back)
	TEST(inner_map, pop_front)
	TEST(inner_map, pop_back)
#undef TEST
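/* One-off rejection cases: program types, bad helper args, bad offsets. */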
	{ "map_compat_kprobe", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_kretprobe", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_tp", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_perf", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_raw_tp", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "map_compat_raw_tp_w", "tracing progs cannot use bpf_{list_head,rb_root} yet" },
	{ "obj_type_id_oor", "local type ID argument must be in range [0, U32_MAX]" },
	{ "obj_new_no_composite", "bpf_obj_new/bpf_percpu_obj_new type ID argument must be of a struct" },
	{ "obj_new_no_struct", "bpf_obj_new/bpf_percpu_obj_new type ID argument must be of a struct" },
	{ "obj_drop_non_zero_off", "R1 must have zero offset when passed to release func" },
	{ "new_null_ret", "R0 invalid mem access 'ptr_or_null_'" },
	{ "obj_new_acq", "Unreleased reference id=" },
	{ "use_after_drop", "invalid mem access 'scalar'" },
	{ "ptr_walk_scalar", "type=scalar expected=percpu_ptr_" },
	{ "direct_read_lock", "direct access to bpf_spin_lock is disallowed" },
	{ "direct_write_lock", "direct access to bpf_spin_lock is disallowed" },
	{ "direct_read_head", "direct access to bpf_list_head is disallowed" },
	{ "direct_write_head", "direct access to bpf_list_head is disallowed" },
	{ "direct_read_node", "direct access to bpf_list_node is disallowed" },
	{ "direct_write_node", "direct access to bpf_list_node is disallowed" },
	{ "use_after_unlock_push_front", "invalid mem access 'scalar'" },
	{ "use_after_unlock_push_back", "invalid mem access 'scalar'" },
	{ "double_push_front", "arg#1 expected pointer to allocated object" },
	{ "double_push_back", "arg#1 expected pointer to allocated object" },
	{ "no_node_value_type", "bpf_list_node not found at offset=0" },
	{ "incorrect_value_type",
	  "operation on bpf_list_head expects arg#1 bpf_list_node at offset=48 in struct foo, "
	  "but arg is at offset=0 in struct bar" },
	{ "incorrect_node_var_off", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
	{ "incorrect_node_off1", "bpf_list_node not found at offset=49" },
	{ "incorrect_node_off2", "arg#1 offset=0, but expected bpf_list_node at offset=48 in struct foo" },
	{ "no_head_type", "bpf_list_head not found at offset=0" },
	{ "incorrect_head_var_off1", "R1 doesn't have constant offset" },
	{ "incorrect_head_var_off2", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" },
	{ "incorrect_head_off1", "bpf_list_head not found at offset=25" },
	{ "incorrect_head_off2", "bpf_list_head not found at offset=1" },
	{ "pop_front_off", "off 48 doesn't point to 'struct bpf_spin_lock' that is at 40" },
	{ "pop_back_off", "off 48 doesn't point to 'struct bpf_spin_lock' that is at 40" },
};

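/*
 * Autoload a single program from linked_list_fail.skel.h and check
 * that loading fails with the expected verifier message in the log.
 */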
static void test_linked_list_fail_prog(const char *prog_name, const char *err_msg)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts, .kernel_log_buf = log_buf,
		    .kernel_log_size = sizeof(log_buf),
		    .kernel_log_level = 1);
	struct linked_list_fail *skel;
	struct bpf_program *prog;
	int ret;

	skel = linked_list_fail__open_opts(&opts);
	if (!ASSERT_OK_PTR(skel, "linked_list_fail__open_opts"))
		return;

	prog = bpf_object__find_program_by_name(skel->obj, prog_name);
	if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
		goto end;

	bpf_program__set_autoload(prog, true);

	ret = linked_list_fail__load(skel);
	if (!ASSERT_ERR(ret, "linked_list_fail__load must fail"))
		goto end;

	if (!ASSERT_OK_PTR(strstr(log_buf, err_msg), "expected error message")) {
		fprintf(stderr, "Expected: %s\n", err_msg);
		fprintf(stderr, "Verifier: %s\n", log_buf);
	}

end:
	linked_list_fail__destroy(skel);
}

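/*
 * Overwrite the whole map value with 0xff; the update triggers the
 * kernel's check_and_free_fields() path, releasing any list elements a
 * test chose to leave behind in the map.
 */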
static void clear_fields(struct bpf_map *map)
{
	char buf[24];
	int key = 0;

	memset(buf, 0xff, sizeof(buf));
	ASSERT_OK(bpf_map__update_elem(map, &key, sizeof(key), buf, sizeof(buf), 0), "check_and_free_fields");
}

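/* Which subset of the success programs to run. */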
enum {
	TEST_ALL,
	PUSH_POP,
	PUSH_POP_MULT,
	LIST_IN_LIST,
};

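/*
 * Run the success programs from linked_list.skel.h against a dummy IPv4
 * packet, checking both the test_run result and the program's retval.
 * With leave_in_map set, elements stay in the maps so that the kernel's
 * map-release paths clean them up instead of clear_fields().
 */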
static void test_linked_list_success(int mode, bool leave_in_map)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);
	struct linked_list *skel;
	int ret;

	skel = linked_list__open_and_load();
	if (!ASSERT_OK_PTR(skel, "linked_list__open_and_load"))
		return;

	if (mode == LIST_IN_LIST)
		goto lil;
	if (mode == PUSH_POP_MULT)
		goto ppm;

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_push_pop), &opts);
	ASSERT_OK(ret, "map_list_push_pop");
	ASSERT_OK(opts.retval, "map_list_push_pop retval");
	if (!leave_in_map)
		clear_fields(skel->maps.array_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_push_pop), &opts);
	ASSERT_OK(ret, "inner_map_list_push_pop");
	ASSERT_OK(opts.retval, "inner_map_list_push_pop retval");
	if (!leave_in_map)
		clear_fields(skel->maps.inner_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop), &opts);
	ASSERT_OK(ret, "global_list_push_pop");
	ASSERT_OK(opts.retval, "global_list_push_pop retval");
	if (!leave_in_map)
		clear_fields(skel->maps.bss_A);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop_nested), &opts);
	ASSERT_OK(ret, "global_list_push_pop_nested");
	ASSERT_OK(opts.retval, "global_list_push_pop_nested retval");
	if (!leave_in_map)
		clear_fields(skel->maps.bss_A);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_array_push_pop), &opts);
	ASSERT_OK(ret, "global_list_array_push_pop");
	ASSERT_OK(opts.retval, "global_list_array_push_pop retval");
	if (!leave_in_map)
		clear_fields(skel->maps.bss_A);

	if (mode == PUSH_POP)
		goto end;

ppm:
	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_push_pop_multiple), &opts);
	ASSERT_OK(ret, "map_list_push_pop_multiple");
	ASSERT_OK(opts.retval, "map_list_push_pop_multiple retval");
	if (!leave_in_map)
		clear_fields(skel->maps.array_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_push_pop_multiple), &opts);
	ASSERT_OK(ret, "inner_map_list_push_pop_multiple");
	ASSERT_OK(opts.retval, "inner_map_list_push_pop_multiple retval");
	if (!leave_in_map)
		clear_fields(skel->maps.inner_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop_multiple), &opts);
	ASSERT_OK(ret, "global_list_push_pop_multiple");
	ASSERT_OK(opts.retval, "global_list_push_pop_multiple retval");
	if (!leave_in_map)
		clear_fields(skel->maps.bss_A);

	if (mode == PUSH_POP_MULT)
		goto end;

lil:
	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.map_list_in_list), &opts);
	ASSERT_OK(ret, "map_list_in_list");
	ASSERT_OK(opts.retval, "map_list_in_list retval");
	if (!leave_in_map)
		clear_fields(skel->maps.array_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.inner_map_list_in_list), &opts);
	ASSERT_OK(ret, "inner_map_list_in_list");
	ASSERT_OK(opts.retval, "inner_map_list_in_list retval");
	if (!leave_in_map)
		clear_fields(skel->maps.inner_map);

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_in_list), &opts);
	ASSERT_OK(ret, "global_list_in_list");
	ASSERT_OK(opts.retval, "global_list_in_list retval");
	if (!leave_in_map)
		clear_fields(skel->maps.bss_A);
end:
	linked_list__destroy(skel);
}

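/* BTF type IDs assigned by init_btf(); "int" takes ID 1. */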
#define SPIN_LOCK 2
#define LIST_HEAD 3
#define LIST_NODE 4

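/*
 * Build a minimal BTF with int, bpf_spin_lock, bpf_list_head and
 * bpf_list_node, using the sizes the kernel expects for these types.
 */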
static struct btf *init_btf(void)
{
	int id, lid, hid, nid;
	struct btf *btf;

	btf = btf__new_empty();
	if (!ASSERT_OK_PTR(btf, "btf__new_empty"))
		return NULL;
	id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
	if (!ASSERT_EQ(id, 1, "btf__add_int"))
		goto end;
	lid = btf__add_struct(btf, "bpf_spin_lock", 4);
	if (!ASSERT_EQ(lid, SPIN_LOCK, "btf__add_struct bpf_spin_lock"))
		goto end;
	hid = btf__add_struct(btf, "bpf_list_head", 16);
	if (!ASSERT_EQ(hid, LIST_HEAD, "btf__add_struct bpf_list_head"))
		goto end;
	nid = btf__add_struct(btf, "bpf_list_node", 24);
	if (!ASSERT_EQ(nid, LIST_NODE, "btf__add_struct bpf_list_node"))
		goto end;
	return btf;
end:
	btf__free(btf);
	return NULL;
}

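/*
 * A struct carrying both a bpf_list_node and a bpf_rb_node is only
 * accepted by the kernel when it also has a bpf_refcount field.
 */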
static void list_and_rb_node_same_struct(bool refcount_field)
{
	int bpf_rb_node_btf_id, bpf_refcount_btf_id = 0, foo_btf_id;
	struct btf *btf;
	int id, err;

	btf = init_btf();
	if (!ASSERT_OK_PTR(btf, "init_btf"))
		return;

	bpf_rb_node_btf_id = btf__add_struct(btf, "bpf_rb_node", 32);
	if (!ASSERT_GT(bpf_rb_node_btf_id, 0, "btf__add_struct bpf_rb_node"))
		return;

	if (refcount_field) {
		bpf_refcount_btf_id = btf__add_struct(btf, "bpf_refcount", 4);
		if (!ASSERT_GT(bpf_refcount_btf_id, 0, "btf__add_struct bpf_refcount"))
			return;
	}

	id = btf__add_struct(btf, "bar", refcount_field ? 60 : 56);
	if (!ASSERT_GT(id, 0, "btf__add_struct bar"))
		return;
	err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
	if (!ASSERT_OK(err, "btf__add_field bar::a"))
		return;
	err = btf__add_field(btf, "c", bpf_rb_node_btf_id, 192, 0);
	if (!ASSERT_OK(err, "btf__add_field bar::c"))
		return;
	if (refcount_field) {
		err = btf__add_field(btf, "ref", bpf_refcount_btf_id, 448, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::ref"))
			return;
	}

	foo_btf_id = btf__add_struct(btf, "foo", 20);
	if (!ASSERT_GT(foo_btf_id, 0, "btf__add_struct foo"))
		return;
	err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
	if (!ASSERT_OK(err, "btf__add_field foo::a"))
		return;
	err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
	if (!ASSERT_OK(err, "btf__add_field foo::b"))
		return;
	id = btf__add_decl_tag(btf, "contains:bar:a", foo_btf_id, 0);
	if (!ASSERT_GT(id, 0, "btf__add_decl_tag contains:bar:a"))
		return;

	err = btf__load_into_kernel(btf);
	ASSERT_EQ(err, refcount_field ? 0 : -EINVAL, "check btf");
	btf__free(btf);
}

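/*
 * Kernel-side validation of hand-built BTF: each subtest constructs a
 * list head/node layout and checks the exact error the load returns.
 */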
static void test_btf(void)
{
	struct btf *btf = NULL;
	int id, err;

	while (test__start_subtest("btf: too many locks")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 24);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 32, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", LIST_HEAD, 64, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -E2BIG, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: missing lock")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 16);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		id = btf__add_decl_tag(btf, "contains:baz:a", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:baz:a"))
			break;
		id = btf__add_struct(btf, "baz", 16);
		if (!ASSERT_EQ(id, 7, "btf__add_struct baz"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EINVAL, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: bad offset")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 36);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EEXIST, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: missing contains:")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 24);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EINVAL, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: missing struct")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 24);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:bar", 5, 1);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:bar"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ENOENT, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: missing node")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 24);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_HEAD, 64, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:c", 5, 1);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:c"))
			break;

		err = btf__load_into_kernel(btf);
		btf__free(btf);
		ASSERT_EQ(err, -ENOENT, "check btf");
		break;
	}

	while (test__start_subtest("btf: node incorrect type")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 20);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:a", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:a"))
			break;
		id = btf__add_struct(btf, "bar", 4);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", SPIN_LOCK, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EINVAL, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: multiple bpf_list_node with name b")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 52);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 256, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b (dup)"))
			break;
		err = btf__add_field(btf, "d", SPIN_LOCK, 384, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::d"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -EINVAL, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: owning | owned AA cycle")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 44);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:foo:b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ELOOP, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: owning | owned ABA cycle")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 44);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
			break;
		id = btf__add_struct(btf, "bar", 44);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:foo:b", 7, 0);
		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:foo:b"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ELOOP, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: owning -> owned")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 28);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 192, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:a", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:a"))
			break;
		id = btf__add_struct(btf, "bar", 24);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, 0, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: owning -> owning | owned -> owned")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 28);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 192, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
			break;
		id = btf__add_struct(btf, "bar", 44);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0);
		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a"))
			break;
		id = btf__add_struct(btf, "baz", 24);
		if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, 0, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: owning | owned -> owning | owned -> owned")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 44);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
			break;
		id = btf__add_struct(btf, "bar", 44);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:baz:a", 7, 0);
		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:a"))
			break;
		id = btf__add_struct(btf, "baz", 24);
		if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ELOOP, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: owning -> owning | owned -> owning | owned -> owned")) {
		btf = init_btf();
		if (!ASSERT_OK_PTR(btf, "init_btf"))
			break;
		id = btf__add_struct(btf, "foo", 20);
		if (!ASSERT_EQ(id, 5, "btf__add_struct foo"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::a"))
			break;
		err = btf__add_field(btf, "b", SPIN_LOCK, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field foo::b"))
			break;
		id = btf__add_decl_tag(btf, "contains:bar:b", 5, 0);
		if (!ASSERT_EQ(id, 6, "btf__add_decl_tag contains:bar:b"))
			break;
		id = btf__add_struct(btf, "bar", 44);
		if (!ASSERT_EQ(id, 7, "btf__add_struct bar"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field bar::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:baz:b", 7, 0);
		if (!ASSERT_EQ(id, 8, "btf__add_decl_tag contains:baz:b"))
			break;
		id = btf__add_struct(btf, "baz", 44);
		if (!ASSERT_EQ(id, 9, "btf__add_struct baz"))
			break;
		err = btf__add_field(btf, "a", LIST_HEAD, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::a"))
			break;
		err = btf__add_field(btf, "b", LIST_NODE, 128, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::b"))
			break;
		err = btf__add_field(btf, "c", SPIN_LOCK, 320, 0);
		if (!ASSERT_OK(err, "btf__add_field baz::c"))
			break;
		id = btf__add_decl_tag(btf, "contains:bam:a", 9, 0);
		if (!ASSERT_EQ(id, 10, "btf__add_decl_tag contains:bam:a"))
			break;
		id = btf__add_struct(btf, "bam", 24);
		if (!ASSERT_EQ(id, 11, "btf__add_struct bam"))
			break;
		err = btf__add_field(btf, "a", LIST_NODE, 0, 0);
		if (!ASSERT_OK(err, "btf__add_field bam::a"))
			break;

		err = btf__load_into_kernel(btf);
		ASSERT_EQ(err, -ELOOP, "check btf");
		btf__free(btf);
		break;
	}

	while (test__start_subtest("btf: list_node and rb_node in same struct")) {
		list_and_rb_node_same_struct(true);
		break;
	}

	while (test__start_subtest("btf: list_node and rb_node in same struct, no bpf_refcount")) {
		list_and_rb_node_same_struct(false);
		break;
	}
}

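/*
 * Entry point: run every expected-failure program, then the BTF
 * validation subtests, then each success mode both with and without
 * leaving elements behind in the maps.
 */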
void test_linked_list(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(linked_list_fail_tests); i++) {
		if (!test__start_subtest(linked_list_fail_tests[i].prog_name))
			continue;
		test_linked_list_fail_prog(linked_list_fail_tests[i].prog_name,
					   linked_list_fail_tests[i].err_msg);
	}
	test_btf();
	test_linked_list_success(PUSH_POP, false);
	test_linked_list_success(PUSH_POP, true);
	test_linked_list_success(PUSH_POP_MULT, false);
	test_linked_list_success(PUSH_POP_MULT, true);
	test_linked_list_success(LIST_IN_LIST, false);
	test_linked_list_success(LIST_IN_LIST, true);
	test_linked_list_success(TEST_ALL, false);
}