// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#define _GNU_SOURCE
#include "test_progs.h"
#include "testing_helpers.h"
#include "cgroup_helpers.h"
#include <argp.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
#include <string.h>
#include <sys/sysinfo.h> /* get_nprocs */
#include <netinet/in.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <bpf/btf.h>
#include <time.h>
#include "json_writer.h"

#include "network_helpers.h"

/* backtrace() and backtrace_symbols_fd() are glibc specific,
 * use header file when glibc is available and provide stub
 * implementations when another libc implementation is used.
 */
#ifdef __GLIBC__
#include <execinfo.h> /* backtrace */
#else
__weak int backtrace(void **buffer, int size)
{
	return 0;
}

__weak void backtrace_symbols_fd(void *const *buffer, int size, int fd)
{
	dprintf(fd, "<backtrace not supported>\n");
}
#endif /* __GLIBC__ */

int env_verbosity = 0;

static bool verbose(void)
{
	return env.verbosity > VERBOSE_NONE;
}

static void stdio_hijack_init(char **log_buf, size_t *log_cnt)
{
#ifdef __GLIBC__
	if (verbose() && env.worker_id == -1) {
		/* nothing to do, output to stdout by default */
		return;
	}

	fflush(stdout);
	fflush(stderr);

	stdout = open_memstream(log_buf, log_cnt);
	if (!stdout) {
		stdout = env.stdout_saved;
		perror("open_memstream");
		return;
	}

	if (env.subtest_state)
		env.subtest_state->stdout_saved = stdout;
	else
		env.test_state->stdout_saved = stdout;

	stderr = stdout;
#endif
}

static void stdio_hijack(char **log_buf, size_t *log_cnt)
{
#ifdef __GLIBC__
	if (verbose() && env.worker_id == -1) {
		/* nothing to do, output to stdout by default */
		return;
	}

	env.stdout_saved = stdout;
	env.stderr_saved = stderr;

	stdio_hijack_init(log_buf, log_cnt);
#endif
}

static void stdio_restore_cleanup(void)
{
#ifdef __GLIBC__
	if (verbose() && env.worker_id == -1) {
		/* nothing to do, output to stdout by default */
		return;
	}

	fflush(stdout);

	if (env.subtest_state) {
		fclose(env.subtest_state->stdout_saved);
		env.subtest_state->stdout_saved = NULL;
		stdout = env.test_state->stdout_saved;
		stderr = env.test_state->stdout_saved;
	} else {
		fclose(env.test_state->stdout_saved);
		env.test_state->stdout_saved = NULL;
	}
#endif
}

static void stdio_restore(void)
{
#ifdef __GLIBC__
	if (verbose() && env.worker_id == -1) {
		/* nothing to do, output to stdout by default */
		return;
	}

	if (stdout == env.stdout_saved)
		return;

	stdio_restore_cleanup();

	stdout = env.stdout_saved;
	stderr = env.stderr_saved;
#endif
}

/* Adapted from perf/util/string.c */
static bool glob_match(const char *str, const char *pat)
{
	while (*str && *pat && *pat != '*') {
		if (*str != *pat)
			return false;
		str++;
		pat++;
	}
	/* Check wild card */
	if (*pat == '*') {
		while (*pat == '*')
			pat++;
		if (!*pat) /* Tail wild card matches all */
			return true;
		while (*str)
			if (glob_match(str++, pat))
				return true;
	}
	return !*str && !*pat;
}
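
/* Matching is anchored at both ends and only '*' is special; a few
 * illustrative cases (not exercised by the runner itself):
 *
 *	glob_match("tc_links", "tc_*")   -> true  (tail wildcard)
 *	glob_match("tc_links", "*links") -> true  (leading wildcard)
 *	glob_match("tc_links", "tc")     -> false (no trailing '*')
 */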

#define EXIT_NO_TEST 2
#define EXIT_ERR_SETUP_INFRA 3

/* defined in test_progs.h */
struct test_env env = {};

struct prog_test_def {
	const char *test_name;
	int test_num;
	void (*run_test)(void);
	void (*run_serial_test)(void);
	bool should_run;
	bool need_cgroup_cleanup;
	bool should_tmon;
};
/* Override C runtime library's usleep() implementation to ensure nanosleep()
 * is always called. usleep() is frequently used in selftests as a way to
 * trigger kprobes and tracepoints.
 */
int usleep(useconds_t usec)
{
	struct timespec ts = {
		.tv_sec = usec / 1000000,
		.tv_nsec = (usec % 1000000) * 1000,
	};

	return syscall(__NR_nanosleep, &ts, NULL);
}
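
/* With this override in place, a test that does e.g. (illustrative):
 *
 *	usleep(1);
 *
 * is guaranteed to enter the kernel through the nanosleep syscall, so a
 * BPF program attached to a nanosleep kprobe or tracepoint reliably
 * fires, regardless of how the libc in use implements usleep() itself.
 */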

/* Watchdog timer is started by watchdog_start() and stopped by watchdog_stop().
 * If the timer is active for longer than env.secs_till_notify,
 * it prints the name of the current test to stderr.
 * If the timer is active for longer than env.secs_till_kill,
 * it kills the thread executing the test by sending it a SIGSEGV signal.
 */
static void watchdog_timer_func(union sigval sigval)
{
	struct itimerspec timeout = {};
	char test_name[256];
	int err;

	if (env.subtest_state)
		snprintf(test_name, sizeof(test_name), "%s/%s",
			 env.test->test_name, env.subtest_state->name);
	else
		snprintf(test_name, sizeof(test_name), "%s",
			 env.test->test_name);

	switch (env.watchdog_state) {
	case WD_NOTIFY:
		fprintf(env.stderr_saved, "WATCHDOG: test case %s executes for %d seconds...\n",
			test_name, env.secs_till_notify);
		timeout.it_value.tv_sec = env.secs_till_kill - env.secs_till_notify;
		env.watchdog_state = WD_KILL;
		err = timer_settime(env.watchdog, 0, &timeout, NULL);
		if (err)
			fprintf(env.stderr_saved, "Failed to arm watchdog timer\n");
		break;
	case WD_KILL:
		fprintf(env.stderr_saved,
			"WATCHDOG: test case %s executes for %d seconds, terminating with SIGSEGV\n",
			test_name, env.secs_till_kill);
		pthread_kill(env.main_thread, SIGSEGV);
		break;
	}
}

static void watchdog_start(void)
{
	struct itimerspec timeout = {};
	int err;

	if (env.secs_till_kill == 0)
		return;
	if (env.secs_till_notify > 0) {
		env.watchdog_state = WD_NOTIFY;
		timeout.it_value.tv_sec = env.secs_till_notify;
	} else {
		env.watchdog_state = WD_KILL;
		timeout.it_value.tv_sec = env.secs_till_kill;
	}
	err = timer_settime(env.watchdog, 0, &timeout, NULL);
	if (err)
		fprintf(env.stderr_saved, "Failed to start watchdog timer\n");
}

static void watchdog_stop(void)
{
	struct itimerspec timeout = {};
	int err;

	env.watchdog_state = WD_NOTIFY;
	err = timer_settime(env.watchdog, 0, &timeout, NULL);
	if (err)
		fprintf(env.stderr_saved, "Failed to stop watchdog timer\n");
}

static void watchdog_init(void)
{
	struct sigevent watchdog_sev = {
		.sigev_notify = SIGEV_THREAD,
		.sigev_notify_function = watchdog_timer_func,
	};
	int err;

	env.main_thread = pthread_self();
	err = timer_create(CLOCK_MONOTONIC, &watchdog_sev, &env.watchdog);
	if (err)
		fprintf(stderr, "Failed to initialize watchdog timer\n");
}
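
/* Timeline for the defaults set in main() (secs_till_notify = 10,
 * secs_till_kill = 120), shown here for illustration:
 *
 *	t = 0s    watchdog_start() arms a 10s one-shot timer (WD_NOTIFY)
 *	t = 10s   name of the slow test is printed, timer re-armed for
 *	          another 110s (WD_KILL)
 *	t = 120s  main thread gets SIGSEGV; crash_handler() dumps the
 *	          test log and a stack trace
 *
 * watchdog_stop() disarms the timer by setting a zero it_value.
 */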

static bool should_run(struct test_selector *sel, int num, const char *name)
{
	int i;

	for (i = 0; i < sel->blacklist.cnt; i++) {
		if (glob_match(name, sel->blacklist.tests[i].name) &&
		    !sel->blacklist.tests[i].subtest_cnt)
			return false;
	}

	for (i = 0; i < sel->whitelist.cnt; i++) {
		if (glob_match(name, sel->whitelist.tests[i].name))
			return true;
	}

	if (!sel->whitelist.cnt && !sel->num_set)
		return true;

	return num < sel->num_set_len && sel->num_set[num];
}
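
/* Selection precedence, by way of an illustrative example: given
 * -a 'tc_*' -d tc_links -n 7, a test named "tc_links" is denied (the
 * blacklist is checked first), any other "tc_*" test is allowed by the
 * whitelist, and test number 7 is allowed via num_set even if its name
 * matches neither list.
 */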

static bool match_subtest(struct test_filter_set *filter,
			  const char *test_name,
			  const char *subtest_name)
{
	int i, j;

	for (i = 0; i < filter->cnt; i++) {
		if (glob_match(test_name, filter->tests[i].name)) {
			if (!filter->tests[i].subtest_cnt)
				return true;

			for (j = 0; j < filter->tests[i].subtest_cnt; j++) {
				if (glob_match(subtest_name,
					       filter->tests[i].subtests[j]))
					return true;
			}
		}
	}

	return false;
}

static bool should_run_subtest(struct test_selector *sel,
			       struct test_selector *subtest_sel,
			       int subtest_num,
			       const char *test_name,
			       const char *subtest_name)
{
	if (match_subtest(&sel->blacklist, test_name, subtest_name))
		return false;

	if (match_subtest(&sel->whitelist, test_name, subtest_name))
		return true;

	if (!sel->whitelist.cnt && !subtest_sel->num_set)
		return true;

	return subtest_num < subtest_sel->num_set_len && subtest_sel->num_set[subtest_num];
}

static bool should_tmon(struct test_selector *sel, const char *name)
{
	int i;

	for (i = 0; i < sel->whitelist.cnt; i++) {
		if (glob_match(name, sel->whitelist.tests[i].name) &&
		    !sel->whitelist.tests[i].subtest_cnt)
			return true;
	}

	return false;
}

static char *test_result(bool failed, bool skipped)
{
	return failed ? "FAIL" : (skipped ? "SKIP" : "OK");
}

#define TEST_NUM_WIDTH 7

static void print_test_result(const struct prog_test_def *test, const struct test_state *test_state)
{
	int skipped_cnt = test_state->skip_cnt;
	int subtests_cnt = test_state->subtest_num;

	fprintf(env.stdout_saved, "#%-*d %s:", TEST_NUM_WIDTH, test->test_num, test->test_name);
	if (test_state->error_cnt)
		fprintf(env.stdout_saved, "FAIL");
	else if (!skipped_cnt)
		fprintf(env.stdout_saved, "OK");
	else if (skipped_cnt == subtests_cnt || !subtests_cnt)
		fprintf(env.stdout_saved, "SKIP");
	else
		fprintf(env.stdout_saved, "OK (SKIP: %d/%d)", skipped_cnt, subtests_cnt);

	fprintf(env.stdout_saved, "\n");
}

static void print_test_log(char *log_buf, size_t log_cnt)
{
	log_buf[log_cnt] = '\0';
	fprintf(env.stdout_saved, "%s", log_buf);
	if (log_buf[log_cnt - 1] != '\n')
		fprintf(env.stdout_saved, "\n");
}

static void print_subtest_name(int test_num, int subtest_num,
			       const char *test_name, char *subtest_name,
			       char *result)
{
	char test_num_str[32];

	snprintf(test_num_str, sizeof(test_num_str), "%d/%d", test_num, subtest_num);

	fprintf(env.stdout_saved, "#%-*s %s/%s",
		TEST_NUM_WIDTH, test_num_str,
		test_name, subtest_name);

	if (result)
		fprintf(env.stdout_saved, ":%s", result);

	fprintf(env.stdout_saved, "\n");
}
static void jsonw_write_log_message(json_writer_t *w, char *log_buf, size_t log_cnt)
{
	/* open_memstream (from stdio_hijack_init) ensures that log_buf is
	 * terminated by a null byte. Yet in parallel mode, log_buf will be
	 * NULL if there is no message.
	 */
	if (log_cnt) {
		jsonw_string_field(w, "message", log_buf);
	} else {
		jsonw_string_field(w, "message", "");
	}
}

static void dump_test_log(const struct prog_test_def *test,
			  const struct test_state *test_state,
			  bool skip_ok_subtests,
			  bool par_exec_result,
			  json_writer_t *w)
{
	bool test_failed = test_state->error_cnt > 0;
	bool force_log = test_state->force_log;
	bool print_test = verbose() || force_log || test_failed;
	int i;
	struct subtest_state *subtest_state;
	bool subtest_failed;
	bool subtest_filtered;
	bool print_subtest;

	/* we do not print anything in the worker thread */
	if (env.worker_id != -1)
		return;

	/* there is nothing to print when verbose log is used and execution
	 * is not in parallel mode
	 */
	if (verbose() && !par_exec_result)
		return;

	if (test_state->log_cnt && print_test)
		print_test_log(test_state->log_buf, test_state->log_cnt);

	if (w && print_test) {
		jsonw_start_object(w);
		jsonw_string_field(w, "name", test->test_name);
		jsonw_uint_field(w, "number", test->test_num);
		jsonw_write_log_message(w, test_state->log_buf, test_state->log_cnt);
		jsonw_bool_field(w, "failed", test_failed);
		jsonw_name(w, "subtests");
		jsonw_start_array(w);
	}

	for (i = 0; i < test_state->subtest_num; i++) {
		subtest_state = &test_state->subtest_states[i];
		subtest_failed = subtest_state->error_cnt;
		subtest_filtered = subtest_state->filtered;
		print_subtest = verbose() || force_log || subtest_failed;

		if ((skip_ok_subtests && !subtest_failed) || subtest_filtered)
			continue;

		if (subtest_state->log_cnt && print_subtest) {
			print_test_log(subtest_state->log_buf,
				       subtest_state->log_cnt);
		}

		print_subtest_name(test->test_num, i + 1,
				   test->test_name, subtest_state->name,
				   test_result(subtest_state->error_cnt,
					       subtest_state->skipped));

		if (w && print_subtest) {
			jsonw_start_object(w);
			jsonw_string_field(w, "name", subtest_state->name);
			jsonw_uint_field(w, "number", i + 1);
			jsonw_write_log_message(w, subtest_state->log_buf, subtest_state->log_cnt);
			jsonw_bool_field(w, "failed", subtest_failed);
			jsonw_end_object(w);
		}
	}

	if (w && print_test) {
		jsonw_end_array(w);
		jsonw_end_object(w);
	}

	print_test_result(test, test_state);
}

static void stdio_restore(void);

/* A bunch of tests set custom affinity per-thread and/or per-process. Reset
 * it after each test/sub-test.
 */
static void reset_affinity(void)
{
	cpu_set_t cpuset;
	int i, err;

	CPU_ZERO(&cpuset);
	for (i = 0; i < env.nr_cpus; i++)
		CPU_SET(i, &cpuset);

	err = sched_setaffinity(0, sizeof(cpuset), &cpuset);
	if (err < 0) {
		stdio_restore();
		fprintf(stderr, "Failed to reset process affinity: %d!\n", err);
		exit(EXIT_ERR_SETUP_INFRA);
	}
	err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
	if (err < 0) {
		stdio_restore();
		fprintf(stderr, "Failed to reset thread affinity: %d!\n", err);
		exit(EXIT_ERR_SETUP_INFRA);
	}
}

static void save_netns(void)
{
	env.saved_netns_fd = open("/proc/self/ns/net", O_RDONLY);
	if (env.saved_netns_fd == -1) {
		perror("open(/proc/self/ns/net)");
		exit(EXIT_ERR_SETUP_INFRA);
	}
}
static void restore_netns(void)
{
	if (setns(env.saved_netns_fd, CLONE_NEWNET) == -1) {
		stdio_restore();
		perror("setns(CLONE_NEWNET)");
		exit(EXIT_ERR_SETUP_INFRA);
	}
}

void test__end_subtest(void)
{
	struct prog_test_def *test = env.test;
	struct test_state *test_state = env.test_state;
	struct subtest_state *subtest_state = env.subtest_state;

	if (subtest_state->error_cnt) {
		test_state->error_cnt++;
	} else {
		if (!subtest_state->skipped)
			test_state->sub_succ_cnt++;
		else
			test_state->skip_cnt++;
	}

	if (verbose() && !env.workers)
		print_subtest_name(test->test_num, test_state->subtest_num,
				   test->test_name, subtest_state->name,
				   test_result(subtest_state->error_cnt,
					       subtest_state->skipped));

	stdio_restore_cleanup();
	env.subtest_state = NULL;
}

bool test__start_subtest(const char *subtest_name)
{
	struct prog_test_def *test = env.test;
	struct test_state *state = env.test_state;
	struct subtest_state *subtest_state;
	size_t sub_state_size = sizeof(*subtest_state);

	if (env.subtest_state)
		test__end_subtest();

	state->subtest_num++;
	state->subtest_states =
		realloc(state->subtest_states,
			state->subtest_num * sub_state_size);
	if (!state->subtest_states) {
		fprintf(stderr, "Not enough memory to allocate subtest result\n");
		return false;
	}

	subtest_state = &state->subtest_states[state->subtest_num - 1];

	memset(subtest_state, 0, sub_state_size);

	if (!subtest_name || !subtest_name[0]) {
		fprintf(env.stderr_saved,
			"Subtest #%d didn't provide sub-test name!\n",
			state->subtest_num);
		return false;
	}

	subtest_state->name = strdup(subtest_name);
	if (!subtest_state->name) {
		fprintf(env.stderr_saved,
			"Subtest #%d: failed to copy subtest name!\n",
			state->subtest_num);
		return false;
	}

	if (!should_run_subtest(&env.test_selector,
				&env.subtest_selector,
				state->subtest_num,
				test->test_name,
				subtest_name)) {
		subtest_state->filtered = true;
		return false;
	}

	subtest_state->should_tmon = match_subtest(&env.tmon_selector.whitelist,
						   test->test_name,
						   subtest_name);

	env.subtest_state = subtest_state;
	stdio_hijack_init(&subtest_state->log_buf, &subtest_state->log_cnt);
	watchdog_start();

	return true;
}
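
/* Typical pattern inside a test function (illustrative; the "foo"/"bar"
 * names are hypothetical):
 *
 *	void test_foo(void)
 *	{
 *		if (test__start_subtest("bar"))
 *			subtest_bar();	// errors recorded via test__fail()
 *		if (test__start_subtest("baz"))
 *			subtest_baz();
 *	}
 *
 * A false return value means the subtest was filtered out or could not
 * be set up; the last subtest is finalized by run_one_test().
 */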

void test__force_log(void)
{
	env.test_state->force_log = true;
}

void test__skip(void)
{
	if (env.subtest_state)
		env.subtest_state->skipped = true;
	else
		env.test_state->skip_cnt++;
}

void test__fail(void)
{
	if (env.subtest_state)
		env.subtest_state->error_cnt++;
	else
		env.test_state->error_cnt++;
}

int test__join_cgroup(const char *path)
{
	int fd;

	if (!env.test->need_cgroup_cleanup) {
		if (setup_cgroup_environment()) {
			fprintf(stderr,
				"#%d %s: Failed to setup cgroup environment\n",
				env.test->test_num, env.test->test_name);
			return -1;
		}

		env.test->need_cgroup_cleanup = true;
	}

	fd = create_and_get_cgroup(path);
	if (fd < 0) {
		fprintf(stderr,
			"#%d %s: Failed to create cgroup '%s' (errno=%d)\n",
			env.test->test_num, env.test->test_name, path, errno);
		return fd;
	}

	if (join_cgroup(path)) {
		fprintf(stderr,
			"#%d %s: Failed to join cgroup '%s' (errno=%d)\n",
			env.test->test_num, env.test->test_name, path, errno);
		return -1;
	}

	return fd;
}
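
/* Illustrative use in a test; the path, relative to the cgroup root set
 * up by cgroup_helpers, is hypothetical:
 *
 *	int cg_fd = test__join_cgroup("/cg_test");
 *
 *	if (cg_fd < 0)
 *		return;
 *	... attach cgroup BPF programs to cg_fd ...
 *	close(cg_fd);
 *
 * The cgroup environment is torn down automatically after the test
 * because need_cgroup_cleanup was set.
 */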

int bpf_find_map(const char *test, struct bpf_object *obj, const char *name)
{
	struct bpf_map *map;

	map = bpf_object__find_map_by_name(obj, name);
	if (!map) {
		fprintf(stdout, "%s:FAIL:map '%s' not found\n", test, name);
		test__fail();
		return -1;
	}
	return bpf_map__fd(map);
}

int compare_map_keys(int map1_fd, int map2_fd)
{
	__u32 key, next_key;
	char val_buf[PERF_MAX_STACK_DEPTH *
		     sizeof(struct bpf_stack_build_id)];
	int err;

	err = bpf_map_get_next_key(map1_fd, NULL, &key);
	if (err)
		return err;
	err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
	if (err)
		return err;

	while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
		err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
		if (err)
			return err;

		key = next_key;
	}
	if (errno != ENOENT)
		return -1;

	return 0;
}

int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
{
	__u32 key, next_key, *cur_key_p, *next_key_p;
	char *val_buf1, *val_buf2;
	int i, err = 0;

	val_buf1 = malloc(stack_trace_len);
	val_buf2 = malloc(stack_trace_len);
	cur_key_p = NULL;
	next_key_p = &key;
	while (bpf_map_get_next_key(smap_fd, cur_key_p, next_key_p) == 0) {
		err = bpf_map_lookup_elem(smap_fd, next_key_p, val_buf1);
		if (err)
			goto out;
		err = bpf_map_lookup_elem(amap_fd, next_key_p, val_buf2);
		if (err)
			goto out;
		for (i = 0; i < stack_trace_len; i++) {
			if (val_buf1[i] != val_buf2[i]) {
				err = -1;
				goto out;
			}
		}
		key = *next_key_p;
		cur_key_p = &key;
		next_key_p = &next_key;
	}
	if (errno != ENOENT)
		err = -1;

out:
	free(val_buf1);
	free(val_buf2);
	return err;
}

struct netns_obj {
	char *nsname;
	struct tmonitor_ctx *tmon;
	struct nstoken *nstoken;
};

/* Create a new network namespace with the given name.
 *
 * Create a new network namespace and set the network namespace of the
 * current process to the new network namespace if the argument "open" is
 * true. This function should be paired with netns_free() to release the
 * resource and delete the network namespace.
 *
 * It also implements the functionality of the option "-m" by starting
 * traffic monitor in the background to capture the packets in this network
 * namespace if the current test or subtest matches the pattern.
 *
 * nsname: the name of the network namespace to create.
 * open: open the network namespace if true.
 *
 * Return: the network namespace object on success, NULL on failure.
 */
struct netns_obj *netns_new(const char *nsname, bool open)
{
	struct netns_obj *netns_obj = malloc(sizeof(*netns_obj));
	const char *test_name, *subtest_name;
	int r;

	if (!netns_obj)
		return NULL;
	memset(netns_obj, 0, sizeof(*netns_obj));

	netns_obj->nsname = strdup(nsname);
	if (!netns_obj->nsname)
		goto fail;

	/* Create the network namespace */
	r = make_netns(nsname);
	if (r)
		goto fail;

	/* Start traffic monitor */
	if (env.test->should_tmon ||
	    (env.subtest_state && env.subtest_state->should_tmon)) {
		test_name = env.test->test_name;
		subtest_name = env.subtest_state ? env.subtest_state->name : NULL;
		netns_obj->tmon = traffic_monitor_start(nsname, test_name, subtest_name);
		if (!netns_obj->tmon) {
			fprintf(stderr, "Failed to start traffic monitor for %s\n", nsname);
			goto fail;
		}
	} else {
		netns_obj->tmon = NULL;
	}

	if (open) {
		netns_obj->nstoken = open_netns(nsname);
		if (!netns_obj->nstoken)
			goto fail;
	}

	return netns_obj;
fail:
	traffic_monitor_stop(netns_obj->tmon);
	remove_netns(nsname);
	free(netns_obj->nsname);
	free(netns_obj);
	return NULL;
}

/* Delete the network namespace.
 *
 * This function should be paired with netns_new() to delete the namespace
 * created by netns_new().
 */
void netns_free(struct netns_obj *netns_obj)
{
	if (!netns_obj)
		return;
	traffic_monitor_stop(netns_obj->tmon);
	close_netns(netns_obj->nstoken);
	remove_netns(netns_obj->nsname);
	free(netns_obj->nsname);
	free(netns_obj);
}
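
/* A minimal usage sketch ("ns1" is a hypothetical namespace name):
 *
 *	struct netns_obj *ns = netns_new("ns1", true);
 *
 *	if (!ASSERT_OK_PTR(ns, "netns_new"))
 *		return;
 *	... run networking test inside "ns1" ...
 *	netns_free(ns);
 *
 * With -m and a matching test name, packets seen in "ns1" are captured
 * by the traffic monitor for the lifetime of the object.
 */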

/* extern declarations for test funcs */
#define DEFINE_TEST(name)				\
	extern void test_##name(void) __weak;		\
	extern void serial_test_##name(void) __weak;
#include <prog_tests/tests.h>
#undef DEFINE_TEST

static struct prog_test_def prog_test_defs[] = {
#define DEFINE_TEST(name) {			\
	.test_name = #name,			\
	.run_test = &test_##name,		\
	.run_serial_test = &serial_test_##name,	\
},
#include <prog_tests/tests.h>
#undef DEFINE_TEST
};

static const int prog_test_cnt = ARRAY_SIZE(prog_test_defs);

static struct test_state test_states[ARRAY_SIZE(prog_test_defs)];

const char *argp_program_version = "test_progs 0.1";
const char *argp_program_bug_address = "<bpf@vger.kernel.org>";
static const char argp_program_doc[] =
"BPF selftests test runner\v"
"Options accepting the NAMES parameter take either a comma-separated list\n"
"of test names, or a filename prefixed with @. The file contains one name\n"
"(or wildcard pattern) per line, and comments beginning with # are ignored.\n"
"\n"
"These options can be passed repeatedly to read multiple files.\n";

enum ARG_KEYS {
	ARG_TEST_NUM = 'n',
	ARG_TEST_NAME = 't',
	ARG_TEST_NAME_BLACKLIST = 'b',
	ARG_VERIFIER_STATS = 's',
	ARG_VERBOSE = 'v',
	ARG_GET_TEST_CNT = 'c',
	ARG_LIST_TEST_NAMES = 'l',
	ARG_TEST_NAME_GLOB_ALLOWLIST = 'a',
	ARG_TEST_NAME_GLOB_DENYLIST = 'd',
	ARG_NUM_WORKERS = 'j',
	ARG_DEBUG = -1,
	ARG_JSON_SUMMARY = 'J',
	ARG_TRAFFIC_MONITOR = 'm',
	ARG_WATCHDOG_TIMEOUT = 'w',
};

static const struct argp_option opts[] = {
	{ "num", ARG_TEST_NUM, "NUM", 0,
	  "Run test number NUM only " },
	{ "name", ARG_TEST_NAME, "NAMES", 0,
	  "Run tests with names containing any string from NAMES list" },
	{ "name-blacklist", ARG_TEST_NAME_BLACKLIST, "NAMES", 0,
	  "Don't run tests with names containing any string from NAMES list" },
	{ "verifier-stats", ARG_VERIFIER_STATS, NULL, 0,
	  "Output verifier statistics", },
	{ "verbose", ARG_VERBOSE, "LEVEL", OPTION_ARG_OPTIONAL,
	  "Verbose output (use -vv or -vvv for progressively verbose output)" },
	{ "count", ARG_GET_TEST_CNT, NULL, 0,
	  "Get number of selected top-level tests " },
	{ "list", ARG_LIST_TEST_NAMES, NULL, 0,
	  "List test names that would run (without running them) " },
	{ "allow", ARG_TEST_NAME_GLOB_ALLOWLIST, "NAMES", 0,
	  "Run tests with name matching the pattern (supports '*' wildcard)." },
	{ "deny", ARG_TEST_NAME_GLOB_DENYLIST, "NAMES", 0,
	  "Don't run tests with name matching the pattern (supports '*' wildcard)." },
	{ "workers", ARG_NUM_WORKERS, "WORKERS", OPTION_ARG_OPTIONAL,
	  "Number of workers to run in parallel, default to number of cpus." },
	{ "debug", ARG_DEBUG, NULL, 0,
	  "print extra debug information for test_progs." },
	{ "json-summary", ARG_JSON_SUMMARY, "FILE", 0, "Write report in json format to this file."},
#ifdef TRAFFIC_MONITOR
	{ "traffic-monitor", ARG_TRAFFIC_MONITOR, "NAMES", 0,
	  "Monitor network traffic of tests with name matching the pattern (supports '*' wildcard)." },
#endif
	{ "watchdog-timeout", ARG_WATCHDOG_TIMEOUT, "SECONDS", 0,
	  "Kill the process if tests are not making progress for specified number of seconds." },
	{},
};

static FILE *libbpf_capture_stream;

static struct {
	char *buf;
	size_t buf_sz;
} libbpf_output_capture;
/* Creates a global memstream capturing INFO and WARN level output
 * passed to libbpf_print_fn.
 * Returns 0 on success, negative value on failure.
 * On failure the description is printed using PRINT_FAIL and the
 * current test case is marked as failed.
 */
int start_libbpf_log_capture(void)
{
	if (libbpf_capture_stream) {
		PRINT_FAIL("%s: libbpf_capture_stream != NULL\n", __func__);
		return -EINVAL;
	}

	libbpf_capture_stream = open_memstream(&libbpf_output_capture.buf,
					       &libbpf_output_capture.buf_sz);
	if (!libbpf_capture_stream) {
		PRINT_FAIL("%s: open_memstream failed errno=%d\n", __func__, errno);
		return -EINVAL;
	}

	return 0;
}

/* Destroys global memstream created by start_libbpf_log_capture().
 * Returns a pointer to captured data which has to be freed.
 * Returned buffer is null terminated.
 */
char *stop_libbpf_log_capture(void)
{
	char *buf;

	if (!libbpf_capture_stream)
		return NULL;

	fputc(0, libbpf_capture_stream);
	fclose(libbpf_capture_stream);
	libbpf_capture_stream = NULL;
	/* get 'buf' after fclose(), see open_memstream() documentation */
	buf = libbpf_output_capture.buf;
	memset(&libbpf_output_capture, 0, sizeof(libbpf_output_capture));
	return buf;
}
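
/* Usage sketch, e.g. to assert on a libbpf warning (the expected
 * substring shown here is hypothetical):
 *
 *	char *log;
 *
 *	start_libbpf_log_capture();
 *	... load an object that is expected to warn ...
 *	log = stop_libbpf_log_capture();
 *	ASSERT_HAS_SUBSTR(log, "failed to load", "expected warn");
 *	free(log);
 *
 * The returned buffer is NUL terminated and owned by the caller.
 */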

static int libbpf_print_fn(enum libbpf_print_level level,
			   const char *format, va_list args)
{
	if (libbpf_capture_stream && level != LIBBPF_DEBUG) {
		va_list args2;

		va_copy(args2, args);
		vfprintf(libbpf_capture_stream, format, args2);
		va_end(args2);
	}

	if (env.verbosity < VERBOSE_VERY && level == LIBBPF_DEBUG)
		return 0;

	vfprintf(stdout, format, args);
	return 0;
}

static void free_test_filter_set(const struct test_filter_set *set)
{
	int i, j;

	if (!set)
		return;

	for (i = 0; i < set->cnt; i++) {
		free((void *)set->tests[i].name);
		for (j = 0; j < set->tests[i].subtest_cnt; j++)
			free((void *)set->tests[i].subtests[j]);

		free((void *)set->tests[i].subtests);
	}

	free((void *)set->tests);
}

static void free_test_selector(struct test_selector *test_selector)
{
	free_test_filter_set(&test_selector->blacklist);
	free_test_filter_set(&test_selector->whitelist);
	free(test_selector->num_set);
}

extern int extra_prog_load_log_flags;

static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
	struct test_env *env = state->input;
	int err = 0;

	switch (key) {
	case ARG_TEST_NUM: {
		char *subtest_str = strchr(arg, '/');

		if (subtest_str) {
			*subtest_str = '\0';
			if (parse_num_list(subtest_str + 1,
					   &env->subtest_selector.num_set,
					   &env->subtest_selector.num_set_len)) {
				fprintf(stderr,
					"Failed to parse subtest numbers.\n");
				return -EINVAL;
			}
		}
		if (parse_num_list(arg, &env->test_selector.num_set,
				   &env->test_selector.num_set_len)) {
			fprintf(stderr, "Failed to parse test numbers.\n");
			return -EINVAL;
		}
		break;
	}
	case ARG_TEST_NAME_GLOB_ALLOWLIST:
	case ARG_TEST_NAME: {
		if (arg[0] == '@')
			err = parse_test_list_file(arg + 1,
						   &env->test_selector.whitelist,
						   key == ARG_TEST_NAME_GLOB_ALLOWLIST);
		else
			err = parse_test_list(arg,
					      &env->test_selector.whitelist,
					      key == ARG_TEST_NAME_GLOB_ALLOWLIST);

		break;
	}
	case ARG_TEST_NAME_GLOB_DENYLIST:
	case ARG_TEST_NAME_BLACKLIST: {
		if (arg[0] == '@')
			err = parse_test_list_file(arg + 1,
						   &env->test_selector.blacklist,
						   key == ARG_TEST_NAME_GLOB_DENYLIST);
		else
			err = parse_test_list(arg,
					      &env->test_selector.blacklist,
					      key == ARG_TEST_NAME_GLOB_DENYLIST);

		break;
	}
	case ARG_VERIFIER_STATS:
		env->verifier_stats = true;
		break;
	case ARG_VERBOSE:
		env->verbosity = VERBOSE_NORMAL;
		if (arg) {
			if (strcmp(arg, "v") == 0) {
				env->verbosity = VERBOSE_VERY;
				extra_prog_load_log_flags = 1;
			} else if (strcmp(arg, "vv") == 0) {
				env->verbosity = VERBOSE_SUPER;
				extra_prog_load_log_flags = 2;
			} else {
				fprintf(stderr,
					"Unrecognized verbosity setting ('%s'), only -v, -vv and -vvv are supported\n",
					arg);
				return -EINVAL;
			}
		}
		env_verbosity = env->verbosity;

		if (verbose()) {
			if (setenv("SELFTESTS_VERBOSE", "1", 1) == -1) {
				fprintf(stderr,
					"Unable to setenv SELFTESTS_VERBOSE=1 (errno=%d)",
					errno);
				return -EINVAL;
			}
		}

		break;
	case ARG_GET_TEST_CNT:
		env->get_test_cnt = true;
		break;
	case ARG_LIST_TEST_NAMES:
		env->list_test_names = true;
		break;
	case ARG_NUM_WORKERS:
		if (arg) {
			env->workers = atoi(arg);
			if (!env->workers) {
				fprintf(stderr, "Invalid number of workers: %s.\n", arg);
				return -EINVAL;
			}
		} else {
			env->workers = get_nprocs();
		}
		break;
	case ARG_DEBUG:
		env->debug = true;
		break;
	case ARG_JSON_SUMMARY:
		env->json = fopen(arg, "w");
		if (env->json == NULL) {
			perror("Failed to open json summary file");
			return -errno;
		}
		break;
	case ARGP_KEY_ARG:
		argp_usage(state);
		break;
	case ARGP_KEY_END:
		break;
#ifdef TRAFFIC_MONITOR
	case ARG_TRAFFIC_MONITOR:
		if (arg[0] == '@')
			err = parse_test_list_file(arg + 1,
						   &env->tmon_selector.whitelist,
						   true);
		else
			err = parse_test_list(arg,
					      &env->tmon_selector.whitelist,
					      true);
		break;
#endif
	case ARG_WATCHDOG_TIMEOUT:
		env->secs_till_kill = atoi(arg);
		if (env->secs_till_kill < 0) {
			fprintf(stderr, "Invalid watchdog timeout: %s.\n", arg);
			return -EINVAL;
		}
		if (env->secs_till_kill < env->secs_till_notify)
			env->secs_till_notify = 0;
		break;
	default:
		return ARGP_ERR_UNKNOWN;
	}
	return err;
}

/*
 * Determine if test_progs is running as a "flavored" test runner and switch
 * into corresponding sub-directory to load correct BPF objects.
 *
 * This is done by looking at executable name. If it contains "-flavor"
 * suffix, then we are running as a flavored test runner.
 */
int cd_flavor_subdir(const char *exec_name)
{
	/* General form of argv[0] passed here is:
	 * some/path/to/test_progs[-flavor], where -flavor part is optional.
	 * First cut out "test_progs[-flavor]" part, then extract "flavor"
	 * part, if it's there.
	 */
	const char *flavor = strrchr(exec_name, '/');

	if (!flavor)
		flavor = exec_name;
	else
		flavor++;

	flavor = strrchr(flavor, '-');
	if (!flavor)
		return 0;
	flavor++;
	if (verbose())
		fprintf(stdout, "Switching to flavor '%s' subdirectory...\n", flavor);

	return chdir(flavor);
}

int trigger_module_test_read(int read_sz)
{
	int fd, err;

	fd = open(BPF_TESTMOD_TEST_FILE, O_RDONLY);
	err = -errno;
	if (!ASSERT_GE(fd, 0, "testmod_file_open"))
		return err;

	read(fd, NULL, read_sz);
	close(fd);

	return 0;
}

int trigger_module_test_write(int write_sz)
{
	int fd, err;
	char *buf = malloc(write_sz);

	if (!buf)
		return -ENOMEM;

	memset(buf, 'a', write_sz);
	buf[write_sz - 1] = '\0';

	fd = open(BPF_TESTMOD_TEST_FILE, O_WRONLY);
	err = -errno;
	if (!ASSERT_GE(fd, 0, "testmod_file_open")) {
		free(buf);
		return err;
	}

	write(fd, buf, write_sz);
	close(fd);
	free(buf);
	return 0;
}

int write_sysctl(const char *sysctl, const char *value)
{
	int fd, err, len;

	fd = open(sysctl, O_WRONLY);
	if (!ASSERT_NEQ(fd, -1, "open sysctl"))
		return -1;

	len = strlen(value);
	err = write(fd, value, len);
	close(fd);
	if (!ASSERT_EQ(err, len, "write sysctl"))
		return -1;

	return 0;
}
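
/* For example, tests commonly flip kernel knobs like so (knob and value
 * shown for illustration):
 *
 *	if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "1"))
 *		return;
 *
 * This writes in the current network namespace, so pairing it with
 * netns_new()/open_netns() keeps the change contained to the test.
 */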

int get_bpf_max_tramp_links_from(struct btf *btf)
{
	const struct btf_enum *e;
	const struct btf_type *t;
	__u32 i, type_cnt;
	const char *name;
	__u16 j, vlen;

	for (i = 1, type_cnt = btf__type_cnt(btf); i < type_cnt; i++) {
		t = btf__type_by_id(btf, i);
		if (!t || !btf_is_enum(t) || t->name_off)
			continue;
		e = btf_enum(t);
		for (j = 0, vlen = btf_vlen(t); j < vlen; j++, e++) {
			name = btf__str_by_offset(btf, e->name_off);
			if (name && !strcmp(name, "BPF_MAX_TRAMP_LINKS"))
				return e->val;
		}
	}

	return -1;
}

int get_bpf_max_tramp_links(void)
{
	struct btf *vmlinux_btf;
	int ret;

	vmlinux_btf = btf__load_vmlinux_btf();
	if (!ASSERT_OK_PTR(vmlinux_btf, "vmlinux btf"))
		return -1;
	ret = get_bpf_max_tramp_links_from(vmlinux_btf);
	btf__free(vmlinux_btf);

	return ret;
}

#define MAX_BACKTRACE_SZ 128
void crash_handler(int signum)
{
	void *bt[MAX_BACKTRACE_SZ];
	size_t sz;

	sz = backtrace(bt, ARRAY_SIZE(bt));

	if (env.stdout_saved)
		stdio_restore();
	if (env.test) {
		env.test_state->error_cnt++;
		dump_test_log(env.test, env.test_state, true, false, NULL);
	}
	if (env.worker_id != -1)
		fprintf(stderr, "[%d]: ", env.worker_id);
	fprintf(stderr, "Caught signal #%d!\nStack trace:\n", signum);
	backtrace_symbols_fd(bt, sz, STDERR_FILENO);
}

static void sigint_handler(int signum)
{
	int i;

	for (i = 0; i < env.workers; i++)
		if (env.worker_socks[i] > 0)
			close(env.worker_socks[i]);
}

static int current_test_idx;
static pthread_mutex_t current_test_lock;
static pthread_mutex_t stdout_output_lock;

static inline const char *str_msg(const struct msg *msg, char *buf)
{
	switch (msg->type) {
	case MSG_DO_TEST:
		sprintf(buf, "MSG_DO_TEST %d", msg->do_test.num);
		break;
	case MSG_TEST_DONE:
		sprintf(buf, "MSG_TEST_DONE %d (log: %d)",
			msg->test_done.num,
			msg->test_done.have_log);
		break;
	case MSG_SUBTEST_DONE:
		sprintf(buf, "MSG_SUBTEST_DONE %d (log: %d)",
			msg->subtest_done.num,
			msg->subtest_done.have_log);
		break;
	case MSG_TEST_LOG:
		sprintf(buf, "MSG_TEST_LOG (cnt: %zu, last: %d)",
			strlen(msg->test_log.log_buf),
			msg->test_log.is_last);
		break;
	case MSG_EXIT:
		sprintf(buf, "MSG_EXIT");
		break;
	default:
		sprintf(buf, "UNKNOWN");
		break;
	}

	return buf;
}

static int send_message(int sock, const struct msg *msg)
{
	char buf[256];

	if (env.debug)
		fprintf(stderr, "Sending msg: %s\n", str_msg(msg, buf));
	return send(sock, msg, sizeof(*msg), 0);
}

static int recv_message(int sock, struct msg *msg)
{
	int ret;
	char buf[256];

	memset(msg, 0, sizeof(*msg));
	ret = recv(sock, msg, sizeof(*msg), 0);
	if (ret >= 0) {
		if (env.debug)
			fprintf(stderr, "Received msg: %s\n", str_msg(msg, buf));
	}
	return ret;
}
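
/* Dispatcher/worker message flow for one test, as implemented below
 * (sketch):
 *
 *	dispatcher                    worker
 *	----------                    ------
 *	MSG_DO_TEST(num)       ->
 *	                       <-     MSG_TEST_DONE(num, have_log)
 *	                       <-     MSG_TEST_LOG chunks (if have_log)
 *	                       <-     MSG_SUBTEST_DONE per subtest, each
 *	                              optionally followed by MSG_TEST_LOG
 *	MSG_EXIT               ->
 *
 * Logs are sent in MAX_LOG_TRUNK_SIZE pieces; is_last marks the end.
 */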

static void run_one_test(int test_num)
{
	struct prog_test_def *test = &prog_test_defs[test_num];
	struct test_state *state = &test_states[test_num];

	env.test = test;
	env.test_state = state;

	stdio_hijack(&state->log_buf, &state->log_cnt);

	watchdog_start();
	if (test->run_test)
		test->run_test();
	else if (test->run_serial_test)
		test->run_serial_test();
	watchdog_stop();

	/* ensure last sub-test is finalized properly */
	if (env.subtest_state)
		test__end_subtest();

	state->tested = true;

	if (verbose() && env.worker_id == -1)
		print_test_result(test, state);

	reset_affinity();
	restore_netns();
	if (test->need_cgroup_cleanup)
		cleanup_cgroup_environment();

	stdio_restore();
	free(stop_libbpf_log_capture());

	dump_test_log(test, state, false, false, NULL);
}

struct dispatch_data {
	int worker_id;
	int sock_fd;
};

static int read_prog_test_msg(int sock_fd, struct msg *msg, enum msg_type type)
{
	if (recv_message(sock_fd, msg) < 0)
		return 1;

	if (msg->type != type) {
		printf("%s: unexpected message type %d. expected %d\n", __func__, msg->type, type);
		return 1;
	}

	return 0;
}

static int dispatch_thread_read_log(int sock_fd, char **log_buf, size_t *log_cnt)
{
	FILE *log_fp = NULL;
	int result = 0;

	log_fp = open_memstream(log_buf, log_cnt);
	if (!log_fp)
		return 1;

	while (true) {
		struct msg msg;

		if (read_prog_test_msg(sock_fd, &msg, MSG_TEST_LOG)) {
			result = 1;
			goto out;
		}

		fprintf(log_fp, "%s", msg.test_log.log_buf);
		if (msg.test_log.is_last)
			break;
	}

out:
	fclose(log_fp);
	log_fp = NULL;
	return result;
}

static int dispatch_thread_send_subtests(int sock_fd, struct test_state *state)
{
	struct msg msg;
	struct subtest_state *subtest_state;
	int subtest_num = state->subtest_num;

	state->subtest_states = malloc(subtest_num * sizeof(*subtest_state));

	for (int i = 0; i < subtest_num; i++) {
		subtest_state = &state->subtest_states[i];

		memset(subtest_state, 0, sizeof(*subtest_state));

		if (read_prog_test_msg(sock_fd, &msg, MSG_SUBTEST_DONE))
			return 1;

		subtest_state->name = strdup(msg.subtest_done.name);
		subtest_state->error_cnt = msg.subtest_done.error_cnt;
		subtest_state->skipped = msg.subtest_done.skipped;
		subtest_state->filtered = msg.subtest_done.filtered;

		/* collect all logs */
		if (msg.subtest_done.have_log)
			if (dispatch_thread_read_log(sock_fd,
						     &subtest_state->log_buf,
						     &subtest_state->log_cnt))
				return 1;
	}

	return 0;
}

static void *dispatch_thread(void *ctx)
{
	struct dispatch_data *data = ctx;
	int sock_fd;

	sock_fd = data->sock_fd;

	while (true) {
		int test_to_run = -1;
		struct prog_test_def *test;
		struct test_state *state;
		/* grab a test */
		{
			pthread_mutex_lock(&current_test_lock);

			if (current_test_idx >= prog_test_cnt) {
				pthread_mutex_unlock(&current_test_lock);
				goto done;
			}

			test = &prog_test_defs[current_test_idx];
			test_to_run = current_test_idx;
			current_test_idx++;

			pthread_mutex_unlock(&current_test_lock);
		}

		if (!test->should_run || test->run_serial_test)
			continue;

		/* run test through worker */
		{
			struct msg msg_do_test;

			memset(&msg_do_test, 0, sizeof(msg_do_test));
			msg_do_test.type = MSG_DO_TEST;
			msg_do_test.do_test.num = test_to_run;
			if (send_message(sock_fd, &msg_do_test) < 0) {
				perror("Fail to send command");
				goto done;
			}
			env.worker_current_test[data->worker_id] = test_to_run;
		}

		/* wait for test done */
		do {
			struct msg msg;

			if (read_prog_test_msg(sock_fd, &msg, MSG_TEST_DONE))
				goto error;
			if (test_to_run != msg.test_done.num)
				goto error;

			state = &test_states[test_to_run];
			state->tested = true;
			state->error_cnt = msg.test_done.error_cnt;
			state->skip_cnt = msg.test_done.skip_cnt;
			state->sub_succ_cnt = msg.test_done.sub_succ_cnt;
			state->subtest_num = msg.test_done.subtest_num;

			/* collect all logs */
			if (msg.test_done.have_log) {
				if (dispatch_thread_read_log(sock_fd,
							     &state->log_buf,
							     &state->log_cnt))
					goto error;
			}

			/* collect all subtests and subtest logs */
			if (!state->subtest_num)
				break;

			if (dispatch_thread_send_subtests(sock_fd, state))
				goto error;
		} while (false);

		pthread_mutex_lock(&stdout_output_lock);
		dump_test_log(test, state, false, true, NULL);
		pthread_mutex_unlock(&stdout_output_lock);
	} /* while (true) */
error:
	if (env.debug)
		fprintf(stderr, "[%d]: Protocol/IO error: %s.\n", data->worker_id, strerror(errno));

done:
	{
		struct msg msg_exit;

		msg_exit.type = MSG_EXIT;
		if (send_message(sock_fd, &msg_exit) < 0) {
			if (env.debug)
				fprintf(stderr, "[%d]: send_message msg_exit: %s.\n",
					data->worker_id, strerror(errno));
		}
	}
	return NULL;
}

static void calculate_summary_and_print_errors(struct test_env *env)
{
	int i;
	int succ_cnt = 0, fail_cnt = 0, sub_succ_cnt = 0, skip_cnt = 0;
	json_writer_t *w = NULL;

	for (i = 0; i < prog_test_cnt; i++) {
		struct test_state *state = &test_states[i];

		if (!state->tested)
			continue;

		sub_succ_cnt += state->sub_succ_cnt;
		skip_cnt += state->skip_cnt;

		if (state->error_cnt)
			fail_cnt++;
		else
			succ_cnt++;
	}

	if (env->json) {
		w = jsonw_new(env->json);
		if (!w)
			fprintf(env->stderr_saved, "Failed to create new JSON stream.");
	}

	if (w) {
		jsonw_start_object(w);
		jsonw_uint_field(w, "success", succ_cnt);
		jsonw_uint_field(w, "success_subtest", sub_succ_cnt);
		jsonw_uint_field(w, "skipped", skip_cnt);
		jsonw_uint_field(w, "failed", fail_cnt);
		jsonw_name(w, "results");
		jsonw_start_array(w);
	}

	/*
	 * We only print error logs summary when there are failed tests and
	 * verbose mode is not enabled. Otherwise, results may be inconsistent.
	 */
	if (!verbose() && fail_cnt) {
		printf("\nAll error logs:\n");

		/* print error logs again */
		for (i = 0; i < prog_test_cnt; i++) {
			struct prog_test_def *test = &prog_test_defs[i];
			struct test_state *state = &test_states[i];

			if (!state->tested || !state->error_cnt)
				continue;

			dump_test_log(test, state, true, true, w);
		}
	}

	if (w) {
		jsonw_end_array(w);
		jsonw_end_object(w);
		jsonw_destroy(&w);
	}

	if (env->json)
		fclose(env->json);

	printf("Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
	       succ_cnt, sub_succ_cnt, skip_cnt, fail_cnt);

	env->succ_cnt = succ_cnt;
	env->sub_succ_cnt = sub_succ_cnt;
	env->fail_cnt = fail_cnt;
	env->skip_cnt = skip_cnt;
}

static void server_main(void)
{
	pthread_t *dispatcher_threads;
	struct dispatch_data *data;
	struct sigaction sigact_int = {
		.sa_handler = sigint_handler,
		.sa_flags = SA_RESETHAND,
	};
	int i;

	sigaction(SIGINT, &sigact_int, NULL);

	dispatcher_threads = calloc(env.workers, sizeof(pthread_t));
	data = calloc(env.workers, sizeof(struct dispatch_data));

	env.worker_current_test = calloc(env.workers, sizeof(int));
	for (i = 0; i < env.workers; i++) {
		int rc;

		data[i].worker_id = i;
		data[i].sock_fd = env.worker_socks[i];
		rc = pthread_create(&dispatcher_threads[i], NULL, dispatch_thread, &data[i]);
		if (rc) {
			/* pthread_create() returns an error number and does
			 * not set errno, so report rc directly.
			 */
			fprintf(stderr, "Failed to launch dispatcher thread: %s\n",
				strerror(rc));
			exit(EXIT_ERR_SETUP_INFRA);
		}
	}

	/* wait for all dispatcher to finish */
	for (i = 0; i < env.workers; i++) {
		while (true) {
			int ret = pthread_tryjoin_np(dispatcher_threads[i], NULL);

			if (!ret) {
				break;
			} else if (ret == EBUSY) {
				if (env.debug)
					fprintf(stderr, "Still waiting for thread %d (test %d).\n",
						i, env.worker_current_test[i] + 1);
				usleep(1000 * 1000);
				continue;
			} else {
				fprintf(stderr, "Unexpected error joining dispatcher thread: %d", ret);
				break;
			}
		}
	}
	free(dispatcher_threads);
	free(env.worker_current_test);
	free(data);

	/* run serial tests */
	save_netns();

	for (int i = 0; i < prog_test_cnt; i++) {
		struct prog_test_def *test = &prog_test_defs[i];

		if (!test->should_run || !test->run_serial_test)
			continue;

		run_one_test(i);
	}

	/* generate summary */
	fflush(stderr);
	fflush(stdout);

	calculate_summary_and_print_errors(&env);

	/* reap all workers */
	for (i = 0; i < env.workers; i++) {
		int wstatus, pid;

		pid = waitpid(env.worker_pids[i], &wstatus, 0);
		if (pid != env.worker_pids[i])
			perror("Unable to reap worker");
	}
}

static void worker_main_send_log(int sock, char *log_buf, size_t log_cnt)
{
	char *src;
	size_t slen;

	src = log_buf;
	slen = log_cnt;
	while (slen) {
		struct msg msg_log;
		char *dest;
		size_t len;

		memset(&msg_log, 0, sizeof(msg_log));
		msg_log.type = MSG_TEST_LOG;
		dest = msg_log.test_log.log_buf;
		len = slen >= MAX_LOG_TRUNK_SIZE ? MAX_LOG_TRUNK_SIZE : slen;
		memcpy(dest, src, len);

		src += len;
		slen -= len;
		if (!slen)
			msg_log.test_log.is_last = true;

		assert(send_message(sock, &msg_log) >= 0);
	}
}

static void free_subtest_state(struct subtest_state *state)
{
	if (state->log_buf) {
		free(state->log_buf);
		state->log_buf = NULL;
		state->log_cnt = 0;
	}
	free(state->name);
	state->name = NULL;
}

static int worker_main_send_subtests(int sock, struct test_state *state)
{
	int i, result = 0;
	struct msg msg;
	struct subtest_state *subtest_state;

	memset(&msg, 0, sizeof(msg));
	msg.type = MSG_SUBTEST_DONE;

	for (i = 0; i < state->subtest_num; i++) {
		subtest_state = &state->subtest_states[i];

		msg.subtest_done.num = i;

		strncpy(msg.subtest_done.name, subtest_state->name, MAX_SUBTEST_NAME);

		msg.subtest_done.error_cnt = subtest_state->error_cnt;
		msg.subtest_done.skipped = subtest_state->skipped;
		msg.subtest_done.filtered = subtest_state->filtered;
		msg.subtest_done.have_log = false;

		if (verbose() || state->force_log || subtest_state->error_cnt) {
			if (subtest_state->log_cnt)
				msg.subtest_done.have_log = true;
		}

		if (send_message(sock, &msg) < 0) {
			perror("Fail to send message done");
			result = 1;
			goto out;
		}

		/* send logs */
		if (msg.subtest_done.have_log)
			worker_main_send_log(sock, subtest_state->log_buf, subtest_state->log_cnt);

		free_subtest_state(subtest_state);
		free(subtest_state->name);
	}

out:
	for (; i < state->subtest_num; i++)
		free_subtest_state(&state->subtest_states[i]);
	free(state->subtest_states);
	return result;
}

static int worker_main(int sock)
{
	save_netns();
	watchdog_init();

	while (true) {
		/* receive command */
		struct msg msg;

		if (recv_message(sock, &msg) < 0)
			goto out;

		switch (msg.type) {
		case MSG_EXIT:
			if (env.debug)
				fprintf(stderr, "[%d]: worker exit.\n",
					env.worker_id);
			goto out;
		case MSG_DO_TEST: {
			int test_to_run = msg.do_test.num;
			struct prog_test_def *test = &prog_test_defs[test_to_run];
			struct test_state *state = &test_states[test_to_run];
			struct msg msg;

			if (env.debug)
				fprintf(stderr, "[%d]: #%d:%s running.\n",
					env.worker_id,
					test_to_run + 1,
					test->test_name);

			run_one_test(test_to_run);

			memset(&msg, 0, sizeof(msg));
			msg.type = MSG_TEST_DONE;
			msg.test_done.num = test_to_run;
			msg.test_done.error_cnt = state->error_cnt;
			msg.test_done.skip_cnt = state->skip_cnt;
			msg.test_done.sub_succ_cnt = state->sub_succ_cnt;
			msg.test_done.subtest_num = state->subtest_num;
			msg.test_done.have_log = false;

			if (verbose() || state->force_log || state->error_cnt) {
				if (state->log_cnt)
					msg.test_done.have_log = true;
			}
			if (send_message(sock, &msg) < 0) {
				perror("Fail to send message done");
				goto out;
			}

			/* send logs */
			if (msg.test_done.have_log)
				worker_main_send_log(sock, state->log_buf, state->log_cnt);

			if (state->log_buf) {
				free(state->log_buf);
				state->log_buf = NULL;
				state->log_cnt = 0;
			}

			if (state->subtest_num)
				if (worker_main_send_subtests(sock, state))
					goto out;

			if (env.debug)
				fprintf(stderr, "[%d]: #%d:%s done.\n",
					env.worker_id,
					test_to_run + 1,
					test->test_name);
			break;
		} /* case MSG_DO_TEST */
		default:
			if (env.debug)
				fprintf(stderr, "[%d]: unknown message.\n", env.worker_id);
			return -1;
		}
	}
out:
	return 0;
}

static void free_test_states(void)
{
	int i, j;

	for (i = 0; i < ARRAY_SIZE(prog_test_defs); i++) {
		struct test_state *test_state = &test_states[i];

		for (j = 0; j < test_state->subtest_num; j++)
			free_subtest_state(&test_state->subtest_states[j]);

		free(test_state->subtest_states);
		free(test_state->log_buf);
		test_state->subtest_states = NULL;
		test_state->log_buf = NULL;
	}
}

int main(int argc, char **argv)
{
	static const struct argp argp = {
		.options = opts,
		.parser = parse_arg,
		.doc = argp_program_doc,
	};
	struct sigaction sigact = {
		.sa_handler = crash_handler,
		.sa_flags = SA_RESETHAND,
	};
	int err, i;

	sigaction(SIGSEGV, &sigact, NULL);

	env.secs_till_notify = 10;
	env.secs_till_kill = 120;
	err = argp_parse(&argp, argc, argv, 0, NULL, &env);
	if (err)
		return err;

	err = cd_flavor_subdir(argv[0]);
	if (err)
		return err;

	watchdog_init();

	/* Use libbpf 1.0 API mode */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
	libbpf_set_print(libbpf_print_fn);

	srand(time(NULL));

	env.jit_enabled = is_jit_enabled();
	env.nr_cpus = libbpf_num_possible_cpus();
	if (env.nr_cpus < 0) {
		fprintf(stderr, "Failed to get number of CPUs: %d!\n",
			env.nr_cpus);
		return -1;
	}

	env.stdout_saved = stdout;
	env.stderr_saved = stderr;

	env.has_testmod = true;
	if (!env.list_test_names) {
		/* ensure previous instance of the module is unloaded */
		unload_bpf_testmod(verbose());

		if (load_bpf_testmod(verbose())) {
			fprintf(env.stderr_saved, "WARNING! Selftests relying on bpf_testmod.ko will be skipped.\n");
			env.has_testmod = false;
		}
	}

	/* initializing tests */
	for (i = 0; i < prog_test_cnt; i++) {
		struct prog_test_def *test = &prog_test_defs[i];

		test->test_num = i + 1;
		test->should_run = should_run(&env.test_selector,
					      test->test_num, test->test_name);
		if ((test->run_test == NULL && test->run_serial_test == NULL) ||
		    (test->run_test != NULL && test->run_serial_test != NULL)) {
			fprintf(stderr, "Test %d:%s must have either test_%s() or serial_test_%s() defined.\n",
				test->test_num, test->test_name, test->test_name, test->test_name);
			exit(EXIT_ERR_SETUP_INFRA);
		}
		if (test->should_run)
			test->should_tmon = should_tmon(&env.tmon_selector, test->test_name);
	}

	/* ignore workers if we are just listing */
	if (env.get_test_cnt || env.list_test_names)
		env.workers = 0;

	/* launch workers if requested */
	env.worker_id = -1; /* main process */
	if (env.workers) {
		env.worker_pids = calloc(env.workers, sizeof(pid_t));
		env.worker_socks = calloc(env.workers, sizeof(int));
		if (env.debug)
			fprintf(stdout, "Launching %d workers.\n", env.workers);
		for (i = 0; i < env.workers; i++) {
			int sv[2];
			pid_t pid;

			if (socketpair(AF_UNIX, SOCK_SEQPACKET | SOCK_CLOEXEC, 0, sv) < 0) {
				perror("Fail to create worker socket");
				return -1;
			}
			pid = fork();
			if (pid < 0) {
				perror("Failed to fork worker");
				return -1;
			} else if (pid != 0) { /* main process */
				close(sv[1]);
				env.worker_pids[i] = pid;
				env.worker_socks[i] = sv[0];
			} else { /* inside each worker process */
				close(sv[0]);
				env.worker_id = i;
				return worker_main(sv[1]);
			}
		}

		if (env.worker_id == -1) {
			server_main();
			goto out;
		}
	}

	/* The rest of the main process */

	/* on single mode */
	save_netns();

	for (i = 0; i < prog_test_cnt; i++) {
		struct prog_test_def *test = &prog_test_defs[i];

		if (!test->should_run)
			continue;

		if (env.get_test_cnt) {
			env.succ_cnt++;
			continue;
		}

		if (env.list_test_names) {
			fprintf(env.stdout_saved, "%s\n", test->test_name);
			env.succ_cnt++;
			continue;
		}

		run_one_test(i);
	}

	if (env.get_test_cnt) {
		printf("%d\n", env.succ_cnt);
		goto out;
	}

	if (env.list_test_names)
		goto out;

	calculate_summary_and_print_errors(&env);

	close(env.saved_netns_fd);
out:
	if (!env.list_test_names && env.has_testmod)
		unload_bpf_testmod(verbose());

	free_test_selector(&env.test_selector);
	free_test_selector(&env.subtest_selector);
	free_test_selector(&env.tmon_selector);
	free_test_states();

	if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0)
		return EXIT_NO_TEST;

	return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
}
/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <stdlib.h>
#include <time.h>

#include <linux/types.h>
typedef __u16 __sum16;
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <linux/unistd.h>

#include <sys/ioctl.h>
#include <sys/wait.h>
#include <sys/types.h>
#include <fcntl.h>

#include <linux/bpf.h>
#include <linux/err.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

#include "test_iptunnel_common.h"
#include "bpf_util.h"
#include "bpf_endian.h"
#include "bpf_rlimit.h"

static int error_cnt, pass_cnt;

#define MAGIC_BYTES 123

/* ipv4 test vector */
static struct {
	struct ethhdr eth;
	struct iphdr iph;
	struct tcphdr tcp;
} __packed pkt_v4 = {
	.eth.h_proto = bpf_htons(ETH_P_IP),
	.iph.ihl = 5,
	.iph.protocol = 6,
	.iph.tot_len = bpf_htons(MAGIC_BYTES),
	.tcp.urg_ptr = 123,
};

/* ipv6 test vector */
static struct {
	struct ethhdr eth;
	struct ipv6hdr iph;
	struct tcphdr tcp;
} __packed pkt_v6 = {
	.eth.h_proto = bpf_htons(ETH_P_IPV6),
	.iph.nexthdr = 6,
	.iph.payload_len = bpf_htons(MAGIC_BYTES),
	.tcp.urg_ptr = 123,
};

#define CHECK(condition, tag, format...) ({				\
	int __ret = !!(condition);					\
	if (__ret) {							\
		error_cnt++;						\
		printf("%s:FAIL:%s ", __func__, tag);			\
		printf(format);						\
	} else {							\
		pass_cnt++;						\
		printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\
	}								\
	__ret;								\
})
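
/* Note: CHECK() deliberately reads a variable named "duration" from the
 * calling scope, so every test that uses it must declare one, e.g.
 * (illustrative):
 *
 *	__u32 duration = 0;
 *
 *	CHECK(fd < 0, "open", "err %d errno %d\n", fd, errno);
 */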
83
static int bpf_find_map(const char *test, struct bpf_object *obj,
			const char *name)
{
	struct bpf_map *map;

	map = bpf_object__find_map_by_name(obj, name);
	if (!map) {
		printf("%s:FAIL:map '%s' not found\n", test, name);
		error_cnt++;
		return -1;
	}
	return bpf_map__fd(map);
}

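/* Run test_pkt_access.o as a TC classifier against the canned IPv4 and
 * IPv6 TCP packets via bpf_prog_test_run(); the program is expected to
 * return 0 on both.
 */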
static void test_pkt_access(void)
{
	const char *file = "./test_pkt_access.o";
	struct bpf_object *obj;
	__u32 duration, retval;
	int err, prog_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
				NULL, NULL, &retval, &duration);
	CHECK(err || errno || retval, "ipv4",
	      "err %d errno %d retval %d duration %d\n",
	      err, errno, retval, duration);

	err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6),
				NULL, NULL, &retval, &duration);
	CHECK(err || errno || retval, "ipv6",
	      "err %d errno %d retval %d duration %d\n",
	      err, errno, retval, duration);
	bpf_object__close(obj);
}

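/* Attach-less XDP test: loads test_xdp.o, seeds the vip2tnl map with one
 * IPv4 and one IPv6 tunnel entry, then checks that bpf_prog_test_run()
 * encapsulates both packets (XDP_TX with the expected outer header).
 */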
static void test_xdp(void)
{
	struct vip key4 = {.protocol = 6, .family = AF_INET};
	struct vip key6 = {.protocol = 6, .family = AF_INET6};
	struct iptnl_info value4 = {.family = AF_INET};
	struct iptnl_info value6 = {.family = AF_INET6};
	const char *file = "./test_xdp.o";
	struct bpf_object *obj;
	char buf[128];
	struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr);
	struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
	__u32 duration, retval, size;
	int err, prog_fd, map_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	map_fd = bpf_find_map(__func__, obj, "vip2tnl");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &key4, &value4, 0);
	bpf_map_update_elem(map_fd, &key6, &value6, 0);

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);

	CHECK(err || errno || retval != XDP_TX || size != 74 ||
	      iph->protocol != IPPROTO_IPIP, "ipv4",
	      "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);

	err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != XDP_TX || size != 114 ||
	      iph6->nexthdr != IPPROTO_IPV6, "ipv6",
	      "err %d errno %d retval %d size %d\n",
	      err, errno, retval, size);
out:
	bpf_object__close(obj);
}

#define MAGIC_VAL 0x1234
#define NUM_ITER 100000
#define VIP_NUM 5

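/* L4 load-balancer test: populates the vip_map, ch_rings and reals maps,
 * runs the program NUM_ITER times per address family, then verifies the
 * redirect verdict, the rewritten packet and the per-CPU byte/packet
 * counters in the stats map.
 */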
static void test_l4lb(const char *file)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct vip key = {.protocol = 6};
	struct vip_meta {
		__u32 flags;
		__u32 vip_num;
	} value = {.vip_num = VIP_NUM};
	__u32 stats_key = VIP_NUM;
	struct vip_stats {
		__u64 bytes;
		__u64 pkts;
	} stats[nr_cpus];
	struct real_definition {
		union {
			__be32 dst;
			__be32 dstv6[4];
		};
		__u8 flags;
	} real_def = {.dst = MAGIC_VAL};
	__u32 ch_key = 11, real_num = 3;
	__u32 duration, retval, size;
	int err, i, prog_fd, map_fd;
	__u64 bytes = 0, pkts = 0;
	struct bpf_object *obj;
	char buf[128];
	__u32 *magic = (__u32 *)buf;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	map_fd = bpf_find_map(__func__, obj, "vip_map");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &key, &value, 0);

	map_fd = bpf_find_map(__func__, obj, "ch_rings");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);

	map_fd = bpf_find_map(__func__, obj, "reals");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &real_num, &real_def, 0);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 ||
	      *magic != MAGIC_VAL, "ipv4",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 ||
	      *magic != MAGIC_VAL, "ipv6",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	map_fd = bpf_find_map(__func__, obj, "stats");
	if (map_fd < 0)
		goto out;
	bpf_map_lookup_elem(map_fd, &stats_key, stats);
	for (i = 0; i < nr_cpus; i++) {
		bytes += stats[i].bytes;
		pkts += stats[i].pkts;
	}
	if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
		error_cnt++;
		printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts);
	}
out:
	bpf_object__close(obj);
}

static void test_l4lb_all(void)
{
	const char *file1 = "./test_l4lb.o";
	const char *file2 = "./test_l4lb_noinline.o";

	test_l4lb(file1);
	test_l4lb(file2);
}

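/* Same L4 load-balancer scenario as test_l4lb(), but with the no-inline
 * build of the program loaded as XDP; the expected verdict and output
 * packet sizes differ accordingly.
 */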
static void test_xdp_noinline(void)
{
	const char *file = "./test_xdp_noinline.o";
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct vip key = {.protocol = 6};
	struct vip_meta {
		__u32 flags;
		__u32 vip_num;
	} value = {.vip_num = VIP_NUM};
	__u32 stats_key = VIP_NUM;
	struct vip_stats {
		__u64 bytes;
		__u64 pkts;
	} stats[nr_cpus];
	struct real_definition {
		union {
			__be32 dst;
			__be32 dstv6[4];
		};
		__u8 flags;
	} real_def = {.dst = MAGIC_VAL};
	__u32 ch_key = 11, real_num = 3;
	__u32 duration, retval, size;
	int err, i, prog_fd, map_fd;
	__u64 bytes = 0, pkts = 0;
	struct bpf_object *obj;
	char buf[128];
	__u32 *magic = (__u32 *)buf;

	err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	map_fd = bpf_find_map(__func__, obj, "vip_map");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &key, &value, 0);

	map_fd = bpf_find_map(__func__, obj, "ch_rings");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);

	map_fd = bpf_find_map(__func__, obj, "reals");
	if (map_fd < 0)
		goto out;
	bpf_map_update_elem(map_fd, &real_num, &real_def, 0);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 1 || size != 54 ||
	      *magic != MAGIC_VAL, "ipv4",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
				buf, &size, &retval, &duration);
	CHECK(err || errno || retval != 1 || size != 74 ||
	      *magic != MAGIC_VAL, "ipv6",
	      "err %d errno %d retval %d size %d magic %x\n",
	      err, errno, retval, size, *magic);

	map_fd = bpf_find_map(__func__, obj, "stats");
	if (map_fd < 0)
		goto out;
	bpf_map_lookup_elem(map_fd, &stats_key, stats);
	for (i = 0; i < nr_cpus; i++) {
		bytes += stats[i].bytes;
		pkts += stats[i].pkts;
	}
	if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
		error_cnt++;
		printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts);
	}
out:
	bpf_object__close(obj);
}

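/* Smoke test: test_tcp_estats.o only needs to pass the verifier and load
 * as a tracepoint program; it is closed again without being attached.
 */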
static void test_tcp_estats(void)
{
	const char *file = "./test_tcp_estats.o";
	int err, prog_fd;
	struct bpf_object *obj;
	__u32 duration = 0;

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	/* CHECK() already bumps error_cnt on failure, so just bail out */
	if (CHECK(err, "", "err %d errno %d\n", err, errno))
		return;

	bpf_object__close(obj);
}

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

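/* Exercise the prog/map ID APIs: loads test_obj_id.o twice, validates
 * bpf_obj_get_info_by_fd() output (jited/xlated image sizes, load time,
 * uid, map IDs, names), then walks bpf_prog_get_next_id() and
 * bpf_map_get_next_id() to make sure both objects are discoverable.
 */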
static void test_bpf_obj_id(void)
{
	const __u64 array_magic_value = 0xfaceb00c;
	const __u32 array_key = 0;
	const int nr_iters = 2;
	const char *file = "./test_obj_id.o";
	const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
	const char *expected_prog_name = "test_obj_id";
	const char *expected_map_name = "test_map_id";
	const __u64 nsec_per_sec = 1000000000;

	struct bpf_object *objs[nr_iters];
	int prog_fds[nr_iters], map_fds[nr_iters];
	/* +1 to test for the info_len returned by kernel */
	struct bpf_prog_info prog_infos[nr_iters + 1];
	struct bpf_map_info map_infos[nr_iters + 1];
	/* Each prog only uses one map. +1 to test nr_map_ids
	 * returned by kernel.
	 */
	__u32 map_ids[nr_iters + 1];
	char jited_insns[128], xlated_insns[128], zeros[128];
	__u32 i, next_id, info_len, nr_id_found, duration = 0;
	struct timespec real_time_ts, boot_time_ts;
	int sysctl_fd, jit_enabled = 0, err = 0;
	__u64 array_value;
	uid_t my_uid = getuid();
	time_t now, load_time;

	sysctl_fd = open(jit_sysctl, O_RDONLY);
	if (sysctl_fd != -1) {
		char tmpc;

		if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
			jit_enabled = (tmpc != '0');
		close(sysctl_fd);
	}

	err = bpf_prog_get_fd_by_id(0);
	CHECK(err >= 0 || errno != ENOENT,
	      "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);

	err = bpf_map_get_fd_by_id(0);
	CHECK(err >= 0 || errno != ENOENT,
	      "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);

	for (i = 0; i < nr_iters; i++)
		objs[i] = NULL;

	/* Check bpf_obj_get_info_by_fd() */
	bzero(zeros, sizeof(zeros));
	for (i = 0; i < nr_iters; i++) {
		now = time(NULL);
		err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER,
				    &objs[i], &prog_fds[i]);
		/* test_obj_id.o is a dumb prog. It should never fail
		 * to load.
		 */
		if (err)
			error_cnt++;
		assert(!err);

		/* Insert a magic value into the map */
		map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
		assert(map_fds[i] >= 0);
		err = bpf_map_update_elem(map_fds[i], &array_key,
					  &array_magic_value, 0);
		assert(!err);

		/* Check getting map info */
		info_len = sizeof(struct bpf_map_info) * 2;
		bzero(&map_infos[i], info_len);
		err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
					     &info_len);
		if (CHECK(err ||
			  map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
			  map_infos[i].key_size != sizeof(__u32) ||
			  map_infos[i].value_size != sizeof(__u64) ||
			  map_infos[i].max_entries != 1 ||
			  map_infos[i].map_flags != 0 ||
			  info_len != sizeof(struct bpf_map_info) ||
			  strcmp((char *)map_infos[i].name, expected_map_name),
			  "get-map-info(fd)",
			  "err %d errno %d type %d(%d) info_len %u(%zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
			  err, errno,
			  map_infos[i].type, BPF_MAP_TYPE_ARRAY,
			  info_len, sizeof(struct bpf_map_info),
			  map_infos[i].key_size,
			  map_infos[i].value_size,
			  map_infos[i].max_entries,
			  map_infos[i].map_flags,
			  map_infos[i].name, expected_map_name))
			goto done;

		/* Check getting prog info */
		info_len = sizeof(struct bpf_prog_info) * 2;
		bzero(&prog_infos[i], info_len);
		bzero(jited_insns, sizeof(jited_insns));
		bzero(xlated_insns, sizeof(xlated_insns));
		prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
		prog_infos[i].jited_prog_len = sizeof(jited_insns);
		prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
		prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
		prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
		prog_infos[i].nr_map_ids = 2;
		err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
		assert(!err);
		err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
		assert(!err);
		err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
					     &info_len);
		load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
			+ (prog_infos[i].load_time / nsec_per_sec);
		if (CHECK(err ||
			  prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER ||
			  info_len != sizeof(struct bpf_prog_info) ||
			  (jit_enabled && !prog_infos[i].jited_prog_len) ||
			  (jit_enabled &&
			   !memcmp(jited_insns, zeros, sizeof(zeros))) ||
			  !prog_infos[i].xlated_prog_len ||
			  !memcmp(xlated_insns, zeros, sizeof(zeros)) ||
			  load_time < now - 60 || load_time > now + 60 ||
			  prog_infos[i].created_by_uid != my_uid ||
			  prog_infos[i].nr_map_ids != 1 ||
			  *(int *)prog_infos[i].map_ids != map_infos[i].id ||
			  strcmp((char *)prog_infos[i].name, expected_prog_name),
			  "get-prog-info(fd)",
			  "err %d errno %d i %d type %d(%d) info_len %u(%zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
			  err, errno, i,
			  prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
			  info_len, sizeof(struct bpf_prog_info),
			  jit_enabled,
			  prog_infos[i].jited_prog_len,
			  prog_infos[i].xlated_prog_len,
			  !!memcmp(jited_insns, zeros, sizeof(zeros)),
			  !!memcmp(xlated_insns, zeros, sizeof(zeros)),
			  load_time, now,
			  prog_infos[i].created_by_uid, my_uid,
			  prog_infos[i].nr_map_ids, 1,
			  *(int *)prog_infos[i].map_ids, map_infos[i].id,
			  prog_infos[i].name, expected_prog_name))
			goto done;
	}

	/* Check bpf_prog_get_next_id() */
	nr_id_found = 0;
	next_id = 0;
	while (!bpf_prog_get_next_id(next_id, &next_id)) {
		struct bpf_prog_info prog_info = {};
		__u32 saved_map_id;
		int prog_fd;

		info_len = sizeof(prog_info);

		prog_fd = bpf_prog_get_fd_by_id(next_id);
		if (prog_fd < 0 && errno == ENOENT)
			/* The bpf_prog is in the dead row */
			continue;
		if (CHECK(prog_fd < 0, "get-prog-fd(next_id)",
			  "prog_fd %d next_id %d errno %d\n",
			  prog_fd, next_id, errno))
			break;

		for (i = 0; i < nr_iters; i++)
			if (prog_infos[i].id == next_id)
				break;

		if (i == nr_iters)
			continue;

		nr_id_found++;

		/* Negative test:
		 * prog_info.nr_map_ids = 1
		 * prog_info.map_ids = NULL
		 */
		prog_info.nr_map_ids = 1;
		err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
		if (CHECK(!err || errno != EFAULT,
			  "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
			  err, errno, EFAULT))
			break;
		bzero(&prog_info, sizeof(prog_info));
		info_len = sizeof(prog_info);

		saved_map_id = *(int *)(prog_infos[i].map_ids);
		prog_info.map_ids = prog_infos[i].map_ids;
		prog_info.nr_map_ids = 2;
		err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
		prog_infos[i].jited_prog_insns = 0;
		prog_infos[i].xlated_prog_insns = 0;
		CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
		      memcmp(&prog_info, &prog_infos[i], info_len) ||
		      *(int *)prog_info.map_ids != saved_map_id,
		      "get-prog-info(next_id->fd)",
		      "err %d errno %d info_len %u(%zu) memcmp %d map_id %u(%u)\n",
		      err, errno, info_len, sizeof(struct bpf_prog_info),
		      memcmp(&prog_info, &prog_infos[i], info_len),
		      *(int *)prog_info.map_ids, saved_map_id);
		close(prog_fd);
	}
	CHECK(nr_id_found != nr_iters,
	      "check total prog id found by get_next_id",
	      "nr_id_found %u(%u)\n",
	      nr_id_found, nr_iters);

	/* Check bpf_map_get_next_id() */
	nr_id_found = 0;
	next_id = 0;
	while (!bpf_map_get_next_id(next_id, &next_id)) {
		struct bpf_map_info map_info = {};
		int map_fd;

		info_len = sizeof(map_info);

		map_fd = bpf_map_get_fd_by_id(next_id);
		if (map_fd < 0 && errno == ENOENT)
			/* The bpf_map is in the dead row */
			continue;
		if (CHECK(map_fd < 0, "get-map-fd(next_id)",
			  "map_fd %d next_id %u errno %d\n",
			  map_fd, next_id, errno))
			break;

		for (i = 0; i < nr_iters; i++)
			if (map_infos[i].id == next_id)
				break;

		if (i == nr_iters)
			continue;

		nr_id_found++;

		err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);
		assert(!err);

		err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
		CHECK(err || info_len != sizeof(struct bpf_map_info) ||
		      memcmp(&map_info, &map_infos[i], info_len) ||
		      array_value != array_magic_value,
		      "check get-map-info(next_id->fd)",
		      "err %d errno %d info_len %u(%zu) memcmp %d array_value %llu(%llu)\n",
		      err, errno, info_len, sizeof(struct bpf_map_info),
		      memcmp(&map_info, &map_infos[i], info_len),
		      array_value, array_magic_value);

		close(map_fd);
	}
	CHECK(nr_id_found != nr_iters,
	      "check total map id found by get_next_id",
	      "nr_id_found %u(%u)\n",
	      nr_id_found, nr_iters);

done:
	for (i = 0; i < nr_iters; i++)
		bpf_object__close(objs[i]);
}

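/* Load test_pkt_md_access.o as a TC classifier and run it over the IPv4
 * packet; the program exercises reads of __sk_buff metadata fields and
 * returns 0 on success.
 */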
static void test_pkt_md_access(void)
{
	const char *file = "./test_pkt_md_access.o";
	struct bpf_object *obj;
	__u32 duration, retval;
	int err, prog_fd;

	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
	if (err) {
		error_cnt++;
		return;
	}

	err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
				NULL, NULL, &retval, &duration);
	CHECK(err || retval, "",
	      "err %d errno %d retval %d duration %d\n",
	      err, errno, retval, duration);

	bpf_object__close(obj);
}

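/* Feed valid and invalid object names to BPF_PROG_LOAD and BPF_MAP_CREATE
 * directly via the bpf(2) syscall: names of up to 15 characters plus NUL
 * must be accepted, longer or non-printable ones must fail with EINVAL.
 */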
static void test_obj_name(void)
{
	struct {
		const char *name;
		int success;
		int expected_errno;
	} tests[] = {
		{ "", 1, 0 },
		{ "_123456789ABCDE", 1, 0 },
		{ "_123456789ABCDEF", 0, EINVAL },
		{ "_123456789ABCD\n", 0, EINVAL },
	};
	struct bpf_insn prog[] = {
		BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	__u32 duration = 0;
	int i;

	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
		size_t name_len = strlen(tests[i].name) + 1;
		union bpf_attr attr;
		size_t ncopy;
		int fd;

		/* test different attr.prog_name during BPF_PROG_LOAD */
		ncopy = name_len < sizeof(attr.prog_name) ?
			name_len : sizeof(attr.prog_name);
		bzero(&attr, sizeof(attr));
		attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
		attr.insn_cnt = 2;
		attr.insns = ptr_to_u64(prog);
		attr.license = ptr_to_u64("");
		memcpy(attr.prog_name, tests[i].name, ncopy);

		fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
		CHECK((tests[i].success && fd < 0) ||
		      (!tests[i].success && fd != -1) ||
		      (!tests[i].success && errno != tests[i].expected_errno),
		      "check-bpf-prog-name",
		      "fd %d(%d) errno %d(%d)\n",
		      fd, tests[i].success, errno, tests[i].expected_errno);

		if (fd != -1)
			close(fd);

		/* test different attr.map_name during BPF_MAP_CREATE */
		ncopy = name_len < sizeof(attr.map_name) ?
			name_len : sizeof(attr.map_name);
		bzero(&attr, sizeof(attr));
		attr.map_type = BPF_MAP_TYPE_ARRAY;
		attr.key_size = 4;
		attr.value_size = 4;
		attr.max_entries = 1;
		attr.map_flags = 0;
		memcpy(attr.map_name, tests[i].name, ncopy);
		fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
		CHECK((tests[i].success && fd < 0) ||
		      (!tests[i].success && fd != -1) ||
		      (!tests[i].success && errno != tests[i].expected_errno),
		      "check-bpf-map-name",
		      "fd %d(%d) errno %d(%d)\n",
		      fd, tests[i].success, errno, tests[i].expected_errno);

		if (fd != -1)
			close(fd);
	}
}

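/* Attach several tracepoint programs to one perf event and exercise
 * PERF_EVENT_IOC_QUERY_BPF: an empty prog array, count-only queries,
 * a bad user pointer (EFAULT), an undersized ids array (ENOSPC) and,
 * finally, that the returned IDs match the programs we attached.
 */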
static void test_tp_attach_query(void)
{
	const int num_progs = 3;
	int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
	__u32 duration = 0, info_len, saved_prog_ids[num_progs];
	const char *file = "./test_tracepoint.o";
	struct perf_event_query_bpf *query;
	struct perf_event_attr attr = {};
	struct bpf_object *obj[num_progs];
	struct bpf_prog_info prog_info;
	char buf[256];

	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		return;
	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
		  "read", "bytes %d errno %d\n", bytes, errno))
		return;

	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.sample_period = 1;
	attr.wakeup_events = 1;

	query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
	for (i = 0; i < num_progs; i++) {
		err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
				    &prog_fd[i]);
		if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
			goto cleanup1;

		bzero(&prog_info, sizeof(prog_info));
		prog_info.jited_prog_len = 0;
		prog_info.xlated_prog_len = 0;
		prog_info.nr_map_ids = 0;
		info_len = sizeof(prog_info);
		err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
		if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
			  err, errno))
			goto cleanup1;
		saved_prog_ids[i] = prog_info.id;

		pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
				    0 /* cpu 0 */, -1 /* group id */,
				    0 /* flags */);
		if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
			  pmu_fd[i], errno))
			goto cleanup2;
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
		if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
			  err, errno))
			goto cleanup3;

		if (i == 0) {
			/* check NULL prog array query */
			query->ids_len = num_progs;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(err || query->prog_cnt != 0,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;
		}

		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
		if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
			  err, errno))
			goto cleanup3;

		if (i == 1) {
			/* try to get # of programs only */
			query->ids_len = 0;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(err || query->prog_cnt != 2,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;

			/* try a few negative tests */
			/* invalid query pointer */
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
				    (struct perf_event_query_bpf *)0x1);
			if (CHECK(!err || errno != EFAULT,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d\n", err, errno))
				goto cleanup3;

			/* not enough space */
			query->ids_len = 1;
			err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
			if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
				  "perf_event_ioc_query_bpf",
				  "err %d errno %d query->prog_cnt %u\n",
				  err, errno, query->prog_cnt))
				goto cleanup3;
		}

		query->ids_len = num_progs;
		err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
		if (CHECK(err || query->prog_cnt != (i + 1),
			  "perf_event_ioc_query_bpf",
			  "err %d errno %d query->prog_cnt %u\n",
			  err, errno, query->prog_cnt))
			goto cleanup3;
		for (j = 0; j < i + 1; j++)
			if (CHECK(saved_prog_ids[j] != query->ids[j],
				  "perf_event_ioc_query_bpf",
				  "#%d saved_prog_id %x query prog_id %x\n",
				  j, saved_prog_ids[j], query->ids[j]))
				goto cleanup3;
	}

	/* The cleanup labels sit inside this loop on purpose: jumping to
	 * cleanupN from iteration i tears down what was set up for i, then
	 * the loop keeps unwinding the earlier, fully set-up iterations.
	 */
	i = num_progs - 1;
	for (; i >= 0; i--) {
	cleanup3:
		ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
	cleanup2:
		close(pmu_fd[i]);
	cleanup1:
		bpf_object__close(obj[i]);
	}
	free(query);
}

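/* Walk every key of map1 and verify it can be looked up in map2; returns
 * 0 only if map1's key set is a subset of map2's. The stack trace tests
 * call it in both directions to prove the two maps hold the same IDs.
 */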
static int compare_map_keys(int map1_fd, int map2_fd)
{
	__u32 key, next_key;
	char val_buf[PERF_MAX_STACK_DEPTH *
		     sizeof(struct bpf_stack_build_id)];
	int err;

	err = bpf_map_get_next_key(map1_fd, NULL, &key);
	if (err)
		return err;
	err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
	if (err)
		return err;

	while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
		err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
		if (err)
			return err;

		key = next_key;
	}
	if (errno != ENOENT)
		return -1;

	return 0;
}

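/* Attach test_stacktrace_map.o to the sched:sched_switch tracepoint via
 * perf_event_open(), let it collect stack traces for a second, then check
 * that stackid_hmap and stackmap ended up with matching key sets.
 */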
static void test_stacktrace_map(void)
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd;
	const char *file = "./test_stacktrace_map.o";
	int bytes, efd, err, pmu_fd, prog_fd;
	struct perf_event_attr attr = {};
	__u32 key, val, duration = 0;
	struct bpf_object *obj;
	char buf[256];

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
		return;

	/* Get the ID for the sched/sched_switch tracepoint */
	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		goto close_prog;

	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	if (bytes <= 0 || bytes >= sizeof(buf))
		goto close_prog;

	/* Open the perf event and attach the bpf program */
	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.sample_period = 1;
	attr.wakeup_events = 1;
	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
		  pmu_fd, errno))
		goto close_prog;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (err)
		goto disable_pmu;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
	if (err)
		goto disable_pmu;

	/* find map fds */
	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (control_map_fd < 0)
		goto disable_pmu;

	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (stackid_hmap_fd < 0)
		goto disable_pmu;

	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (stackmap_fd < 0)
		goto disable_pmu;

	/* give the bpf program some time to run */
	sleep(1);

	/* disable stack trace collection */
	key = 0;
	val = 1;
	bpf_map_update_elem(control_map_fd, &key, &val, 0);

	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu_noerr;

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu_noerr;

	goto disable_pmu_noerr;
disable_pmu:
	error_cnt++;
disable_pmu_noerr:
	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
	close(pmu_fd);
close_prog:
	bpf_object__close(obj);
}

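/* Same stack map consistency check as test_stacktrace_map(), but attached
 * through bpf_raw_tracepoint_open() instead of a perf event.
 */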
static void test_stacktrace_map_raw_tp(void)
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd;
	const char *file = "./test_stacktrace_map.o";
	int efd, err, prog_fd;
	__u32 key, val, duration = 0;
	struct bpf_object *obj;

	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
		return;

	efd = bpf_raw_tracepoint_open("sched_switch", prog_fd);
	if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
		goto close_prog;

	/* find map fds */
	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (control_map_fd < 0)
		goto close_prog;

	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (stackid_hmap_fd < 0)
		goto close_prog;

	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (stackmap_fd < 0)
		goto close_prog;

	/* give the bpf program some time to run */
	sleep(1);

	/* disable stack trace collection */
	key = 0;
	val = 1;
	bpf_map_update_elem(control_map_fd, &key, &val, 0);

	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
		goto close_prog;

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto close_prog;

	goto close_prog_noerr;
close_prog:
	error_cnt++;
close_prog_noerr:
	/* the raw tracepoint (and its program) detaches when efd is closed */
	if (efd >= 0)
		close(efd);
	bpf_object__close(obj);
}

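/* Scrape the expected build ID of ./urandom_read out of "readelf -n" so
 * it can be compared against what the kernel put into the stack map.
 */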
static int extract_build_id(char *build_id, size_t size)
{
	FILE *fp;
	char *line = NULL;
	size_t len = 0;
	ssize_t nread;

	fp = popen("readelf -n ./urandom_read | grep 'Build ID'", "r");
	if (fp == NULL)
		return -1;

	nread = getline(&line, &len, fp);
	/* streams from popen() must be closed with pclose(), not fclose() */
	pclose(fp);
	if (nread == -1)
		goto err;

	/* use the number of bytes read, not the getline() buffer size,
	 * and leave room for the terminating NUL
	 */
	if ((size_t)nread > size - 1)
		nread = size - 1;
	memcpy(build_id, line, nread);
	build_id[nread] = '\0';
	free(line);
	return 0;
err:
	free(line);
	return -1;
}

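/* Attach test_stacktrace_build_id.o to the random/urandom_read tracepoint,
 * trigger it by reading /dev/urandom, then verify the collected stack maps
 * are consistent and that at least one entry carries the build ID that
 * readelf reports for ./urandom_read.
 */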
static void test_stacktrace_build_id(void)
{
	int control_map_fd, stackid_hmap_fd, stackmap_fd;
	const char *file = "./test_stacktrace_build_id.o";
	int bytes, efd, err, pmu_fd, prog_fd;
	struct perf_event_attr attr = {};
	__u32 key, previous_key, val, duration = 0;
	struct bpf_object *obj;
	char buf[256];
	int i, j;
	struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
	int build_id_matches = 0;

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
		goto out;

	/* Get the ID for the random/urandom_read tracepoint */
	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/random/urandom_read/id");
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		goto close_prog;

	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
		  "read", "bytes %d errno %d\n", bytes, errno))
		goto close_prog;

	/* Open the perf event and attach the bpf program */
	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
	attr.sample_period = 1;
	attr.wakeup_events = 1;
	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
		  pmu_fd, errno))
		goto close_prog;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
		  err, errno))
		goto close_pmu;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
		  err, errno))
		goto disable_pmu;

	/* find map fds */
	control_map_fd = bpf_find_map(__func__, obj, "control_map");
	if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
	if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
	if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
		  err, errno))
		goto disable_pmu;

	/* trigger the random/urandom_read tracepoint */
	assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
	       == 0);
	assert(system("./urandom_read") == 0);
	/* disable stack trace collection */
	key = 0;
	val = 1;
	bpf_map_update_elem(control_map_fd, &key, &val, 0);

	/* for every element in stackid_hmap, we can find a corresponding one
	 * in stackmap, and vice versa.
	 */
	err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
	if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
	if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = extract_build_id(buf, 256);

	if (CHECK(err, "get build_id with readelf",
		  "err %d errno %d\n", err, errno))
		goto disable_pmu;

	err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
	if (CHECK(err, "get_next_key from stackmap",
		  "err %d, errno %d\n", err, errno))
		goto disable_pmu;

	do {
		char build_id[64];

		err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
		if (CHECK(err, "lookup_elem from stackmap",
			  "err %d, errno %d\n", err, errno))
			goto disable_pmu;
		for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
			if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
			    id_offs[i].offset != 0) {
				for (j = 0; j < 20; ++j)
					sprintf(build_id + 2 * j, "%02x",
						id_offs[i].build_id[j] & 0xff);
				if (strstr(buf, build_id) != NULL)
					build_id_matches = 1;
			}
		previous_key = key;
	} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);

	CHECK(build_id_matches < 1, "build id match",
	      "Didn't find expected build ID from the map\n");

disable_pmu:
	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);

close_pmu:
	close(pmu_fd);

close_prog:
	bpf_object__close(obj);

out:
	return;
}

int main(void)
{
	test_pkt_access();
	test_xdp();
	test_l4lb_all();
	test_xdp_noinline();
	test_tcp_estats();
	test_bpf_obj_id();
	test_pkt_md_access();
	test_obj_name();
	test_tp_attach_query();
	test_stacktrace_map();
	test_stacktrace_build_id();
	test_stacktrace_map_raw_tp();

	printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
	return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
}