// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/capability.h>
#include <stdlib.h>
#include <test_progs.h>
#include <bpf/btf.h>

#include "autoconf_helper.h"
#include "unpriv_helpers.h"
#include "cap_helpers.h"

#define str_has_pfx(str, pfx) \
	(strncmp(str, pfx, __builtin_constant_p(pfx) ? sizeof(pfx) - 1 : strlen(pfx)) == 0)

#define TEST_LOADER_LOG_BUF_SZ 2097152

#define TEST_TAG_EXPECT_FAILURE "comment:test_expect_failure"
#define TEST_TAG_EXPECT_SUCCESS "comment:test_expect_success"
#define TEST_TAG_EXPECT_MSG_PFX "comment:test_expect_msg="
#define TEST_TAG_EXPECT_FAILURE_UNPRIV "comment:test_expect_failure_unpriv"
#define TEST_TAG_EXPECT_SUCCESS_UNPRIV "comment:test_expect_success_unpriv"
#define TEST_TAG_EXPECT_MSG_PFX_UNPRIV "comment:test_expect_msg_unpriv="
#define TEST_TAG_LOG_LEVEL_PFX "comment:test_log_level="
#define TEST_TAG_PROG_FLAGS_PFX "comment:test_prog_flags="
#define TEST_TAG_DESCRIPTION_PFX "comment:test_description="
#define TEST_TAG_RETVAL_PFX "comment:test_retval="
#define TEST_TAG_RETVAL_PFX_UNPRIV "comment:test_retval_unpriv="
#define TEST_TAG_AUXILIARY "comment:test_auxiliary"
#define TEST_TAG_AUXILIARY_UNPRIV "comment:test_auxiliary_unpriv"
#define TEST_BTF_PATH "comment:test_btf_path="

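/* The tag strings above are not meant to be open-coded in test programs;
 * they are normally attached as BTF decl tags through the convenience
 * macros in progs/bpf_misc.h (__description(), __failure, __success,
 * __msg(), __retval(), __log_level(), __flag(), ...). A minimal sketch of
 * the assumed expansion, following the bpf_misc.h naming:
 *
 *   #define __msg(msg) \
 *           __attribute__((btf_decl_tag("comment:test_expect_msg=" msg)))
 *   #define __failure \
 *           __attribute__((btf_decl_tag("comment:test_expect_failure")))
 *
 * parse_test_spec() below recovers these tags from the program's BTF by
 * matching the "comment:" prefixes defined above.
 */
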
/* Warning: duplicated in bpf_misc.h */
#define POINTER_VALUE	0xcafe4all
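/* Note: 0xcafe4all above is not a typo; it lexes as the hex constant
 * 0xcafe4a with an "ll" (long long) suffix.
 */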
#define TEST_DATA_LEN	64

#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define EFFICIENT_UNALIGNED_ACCESS 1
#else
#define EFFICIENT_UNALIGNED_ACCESS 0
#endif

static int sysctl_unpriv_disabled = -1;

enum mode {
	PRIV = 1,
	UNPRIV = 2
};

struct test_subspec {
	char *name;
	bool expect_failure;
	const char **expect_msgs;
	size_t expect_msg_cnt;
	int retval;
	bool execute;
};

struct test_spec {
	const char *prog_name;
	struct test_subspec priv;
	struct test_subspec unpriv;
	const char *btf_custom_path;
	int log_level;
	int prog_flags;
	int mode_mask;
	bool auxiliary;
	bool valid;
};

static int tester_init(struct test_loader *tester)
{
	if (!tester->log_buf) {
		tester->log_buf_sz = TEST_LOADER_LOG_BUF_SZ;
		tester->log_buf = calloc(tester->log_buf_sz, 1);
		if (!ASSERT_OK_PTR(tester->log_buf, "tester_log_buf"))
			return -ENOMEM;
	}

	return 0;
}

void test_loader_fini(struct test_loader *tester)
{
	if (!tester)
		return;

	free(tester->log_buf);
}

static void free_test_spec(struct test_spec *spec)
{
	free(spec->priv.name);
	free(spec->unpriv.name);
	free(spec->priv.expect_msgs);
	free(spec->unpriv.expect_msgs);

	spec->priv.name = NULL;
	spec->unpriv.name = NULL;
	spec->priv.expect_msgs = NULL;
	spec->unpriv.expect_msgs = NULL;
}

static int push_msg(const char *msg, struct test_subspec *subspec)
{
	void *tmp;

	tmp = realloc(subspec->expect_msgs, (1 + subspec->expect_msg_cnt) * sizeof(void *));
	if (!tmp) {
		ASSERT_FAIL("failed to realloc memory for messages\n");
		return -ENOMEM;
	}
	subspec->expect_msgs = tmp;
	subspec->expect_msgs[subspec->expect_msg_cnt++] = msg;

	return 0;
}

static int parse_int(const char *str, int *val, const char *name)
{
	char *end;
	long tmp;

	errno = 0;
	if (str_has_pfx(str, "0x"))
		tmp = strtol(str + 2, &end, 16);
	else
		tmp = strtol(str, &end, 10);
	if (errno || end[0] != '\0') {
		PRINT_FAIL("failed to parse %s from '%s'\n", name, str);
		return -EINVAL;
	}
	*val = tmp;
	return 0;
}

static int parse_retval(const char *str, int *val, const char *name)
{
	struct {
		char *name;
		int val;
	} named_values[] = {
		{ "INT_MIN"      , INT_MIN },
		{ "POINTER_VALUE", POINTER_VALUE },
		{ "TEST_DATA_LEN", TEST_DATA_LEN },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(named_values); ++i) {
		if (strcmp(str, named_values[i].name) != 0)
			continue;
		*val = named_values[i].val;
		return 0;
	}

	return parse_int(str, val, name);
}

static void update_flags(int *flags, int flag, bool clear)
{
	if (clear)
		*flags &= ~flag;
	else
		*flags |= flag;
}

/* Uses btf_decl_tag attributes to describe the expected test behavior;
 * see bpf_misc.h for a detailed description of each attribute and of
 * valid attribute combinations.
 */
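/* Example (a sketch; the macro spellings, names and verifier messages are
 * illustrative, not taken from an actual selftest) of a test program whose
 * decl tags this parser consumes:
 *
 *   SEC("tc")
 *   __description("example: reject out-of-bounds stack write")
 *   __failure __msg("invalid write to stack")
 *   __failure_unpriv
 *   __log_level(2) __flag(BPF_F_TEST_STATE_FREQ)
 *   int oob_stack_write(void *ctx)
 *   {
 *           ...
 *   }
 *
 * Each macro attaches one "comment:..." decl tag to the function's BTF;
 * the loop below matches those tags by prefix and fills struct test_spec.
 */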
static int parse_test_spec(struct test_loader *tester,
			   struct bpf_object *obj,
			   struct bpf_program *prog,
			   struct test_spec *spec)
{
	const char *description = NULL;
	bool has_unpriv_result = false;
	bool has_unpriv_retval = false;
	int func_id, i, err = 0;
	struct btf *btf;

	memset(spec, 0, sizeof(*spec));

	spec->prog_name = bpf_program__name(prog);
	spec->prog_flags = BPF_F_TEST_REG_INVARIANTS; /* by default be strict */

	btf = bpf_object__btf(obj);
	if (!btf) {
		ASSERT_FAIL("BPF object has no BTF");
		return -EINVAL;
	}

	func_id = btf__find_by_name_kind(btf, spec->prog_name, BTF_KIND_FUNC);
	if (func_id < 0) {
		ASSERT_FAIL("failed to find FUNC BTF type for '%s'", spec->prog_name);
		return -EINVAL;
	}

	for (i = 1; i < btf__type_cnt(btf); i++) {
		const char *s, *val, *msg;
		const struct btf_type *t;
		bool clear;
		int flags;

		t = btf__type_by_id(btf, i);
		if (!btf_is_decl_tag(t))
			continue;

		if (t->type != func_id || btf_decl_tag(t)->component_idx != -1)
			continue;

		s = btf__str_by_offset(btf, t->name_off);
		if (str_has_pfx(s, TEST_TAG_DESCRIPTION_PFX)) {
			description = s + sizeof(TEST_TAG_DESCRIPTION_PFX) - 1;
		} else if (strcmp(s, TEST_TAG_EXPECT_FAILURE) == 0) {
			spec->priv.expect_failure = true;
			spec->mode_mask |= PRIV;
		} else if (strcmp(s, TEST_TAG_EXPECT_SUCCESS) == 0) {
			spec->priv.expect_failure = false;
			spec->mode_mask |= PRIV;
		} else if (strcmp(s, TEST_TAG_EXPECT_FAILURE_UNPRIV) == 0) {
			spec->unpriv.expect_failure = true;
			spec->mode_mask |= UNPRIV;
			has_unpriv_result = true;
		} else if (strcmp(s, TEST_TAG_EXPECT_SUCCESS_UNPRIV) == 0) {
			spec->unpriv.expect_failure = false;
			spec->mode_mask |= UNPRIV;
			has_unpriv_result = true;
		} else if (strcmp(s, TEST_TAG_AUXILIARY) == 0) {
			spec->auxiliary = true;
			spec->mode_mask |= PRIV;
		} else if (strcmp(s, TEST_TAG_AUXILIARY_UNPRIV) == 0) {
			spec->auxiliary = true;
			spec->mode_mask |= UNPRIV;
		} else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX)) {
			msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX) - 1;
			err = push_msg(msg, &spec->priv);
			if (err)
				goto cleanup;
			spec->mode_mask |= PRIV;
		} else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX_UNPRIV)) {
			msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX_UNPRIV) - 1;
			err = push_msg(msg, &spec->unpriv);
			if (err)
				goto cleanup;
			spec->mode_mask |= UNPRIV;
		} else if (str_has_pfx(s, TEST_TAG_RETVAL_PFX)) {
			val = s + sizeof(TEST_TAG_RETVAL_PFX) - 1;
			err = parse_retval(val, &spec->priv.retval, "__retval");
			if (err)
				goto cleanup;
			spec->priv.execute = true;
			spec->mode_mask |= PRIV;
		} else if (str_has_pfx(s, TEST_TAG_RETVAL_PFX_UNPRIV)) {
			val = s + sizeof(TEST_TAG_RETVAL_PFX_UNPRIV) - 1;
			err = parse_retval(val, &spec->unpriv.retval, "__retval_unpriv");
			if (err)
				goto cleanup;
			spec->mode_mask |= UNPRIV;
			spec->unpriv.execute = true;
			has_unpriv_retval = true;
		} else if (str_has_pfx(s, TEST_TAG_LOG_LEVEL_PFX)) {
			val = s + sizeof(TEST_TAG_LOG_LEVEL_PFX) - 1;
			err = parse_int(val, &spec->log_level, "test log level");
			if (err)
				goto cleanup;
		} else if (str_has_pfx(s, TEST_TAG_PROG_FLAGS_PFX)) {
			val = s + sizeof(TEST_TAG_PROG_FLAGS_PFX) - 1;

			clear = val[0] == '!';
			if (clear)
				val++;

			if (strcmp(val, "BPF_F_STRICT_ALIGNMENT") == 0) {
				update_flags(&spec->prog_flags, BPF_F_STRICT_ALIGNMENT, clear);
			} else if (strcmp(val, "BPF_F_ANY_ALIGNMENT") == 0) {
				update_flags(&spec->prog_flags, BPF_F_ANY_ALIGNMENT, clear);
			} else if (strcmp(val, "BPF_F_TEST_RND_HI32") == 0) {
				update_flags(&spec->prog_flags, BPF_F_TEST_RND_HI32, clear);
			} else if (strcmp(val, "BPF_F_TEST_STATE_FREQ") == 0) {
				update_flags(&spec->prog_flags, BPF_F_TEST_STATE_FREQ, clear);
			} else if (strcmp(val, "BPF_F_SLEEPABLE") == 0) {
				update_flags(&spec->prog_flags, BPF_F_SLEEPABLE, clear);
			} else if (strcmp(val, "BPF_F_XDP_HAS_FRAGS") == 0) {
				update_flags(&spec->prog_flags, BPF_F_XDP_HAS_FRAGS, clear);
			} else if (strcmp(val, "BPF_F_TEST_REG_INVARIANTS") == 0) {
				update_flags(&spec->prog_flags, BPF_F_TEST_REG_INVARIANTS, clear);
			} else /* assume numeric value */ {
				err = parse_int(val, &flags, "test prog flags");
				if (err)
					goto cleanup;
				update_flags(&spec->prog_flags, flags, clear);
			}
		} else if (str_has_pfx(s, TEST_BTF_PATH)) {
			spec->btf_custom_path = s + sizeof(TEST_BTF_PATH) - 1;
		}
	}

	if (spec->mode_mask == 0)
		spec->mode_mask = PRIV;

	if (!description)
		description = spec->prog_name;

	if (spec->mode_mask & PRIV) {
		spec->priv.name = strdup(description);
		if (!spec->priv.name) {
			PRINT_FAIL("failed to allocate memory for priv.name\n");
			err = -ENOMEM;
			goto cleanup;
		}
	}

	if (spec->mode_mask & UNPRIV) {
		int descr_len = strlen(description);
		const char *suffix = " @unpriv";
		char *name;

		name = malloc(descr_len + strlen(suffix) + 1);
		if (!name) {
			PRINT_FAIL("failed to allocate memory for unpriv.name\n");
			err = -ENOMEM;
			goto cleanup;
		}

		strcpy(name, description);
		strcpy(&name[descr_len], suffix);
		spec->unpriv.name = name;
	}

	if (spec->mode_mask & (PRIV | UNPRIV)) {
		if (!has_unpriv_result)
			spec->unpriv.expect_failure = spec->priv.expect_failure;

		if (!has_unpriv_retval) {
			spec->unpriv.retval = spec->priv.retval;
			spec->unpriv.execute = spec->priv.execute;
		}

		if (!spec->unpriv.expect_msgs) {
			size_t sz = spec->priv.expect_msg_cnt * sizeof(void *);

			spec->unpriv.expect_msgs = malloc(sz);
			if (!spec->unpriv.expect_msgs) {
				PRINT_FAIL("failed to allocate memory for unpriv.expect_msgs\n");
				err = -ENOMEM;
				goto cleanup;
			}
			memcpy(spec->unpriv.expect_msgs, spec->priv.expect_msgs, sz);
			spec->unpriv.expect_msg_cnt = spec->priv.expect_msg_cnt;
		}
	}

	spec->valid = true;

	return 0;

cleanup:
	free_test_spec(spec);
	return err;
}

static void prepare_case(struct test_loader *tester,
			 struct test_spec *spec,
			 struct bpf_object *obj,
			 struct bpf_program *prog)
{
	int min_log_level = 0, prog_flags;

	if (env.verbosity > VERBOSE_NONE)
		min_log_level = 1;
	if (env.verbosity > VERBOSE_VERY)
		min_log_level = 2;

	bpf_program__set_log_buf(prog, tester->log_buf, tester->log_buf_sz);

	/* Make sure we set at least a minimal log level, unless the test
	 * already requires an even higher level. Make sure to preserve the
	 * independent log level 4 (verifier stats), though.
	 */
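	/* Example (sketch): a test tagged with log level 4 asks only for
	 * verifier stats; when test_progs runs verbosely, the minimal level
	 * computed above (1 or 2) is ORed in without clobbering bit 4.
	 */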
	if ((spec->log_level & 3) < min_log_level)
		bpf_program__set_log_level(prog, (spec->log_level & 4) | min_log_level);
	else
		bpf_program__set_log_level(prog, spec->log_level);

	prog_flags = bpf_program__flags(prog);
	bpf_program__set_flags(prog, prog_flags | spec->prog_flags);

	tester->log_buf[0] = '\0';
	tester->next_match_pos = 0;
}

static void emit_verifier_log(const char *log_buf, bool force)
{
	if (!force && env.verbosity == VERBOSE_NONE)
		return;
	fprintf(stdout, "VERIFIER LOG:\n=============\n%s=============\n", log_buf);
}

static void validate_case(struct test_loader *tester,
			  struct test_subspec *subspec,
			  struct bpf_object *obj,
			  struct bpf_program *prog,
			  int load_err)
{
	int i, j;

	for (i = 0; i < subspec->expect_msg_cnt; i++) {
		char *match;
		const char *expect_msg;

		expect_msg = subspec->expect_msgs[i];

		match = strstr(tester->log_buf + tester->next_match_pos, expect_msg);
		if (!ASSERT_OK_PTR(match, "expect_msg")) {
			/* if we are in verbose mode, we've already emitted log */
			if (env.verbosity == VERBOSE_NONE)
				emit_verifier_log(tester->log_buf, true /*force*/);
			for (j = 0; j < i; j++)
				fprintf(stderr,
					"MATCHED  MSG: '%s'\n", subspec->expect_msgs[j]);
			fprintf(stderr, "EXPECTED MSG: '%s'\n", expect_msg);
			return;
		}

		tester->next_match_pos = match - tester->log_buf + strlen(expect_msg);
	}
}

struct cap_state {
	__u64 old_caps;
	bool initialized;
};

static int drop_capabilities(struct cap_state *caps)
{
	const __u64 caps_to_drop = (1ULL << CAP_SYS_ADMIN | 1ULL << CAP_NET_ADMIN |
				    1ULL << CAP_PERFMON   | 1ULL << CAP_BPF);
	int err;

	err = cap_disable_effective(caps_to_drop, &caps->old_caps);
	if (err) {
		PRINT_FAIL("failed to drop capabilities: %i, %s\n", err, strerror(err));
		return err;
	}

	caps->initialized = true;
	return 0;
}

static int restore_capabilities(struct cap_state *caps)
{
	int err;

	if (!caps->initialized)
		return 0;

	err = cap_enable_effective(caps->old_caps, NULL);
	if (err)
		PRINT_FAIL("failed to restore capabilities: %i, %s\n", err, strerror(err));
	caps->initialized = false;
	return err;
}

static bool can_execute_unpriv(struct test_loader *tester, struct test_spec *spec)
{
	if (sysctl_unpriv_disabled < 0)
		sysctl_unpriv_disabled = get_unpriv_disabled() ? 1 : 0;
	if (sysctl_unpriv_disabled)
		return false;
	if ((spec->prog_flags & BPF_F_ANY_ALIGNMENT) && !EFFICIENT_UNALIGNED_ACCESS)
		return false;
	return true;
}

static bool is_unpriv_capable_map(struct bpf_map *map)
{
	enum bpf_map_type type;
	__u32 flags;

	type = bpf_map__type(map);

	switch (type) {
	case BPF_MAP_TYPE_HASH:
	case BPF_MAP_TYPE_PERCPU_HASH:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
		flags = bpf_map__map_flags(map);
		return !(flags & BPF_F_ZERO_SEED);
	case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
	case BPF_MAP_TYPE_ARRAY:
	case BPF_MAP_TYPE_RINGBUF:
	case BPF_MAP_TYPE_PROG_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_PERCPU_ARRAY:
	case BPF_MAP_TYPE_USER_RINGBUF:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_CGROUP_STORAGE:
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
		return true;
	default:
		return false;
	}
}

static int do_prog_test_run(int fd_prog, int *retval)
{
	__u8 tmp_out[TEST_DATA_LEN << 2] = {};
	__u8 tmp_in[TEST_DATA_LEN] = {};
	int err, saved_errno;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = tmp_in,
		.data_size_in = sizeof(tmp_in),
		.data_out = tmp_out,
		.data_size_out = sizeof(tmp_out),
		.repeat = 1,
	);

	err = bpf_prog_test_run_opts(fd_prog, &topts);
	saved_errno = errno;

	if (err) {
		PRINT_FAIL("FAIL: Unexpected bpf_prog_test_run error: %d (%s) ",
			   saved_errno, strerror(saved_errno));
		return err;
	}

	ASSERT_OK(0, "bpf_prog_test_run");
	*retval = topts.retval;

	return 0;
}

static bool should_do_test_run(struct test_spec *spec, struct test_subspec *subspec)
{
	if (!subspec->execute)
		return false;

	if (subspec->expect_failure)
		return false;

	if ((spec->prog_flags & BPF_F_ANY_ALIGNMENT) && !EFFICIENT_UNALIGNED_ACCESS) {
		if (env.verbosity != VERBOSE_NONE)
			printf("alignment prevents execution\n");
		return false;
	}

	return true;
}

/* this function is forced noinline and has a short, generic name to look
 * better in test_progs output (in case of a failure)
 */
static noinline
void run_subtest(struct test_loader *tester,
		 struct bpf_object_open_opts *open_opts,
		 const void *obj_bytes,
		 size_t obj_byte_cnt,
		 struct test_spec *specs,
		 struct test_spec *spec,
		 bool unpriv)
{
	struct test_subspec *subspec = unpriv ? &spec->unpriv : &spec->priv;
	struct bpf_program *tprog = NULL, *tprog_iter;
	struct test_spec *spec_iter;
	struct cap_state caps = {};
	struct bpf_object *tobj;
	struct bpf_map *map;
	int retval, err, i;
	bool should_load;

	if (!test__start_subtest(subspec->name))
		return;

	if (unpriv) {
		if (!can_execute_unpriv(tester, spec)) {
			test__skip();
			test__end_subtest();
			return;
		}
		if (drop_capabilities(&caps)) {
			test__end_subtest();
			return;
		}
	}

	/* Implicitly reset to NULL if next test case doesn't specify */
	open_opts->btf_custom_path = spec->btf_custom_path;

	tobj = bpf_object__open_mem(obj_bytes, obj_byte_cnt, open_opts);
	if (!ASSERT_OK_PTR(tobj, "obj_open_mem")) /* shouldn't happen */
		goto subtest_cleanup;

	i = 0;
	bpf_object__for_each_program(tprog_iter, tobj) {
		spec_iter = &specs[i++];
		should_load = false;

		if (spec_iter->valid) {
			if (strcmp(bpf_program__name(tprog_iter), spec->prog_name) == 0) {
				tprog = tprog_iter;
				should_load = true;
			}

			if (spec_iter->auxiliary &&
			    spec_iter->mode_mask & (unpriv ? UNPRIV : PRIV))
				should_load = true;
		}

		bpf_program__set_autoload(tprog_iter, should_load);
	}

	prepare_case(tester, spec, tobj, tprog);

	/* By default bpf_object__load() automatically creates all
	 * maps declared in the skeleton. Some map types are only
	 * allowed in priv mode. Disable autocreate for such maps in
	 * unpriv mode.
	 */
	bpf_object__for_each_map(map, tobj)
		bpf_map__set_autocreate(map, !unpriv || is_unpriv_capable_map(map));

	err = bpf_object__load(tobj);
	if (subspec->expect_failure) {
		if (!ASSERT_ERR(err, "unexpected_load_success")) {
			emit_verifier_log(tester->log_buf, false /*force*/);
			goto tobj_cleanup;
		}
	} else {
		if (!ASSERT_OK(err, "unexpected_load_failure")) {
			emit_verifier_log(tester->log_buf, true /*force*/);
			goto tobj_cleanup;
		}
	}

	emit_verifier_log(tester->log_buf, false /*force*/);
	validate_case(tester, subspec, tobj, tprog, err);

	if (should_do_test_run(spec, subspec)) {
		/* For some reason test_verifier executes programs
		 * with all capabilities restored. Do the same here.
		 */
		if (restore_capabilities(&caps))
			goto tobj_cleanup;

		if (tester->pre_execution_cb) {
			err = tester->pre_execution_cb(tobj);
			if (err) {
				PRINT_FAIL("pre_execution_cb failed: %d\n", err);
				goto tobj_cleanup;
			}
		}

		do_prog_test_run(bpf_program__fd(tprog), &retval);
		if (retval != subspec->retval && subspec->retval != POINTER_VALUE) {
			PRINT_FAIL("Unexpected retval: %d != %d\n", retval, subspec->retval);
			goto tobj_cleanup;
		}
	}

tobj_cleanup:
	bpf_object__close(tobj);
subtest_cleanup:
	test__end_subtest();
	restore_capabilities(&caps);
}

static void process_subtest(struct test_loader *tester,
			    const char *skel_name,
			    skel_elf_bytes_fn elf_bytes_factory)
{
	LIBBPF_OPTS(bpf_object_open_opts, open_opts, .object_name = skel_name);
	struct test_spec *specs = NULL;
	struct bpf_object *obj = NULL;
	struct bpf_program *prog;
	const void *obj_bytes;
	int err, i, nr_progs;
	size_t obj_byte_cnt;

	if (tester_init(tester) < 0)
		return; /* failed to initialize tester */

	obj_bytes = elf_bytes_factory(&obj_byte_cnt);
	obj = bpf_object__open_mem(obj_bytes, obj_byte_cnt, &open_opts);
	if (!ASSERT_OK_PTR(obj, "obj_open_mem"))
		return;

	nr_progs = 0;
	bpf_object__for_each_program(prog, obj)
		++nr_progs;

	specs = calloc(nr_progs, sizeof(struct test_spec));
	if (!ASSERT_OK_PTR(specs, "Can't alloc specs array"))
		return;

	i = 0;
	bpf_object__for_each_program(prog, obj) {
		/* ignore tests for which we can't derive a test specification */
		err = parse_test_spec(tester, obj, prog, &specs[i++]);
		if (err)
			PRINT_FAIL("Can't parse test spec for program '%s'\n",
				   bpf_program__name(prog));
	}

	i = 0;
	bpf_object__for_each_program(prog, obj) {
		struct test_spec *spec = &specs[i++];

		if (!spec->valid || spec->auxiliary)
			continue;

		if (spec->mode_mask & PRIV)
			run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt,
				    specs, spec, false);
		if (spec->mode_mask & UNPRIV)
			run_subtest(tester, &open_opts, obj_bytes, obj_byte_cnt,
				    specs, spec, true);

	}

	for (i = 0; i < nr_progs; ++i)
		free_test_spec(&specs[i]);
	free(specs);
	bpf_object__close(obj);
}

void test_loader__run_subtests(struct test_loader *tester,
			       const char *skel_name,
			       skel_elf_bytes_fn elf_bytes_factory)
{
	/* see comment in run_subtest() for why we do this function nesting */
	process_subtest(tester, skel_name, elf_bytes_factory);
}