v3.15: tools/perf/tests/builtin-test.c
 
/*
 * builtin-test.c
 *
 * Builtin regression testing command: ever growing number of sanity tests
 */
#include "builtin.h"
#include "intlist.h"
#include "tests.h"
#include "debug.h"
#include "color.h"
#include "parse-options.h"
#include "symbol.h"

static struct test {
	const char *desc;
	int (*func)(void);
} tests[] = {
	{
		.desc = "vmlinux symtab matches kallsyms",
		.func = test__vmlinux_matches_kallsyms,
	},
	{
		.desc = "detect open syscall event",
		.func = test__open_syscall_event,
	},
	{
		.desc = "detect open syscall event on all cpus",
		.func = test__open_syscall_event_on_all_cpus,
	},
	{
		.desc = "read samples using the mmap interface",
		.func = test__basic_mmap,
	},
	{
		.desc = "parse events tests",
		.func = test__parse_events,
	},
#if defined(__x86_64__) || defined(__i386__)
	{
		.desc = "x86 rdpmc test",
		.func = test__rdpmc,
	},
#endif
	{
		.desc = "Validate PERF_RECORD_* events & perf_sample fields",
		.func = test__PERF_RECORD,
	},
	{
		.desc = "Test perf pmu format parsing",
		.func = test__pmu,
	},
	{
		.desc = "Test dso data interface",
		.func = test__dso_data,
	},
	{
		.desc = "roundtrip evsel->name check",
		.func = test__perf_evsel__roundtrip_name_test,
	},
	{
		.desc = "Check parsing of sched tracepoints fields",
		.func = test__perf_evsel__tp_sched_test,
	},
	{
		.desc = "Generate and check syscalls:sys_enter_open event fields",
		.func = test__syscall_open_tp_fields,
	},
	{
		.desc = "struct perf_event_attr setup",
		.func = test__attr,
	},
	{
		.desc = "Test matching and linking multiple hists",
		.func = test__hists_link,
	},
	{
		.desc = "Try 'use perf' in python, checking link problems",
		.func = test__python_use,
	},
	{
		.desc = "Test breakpoint overflow signal handler",
		.func = test__bp_signal,
	},
	{
		.desc = "Test breakpoint overflow sampling",
		.func = test__bp_signal_overflow,
	},
	{
		.desc = "Test number of exit event of a simple workload",
		.func = test__task_exit,
	},
	{
		.desc = "Test software clock events have valid period values",
		.func = test__sw_clock_freq,
	},
#if defined(__x86_64__) || defined(__i386__)
	{
		.desc = "Test converting perf time to TSC",
		.func = test__perf_time_to_tsc,
	},
#endif
	{
		.desc = "Test object code reading",
		.func = test__code_reading,
	},
	{
		.desc = "Test sample parsing",
		.func = test__sample_parsing,
	},
	{
		.desc = "Test using a dummy software event to keep tracking",
		.func = test__keep_tracking,
	},
	{
		.desc = "Test parsing with no sample_id_all bit set",
		.func = test__parse_no_sample_id_all,
	},
#if defined(__x86_64__) || defined(__i386__)
#ifdef HAVE_DWARF_UNWIND_SUPPORT
	{
		.desc = "Test dwarf unwind",
		.func = test__dwarf_unwind,
	},
#endif
#endif
	{
		.func = NULL,
	},
};

static bool perf_test__matches(int curr, int argc, const char *argv[])
{
	int i;

	if (argc == 0)
		return true;

	for (i = 0; i < argc; ++i) {
		char *end;
		long nr = strtoul(argv[i], &end, 10);

		if (*end == '\0') {
			if (nr == curr + 1)
				return true;
			continue;
		}

		if (strstr(tests[curr].desc, argv[i]))
			return true;
	}

	return false;
}

static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
{
	int i = 0;
	int width = 0;

	while (tests[i].func) {
		int len = strlen(tests[i].desc);

		if (width < len)
			width = len;
		++i;
	}

	i = 0;
	while (tests[i].func) {
		int curr = i++, err;

		if (!perf_test__matches(curr, argc, argv))
			continue;

		pr_info("%2d: %-*s:", i, width, tests[curr].desc);

		if (intlist__find(skiplist, i)) {
			color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n");
			continue;
		}

		pr_debug("\n--- start ---\n");
		err = tests[curr].func();
		pr_debug("---- end ----\n%s:", tests[curr].desc);

		switch (err) {
		case TEST_OK:
			pr_info(" Ok\n");
			break;
		case TEST_SKIP:
			color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip\n");
			break;
		case TEST_FAIL:
		default:
			color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n");
			break;
		}
	}

	return 0;
}

static int perf_test__list(int argc, const char **argv)
{
	int i = 0;

	while (tests[i].func) {
		int curr = i++;

		if (argc > 1 && !strstr(tests[curr].desc, argv[1]))
			continue;

		pr_info("%2d: %s\n", i, tests[curr].desc);
	}

	return 0;
}

int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char * const test_usage[] = {
	"perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
	NULL,
	};
	const char *skip = NULL;
	const struct option test_options[] = {
	OPT_STRING('s', "skip", &skip, "tests", "tests to skip"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_END()
	};
	struct intlist *skiplist = NULL;

	argc = parse_options(argc, argv, test_options, test_usage, 0);
	if (argc >= 1 && !strcmp(argv[0], "list"))
		return perf_test__list(argc, argv);

	symbol_conf.priv_size = sizeof(int);
	symbol_conf.sort_by_name = true;
	symbol_conf.try_vmlinux_path = true;

	if (symbol__init() < 0)
		return -1;

	if (skip != NULL)
		skiplist = intlist__new(skip);

	return __cmd_test(argc, argv, skiplist);
}
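
In this v3.15 version, adding a test means appending one more entry to the static tests[] array before its { .func = NULL } terminator; __cmd_test() numbers the entries from 1 and matches command-line arguments either by that number or by a substring of the description (strstr above). A minimal sketch of such an entry, where "my new sanity check" and test__my_new_check are hypothetical and the function, like the existing test__* functions declared in tests.h, returns TEST_OK, TEST_SKIP or TEST_FAIL:

	{
		.desc = "my new sanity check",
		.func = test__my_new_check,
	},

With the array above, for example, perf test 3 runs only the third entry and perf test syscall runs every test whose description contains "syscall".
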
v6.13.7: tools/perf/tests/builtin-test.c
// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-test.c
 *
 * Builtin regression testing command: ever growing number of sanity tests
 */
#include <fcntl.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <setjmp.h>
#include <string.h>
#include <stdlib.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/wait.h>
#include <sys/stat.h>
#include "builtin.h"
#include "config.h"
#include "hist.h"
#include "intlist.h"
#include "tests.h"
#include "debug.h"
#include "color.h"
#include <subcmd/parse-options.h>
#include <subcmd/run-command.h>
#include "string2.h"
#include "symbol.h"
#include "util/rlimit.h"
#include "util/strbuf.h"
#include <linux/kernel.h>
#include <linux/string.h>
#include <subcmd/exec-cmd.h>
#include <linux/zalloc.h>

#include "tests-scripts.h"

/*
 * Command line option to not fork the test running in the same process and
 * making them easier to debug.
 */
static bool dont_fork;
/* Fork the tests in parallel and wait for their completion. */
static bool sequential;
const char *dso_to_test;
const char *test_objdump_path = "objdump";

/*
 * List of architecture specific tests. Not a weak symbol as the array length is
 * dependent on the initialization, as such GCC with LTO complains of
 * conflicting definitions with a weak symbol.
 */
#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || defined(__powerpc64__)
extern struct test_suite *arch_tests[];
#else
static struct test_suite *arch_tests[] = {
	NULL,
};
#endif

static struct test_suite *generic_tests[] = {
	&suite__vmlinux_matches_kallsyms,
#ifdef HAVE_LIBTRACEEVENT
	&suite__openat_syscall_event,
	&suite__openat_syscall_event_on_all_cpus,
	&suite__basic_mmap,
#endif
	&suite__mem,
	&suite__parse_events,
	&suite__expr,
	&suite__PERF_RECORD,
	&suite__pmu,
	&suite__pmu_events,
	&suite__hwmon_pmu,
	&suite__tool_pmu,
	&suite__dso_data,
	&suite__perf_evsel__roundtrip_name_test,
#ifdef HAVE_LIBTRACEEVENT
	&suite__perf_evsel__tp_sched_test,
	&suite__syscall_openat_tp_fields,
#endif
	&suite__hists_link,
	&suite__python_use,
	&suite__bp_signal,
	&suite__bp_signal_overflow,
	&suite__bp_accounting,
	&suite__wp,
	&suite__task_exit,
	&suite__sw_clock_freq,
	&suite__code_reading,
	&suite__sample_parsing,
	&suite__keep_tracking,
	&suite__parse_no_sample_id_all,
	&suite__hists_filter,
	&suite__mmap_thread_lookup,
	&suite__thread_maps_share,
	&suite__hists_output,
	&suite__hists_cumulate,
#ifdef HAVE_LIBTRACEEVENT
	&suite__switch_tracking,
#endif
	&suite__fdarray__filter,
	&suite__fdarray__add,
	&suite__kmod_path__parse,
	&suite__thread_map,
	&suite__session_topology,
	&suite__thread_map_synthesize,
	&suite__thread_map_remove,
	&suite__cpu_map,
	&suite__synthesize_stat_config,
	&suite__synthesize_stat,
	&suite__synthesize_stat_round,
	&suite__event_update,
	&suite__event_times,
	&suite__backward_ring_buffer,
	&suite__sdt_event,
	&suite__is_printable_array,
	&suite__bitmap_print,
	&suite__perf_hooks,
	&suite__unit_number__scnprint,
	&suite__mem2node,
	&suite__time_utils,
	&suite__jit_write_elf,
	&suite__pfm,
	&suite__api_io,
	&suite__maps__merge_in,
	&suite__demangle_java,
	&suite__demangle_ocaml,
	&suite__parse_metric,
	&suite__pe_file_parsing,
	&suite__expand_cgroup_events,
	&suite__perf_time_to_tsc,
	&suite__dlfilter,
	&suite__sigtrap,
	&suite__event_groups,
	&suite__symbols,
	&suite__util,
	NULL,
};

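/*
 * Builtin workloads, printed by --list-workloads and selected by name
 * through -w/--workload (see run_workload() below); the workload__*
 * objects themselves are defined outside this file.
 */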
static struct test_workload *workloads[] = {
	&workload__noploop,
	&workload__thloop,
	&workload__leafloop,
	&workload__sqrtloop,
	&workload__brstack,
	&workload__datasym,
	&workload__landlock,
};

#define workloads__for_each(workload) \
	for (unsigned i = 0; i < ARRAY_SIZE(workloads) && ({ workload = workloads[i]; 1; }); i++)

static int num_subtests(const struct test_suite *t)
{
	int num;

	if (!t->test_cases)
		return 0;

	num = 0;
	while (t->test_cases[num].name)
		num++;

	return num;
}

static bool has_subtests(const struct test_suite *t)
{
	return num_subtests(t) > 1;
}

static const char *skip_reason(const struct test_suite *t, int subtest)
{
	if (!t->test_cases)
		return NULL;

	return t->test_cases[subtest >= 0 ? subtest : 0].skip_reason;
}

static const char *test_description(const struct test_suite *t, int subtest)
{
	if (t->test_cases && subtest >= 0)
		return t->test_cases[subtest].desc;

	return t->desc;
}

static test_fnptr test_function(const struct test_suite *t, int subtest)
{
	if (subtest <= 0)
		return t->test_cases[0].run_case;

	return t->test_cases[subtest].run_case;
}

static bool test_exclusive(const struct test_suite *t, int subtest)
{
	if (subtest <= 0)
		return t->test_cases[0].exclusive;

	return t->test_cases[subtest].exclusive;
}

static bool perf_test__matches(const char *desc, int curr, int argc, const char *argv[])
{
	int i;

	if (argc == 0)
		return true;

	for (i = 0; i < argc; ++i) {
		char *end;
		long nr = strtoul(argv[i], &end, 10);

		if (*end == '\0') {
			if (nr == curr + 1)
				return true;
			continue;
		}

		if (strcasestr(desc, argv[i]))
			return true;
	}

	return false;
}

struct child_test {
	struct child_process process;
	struct test_suite *test;
	int test_num;
	int subtest;
};

static jmp_buf run_test_jmp_buf;

static void child_test_sig_handler(int sig)
{
	siglongjmp(run_test_jmp_buf, sig);
}

static int run_test_child(struct child_process *process)
{
	const int signals[] = {
		SIGABRT, SIGBUS, SIGFPE, SIGILL, SIGINT, SIGPIPE, SIGQUIT, SIGSEGV, SIGTERM,
	};
	struct child_test *child = container_of(process, struct child_test, process);
	int err;

	err = sigsetjmp(run_test_jmp_buf, 1);
	if (err) {
		fprintf(stderr, "\n---- unexpected signal (%d) ----\n", err);
		err = err > 0 ? -err : -1;
		goto err_out;
	}

	for (size_t i = 0; i < ARRAY_SIZE(signals); i++)
		signal(signals[i], child_test_sig_handler);

	pr_debug("--- start ---\n");
	pr_debug("test child forked, pid %d\n", getpid());
	err = test_function(child->test, child->subtest)(child->test, child->subtest);
	pr_debug("---- end(%d) ----\n", err);

err_out:
	fflush(NULL);
	for (size_t i = 0; i < ARRAY_SIZE(signals); i++)
		signal(signals[i], SIG_DFL);
	return -err;
}

#define TEST_RUNNING -3

static int print_test_result(struct test_suite *t, int i, int subtest, int result, int width,
			     int running)
{
	if (has_subtests(t)) {
		int subw = width > 2 ? width - 2 : width;

		pr_info("%3d.%1d: %-*s:", i + 1, subtest + 1, subw, test_description(t, subtest));
	} else
		pr_info("%3d: %-*s:", i + 1, width, test_description(t, subtest));

	switch (result) {
	case TEST_RUNNING:
		color_fprintf(stderr, PERF_COLOR_YELLOW, " Running (%d active)\n", running);
		break;
	case TEST_OK:
		pr_info(" Ok\n");
		break;
	case TEST_SKIP: {
		const char *reason = skip_reason(t, subtest);

		if (reason)
			color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (%s)\n", reason);
		else
			color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip\n");
	}
		break;
	case TEST_FAIL:
	default:
		color_fprintf(stderr, PERF_COLOR_RED, " FAILED!\n");
		break;
	}

	return 0;
}

static void finish_test(struct child_test **child_tests, int running_test, int child_test_num,
		int width)
{
	struct child_test *child_test = child_tests[running_test];
	struct test_suite *t;
	int i, subi, err;
	bool err_done = false;
	struct strbuf err_output = STRBUF_INIT;
	int last_running = -1;
	int ret;

	if (child_test == NULL) {
		/* Test wasn't started. */
		return;
	}
	t = child_test->test;
	i = child_test->test_num;
	subi = child_test->subtest;
	err = child_test->process.err;
	/*
	 * For test suites with subtests, display the suite name ahead of the
	 * sub test names.
	 */
	if (has_subtests(t) && subi == 0)
		pr_info("%3d: %-*s:\n", i + 1, width, test_description(t, -1));

	/*
	 * Busy loop reading from the child's stdout/stderr that are set to be
	 * non-blocking until EOF.
	 */
	if (err > 0)
		fcntl(err, F_SETFL, O_NONBLOCK);
	if (verbose > 1) {
		if (has_subtests(t))
			pr_info("%3d.%1d: %s:\n", i + 1, subi + 1, test_description(t, subi));
		else
			pr_info("%3d: %s:\n", i + 1, test_description(t, -1));
	}
	while (!err_done) {
		struct pollfd pfds[1] = {
			{ .fd = err,
			  .events = POLLIN | POLLERR | POLLHUP | POLLNVAL,
			},
		};
		if (perf_use_color_default) {
			int running = 0;

			for (int y = running_test; y < child_test_num; y++) {
				if (child_tests[y] == NULL)
					continue;
				if (check_if_command_finished(&child_tests[y]->process) == 0)
					running++;
			}
			if (running != last_running) {
				if (last_running != -1) {
					/*
					 * Erase "Running (.. active)" line
					 * printed before poll/sleep.
					 */
					fprintf(debug_file(), PERF_COLOR_DELETE_LINE);
				}
				print_test_result(t, i, subi, TEST_RUNNING, width, running);
				last_running = running;
			}
		}

		err_done = true;
		if (err <= 0) {
			/* No child stderr to poll, sleep for 10ms for child to complete. */
			usleep(10 * 1000);
		} else {
			/* Poll to avoid excessive spinning, timeout set for 100ms. */
			poll(pfds, ARRAY_SIZE(pfds), /*timeout=*/100);
			if (pfds[0].revents) {
				char buf[512];
				ssize_t len;

				len = read(err, buf, sizeof(buf) - 1);

				if (len > 0) {
					err_done = false;
					buf[len] = '\0';
					strbuf_addstr(&err_output, buf);
				}
			}
		}
		if (err_done)
			err_done = check_if_command_finished(&child_test->process);
	}
	if (perf_use_color_default && last_running != -1) {
		/* Erase "Running (.. active)" line printed before poll/sleep. */
		fprintf(debug_file(), PERF_COLOR_DELETE_LINE);
	}
	/* Clean up child process. */
	ret = finish_command(&child_test->process);
	if (verbose > 1 || (verbose == 1 && ret == TEST_FAIL))
		fprintf(stderr, "%s", err_output.buf);

	strbuf_release(&err_output);
	print_test_result(t, i, subi, ret, width, /*running=*/0);
	if (err > 0)
		close(err);
	zfree(&child_tests[running_test]);
}

static int start_test(struct test_suite *test, int i, int subi, struct child_test **child,
		int width, int pass)
{
	int err;

	*child = NULL;
	if (dont_fork) {
		if (pass == 1) {
			pr_debug("--- start ---\n");
			err = test_function(test, subi)(test, subi);
			pr_debug("---- end ----\n");
			print_test_result(test, i, subi, err, width, /*running=*/0);
		}
		return 0;
	}
	if (pass == 1 && !sequential && test_exclusive(test, subi)) {
		/* When parallel, skip exclusive tests on the first pass. */
		return 0;
	}
	if (pass != 1 && (sequential || !test_exclusive(test, subi))) {
		/* Sequential and non-exclusive tests were run on the first pass. */
		return 0;
	}
	*child = zalloc(sizeof(**child));
	if (!*child)
		return -ENOMEM;

	(*child)->test = test;
	(*child)->test_num = i;
	(*child)->subtest = subi;
	(*child)->process.pid = -1;
	(*child)->process.no_stdin = 1;
	if (verbose <= 0) {
		(*child)->process.no_stdout = 1;
		(*child)->process.no_stderr = 1;
	} else {
		(*child)->process.stdout_to_stderr = 1;
		(*child)->process.out = -1;
		(*child)->process.err = -1;
	}
	(*child)->process.no_exec_cmd = run_test_child;
	if (sequential || pass == 2) {
		err = start_command(&(*child)->process);
		if (err)
			return err;
		finish_test(child, /*running_test=*/0, /*child_test_num=*/1, width);
		return 0;
	}
	return start_command(&(*child)->process);
}

/* State outside of __cmd_test for the sake of the signal handler. */

static size_t num_tests;
static struct child_test **child_tests;
static jmp_buf cmd_test_jmp_buf;

static void cmd_test_sig_handler(int sig)
{
	siglongjmp(cmd_test_jmp_buf, sig);
}

static int __cmd_test(struct test_suite **suites, int argc, const char *argv[],
		      struct intlist *skiplist)
{
	static int width = 0;
	int err = 0;

	for (struct test_suite **t = suites; *t; t++) {
		int len = strlen(test_description(*t, -1));

		if (width < len)
			width = len;

		if (has_subtests(*t)) {
			for (int subi = 0, subn = num_subtests(*t); subi < subn; subi++) {
				len = strlen(test_description(*t, subi));
				if (width < len)
					width = len;
				num_tests++;
			}
		} else {
			num_tests++;
		}
	}
	child_tests = calloc(num_tests, sizeof(*child_tests));
	if (!child_tests)
		return -ENOMEM;

	err = sigsetjmp(cmd_test_jmp_buf, 1);
	if (err) {
		pr_err("\nSignal (%d) while running tests.\nTerminating tests with the same signal\n",
		       err);
		for (size_t x = 0; x < num_tests; x++) {
			struct child_test *child_test = child_tests[x];

			if (!child_test || child_test->process.pid <= 0)
				continue;

			pr_debug3("Killing %d pid %d\n",
				  child_test->test_num + 1,
				  child_test->process.pid);
			kill(child_test->process.pid, err);
		}
		goto err_out;
	}
	signal(SIGINT, cmd_test_sig_handler);
	signal(SIGTERM, cmd_test_sig_handler);

	/*
	 * In parallel mode pass 1 runs non-exclusive tests in parallel, pass 2
	 * runs the exclusive tests sequentially. In other modes all tests are
	 * run in pass 1.
	 */
	for (int pass = 1; pass <= 2; pass++) {
		int child_test_num = 0;
		int i = 0;

		for (struct test_suite **t = suites; *t; t++) {
			int curr = i++;

			if (!perf_test__matches(test_description(*t, -1), curr, argc, argv)) {
				/*
				 * Test suite shouldn't be run based on
				 * description. See if subtest should.
				 */
				bool skip = true;

				for (int subi = 0, subn = num_subtests(*t); subi < subn; subi++) {
					if (perf_test__matches(test_description(*t, subi),
								curr, argc, argv))
						skip = false;
				}

				if (skip)
					continue;
			}

			if (intlist__find(skiplist, i)) {
				pr_info("%3d: %-*s:", curr + 1, width, test_description(*t, -1));
				color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n");
				continue;
			}

			if (!has_subtests(*t)) {
				err = start_test(*t, curr, -1, &child_tests[child_test_num++],
						 width, pass);
				if (err)
					goto err_out;
				continue;
			}
			for (int subi = 0, subn = num_subtests(*t); subi < subn; subi++) {
				if (!perf_test__matches(test_description(*t, subi),
							curr, argc, argv))
					continue;

				err = start_test(*t, curr, subi, &child_tests[child_test_num++],
						 width, pass);
				if (err)
					goto err_out;
			}
		}
		if (!sequential) {
			/* Parallel mode starts tests but doesn't finish them. Do that now. */
			for (size_t x = 0; x < num_tests; x++)
				finish_test(child_tests, x, num_tests, width);
		}
	}
err_out:
	signal(SIGINT, SIG_DFL);
	signal(SIGTERM, SIG_DFL);
	if (err) {
		pr_err("Internal test harness failure. Completing any started tests:\n:");
		for (size_t x = 0; x < num_tests; x++)
			finish_test(child_tests, x, num_tests, width);
	}
	free(child_tests);
	return err;
}

static int perf_test__list(struct test_suite **suites, int argc, const char **argv)
{
	int i = 0;

	for (struct test_suite **t = suites; *t; t++) {
		int curr = i++;

		if (!perf_test__matches(test_description(*t, -1), curr, argc, argv))
			continue;

		pr_info("%3d: %s\n", i, test_description(*t, -1));

		if (has_subtests(*t)) {
			int subn = num_subtests(*t);
			int subi;

			for (subi = 0; subi < subn; subi++)
				pr_info("%3d:%1d: %s\n", i, subi + 1,
					test_description(*t, subi));
		}
	}
	return 0;
}

static int workloads__fprintf_list(FILE *fp)
{
	struct test_workload *twl;
	int printed = 0;

	workloads__for_each(twl)
		printed += fprintf(fp, "%s\n", twl->name);

	return printed;
}

static int run_workload(const char *work, int argc, const char **argv)
{
	struct test_workload *twl;

	workloads__for_each(twl) {
		if (!strcmp(twl->name, work))
			return twl->func(argc, argv);
	}

	pr_info("No workload found: %s\n", work);
	return -1;
}

static int perf_test__config(const char *var, const char *value,
			     void *data __maybe_unused)
{
	if (!strcmp(var, "annotate.objdump"))
		test_objdump_path = value;

	return 0;
}

static struct test_suite **build_suites(void)
{
	/*
	 * TODO: suites is static to avoid needing to clean up the scripts tests
	 * for leak sanitizer.
	 */
	static struct test_suite **suites[] = {
		generic_tests,
		arch_tests,
		NULL,
	};
	struct test_suite **result;
	struct test_suite *t;
	size_t n = 0, num_suites = 0;

	if (suites[2] == NULL)
		suites[2] = create_script_test_suites();

#define for_each_test(t)						\
	for (size_t i = 0, j = 0; i < ARRAY_SIZE(suites); i++, j = 0)	\
		while ((t = suites[i][j++]) != NULL)

	for_each_test(t)
		num_suites++;

	result = calloc(num_suites + 1, sizeof(struct test_suite *));

	for (int pass = 1; pass <= 2; pass++) {
		for_each_test(t) {
			bool exclusive = false;

			if (!has_subtests(t)) {
				exclusive = test_exclusive(t, -1);
			} else {
				for (int subi = 0, subn = num_subtests(t); subi < subn; subi++) {
					if (test_exclusive(t, subi)) {
						exclusive = true;
						break;
					}
				}
			}
			if ((!exclusive && pass == 1) || (exclusive && pass == 2))
				result[n++] = t;
		}
	}
	return result;
#undef for_each_test
}

int cmd_test(int argc, const char **argv)
{
	const char *test_usage[] = {
	"perf test [<options>] [{list <test-name-fragment>|[<test-name-fragments>|<test-numbers>]}]",
	NULL,
	};
	const char *skip = NULL;
	const char *workload = NULL;
	bool list_workloads = false;
	const struct option test_options[] = {
	OPT_STRING('s', "skip", &skip, "tests", "tests to skip"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('F', "dont-fork", &dont_fork,
		    "Do not fork for testcase"),
	OPT_BOOLEAN('S', "sequential", &sequential,
		    "Run the tests one after another rather than in parallel"),
	OPT_STRING('w', "workload", &workload, "work", "workload to run for testing, use '--list-workloads' to list the available ones."),
	OPT_BOOLEAN(0, "list-workloads", &list_workloads, "List the available builtin workloads to use with -w/--workload"),
	OPT_STRING(0, "dso", &dso_to_test, "dso", "dso to test"),
	OPT_STRING(0, "objdump", &test_objdump_path, "path",
		   "objdump binary to use for disassembly and annotations"),
	OPT_END()
	};
	const char * const test_subcommands[] = { "list", NULL };
	struct intlist *skiplist = NULL;
	int ret = hists__init();
	struct test_suite **suites;

	if (ret < 0)
		return ret;

	perf_config(perf_test__config, NULL);

	/* Unbuffered output */
	setvbuf(stdout, NULL, _IONBF, 0);

	argc = parse_options_subcommand(argc, argv, test_options, test_subcommands, test_usage, 0);
	if (argc >= 1 && !strcmp(argv[0], "list")) {
		suites = build_suites();
		ret = perf_test__list(suites, argc - 1, argv + 1);
		free(suites);
		return ret;
	}

	if (workload)
		return run_workload(workload, argc, argv);

	if (list_workloads) {
		workloads__fprintf_list(stdout);
		return 0;
	}

	if (dont_fork)
		sequential = true;

	symbol_conf.priv_size = sizeof(int);
	symbol_conf.try_vmlinux_path = true;

	if (symbol__init(NULL) < 0)
		return -1;

	if (skip != NULL)
		skiplist = intlist__new(skip);
	/*
	 * Tests that create BPF maps, for instance, need more than the 64K
	 * default:
	 */
	rlimit__bump_memlock();

	suites = build_suites();
	ret = __cmd_test(suites, argc, argv, skiplist);
	free(suites);
	return ret;
}
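
By v6.13.7 a test is no longer a bare function pointer but a struct test_suite whose test_cases array ends at an entry with a NULL .name, as the num_subtests()/test_function() accessors above rely on. The struct definitions live in tests.h, which is not part of this listing, so the following is only a sketch inferred from the members used here (.name, .desc, .run_case, .exclusive); the type name struct test_case and the identifiers suite__example and test__example are assumptions for illustration:

static int test__example(struct test_suite *suite __maybe_unused,
			 int subtest __maybe_unused)
{
	/* A real test would exercise some perf functionality here. */
	return TEST_OK;
}

static struct test_case example_test_cases[] = {
	{
		.name = "example",
		.desc = "Example sanity check",
		.run_case = test__example,
		/*
		 * Setting .exclusive would defer this case to the second,
		 * sequential pass when running in the default parallel mode.
		 */
	},
	{ .name = NULL, },
};

struct test_suite suite__example = {
	.desc = "Example sanity check",
	.test_cases = example_test_cases,
};

Once &suite__example is added to generic_tests[] ahead of its terminating NULL, perf test example would select it by case-insensitive substring (strcasestr above), perf test -F example would run it without forking, and perf test -s N would skip it by number, matching the option table in cmd_test().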