v6.2 — tools/perf/tests/dwarf-unwind.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/zalloc.h>
#include <inttypes.h>
#include <limits.h>
#include <unistd.h>
#include "tests.h"
#include "debug.h"
#include "machine.h"
#include "event.h"
#include "../util/unwind.h"
#include "perf_regs.h"
#include "map.h"
#include "symbol.h"
#include "thread.h"
#include "callchain.h"
#include "util/synthetic-events.h"

/* For bsearch. We try to unwind functions in shared object. */
#include <stdlib.h>

/*
 * The test will assert frames are on the stack but tail call optimizations lose
 * the frame of the caller. Clang can disable this optimization on a called
 * function but GCC currently (11/2020) lacks this attribute. The barrier is
 * used to inhibit tail calls in these cases.
 */
#ifdef __has_attribute
#if __has_attribute(disable_tail_calls)
#define NO_TAIL_CALL_ATTRIBUTE __attribute__((disable_tail_calls))
#define NO_TAIL_CALL_BARRIER
#endif
#endif
#ifndef NO_TAIL_CALL_ATTRIBUTE
#define NO_TAIL_CALL_ATTRIBUTE
#define NO_TAIL_CALL_BARRIER __asm__ __volatile__("" : : : "memory");
#endif
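/*
 * Illustrative sketch, not part of the upstream file: with optimization
 * enabled, the call in sketch_caller() could otherwise be emitted as a tail
 * call, so no frame for sketch_caller() would be seen when unwinding from
 * inside sketch_callee(). Keeping the empty asm barrier after the call takes
 * it out of tail position and preserves the frame;
 * test_dwarf_unwind__krava_1()/test_dwarf_unwind__krava_2() below use exactly
 * this pattern.
 */
#if 0	/* illustration only */
static int sketch_callee(int x)
{
	return x + 1;
}

static int sketch_caller(int x)
{
	int ret;

	ret = sketch_callee(x);
	NO_TAIL_CALL_BARRIER;
	return ret;
}
#endif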

static int mmap_handler(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}

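/*
 * Synthesize PERF_RECORD_MMAP2 events for the current process (parsed from
 * /proc/<pid>/maps) and feed them through mmap_handler() above, so the
 * machine knows the test binary's and libc's mappings and the unwinder can
 * resolve symbols in them.
 */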
static int init_live_machine(struct machine *machine)
{
	union perf_event event;
	pid_t pid = getpid();

	memset(&event, 0, sizeof(event));
	return perf_event__synthesize_mmap_events(NULL, &event, pid, pid,
						  mmap_handler, machine, true);
}

/*
 * We need to keep these functions global, despite the
 * fact that they are used only locally in this object,
 * in order to keep them around even if the binary is
 * stripped. If they are gone, the unwind check for
 * symbol fails.
 */
int test_dwarf_unwind__thread(struct thread *thread);
int test_dwarf_unwind__compare(void *p1, void *p2);
int test_dwarf_unwind__krava_3(struct thread *thread);
int test_dwarf_unwind__krava_2(struct thread *thread);
int test_dwarf_unwind__krava_1(struct thread *thread);

#define MAX_STACK 8

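/*
 * Unwinder callback, invoked once per resolved frame: check the frame's
 * symbol name against the expected entry in funcs[], walking the array
 * forwards or backwards depending on the configured callchain order.
 */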
static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	unsigned long *cnt = (unsigned long *) arg;
	char *symbol = entry->ms.sym ? entry->ms.sym->name : NULL;
	static const char *funcs[MAX_STACK] = {
		"test__arch_unwind_sample",
		"test_dwarf_unwind__thread",
		"test_dwarf_unwind__compare",
		"bsearch",
		"test_dwarf_unwind__krava_3",
		"test_dwarf_unwind__krava_2",
		"test_dwarf_unwind__krava_1",
		"test__dwarf_unwind"
	};
	/*
	 * The funcs[MAX_STACK] array index, based on the
	 * callchain order setup.
	 */
	int idx = callchain_param.order == ORDER_CALLER ?
		  MAX_STACK - *cnt - 1 : *cnt;

	if (*cnt >= MAX_STACK) {
		pr_debug("failed: crossed the max stack value %d\n", MAX_STACK);
		return -1;
	}

	if (!symbol) {
		pr_debug("failed: got unresolved address 0x%" PRIx64 "\n",
			 entry->ip);
		return -1;
	}

	(*cnt)++;
	pr_debug("got: %s 0x%" PRIx64 ", expecting %s\n",
		 symbol, entry->ip, funcs[idx]);
	return strcmp((const char *) symbol, funcs[idx]);
}

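/*
 * Capture the current register set and a copy of the user stack via the
 * arch-specific test__arch_unwind_sample(), run the DWARF unwinder over the
 * sample and require that exactly MAX_STACK frames, all matching funcs[],
 * are reported.
 */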
NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__thread(struct thread *thread)
{
	struct perf_sample sample;
	unsigned long cnt = 0;
	int err = -1;

	memset(&sample, 0, sizeof(sample));

	if (test__arch_unwind_sample(&sample, thread)) {
		pr_debug("failed to get unwind sample\n");
		goto out;
	}

	err = unwind__get_entries(unwind_entry, &cnt, thread,
				  &sample, MAX_STACK, false);
	if (err)
		pr_debug("unwind failed\n");
	else if (cnt != MAX_STACK) {
		pr_debug("got wrong number of stack entries %lu != %d\n",
			 cnt, MAX_STACK);
		err = -1;
	}

 out:
	zfree(&sample.user_stack.data);
	zfree(&sample.user_regs.regs);
	return err;
}

static int global_unwind_retval = -INT_MAX;

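/*
 * bsearch() comparison callback: the first time it is called it runs the
 * unwinder (so the unwind starts underneath libc's bsearch frame), once per
 * callchain order, and stashes the result in global_unwind_retval for
 * test_dwarf_unwind__krava_3() to return.
 */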
NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__compare(void *p1, void *p2)
{
	/* Any possible value should be 'thread' */
	struct thread *thread = *(struct thread **)p1;

	if (global_unwind_retval == -INT_MAX) {
		/* Call unwinder twice for both callchain orders. */
		callchain_param.order = ORDER_CALLER;

		global_unwind_retval = test_dwarf_unwind__thread(thread);
		if (!global_unwind_retval) {
			callchain_param.order = ORDER_CALLEE;
			global_unwind_retval = test_dwarf_unwind__thread(thread);
		}
	}

	return p1 - p2;
}

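/*
 * Bottom of the test call chain: test_dwarf_unwind__krava_1() ->
 * test_dwarf_unwind__krava_2() -> here -> bsearch() ->
 * test_dwarf_unwind__compare(), so the unwind has to cross both this object
 * and the libc shared object.
 */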
NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_3(struct thread *thread)
{
	struct thread *array[2] = {thread, thread};
	void *fp = &bsearch;
	/*
	 * make _bsearch a volatile function pointer to
	 * prevent potential optimization, which may expand
	 * bsearch and call compare directly from this function,
	 * instead of libc shared object.
	 */
	void *(*volatile _bsearch)(void *, void *, size_t,
			size_t, int (*)(void *, void *));

	_bsearch = fp;
	_bsearch(array, &thread, 2, sizeof(struct thread **),
		 test_dwarf_unwind__compare);
	return global_unwind_retval;
}

NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_2(struct thread *thread)
{
	int ret;

	ret =  test_dwarf_unwind__krava_3(thread);
	NO_TAIL_CALL_BARRIER;
	return ret;
}

NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_1(struct thread *thread)
{
	int ret;

	ret =  test_dwarf_unwind__krava_2(thread);
	NO_TAIL_CALL_BARRIER;
	return ret;
}

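/*
 * Test entry point: model the live host machine, synthesize mmap events for
 * this process, switch callchains to DWARF mode and then walk down the krava
 * chain; the return value bubbles back up from the unwind checks.
 */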
static int test__dwarf_unwind(struct test_suite *test __maybe_unused,
			      int subtest __maybe_unused)
{
	struct machine *machine;
	struct thread *thread;
	int err = -1;

	machine = machine__new_host();
	if (!machine) {
		pr_err("Could not get machine\n");
		return -1;
	}

	if (machine__create_kernel_maps(machine)) {
		pr_err("Failed to create kernel maps\n");
		return -1;
	}

	callchain_param.record_mode = CALLCHAIN_DWARF;
	dwarf_callchain_users = true;

	if (init_live_machine(machine)) {
		pr_err("Could not init machine\n");
		goto out;
	}

	if (verbose > 1)
		machine__fprintf(machine, stderr);

	thread = machine__find_thread(machine, getpid(), getpid());
	if (!thread) {
		pr_err("Could not get thread\n");
		goto out;
	}

	err = test_dwarf_unwind__krava_1(thread);
	thread__put(thread);

 out:
	machine__delete_threads(machine);
	machine__delete(machine);
	return err;
}

DEFINE_SUITE("Test dwarf unwind", dwarf_unwind);
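/*
 * DEFINE_SUITE() emits the struct test_suite object that perf's built-in
 * test harness picks up; the suite should be selectable with something like
 * "perf test dwarf".
 */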
v4.6 — tools/perf/tests/dwarf-unwind.c
 
#include <linux/compiler.h>
#include <linux/types.h>
#include <unistd.h>
#include "tests.h"
#include "debug.h"
#include "machine.h"
#include "event.h"
#include "unwind.h"
#include "perf_regs.h"
#include "map.h"
#include "thread.h"
#include "callchain.h"

#if defined (__x86_64__) || defined (__i386__)
#include "arch-tests.h"
#endif

/* For bsearch. We try to unwind functions in shared object. */
#include <stdlib.h>

static int mmap_handler(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}

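/*
 * Note the older perf_event__synthesize_mmap_events() signature below: the
 * trailing 500 is the /proc/<pid>/maps parsing timeout (in ms) that this API
 * took at the time, an argument that later versions (see v6.2 above) no
 * longer pass here.
 */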
static int init_live_machine(struct machine *machine)
{
	union perf_event event;
	pid_t pid = getpid();

	return perf_event__synthesize_mmap_events(NULL, &event, pid, pid,
						  mmap_handler, machine, true, 500);
}

#define MAX_STACK 8

static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	unsigned long *cnt = (unsigned long *) arg;
	char *symbol = entry->sym ? entry->sym->name : NULL;
	static const char *funcs[MAX_STACK] = {
		"test__arch_unwind_sample",
		"unwind_thread",
		"compare",
		"bsearch",
		"krava_3",
		"krava_2",
		"krava_1",
		"test__dwarf_unwind"
	};
	/*
	 * The funcs[MAX_STACK] array index, based on the
	 * callchain order setup.
	 */
	int idx = callchain_param.order == ORDER_CALLER ?
		  MAX_STACK - *cnt - 1 : *cnt;

	if (*cnt >= MAX_STACK) {
		pr_debug("failed: crossed the max stack value %d\n", MAX_STACK);
		return -1;
	}

	if (!symbol) {
		pr_debug("failed: got unresolved address 0x%" PRIx64 "\n",
			 entry->ip);
		return -1;
	}

	(*cnt)++;
	pr_debug("got: %s 0x%" PRIx64 ", expecting %s\n",
		 symbol, entry->ip, funcs[idx]);
	return strcmp((const char *) symbol, funcs[idx]);
}

__attribute__ ((noinline))
static int unwind_thread(struct thread *thread)
{
	struct perf_sample sample;
	unsigned long cnt = 0;
	int err = -1;

	memset(&sample, 0, sizeof(sample));

	if (test__arch_unwind_sample(&sample, thread)) {
		pr_debug("failed to get unwind sample\n");
		goto out;
	}

	err = unwind__get_entries(unwind_entry, &cnt, thread,
				  &sample, MAX_STACK);
	if (err)
		pr_debug("unwind failed\n");
	else if (cnt != MAX_STACK) {
		pr_debug("got wrong number of stack entries %lu != %d\n",
			 cnt, MAX_STACK);
		err = -1;
	}

 out:
	free(sample.user_stack.data);
	free(sample.user_regs.regs);
	return err;
}

static int global_unwind_retval = -INT_MAX;

__attribute__ ((noinline))
static int compare(void *p1, void *p2)
{
	/* Any possible value should be 'thread' */
	struct thread *thread = *(struct thread **)p1;

	if (global_unwind_retval == -INT_MAX) {
		/* Call unwinder twice for both callchain orders. */
		callchain_param.order = ORDER_CALLER;

		global_unwind_retval = unwind_thread(thread);
		if (!global_unwind_retval) {
			callchain_param.order = ORDER_CALLEE;
			global_unwind_retval = unwind_thread(thread);
		}
	}

	return p1 - p2;
}

__attribute__ ((noinline))
static int krava_3(struct thread *thread)
{
	struct thread *array[2] = {thread, thread};
	void *fp = &bsearch;
	/*
	 * make _bsearch a volatile function pointer to
	 * prevent potential optimization, which may expand
	 * bsearch and call compare directly from this function,
	 * instead of libc shared object.
	 */
	void *(*volatile _bsearch)(void *, void *, size_t,
			size_t, int (*)(void *, void *));

	_bsearch = fp;
	_bsearch(array, &thread, 2, sizeof(struct thread **), compare);
	return global_unwind_retval;
}

__attribute__ ((noinline))
static int krava_2(struct thread *thread)
{
	return krava_3(thread);
}

__attribute__ ((noinline))
static int krava_1(struct thread *thread)
{
	return krava_2(thread);
}

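/*
 * Older test entry point: it only receives a subtest index; at this point
 * the test was hooked up through a struct test table in tests/builtin-test.c
 * rather than through DEFINE_SUITE() as in v6.2.
 */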
int test__dwarf_unwind(int subtest __maybe_unused)
{
	struct machine *machine;
	struct thread *thread;
	int err = -1;

	machine = machine__new_host();
	if (!machine) {
		pr_err("Could not get machine\n");
		return -1;
	}

	if (machine__create_kernel_maps(machine)) {
		pr_err("Failed to create kernel maps\n");
		return -1;
	}

	callchain_param.record_mode = CALLCHAIN_DWARF;

	if (init_live_machine(machine)) {
		pr_err("Could not init machine\n");
		goto out;
	}

	if (verbose > 1)
		machine__fprintf(machine, stderr);

	thread = machine__find_thread(machine, getpid(), getpid());
	if (!thread) {
		pr_err("Could not get thread\n");
		goto out;
	}

	err = krava_1(thread);
	thread__put(thread);

 out:
	machine__delete_threads(machine);
	machine__delete(machine);
	return err;
}