// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
 */
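/*
 * User-space side of the trace_event BPF sample: it loads the companion
 * "<argv0>_kern.o" object, attaches its BPF program to sampling perf
 * events (hardware, software, cache and raw types), generates some load
 * with dd, and then dumps the kernel/user stack traces collected in the
 * BPF maps.
 */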
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <fcntl.h>
#include <poll.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>
#include <linux/bpf.h>
#include <signal.h>
#include <assert.h>
#include <errno.h>
#include <sys/resource.h>
#include "libbpf.h"
#include "bpf_load.h"
#include "perf-sys.h"
#include "trace_helpers.h"

#define SAMPLE_FREQ 50

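/* Sanity flags: print_ksym() sets them when sys_read() or sys_write()
 * shows up in a sampled kernel stack; print_stacks() complains and exits
 * if either symbol was never seen.
 */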
static bool sys_read_seen, sys_write_seen;

static void print_ksym(__u64 addr)
{
	struct ksym *sym;

	if (!addr)
		return;
	sym = ksym_search(addr);
	if (!sym) {
		printf("ksym not found. Is kallsyms loaded?\n");
		return;
	}

	printf("%s;", sym->name);
	if (!strcmp(sym->name, "sys_read"))
		sys_read_seen = true;
	else if (!strcmp(sym->name, "sys_write"))
		sys_write_seen = true;
}

static void print_addr(__u64 addr)
{
	if (!addr)
		return;
	printf("%llx;", addr);
}

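/* Key layout of the counts map: the task comm plus the kernel and user
 * stack ids used to look up frames in the stack map.  This is expected to
 * mirror the key used by the companion *_kern.o program.
 */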
#define TASK_COMM_LEN 16

struct key_t {
	char comm[TASK_COMM_LEN];
	__u32 kernstack;
	__u32 userstack;
};

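/* Print one counts-map entry: the sample count and comm, then the
 * symbolized kernel stack and raw user stack fetched by id from the
 * stack map (map_fd[1]), frames separated by ';'.  A kernstack value of
 * -EEXIST indicates a stack map hash collision.
 */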
static void print_stack(struct key_t *key, __u64 count)
{
	__u64 ip[PERF_MAX_STACK_DEPTH] = {};
	static bool warned;
	int i;

	printf("%3lld %s;", count, key->comm);
	if (bpf_map_lookup_elem(map_fd[1], &key->kernstack, ip) != 0) {
		printf("---;");
	} else {
		for (i = PERF_MAX_STACK_DEPTH - 1; i >= 0; i--)
			print_ksym(ip[i]);
	}
	printf("-;");
	if (bpf_map_lookup_elem(map_fd[1], &key->userstack, ip) != 0) {
		printf("---;");
	} else {
		for (i = PERF_MAX_STACK_DEPTH - 1; i >= 0; i--)
			print_addr(ip[i]);
	}
	if (count < 6)
		printf("\r");
	else
		printf("\n");

	if (key->kernstack == -EEXIST && !warned) {
		printf("stackmap collisions seen. Consider increasing size\n");
		warned = true;
	} else if ((int)key->kernstack < 0 && (int)key->userstack < 0) {
		printf("err stackid %d %d\n", key->kernstack, key->userstack);
	}
}

static void int_exit(int sig)
{
	kill(0, SIGKILL);
	exit(0);
}

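/* Drain the counts map (map_fd[0]): print and delete every entry, verify
 * that sys_read() and sys_write() appeared in at least one kernel stack,
 * then clear the stack map so the next test starts from a clean slate.
 */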
static void print_stacks(void)
{
	struct key_t key = {}, next_key;
	__u64 value;
	__u32 stackid = 0, next_id;
	int fd = map_fd[0], stack_map = map_fd[1];

	sys_read_seen = sys_write_seen = false;
	while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
		bpf_map_lookup_elem(fd, &next_key, &value);
		print_stack(&next_key, value);
		bpf_map_delete_elem(fd, &next_key);
		key = next_key;
	}
	printf("\n");
	if (!sys_read_seen || !sys_write_seen) {
		printf("BUG kernel stack doesn't contain sys_read() and sys_write()\n");
		int_exit(0);
	}

	/* clear stack map */
	while (bpf_map_get_next_key(stack_map, &stackid, &next_id) == 0) {
		bpf_map_delete_elem(stack_map, &next_id);
		stackid = next_id;
	}
}

static inline int generate_load(void)
{
	if (system("dd if=/dev/zero of=/dev/null count=5000k status=none") < 0) {
		printf("failed to generate some load with dd: %s\n", strerror(errno));
		return -1;
	}

	return 0;
}

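/* System-wide variant: open one sampling event per CPU (pid = -1, cpu = i),
 * attach the BPF program with PERF_EVENT_IOC_SET_BPF, enable the events,
 * run the dd load and dump the collected stacks.
 */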
static void test_perf_event_all_cpu(struct perf_event_attr *attr)
{
	int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
	int *pmu_fd = malloc(nr_cpus * sizeof(int));
	int i, error = 0;

	/* system wide perf event, no need to inherit */
	attr->inherit = 0;

	/* open perf_event on all cpus */
	for (i = 0; i < nr_cpus; i++) {
		pmu_fd[i] = sys_perf_event_open(attr, -1, i, -1, 0);
		if (pmu_fd[i] < 0) {
			printf("sys_perf_event_open failed\n");
			error = 1;
			goto all_cpu_err;
		}
		assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
		assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE) == 0);
	}

	if (generate_load() < 0) {
		error = 1;
		goto all_cpu_err;
	}
	print_stacks();
all_cpu_err:
	for (i--; i >= 0; i--) {
		ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
		close(pmu_fd[i]);
	}
	free(pmu_fd);
	if (error)
		int_exit(0);
}

static void test_perf_event_task(struct perf_event_attr *attr)
{
	int pmu_fd, error = 0;

	/* per task perf event, enable inherit so the "dd ..." command can be traced properly.
	 * Enabling inherit will cause bpf_perf_prog_read_time helper failure.
	 */
	attr->inherit = 1;

	/* open task bound event */
	pmu_fd = sys_perf_event_open(attr, 0, -1, -1, 0);
	if (pmu_fd < 0) {
		printf("sys_perf_event_open failed\n");
		int_exit(0);
	}
	assert(ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
	assert(ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE) == 0);

	if (generate_load() < 0) {
		error = 1;
		goto err;
	}
	print_stacks();
err:
	ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
	close(pmu_fd);
	if (error)
		int_exit(0);
}

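/* Exercise the all-CPU and per-task paths with a range of event types.
 * PERF_TYPE_HW_CACHE configs are encoded as
 * cache_id | (op_id << 8) | (result_id << 16), per <linux/perf_event.h>;
 * the PERF_TYPE_RAW configs below are Intel-specific event codes.
 */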
static void test_bpf_perf_event(void)
{
	struct perf_event_attr attr_type_hw = {
		.sample_freq = SAMPLE_FREQ,
		.freq = 1,
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_event_attr attr_type_sw = {
		.sample_freq = SAMPLE_FREQ,
		.freq = 1,
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_CPU_CLOCK,
	};
	struct perf_event_attr attr_hw_cache_l1d = {
		.sample_freq = SAMPLE_FREQ,
		.freq = 1,
		.type = PERF_TYPE_HW_CACHE,
		.config =
			PERF_COUNT_HW_CACHE_L1D |
			(PERF_COUNT_HW_CACHE_OP_READ << 8) |
			(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16),
	};
	struct perf_event_attr attr_hw_cache_branch_miss = {
		.sample_freq = SAMPLE_FREQ,
		.freq = 1,
		.type = PERF_TYPE_HW_CACHE,
		.config =
			PERF_COUNT_HW_CACHE_BPU |
			(PERF_COUNT_HW_CACHE_OP_READ << 8) |
			(PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
	};
	struct perf_event_attr attr_type_raw = {
		.sample_freq = SAMPLE_FREQ,
		.freq = 1,
		.type = PERF_TYPE_RAW,
		/* Intel Instruction Retired */
		.config = 0xc0,
	};
	struct perf_event_attr attr_type_raw_lock_load = {
		.sample_freq = SAMPLE_FREQ,
		.freq = 1,
		.type = PERF_TYPE_RAW,
		/* Intel MEM_UOPS_RETIRED.LOCK_LOADS */
		.config = 0x21d0,
		/* Request to record lock address from PEBS */
		.sample_type = PERF_SAMPLE_ADDR,
		/* Record address value requires precise event */
		.precise_ip = 2,
	};

	printf("Test HW_CPU_CYCLES\n");
	test_perf_event_all_cpu(&attr_type_hw);
	test_perf_event_task(&attr_type_hw);

	printf("Test SW_CPU_CLOCK\n");
	test_perf_event_all_cpu(&attr_type_sw);
	test_perf_event_task(&attr_type_sw);

	printf("Test HW_CACHE_L1D\n");
	test_perf_event_all_cpu(&attr_hw_cache_l1d);
	test_perf_event_task(&attr_hw_cache_l1d);

	printf("Test HW_CACHE_BPU\n");
	test_perf_event_all_cpu(&attr_hw_cache_branch_miss);
	test_perf_event_task(&attr_hw_cache_branch_miss);

	printf("Test Instruction Retired\n");
	test_perf_event_all_cpu(&attr_type_raw);
	test_perf_event_task(&attr_type_raw);

	printf("Test Lock Load\n");
	test_perf_event_all_cpu(&attr_type_raw_lock_load);
	test_perf_event_task(&attr_type_raw_lock_load);

	printf("*** PASS ***\n");
}

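/* Load kallsyms and the BPF object derived from argv[0], fork a child to
 * echo the kernel trace pipe, then run the tests.  SIGINT/SIGTERM (and the
 * end of the test run) tear down the whole process group via int_exit().
 */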
int main(int argc, char **argv)
{
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
	char filename[256];

	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
	setrlimit(RLIMIT_MEMLOCK, &r);

	signal(SIGINT, int_exit);
	signal(SIGTERM, int_exit);

	if (load_kallsyms()) {
		printf("failed to process /proc/kallsyms\n");
		return 1;
	}

	if (load_bpf_file(filename)) {
		printf("%s", bpf_log_buf);
		return 2;
	}

	if (fork() == 0) {
		read_trace_pipe();
		return 0;
	}
	test_bpf_perf_event();
	int_exit(0);
	return 0;
}
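/* Usage note (assumption, not part of the original sample): this program is
 * typically built as part of samples/bpf and run as root; the matching
 * <binary>_kern.o must sit next to the binary, since the object name is
 * derived from argv[0] at run time.
 */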