v4.17
// SPDX-License-Identifier: GPL-2.0
#include <stdbool.h>
#include <inttypes.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include "util.h"
#include "event.h"
#include "evsel.h"
#include "debug.h"

#include "tests.h"

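/*
 * COMP() compares a single scalar member of the two samples; MCOMP()
 * compares a member byte-for-byte with memcmp() (used for embedded structs
 * such as read-group values and branch entries).  Both bail out of
 * samples_same() with a debug message on the first mismatch.
 */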
#define COMP(m) do {					\
	if (s1->m != s2->m) {				\
		pr_debug("Samples differ at '"#m"'\n");	\
		return false;				\
	}						\
} while (0)

#define MCOMP(m) do {					\
	if (memcmp(&s1->m, &s2->m, sizeof(s1->m))) {	\
		pr_debug("Samples differ at '"#m"'\n");	\
		return false;				\
	}						\
} while (0)

static bool samples_same(const struct perf_sample *s1,
			 const struct perf_sample *s2,
			 u64 type, u64 read_format)
{
	size_t i;

	if (type & PERF_SAMPLE_IDENTIFIER)
		COMP(id);

	if (type & PERF_SAMPLE_IP)
		COMP(ip);

	if (type & PERF_SAMPLE_TID) {
		COMP(pid);
		COMP(tid);
	}

	if (type & PERF_SAMPLE_TIME)
		COMP(time);

	if (type & PERF_SAMPLE_ADDR)
		COMP(addr);

	if (type & PERF_SAMPLE_ID)
		COMP(id);

	if (type & PERF_SAMPLE_STREAM_ID)
		COMP(stream_id);

	if (type & PERF_SAMPLE_CPU)
		COMP(cpu);

	if (type & PERF_SAMPLE_PERIOD)
		COMP(period);

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			COMP(read.group.nr);
		else
			COMP(read.one.value);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			COMP(read.time_enabled);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			COMP(read.time_running);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			for (i = 0; i < s1->read.group.nr; i++)
				MCOMP(read.group.values[i]);
		} else {
			COMP(read.one.id);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		COMP(callchain->nr);
		for (i = 0; i < s1->callchain->nr; i++)
			COMP(callchain->ips[i]);
	}

	if (type & PERF_SAMPLE_RAW) {
		COMP(raw_size);
		if (memcmp(s1->raw_data, s2->raw_data, s1->raw_size)) {
			pr_debug("Samples differ at 'raw_data'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		COMP(branch_stack->nr);
		for (i = 0; i < s1->branch_stack->nr; i++)
			MCOMP(branch_stack->entries[i]);
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		size_t sz = hweight_long(s1->user_regs.mask) * sizeof(u64);

		COMP(user_regs.mask);
		COMP(user_regs.abi);
		if (s1->user_regs.abi &&
		    (!s1->user_regs.regs || !s2->user_regs.regs ||
		     memcmp(s1->user_regs.regs, s2->user_regs.regs, sz))) {
			pr_debug("Samples differ at 'user_regs'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		COMP(user_stack.size);
		if (memcmp(s1->user_stack.data, s2->user_stack.data,
			   s1->user_stack.size)) {
			pr_debug("Samples differ at 'user_stack'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		COMP(weight);

	if (type & PERF_SAMPLE_DATA_SRC)
		COMP(data_src);

	if (type & PERF_SAMPLE_TRANSACTION)
		COMP(transaction);

	if (type & PERF_SAMPLE_REGS_INTR) {
		size_t sz = hweight_long(s1->intr_regs.mask) * sizeof(u64);

		COMP(intr_regs.mask);
		COMP(intr_regs.abi);
		if (s1->intr_regs.abi &&
		    (!s1->intr_regs.regs || !s2->intr_regs.regs ||
		     memcmp(s1->intr_regs.regs, s2->intr_regs.regs, sz))) {
			pr_debug("Samples differ at 'intr_regs'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR)
		COMP(phys_addr);

	return true;
}

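/*
 * Fill in a reference sample for the given sample_type/read_format,
 * synthesize a PERF_RECORD_SAMPLE event from it, parse that event back with
 * perf_evsel__parse_sample() and check that the round-tripped sample matches
 * the original.
 */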
static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
{
	struct perf_evsel evsel = {
		.needs_swap = false,
		.attr = {
			.sample_type = sample_type,
			.read_format = read_format,
		},
	};
	union perf_event *event;
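	/*
	 * Overlay a raw u64 array on the perf structs so the variable-length
	 * data can be written directly: data[0] is the entry count (nr) and
	 * the following words are the callchain ips / branch entries.
	 */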
	union {
		struct ip_callchain callchain;
		u64 data[64];
	} callchain = {
		/* 3 ips */
		.data = {3, 201, 202, 203},
	};
	union {
		struct branch_stack branch_stack;
		u64 data[64];
	} branch_stack = {
		/* 1 branch_entry */
		.data = {1, 211, 212, 213},
	};
	u64 regs[64];
	const u64 raw_data[] = {0x123456780a0b0c0dULL, 0x1102030405060708ULL};
	const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL};
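	/*
	 * The sample fields below are filled with arbitrary, mostly distinct
	 * values so that a value parsed into the wrong member shows up as a
	 * mismatch in samples_same().
	 */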
	struct perf_sample sample = {
		.ip		= 101,
		.pid		= 102,
		.tid		= 103,
		.time		= 104,
		.addr		= 105,
		.id		= 106,
		.stream_id	= 107,
		.period		= 108,
		.weight		= 109,
		.cpu		= 110,
		.raw_size	= sizeof(raw_data),
		.data_src	= 111,
		.transaction	= 112,
		.raw_data	= (void *)raw_data,
		.callchain	= &callchain.callchain,
		.branch_stack	= &branch_stack.branch_stack,
		.user_regs	= {
			.abi	= PERF_SAMPLE_REGS_ABI_64,
			.mask	= sample_regs,
			.regs	= regs,
		},
		.user_stack	= {
			.size	= sizeof(data),
			.data	= (void *)data,
		},
		.read		= {
			.time_enabled = 0x030a59d664fca7deULL,
			.time_running = 0x011b6ae553eb98edULL,
		},
		.intr_regs	= {
			.abi	= PERF_SAMPLE_REGS_ABI_64,
			.mask	= sample_regs,
			.regs	= regs,
		},
		.phys_addr	= 113,
	};
	struct sample_read_value values[] = {{1, 5}, {9, 3}, {2, 7}, {6, 4},};
	struct perf_sample sample_out;
	size_t i, sz, bufsz;
	int err, ret = -1;

	if (sample_type & PERF_SAMPLE_REGS_USER)
		evsel.attr.sample_regs_user = sample_regs;

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		evsel.attr.sample_regs_intr = sample_regs;

	for (i = 0; i < sizeof(regs); i++)
		*(i + (u8 *)regs) = i & 0xfe;

	if (read_format & PERF_FORMAT_GROUP) {
		sample.read.group.nr     = 4;
		sample.read.group.values = values;
	} else {
		sample.read.one.value = 0x08789faeb786aa87ULL;
		sample.read.one.id    = 99;
	}

	sz = perf_event__sample_event_size(&sample, sample_type, read_format);
	bufsz = sz + 4096; /* Add a bit for overrun checking */
	event = malloc(bufsz);
	if (!event) {
		pr_debug("malloc failed\n");
		return -1;
	}

	memset(event, 0xff, bufsz);
	event->header.type = PERF_RECORD_SAMPLE;
	event->header.misc = 0;
	event->header.size = sz;

	err = perf_event__synthesize_sample(event, sample_type, read_format,
					    &sample);
	if (err) {
		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
			 "perf_event__synthesize_sample", sample_type, err);
		goto out_free;
	}

	/* The data does not contain 0xff so we use that to check the size */
	for (i = bufsz; i > 0; i--) {
		if (*(i - 1 + (u8 *)event) != 0xff)
			break;
	}
	if (i != sz) {
		pr_debug("Event size mismatch: actual %zu vs expected %zu\n",
			 i, sz);
		goto out_free;
	}

	evsel.sample_size = __perf_evsel__sample_size(sample_type);

	err = perf_evsel__parse_sample(&evsel, event, &sample_out);
	if (err) {
		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
			 "perf_evsel__parse_sample", sample_type, err);
		goto out_free;
	}

	if (!samples_same(&sample, &sample_out, sample_type, read_format)) {
		pr_debug("parsing failed for sample_type %#"PRIx64"\n",
			 sample_type);
		goto out_free;
	}

	ret = 0;
out_free:
	free(event);
	if (ret && read_format)
		pr_debug("read_format %#"PRIx64"\n", read_format);
	return ret;
}

/**
 * test__sample_parsing - test sample parsing.
 *
 * This function implements a test that synthesizes a sample event, parses it
 * and then checks that the parsed sample matches the original sample.  The test
 * checks sample format bits separately and together.  If the test passes %0 is
 * returned, otherwise %-1 is returned.
 */
int test__sample_parsing(struct test *test __maybe_unused, int subtest __maybe_unused)
{
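	/*
	 * read_format values to exercise: PERF_FORMAT_ID (4) is always set,
	 * combined with TOTAL_TIME_ENABLED (1), TOTAL_TIME_RUNNING (2) and,
	 * for the second half, PERF_FORMAT_GROUP (8).
	 */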
	const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15};
	u64 sample_type;
	u64 sample_regs;
	size_t i;
	int err;

	/*
	 * Fail the test if it has not been updated when new sample format bits
	 * were added.  Please actually update the test rather than just change
	 * the condition below.
	 */
	if (PERF_SAMPLE_MAX > PERF_SAMPLE_PHYS_ADDR << 1) {
		pr_debug("sample format has changed, some new PERF_SAMPLE_ bit was introduced - test needs updating\n");
		return -1;
	}

	/* Test each sample format bit separately */
	for (sample_type = 1; sample_type != PERF_SAMPLE_MAX;
	     sample_type <<= 1) {
		/* Test read_format variations */
		if (sample_type == PERF_SAMPLE_READ) {
			for (i = 0; i < ARRAY_SIZE(rf); i++) {
				err = do_test(sample_type, 0, rf[i]);
				if (err)
					return err;
			}
			continue;
		}
		sample_regs = 0;

		if (sample_type == PERF_SAMPLE_REGS_USER)
			sample_regs = 0x3fff;

		if (sample_type == PERF_SAMPLE_REGS_INTR)
			sample_regs = 0xff0fff;

		err = do_test(sample_type, sample_regs, 0);
		if (err)
			return err;
	}

	/* Test all sample format bits together */
	sample_type = PERF_SAMPLE_MAX - 1;
	sample_regs = 0x3fff; /* shared by intr and user regs */
	for (i = 0; i < ARRAY_SIZE(rf); i++) {
		err = do_test(sample_type, sample_regs, rf[i]);
		if (err)
			return err;
	}

	return 0;
}
v3.15
 
#include <stdbool.h>
#include <inttypes.h>

#include "util.h"
#include "event.h"
#include "evsel.h"

#include "tests.h"

#define COMP(m) do {					\
	if (s1->m != s2->m) {				\
		pr_debug("Samples differ at '"#m"'\n");	\
		return false;				\
	}						\
} while (0)

#define MCOMP(m) do {					\
	if (memcmp(&s1->m, &s2->m, sizeof(s1->m))) {	\
		pr_debug("Samples differ at '"#m"'\n");	\
		return false;				\
	}						\
} while (0)

static bool samples_same(const struct perf_sample *s1,
			 const struct perf_sample *s2,
			 u64 type, u64 read_format)
{
	size_t i;

	if (type & PERF_SAMPLE_IDENTIFIER)
		COMP(id);

	if (type & PERF_SAMPLE_IP)
		COMP(ip);

	if (type & PERF_SAMPLE_TID) {
		COMP(pid);
		COMP(tid);
	}

	if (type & PERF_SAMPLE_TIME)
		COMP(time);

	if (type & PERF_SAMPLE_ADDR)
		COMP(addr);

	if (type & PERF_SAMPLE_ID)
		COMP(id);

	if (type & PERF_SAMPLE_STREAM_ID)
		COMP(stream_id);

	if (type & PERF_SAMPLE_CPU)
		COMP(cpu);

	if (type & PERF_SAMPLE_PERIOD)
		COMP(period);

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			COMP(read.group.nr);
		else
			COMP(read.one.value);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			COMP(read.time_enabled);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			COMP(read.time_running);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			for (i = 0; i < s1->read.group.nr; i++)
				MCOMP(read.group.values[i]);
		} else {
			COMP(read.one.id);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		COMP(callchain->nr);
		for (i = 0; i < s1->callchain->nr; i++)
			COMP(callchain->ips[i]);
	}

	if (type & PERF_SAMPLE_RAW) {
		COMP(raw_size);
		if (memcmp(s1->raw_data, s2->raw_data, s1->raw_size)) {
			pr_debug("Samples differ at 'raw_data'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		COMP(branch_stack->nr);
		for (i = 0; i < s1->branch_stack->nr; i++)
			MCOMP(branch_stack->entries[i]);
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		size_t sz = hweight_long(s1->user_regs.mask) * sizeof(u64);

		COMP(user_regs.mask);
		COMP(user_regs.abi);
		if (s1->user_regs.abi &&
		    (!s1->user_regs.regs || !s2->user_regs.regs ||
		     memcmp(s1->user_regs.regs, s2->user_regs.regs, sz))) {
			pr_debug("Samples differ at 'user_regs'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		COMP(user_stack.size);
		if (memcmp(s1->user_stack.data, s2->user_stack.data,
			   s1->user_stack.size)) {
			pr_debug("Samples differ at 'user_stack'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		COMP(weight);

	if (type & PERF_SAMPLE_DATA_SRC)
		COMP(data_src);

	if (type & PERF_SAMPLE_TRANSACTION)
		COMP(transaction);

	return true;
}

static int do_test(u64 sample_type, u64 sample_regs_user, u64 read_format)
{
	struct perf_evsel evsel = {
		.needs_swap = false,
		.attr = {
			.sample_type = sample_type,
			.sample_regs_user = sample_regs_user,
			.read_format = read_format,
		},
	};
	union perf_event *event;
	union {
		struct ip_callchain callchain;
		u64 data[64];
	} callchain = {
		/* 3 ips */
		.data = {3, 201, 202, 203},
	};
	union {
		struct branch_stack branch_stack;
		u64 data[64];
	} branch_stack = {
		/* 1 branch_entry */
		.data = {1, 211, 212, 213},
	};
	u64 user_regs[64];
	const u64 raw_data[] = {0x123456780a0b0c0dULL, 0x1102030405060708ULL};
	const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL};
	struct perf_sample sample = {
		.ip		= 101,
		.pid		= 102,
		.tid		= 103,
		.time		= 104,
		.addr		= 105,
		.id		= 106,
		.stream_id	= 107,
		.period		= 108,
		.weight		= 109,
		.cpu		= 110,
		.raw_size	= sizeof(raw_data),
		.data_src	= 111,
		.transaction	= 112,
		.raw_data	= (void *)raw_data,
		.callchain	= &callchain.callchain,
		.branch_stack	= &branch_stack.branch_stack,
		.user_regs	= {
			.abi	= PERF_SAMPLE_REGS_ABI_64,
			.mask	= sample_regs_user,
			.regs	= user_regs,
		},
		.user_stack	= {
			.size	= sizeof(data),
			.data	= (void *)data,
		},
		.read		= {
			.time_enabled = 0x030a59d664fca7deULL,
			.time_running = 0x011b6ae553eb98edULL,
		},
	};
	struct sample_read_value values[] = {{1, 5}, {9, 3}, {2, 7}, {6, 4},};
	struct perf_sample sample_out;
	size_t i, sz, bufsz;
	int err, ret = -1;

	for (i = 0; i < sizeof(user_regs); i++)
		*(i + (u8 *)user_regs) = i & 0xfe;

	if (read_format & PERF_FORMAT_GROUP) {
		sample.read.group.nr     = 4;
		sample.read.group.values = values;
	} else {
		sample.read.one.value = 0x08789faeb786aa87ULL;
		sample.read.one.id    = 99;
	}

	sz = perf_event__sample_event_size(&sample, sample_type, read_format);
	bufsz = sz + 4096; /* Add a bit for overrun checking */
	event = malloc(bufsz);
	if (!event) {
		pr_debug("malloc failed\n");
		return -1;
	}

	memset(event, 0xff, bufsz);
	event->header.type = PERF_RECORD_SAMPLE;
	event->header.misc = 0;
	event->header.size = sz;

	err = perf_event__synthesize_sample(event, sample_type, read_format,
					    &sample, false);
	if (err) {
		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
			 "perf_event__synthesize_sample", sample_type, err);
		goto out_free;
	}

	/* The data does not contain 0xff so we use that to check the size */
	for (i = bufsz; i > 0; i--) {
		if (*(i - 1 + (u8 *)event) != 0xff)
			break;
	}
	if (i != sz) {
		pr_debug("Event size mismatch: actual %zu vs expected %zu\n",
			 i, sz);
		goto out_free;
	}

	evsel.sample_size = __perf_evsel__sample_size(sample_type);

	err = perf_evsel__parse_sample(&evsel, event, &sample_out);
	if (err) {
		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
			 "perf_evsel__parse_sample", sample_type, err);
		goto out_free;
	}

	if (!samples_same(&sample, &sample_out, sample_type, read_format)) {
		pr_debug("parsing failed for sample_type %#"PRIx64"\n",
			 sample_type);
		goto out_free;
	}

	ret = 0;
out_free:
	free(event);
	if (ret && read_format)
		pr_debug("read_format %#"PRIx64"\n", read_format);
	return ret;
}

/**
 * test__sample_parsing - test sample parsing.
 *
 * This function implements a test that synthesizes a sample event, parses it
 * and then checks that the parsed sample matches the original sample.  The test
 * checks sample format bits separately and together.  If the test passes %0 is
 * returned, otherwise %-1 is returned.
 */
int test__sample_parsing(void)
{
	const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15};
	u64 sample_type;
	u64 sample_regs_user;
	size_t i;
	int err;

	/*
	 * Fail the test if it has not been updated when new sample format bits
	 * were added.  Please actually update the test rather than just change
	 * the condition below.
	 */
	if (PERF_SAMPLE_MAX > PERF_SAMPLE_TRANSACTION << 1) {
		pr_debug("sample format has changed, some new PERF_SAMPLE_ bit was introduced - test needs updating\n");
		return -1;
	}

	/* Test each sample format bit separately */
	for (sample_type = 1; sample_type != PERF_SAMPLE_MAX;
	     sample_type <<= 1) {
		/* Test read_format variations */
		if (sample_type == PERF_SAMPLE_READ) {
			for (i = 0; i < ARRAY_SIZE(rf); i++) {
				err = do_test(sample_type, 0, rf[i]);
				if (err)
					return err;
			}
			continue;
		}

		if (sample_type == PERF_SAMPLE_REGS_USER)
			sample_regs_user = 0x3fff;
		else
			sample_regs_user = 0;

		err = do_test(sample_type, sample_regs_user, 0);
		if (err)
			return err;
	}

	/* Test all sample format bits together */
	sample_type = PERF_SAMPLE_MAX - 1;
	sample_regs_user = 0x3fff;
	for (i = 0; i < ARRAY_SIZE(rf); i++) {
		err = do_test(sample_type, sample_regs_user, rf[i]);
		if (err)
			return err;
	}

	return 0;
}