v5.4 (tools/perf/tests/sample-parsing.c)
  1// SPDX-License-Identifier: GPL-2.0
  2#include <stdbool.h>
  3#include <inttypes.h>
  4#include <stdlib.h>
  5#include <string.h>
  6#include <linux/bitops.h>
  7#include <linux/kernel.h>
  8#include <linux/types.h>
  9
 10#include "map_symbol.h"
 11#include "branch.h"
 12#include "event.h"
 13#include "evsel.h"
 14#include "debug.h"
 15#include "util/synthetic-events.h"
 16
 17#include "tests.h"
 18
 19#define COMP(m) do {					\
 20	if (s1->m != s2->m) {				\
 21		pr_debug("Samples differ at '"#m"'\n");	\
 22		return false;				\
 23	}						\
 24} while (0)
 25
 26#define MCOMP(m) do {					\
 27	if (memcmp(&s1->m, &s2->m, sizeof(s1->m))) {	\
 28		pr_debug("Samples differ at '"#m"'\n");	\
 29		return false;				\
 30	}						\
 31} while (0)
 32
 33static bool samples_same(const struct perf_sample *s1,
 34			 const struct perf_sample *s2,
 35			 u64 type, u64 read_format)
 36{
 37	size_t i;
 38
 39	if (type & PERF_SAMPLE_IDENTIFIER)
 40		COMP(id);
 41
 42	if (type & PERF_SAMPLE_IP)
 43		COMP(ip);
 44
 45	if (type & PERF_SAMPLE_TID) {
 46		COMP(pid);
 47		COMP(tid);
 48	}
 49
 50	if (type & PERF_SAMPLE_TIME)
 51		COMP(time);
 52
 53	if (type & PERF_SAMPLE_ADDR)
 54		COMP(addr);
 55
 56	if (type & PERF_SAMPLE_ID)
 57		COMP(id);
 58
 59	if (type & PERF_SAMPLE_STREAM_ID)
 60		COMP(stream_id);
 61
 62	if (type & PERF_SAMPLE_CPU)
 63		COMP(cpu);
 64
 65	if (type & PERF_SAMPLE_PERIOD)
 66		COMP(period);
 67
 68	if (type & PERF_SAMPLE_READ) {
 69		if (read_format & PERF_FORMAT_GROUP)
 70			COMP(read.group.nr);
 71		else
 72			COMP(read.one.value);
 73		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
 74			COMP(read.time_enabled);
 75		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
 76			COMP(read.time_running);
 77		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
 78		if (read_format & PERF_FORMAT_GROUP) {
 79			for (i = 0; i < s1->read.group.nr; i++)
 80				MCOMP(read.group.values[i]);
 81		} else {
 82			COMP(read.one.id);
 83		}
 84	}
 85
 86	if (type & PERF_SAMPLE_CALLCHAIN) {
 87		COMP(callchain->nr);
 88		for (i = 0; i < s1->callchain->nr; i++)
 89			COMP(callchain->ips[i]);
 90	}
 91
 92	if (type & PERF_SAMPLE_RAW) {
 93		COMP(raw_size);
 94		if (memcmp(s1->raw_data, s2->raw_data, s1->raw_size)) {
 95			pr_debug("Samples differ at 'raw_data'\n");
 96			return false;
 97		}
 98	}
 99
100	if (type & PERF_SAMPLE_BRANCH_STACK) {
101		COMP(branch_stack->nr);
102		for (i = 0; i < s1->branch_stack->nr; i++)
103			MCOMP(branch_stack->entries[i]);
104	}
105
106	if (type & PERF_SAMPLE_REGS_USER) {
107		size_t sz = hweight_long(s1->user_regs.mask) * sizeof(u64);
108
109		COMP(user_regs.mask);
110		COMP(user_regs.abi);
111		if (s1->user_regs.abi &&
112		    (!s1->user_regs.regs || !s2->user_regs.regs ||
113		     memcmp(s1->user_regs.regs, s2->user_regs.regs, sz))) {
114			pr_debug("Samples differ at 'user_regs'\n");
115			return false;
116		}
117	}
118
119	if (type & PERF_SAMPLE_STACK_USER) {
120		COMP(user_stack.size);
121		if (memcmp(s1->user_stack.data, s2->user_stack.data,
122			   s1->user_stack.size)) {
123			pr_debug("Samples differ at 'user_stack'\n");
124			return false;
125		}
126	}
127
128	if (type & PERF_SAMPLE_WEIGHT)
129		COMP(weight);
130
131	if (type & PERF_SAMPLE_DATA_SRC)
132		COMP(data_src);
133
134	if (type & PERF_SAMPLE_TRANSACTION)
135		COMP(transaction);
136
137	if (type & PERF_SAMPLE_REGS_INTR) {
138		size_t sz = hweight_long(s1->intr_regs.mask) * sizeof(u64);
139
140		COMP(intr_regs.mask);
141		COMP(intr_regs.abi);
142		if (s1->intr_regs.abi &&
143		    (!s1->intr_regs.regs || !s2->intr_regs.regs ||
144		     memcmp(s1->intr_regs.regs, s2->intr_regs.regs, sz))) {
145			pr_debug("Samples differ at 'intr_regs'\n");
146			return false;
147		}
148	}
149
150	if (type & PERF_SAMPLE_PHYS_ADDR)
151		COMP(phys_addr);
152
153	return true;
154}
155
156static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
157{
158	struct evsel evsel = {
159		.needs_swap = false,
160		.core = {
 161			.attr = {
162				.sample_type = sample_type,
163				.read_format = read_format,
164			},
165		},
166	};
167	union perf_event *event;
168	union {
169		struct ip_callchain callchain;
170		u64 data[64];
171	} callchain = {
172		/* 3 ips */
173		.data = {3, 201, 202, 203},
174	};
175	union {
176		struct branch_stack branch_stack;
177		u64 data[64];
178	} branch_stack = {
179		/* 1 branch_entry */
180		.data = {1, 211, 212, 213},
181	};
182	u64 regs[64];
183	const u64 raw_data[] = {0x123456780a0b0c0dULL, 0x1102030405060708ULL};
184	const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL};
185	struct perf_sample sample = {
186		.ip		= 101,
187		.pid		= 102,
188		.tid		= 103,
189		.time		= 104,
190		.addr		= 105,
191		.id		= 106,
192		.stream_id	= 107,
193		.period		= 108,
194		.weight		= 109,
195		.cpu		= 110,
196		.raw_size	= sizeof(raw_data),
197		.data_src	= 111,
198		.transaction	= 112,
199		.raw_data	= (void *)raw_data,
200		.callchain	= &callchain.callchain,
201		.branch_stack	= &branch_stack.branch_stack,
202		.user_regs	= {
203			.abi	= PERF_SAMPLE_REGS_ABI_64,
204			.mask	= sample_regs,
205			.regs	= regs,
206		},
207		.user_stack	= {
208			.size	= sizeof(data),
209			.data	= (void *)data,
210		},
211		.read		= {
212			.time_enabled = 0x030a59d664fca7deULL,
213			.time_running = 0x011b6ae553eb98edULL,
214		},
215		.intr_regs	= {
216			.abi	= PERF_SAMPLE_REGS_ABI_64,
217			.mask	= sample_regs,
218			.regs	= regs,
219		},
220		.phys_addr	= 113,
221	};
222	struct sample_read_value values[] = {{1, 5}, {9, 3}, {2, 7}, {6, 4},};
223	struct perf_sample sample_out;
224	size_t i, sz, bufsz;
225	int err, ret = -1;
226
227	if (sample_type & PERF_SAMPLE_REGS_USER)
228		evsel.core.attr.sample_regs_user = sample_regs;
229
230	if (sample_type & PERF_SAMPLE_REGS_INTR)
231		evsel.core.attr.sample_regs_intr = sample_regs;
232
233	for (i = 0; i < sizeof(regs); i++)
234		*(i + (u8 *)regs) = i & 0xfe;
235
236	if (read_format & PERF_FORMAT_GROUP) {
237		sample.read.group.nr     = 4;
238		sample.read.group.values = values;
239	} else {
240		sample.read.one.value = 0x08789faeb786aa87ULL;
241		sample.read.one.id    = 99;
242	}
243
244	sz = perf_event__sample_event_size(&sample, sample_type, read_format);
245	bufsz = sz + 4096; /* Add a bit for overrun checking */
246	event = malloc(bufsz);
247	if (!event) {
248		pr_debug("malloc failed\n");
249		return -1;
250	}
251
252	memset(event, 0xff, bufsz);
253	event->header.type = PERF_RECORD_SAMPLE;
254	event->header.misc = 0;
255	event->header.size = sz;
256
257	err = perf_event__synthesize_sample(event, sample_type, read_format,
258					    &sample);
259	if (err) {
260		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
261			 "perf_event__synthesize_sample", sample_type, err);
262		goto out_free;
263	}
264
265	/* The data does not contain 0xff so we use that to check the size */
266	for (i = bufsz; i > 0; i--) {
267		if (*(i - 1 + (u8 *)event) != 0xff)
268			break;
269	}
270	if (i != sz) {
271		pr_debug("Event size mismatch: actual %zu vs expected %zu\n",
272			 i, sz);
273		goto out_free;
274	}
275
276	evsel.sample_size = __perf_evsel__sample_size(sample_type);
277
278	err = perf_evsel__parse_sample(&evsel, event, &sample_out);
279	if (err) {
280		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
281			 "perf_evsel__parse_sample", sample_type, err);
282		goto out_free;
283	}
284
285	if (!samples_same(&sample, &sample_out, sample_type, read_format)) {
286		pr_debug("parsing failed for sample_type %#"PRIx64"\n",
287			 sample_type);
288		goto out_free;
289	}
290
291	ret = 0;
292out_free:
293	free(event);
294	if (ret && read_format)
295		pr_debug("read_format %#"PRIx64"\n", read_format);
296	return ret;
297}
298
299/**
300 * test__sample_parsing - test sample parsing.
301 *
302 * This function implements a test that synthesizes a sample event, parses it
303 * and then checks that the parsed sample matches the original sample.  The test
304 * checks sample format bits separately and together.  If the test passes %0 is
305 * returned, otherwise %-1 is returned.
306 */
307int test__sample_parsing(struct test *test __maybe_unused, int subtest __maybe_unused)
308{
309	const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15};
310	u64 sample_type;
311	u64 sample_regs;
312	size_t i;
313	int err;
314
315	/*
316	 * Fail the test if it has not been updated when new sample format bits
317	 * were added.  Please actually update the test rather than just change
318	 * the condition below.
319	 */
320	if (PERF_SAMPLE_MAX > PERF_SAMPLE_PHYS_ADDR << 1) {
321		pr_debug("sample format has changed, some new PERF_SAMPLE_ bit was introduced - test needs updating\n");
322		return -1;
323	}
324
325	/* Test each sample format bit separately */
326	for (sample_type = 1; sample_type != PERF_SAMPLE_MAX;
327	     sample_type <<= 1) {
328		/* Test read_format variations */
329		if (sample_type == PERF_SAMPLE_READ) {
330			for (i = 0; i < ARRAY_SIZE(rf); i++) {
331				err = do_test(sample_type, 0, rf[i]);
332				if (err)
333					return err;
334			}
335			continue;
336		}
337		sample_regs = 0;
338
339		if (sample_type == PERF_SAMPLE_REGS_USER)
340			sample_regs = 0x3fff;
341
342		if (sample_type == PERF_SAMPLE_REGS_INTR)
343			sample_regs = 0xff0fff;
344
345		err = do_test(sample_type, sample_regs, 0);
346		if (err)
347			return err;
348	}
349
350	/* Test all sample format bits together */
351	sample_type = PERF_SAMPLE_MAX - 1;
 352	sample_regs = 0x3fff; /* shared by intr and user regs */
353	for (i = 0; i < ARRAY_SIZE(rf); i++) {
354		err = do_test(sample_type, sample_regs, rf[i]);
355		if (err)
356			return err;
357	}
358
359	return 0;
360}
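
The heart of do_test() in both listings is a synthesize/parse round trip: the event buffer is pre-filled with 0xff, the size actually written by perf_event__synthesize_sample() is recovered by scanning backwards for the first non-0xff byte, and the re-parsed sample is compared field by field against the original. The following is a minimal, self-contained sketch of that pattern using a hypothetical toy_sample record and toy helpers rather than perf's real API; it only assumes, as the test's own comment does, that the serialized payload never contains a 0xff byte.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for struct perf_sample. */
struct toy_sample {
	uint64_t ip;
	uint32_t pid, tid;
};

/* Stand-in for perf_event__synthesize_sample(): serialize into buf. */
static size_t toy_synthesize(const struct toy_sample *s, uint8_t *buf)
{
	memcpy(buf, s, sizeof(*s));
	return sizeof(*s);
}

/* Stand-in for the parse step (perf_evsel__parse_sample / evsel__parse_sample). */
static void toy_parse(const uint8_t *buf, struct toy_sample *out)
{
	memcpy(out, buf, sizeof(*out));
}

int main(void)
{
	struct toy_sample in = { .ip = 101, .pid = 102, .tid = 103 }, out;
	size_t bufsz = sizeof(in) + 4096;	/* slack for overrun checking */
	uint8_t *buf = malloc(bufsz);
	size_t sz, i;

	if (!buf)
		return 1;

	/* The payload contains no 0xff byte, so 0xff marks untouched bytes. */
	memset(buf, 0xff, bufsz);
	sz = toy_synthesize(&in, buf);

	/* Recover the written size by scanning back over the 0xff filler. */
	for (i = bufsz; i > 0; i--)
		if (buf[i - 1] != 0xff)
			break;
	if (i != sz) {
		fprintf(stderr, "size mismatch: %zu vs %zu\n", i, sz);
		return 1;
	}

	toy_parse(buf, &out);
	if (in.ip != out.ip || in.pid != out.pid || in.tid != out.tid) {
		fprintf(stderr, "round-trip mismatch\n");
		return 1;
	}

	free(buf);
	puts("round trip OK");
	return 0;
}

This is the same check do_test() performs before comparing any fields: if perf_event__sample_event_size() disagreed with what perf_event__synthesize_sample() actually wrote, the later field-by-field comparison could not be trusted.
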
v6.9.4 (tools/perf/tests/sample-parsing.c)
  1// SPDX-License-Identifier: GPL-2.0
  2#include <stdbool.h>
  3#include <inttypes.h>
  4#include <stdlib.h>
  5#include <string.h>
  6#include <linux/bitops.h>
  7#include <linux/kernel.h>
  8#include <linux/types.h>
  9
 10#include "map_symbol.h"
 11#include "branch.h"
 12#include "event.h"
 13#include "evsel.h"
 14#include "debug.h"
 15#include "util/synthetic-events.h"
 16#include "util/util.h"
 17
 18#include "tests.h"
 19
 20#define COMP(m) do {					\
 21	if (s1->m != s2->m) {				\
 22		pr_debug("Samples differ at '"#m"'\n");	\
 23		return false;				\
 24	}						\
 25} while (0)
 26
 27#define MCOMP(m) do {					\
 28	if (memcmp(&s1->m, &s2->m, sizeof(s1->m))) {	\
 29		pr_debug("Samples differ at '"#m"'\n");	\
 30		return false;				\
 31	}						\
 32} while (0)
 33
 34/*
 35 * Hardcode the expected values for branch_entry flags.
 36 * These are based on the input value (213) specified
 37 * in branch_stack variable.
 38 */
 39#define BS_EXPECTED_BE	0xa000d00000000000
 40#define BS_EXPECTED_LE	0x1aa00000000
 41#define FLAG(s)	s->branch_stack->entries[i].flags
 42
 43static bool samples_same(const struct perf_sample *s1,
 44			 const struct perf_sample *s2,
 45			 u64 type, u64 read_format, bool needs_swap)
 46{
 47	size_t i;
 48
 49	if (type & PERF_SAMPLE_IDENTIFIER)
 50		COMP(id);
 51
 52	if (type & PERF_SAMPLE_IP)
 53		COMP(ip);
 54
 55	if (type & PERF_SAMPLE_TID) {
 56		COMP(pid);
 57		COMP(tid);
 58	}
 59
 60	if (type & PERF_SAMPLE_TIME)
 61		COMP(time);
 62
 63	if (type & PERF_SAMPLE_ADDR)
 64		COMP(addr);
 65
 66	if (type & PERF_SAMPLE_ID)
 67		COMP(id);
 68
 69	if (type & PERF_SAMPLE_STREAM_ID)
 70		COMP(stream_id);
 71
 72	if (type & PERF_SAMPLE_CPU)
 73		COMP(cpu);
 74
 75	if (type & PERF_SAMPLE_PERIOD)
 76		COMP(period);
 77
 78	if (type & PERF_SAMPLE_READ) {
 79		if (read_format & PERF_FORMAT_GROUP)
 80			COMP(read.group.nr);
 81		else
 82			COMP(read.one.value);
 83		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
 84			COMP(read.time_enabled);
 85		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
 86			COMP(read.time_running);
 87		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
 88		if (read_format & PERF_FORMAT_GROUP) {
 89			for (i = 0; i < s1->read.group.nr; i++) {
 90				/* FIXME: check values without LOST */
 91				if (read_format & PERF_FORMAT_LOST)
 92					MCOMP(read.group.values[i]);
 93			}
 94		} else {
 95			COMP(read.one.id);
 96			if (read_format & PERF_FORMAT_LOST)
 97				COMP(read.one.lost);
 98		}
 99	}
100
101	if (type & PERF_SAMPLE_CALLCHAIN) {
102		COMP(callchain->nr);
103		for (i = 0; i < s1->callchain->nr; i++)
104			COMP(callchain->ips[i]);
105	}
106
107	if (type & PERF_SAMPLE_RAW) {
108		COMP(raw_size);
109		if (memcmp(s1->raw_data, s2->raw_data, s1->raw_size)) {
110			pr_debug("Samples differ at 'raw_data'\n");
111			return false;
112		}
113	}
114
115	if (type & PERF_SAMPLE_BRANCH_STACK) {
116		COMP(branch_stack->nr);
117		COMP(branch_stack->hw_idx);
118		for (i = 0; i < s1->branch_stack->nr; i++) {
119			if (needs_swap)
120				return ((host_is_bigendian()) ?
121					(FLAG(s2).value == BS_EXPECTED_BE) :
122					(FLAG(s2).value == BS_EXPECTED_LE));
123			else
124				MCOMP(branch_stack->entries[i]);
125		}
126	}
127
128	if (type & PERF_SAMPLE_REGS_USER) {
129		size_t sz = hweight_long(s1->user_regs.mask) * sizeof(u64);
130
131		COMP(user_regs.mask);
132		COMP(user_regs.abi);
133		if (s1->user_regs.abi &&
134		    (!s1->user_regs.regs || !s2->user_regs.regs ||
135		     memcmp(s1->user_regs.regs, s2->user_regs.regs, sz))) {
136			pr_debug("Samples differ at 'user_regs'\n");
137			return false;
138		}
139	}
140
141	if (type & PERF_SAMPLE_STACK_USER) {
142		COMP(user_stack.size);
143		if (memcmp(s1->user_stack.data, s2->user_stack.data,
144			   s1->user_stack.size)) {
145			pr_debug("Samples differ at 'user_stack'\n");
146			return false;
147		}
148	}
149
150	if (type & PERF_SAMPLE_WEIGHT)
151		COMP(weight);
152
153	if (type & PERF_SAMPLE_DATA_SRC)
154		COMP(data_src);
155
156	if (type & PERF_SAMPLE_TRANSACTION)
157		COMP(transaction);
158
159	if (type & PERF_SAMPLE_REGS_INTR) {
160		size_t sz = hweight_long(s1->intr_regs.mask) * sizeof(u64);
161
162		COMP(intr_regs.mask);
163		COMP(intr_regs.abi);
164		if (s1->intr_regs.abi &&
165		    (!s1->intr_regs.regs || !s2->intr_regs.regs ||
166		     memcmp(s1->intr_regs.regs, s2->intr_regs.regs, sz))) {
167			pr_debug("Samples differ at 'intr_regs'\n");
168			return false;
169		}
170	}
171
172	if (type & PERF_SAMPLE_PHYS_ADDR)
173		COMP(phys_addr);
174
175	if (type & PERF_SAMPLE_CGROUP)
176		COMP(cgroup);
177
178	if (type & PERF_SAMPLE_DATA_PAGE_SIZE)
179		COMP(data_page_size);
180
181	if (type & PERF_SAMPLE_CODE_PAGE_SIZE)
182		COMP(code_page_size);
183
184	if (type & PERF_SAMPLE_AUX) {
185		COMP(aux_sample.size);
186		if (memcmp(s1->aux_sample.data, s2->aux_sample.data,
187			   s1->aux_sample.size)) {
188			pr_debug("Samples differ at 'aux_sample'\n");
189			return false;
190		}
191	}
192
193	return true;
194}
195
196static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
197{
198	struct evsel evsel = {
199		.needs_swap = false,
200		.core = {
 201			.attr = {
202				.sample_type = sample_type,
203				.read_format = read_format,
204			},
205		},
206	};
207	union perf_event *event;
208	union {
209		struct ip_callchain callchain;
210		u64 data[64];
211	} callchain = {
212		/* 3 ips */
213		.data = {3, 201, 202, 203},
214	};
215	union {
216		struct branch_stack branch_stack;
217		u64 data[64];
218	} branch_stack = {
219		/* 1 branch_entry */
220		.data = {1, -1ULL, 211, 212, 213},
221	};
222	u64 regs[64];
223	const u32 raw_data[] = {0x12345678, 0x0a0b0c0d, 0x11020304, 0x05060708, 0 };
224	const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL};
225	const u64 aux_data[] = {0xa55a, 0, 0xeeddee, 0x0282028202820282};
226	struct perf_sample sample = {
227		.ip		= 101,
228		.pid		= 102,
229		.tid		= 103,
230		.time		= 104,
231		.addr		= 105,
232		.id		= 106,
233		.stream_id	= 107,
234		.period		= 108,
235		.weight		= 109,
236		.cpu		= 110,
237		.raw_size	= sizeof(raw_data),
238		.data_src	= 111,
239		.transaction	= 112,
240		.raw_data	= (void *)raw_data,
241		.callchain	= &callchain.callchain,
242		.no_hw_idx      = false,
243		.branch_stack	= &branch_stack.branch_stack,
244		.user_regs	= {
245			.abi	= PERF_SAMPLE_REGS_ABI_64,
246			.mask	= sample_regs,
247			.regs	= regs,
248		},
249		.user_stack	= {
250			.size	= sizeof(data),
251			.data	= (void *)data,
252		},
253		.read		= {
254			.time_enabled = 0x030a59d664fca7deULL,
255			.time_running = 0x011b6ae553eb98edULL,
256		},
257		.intr_regs	= {
258			.abi	= PERF_SAMPLE_REGS_ABI_64,
259			.mask	= sample_regs,
260			.regs	= regs,
261		},
262		.phys_addr	= 113,
263		.cgroup		= 114,
264		.data_page_size = 115,
265		.code_page_size = 116,
266		.aux_sample	= {
267			.size	= sizeof(aux_data),
268			.data	= (void *)aux_data,
269		},
270	};
271	struct sample_read_value values[] = {{1, 5, 0}, {9, 3, 0}, {2, 7, 0}, {6, 4, 1},};
272	struct perf_sample sample_out, sample_out_endian;
273	size_t i, sz, bufsz;
274	int err, ret = -1;
275
276	if (sample_type & PERF_SAMPLE_REGS_USER)
277		evsel.core.attr.sample_regs_user = sample_regs;
278
279	if (sample_type & PERF_SAMPLE_REGS_INTR)
280		evsel.core.attr.sample_regs_intr = sample_regs;
281
282	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
283		evsel.core.attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
284
285	for (i = 0; i < sizeof(regs); i++)
286		*(i + (u8 *)regs) = i & 0xfe;
287
288	if (read_format & PERF_FORMAT_GROUP) {
289		sample.read.group.nr     = 4;
290		sample.read.group.values = values;
291	} else {
292		sample.read.one.value = 0x08789faeb786aa87ULL;
293		sample.read.one.id    = 99;
294		sample.read.one.lost  = 1;
295	}
296
297	sz = perf_event__sample_event_size(&sample, sample_type, read_format);
298	bufsz = sz + 4096; /* Add a bit for overrun checking */
299	event = malloc(bufsz);
300	if (!event) {
301		pr_debug("malloc failed\n");
302		return -1;
303	}
304
305	memset(event, 0xff, bufsz);
306	event->header.type = PERF_RECORD_SAMPLE;
307	event->header.misc = 0;
308	event->header.size = sz;
309
310	err = perf_event__synthesize_sample(event, sample_type, read_format,
311					    &sample);
312	if (err) {
313		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
314			 "perf_event__synthesize_sample", sample_type, err);
315		goto out_free;
316	}
317
318	/* The data does not contain 0xff so we use that to check the size */
319	for (i = bufsz; i > 0; i--) {
320		if (*(i - 1 + (u8 *)event) != 0xff)
321			break;
322	}
323	if (i != sz) {
324		pr_debug("Event size mismatch: actual %zu vs expected %zu\n",
325			 i, sz);
326		goto out_free;
327	}
328
329	evsel.sample_size = __evsel__sample_size(sample_type);
330
331	err = evsel__parse_sample(&evsel, event, &sample_out);
332	if (err) {
333		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
334			 "evsel__parse_sample", sample_type, err);
335		goto out_free;
336	}
337
338	if (!samples_same(&sample, &sample_out, sample_type, read_format, evsel.needs_swap)) {
339		pr_debug("parsing failed for sample_type %#"PRIx64"\n",
340			 sample_type);
341		goto out_free;
342	}
343
344	if (sample_type == PERF_SAMPLE_BRANCH_STACK) {
345		evsel.needs_swap = true;
346		evsel.sample_size = __evsel__sample_size(sample_type);
347		err = evsel__parse_sample(&evsel, event, &sample_out_endian);
348		if (err) {
349			pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
350				 "evsel__parse_sample", sample_type, err);
351			goto out_free;
352		}
353
354		if (!samples_same(&sample, &sample_out_endian, sample_type, read_format, evsel.needs_swap)) {
355			pr_debug("parsing failed for sample_type %#"PRIx64"\n",
356				 sample_type);
357			goto out_free;
358		}
359	}
360
361	ret = 0;
362out_free:
363	free(event);
364	if (ret && read_format)
365		pr_debug("read_format %#"PRIx64"\n", read_format);
366	return ret;
367}
368
369/**
370 * test__sample_parsing - test sample parsing.
371 *
372 * This function implements a test that synthesizes a sample event, parses it
373 * and then checks that the parsed sample matches the original sample.  The test
374 * checks sample format bits separately and together.  If the test passes %0 is
375 * returned, otherwise %-1 is returned.
376 */
377static int test__sample_parsing(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
378{
379	const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 28, 29, 30, 31};
380	u64 sample_type;
381	u64 sample_regs;
382	size_t i;
383	int err;
384
385	/*
386	 * Fail the test if it has not been updated when new sample format bits
387	 * were added.  Please actually update the test rather than just change
388	 * the condition below.
389	 */
390	if (PERF_SAMPLE_MAX > PERF_SAMPLE_WEIGHT_STRUCT << 1) {
391		pr_debug("sample format has changed, some new PERF_SAMPLE_ bit was introduced - test needs updating\n");
392		return -1;
393	}
394
395	/* Test each sample format bit separately */
396	for (sample_type = 1; sample_type != PERF_SAMPLE_MAX;
397	     sample_type <<= 1) {
398		/* Test read_format variations */
399		if (sample_type == PERF_SAMPLE_READ) {
400			for (i = 0; i < ARRAY_SIZE(rf); i++) {
401				err = do_test(sample_type, 0, rf[i]);
402				if (err)
403					return err;
404			}
405			continue;
406		}
407		sample_regs = 0;
408
409		if (sample_type == PERF_SAMPLE_REGS_USER)
410			sample_regs = 0x3fff;
411
412		if (sample_type == PERF_SAMPLE_REGS_INTR)
413			sample_regs = 0xff0fff;
414
415		err = do_test(sample_type, sample_regs, 0);
416		if (err)
417			return err;
418	}
419
420	/*
421	 * Test all sample format bits together
422	 * Note: PERF_SAMPLE_WEIGHT and PERF_SAMPLE_WEIGHT_STRUCT cannot
423	 *       be set simultaneously.
424	 */
425	sample_type = (PERF_SAMPLE_MAX - 1) & ~PERF_SAMPLE_WEIGHT;
 426	sample_regs = 0x3fff; /* shared by intr and user regs */
427	for (i = 0; i < ARRAY_SIZE(rf); i++) {
428		err = do_test(sample_type, sample_regs, rf[i]);
429		if (err)
430			return err;
431	}
432
433	return 0;
434}
435
436DEFINE_SUITE("Sample parsing", sample_parsing);
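
The read_format values in rf[] are plain bitmask combinations of the PERF_FORMAT_* flags; the entries added in v6.9.4 (20, 21, 22, 28, 29, 30 and 31) additionally set PERF_FORMAT_LOST, matching the new read.one.lost and values[].lost checks above. The following is a small illustrative decoder, not part of the test itself, whose flag values mirror enum perf_event_read_format from include/uapi/linux/perf_event.h:

#include <stdio.h>

/* Values mirror enum perf_event_read_format (include/uapi/linux/perf_event.h). */
#define PERF_FORMAT_TOTAL_TIME_ENABLED	(1ULL << 0)
#define PERF_FORMAT_TOTAL_TIME_RUNNING	(1ULL << 1)
#define PERF_FORMAT_ID			(1ULL << 2)
#define PERF_FORMAT_GROUP		(1ULL << 3)
#define PERF_FORMAT_LOST		(1ULL << 4)

int main(void)
{
	/* The read_format values exercised by the v6.9.4 test. */
	const unsigned long long rf[] = {4, 5, 6, 7, 12, 13, 14, 15,
					 20, 21, 22, 28, 29, 30, 31};
	const struct { unsigned long long bit; const char *name; } flags[] = {
		{ PERF_FORMAT_TOTAL_TIME_ENABLED, "TOTAL_TIME_ENABLED" },
		{ PERF_FORMAT_TOTAL_TIME_RUNNING, "TOTAL_TIME_RUNNING" },
		{ PERF_FORMAT_ID,                 "ID" },
		{ PERF_FORMAT_GROUP,              "GROUP" },
		{ PERF_FORMAT_LOST,               "LOST" },
	};
	size_t i, j;

	/* Print each value and the PERF_FORMAT_* bits it sets. */
	for (i = 0; i < sizeof(rf) / sizeof(rf[0]); i++) {
		printf("%2llu:", rf[i]);
		for (j = 0; j < sizeof(flags) / sizeof(flags[0]); j++)
			if (rf[i] & flags[j].bit)
				printf(" %s", flags[j].name);
		printf("\n");
	}
	return 0;
}

Every value in rf[] includes PERF_FORMAT_ID, consistent with the "PERF_FORMAT_ID is forced for PERF_SAMPLE_READ" comment in samples_same(). Once perf is built, the suite can be run directly with: perf test -v "Sample parsing".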