v4.6
 
  1#include <stdbool.h>
  2#include <linux/types.h>
  3
  4#include "util.h"
  5#include "event.h"
  6#include "evsel.h"
  7#include "debug.h"
  8
  9#include "tests.h"
 10
 11#define COMP(m) do {					\
 12	if (s1->m != s2->m) {				\
 13		pr_debug("Samples differ at '"#m"'\n");	\
 14		return false;				\
 15	}						\
 16} while (0)
 17
 18#define MCOMP(m) do {					\
 19	if (memcmp(&s1->m, &s2->m, sizeof(s1->m))) {	\
 20		pr_debug("Samples differ at '"#m"'\n");	\
 21		return false;				\
 22	}						\
 23} while (0)
 24
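/*
 * COMP() compares one scalar member of s1 and s2 and reports the first
 * mismatch via pr_debug(); MCOMP() does the same for an embedded struct
 * member using memcmp().
 */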
 25static bool samples_same(const struct perf_sample *s1,
 26			 const struct perf_sample *s2,
 27			 u64 type, u64 read_format)
 28{
 29	size_t i;
 30
 31	if (type & PERF_SAMPLE_IDENTIFIER)
 32		COMP(id);
 33
 34	if (type & PERF_SAMPLE_IP)
 35		COMP(ip);
 36
 37	if (type & PERF_SAMPLE_TID) {
 38		COMP(pid);
 39		COMP(tid);
 40	}
 41
 42	if (type & PERF_SAMPLE_TIME)
 43		COMP(time);
 44
 45	if (type & PERF_SAMPLE_ADDR)
 46		COMP(addr);
 47
 48	if (type & PERF_SAMPLE_ID)
 49		COMP(id);
 50
 51	if (type & PERF_SAMPLE_STREAM_ID)
 52		COMP(stream_id);
 53
 54	if (type & PERF_SAMPLE_CPU)
 55		COMP(cpu);
 56
 57	if (type & PERF_SAMPLE_PERIOD)
 58		COMP(period);
 59
 60	if (type & PERF_SAMPLE_READ) {
 61		if (read_format & PERF_FORMAT_GROUP)
 62			COMP(read.group.nr);
 63		else
 64			COMP(read.one.value);
 65		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
 66			COMP(read.time_enabled);
 67		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
 68			COMP(read.time_running);
 69		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
 70		if (read_format & PERF_FORMAT_GROUP) {
 71			for (i = 0; i < s1->read.group.nr; i++)
 72				MCOMP(read.group.values[i]);
 73		} else {
 74			COMP(read.one.id);
 75		}
 76	}
 77
 78	if (type & PERF_SAMPLE_CALLCHAIN) {
 79		COMP(callchain->nr);
 80		for (i = 0; i < s1->callchain->nr; i++)
 81			COMP(callchain->ips[i]);
 82	}
 83
 84	if (type & PERF_SAMPLE_RAW) {
 85		COMP(raw_size);
 86		if (memcmp(s1->raw_data, s2->raw_data, s1->raw_size)) {
 87			pr_debug("Samples differ at 'raw_data'\n");
 88			return false;
 89		}
 90	}
 91
 92	if (type & PERF_SAMPLE_BRANCH_STACK) {
 93		COMP(branch_stack->nr);
 94		for (i = 0; i < s1->branch_stack->nr; i++)
 95			MCOMP(branch_stack->entries[i]);
 96	}
 97
 98	if (type & PERF_SAMPLE_REGS_USER) {
 99		size_t sz = hweight_long(s1->user_regs.mask) * sizeof(u64);
100
101		COMP(user_regs.mask);
102		COMP(user_regs.abi);
103		if (s1->user_regs.abi &&
104		    (!s1->user_regs.regs || !s2->user_regs.regs ||
105		     memcmp(s1->user_regs.regs, s2->user_regs.regs, sz))) {
106			pr_debug("Samples differ at 'user_regs'\n");
107			return false;
108		}
109	}
110
111	if (type & PERF_SAMPLE_STACK_USER) {
112		COMP(user_stack.size);
113		if (memcmp(s1->user_stack.data, s2->user_stack.data,
114			   s1->user_stack.size)) {
115			pr_debug("Samples differ at 'user_stack'\n");
116			return false;
117		}
118	}
119
120	if (type & PERF_SAMPLE_WEIGHT)
121		COMP(weight);
122
123	if (type & PERF_SAMPLE_DATA_SRC)
124		COMP(data_src);
125
126	if (type & PERF_SAMPLE_TRANSACTION)
127		COMP(transaction);
128
129	if (type & PERF_SAMPLE_REGS_INTR) {
130		size_t sz = hweight_long(s1->intr_regs.mask) * sizeof(u64);
131
132		COMP(intr_regs.mask);
133		COMP(intr_regs.abi);
134		if (s1->intr_regs.abi &&
135		    (!s1->intr_regs.regs || !s2->intr_regs.regs ||
136		     memcmp(s1->intr_regs.regs, s2->intr_regs.regs, sz))) {
137			pr_debug("Samples differ at 'intr_regs'\n");
138			return false;
139		}
140	}
141
142	return true;
143}
144
145static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
146{
147	struct perf_evsel evsel = {
148		.needs_swap = false,
149		.attr = {
150			.sample_type = sample_type,
151			.read_format = read_format,
152		},
153	};
154	union perf_event *event;
155	union {
156		struct ip_callchain callchain;
157		u64 data[64];
158	} callchain = {
159		/* 3 ips */
160		.data = {3, 201, 202, 203},
161	};
162	union {
163		struct branch_stack branch_stack;
164		u64 data[64];
165	} branch_stack = {
166		/* 1 branch_entry */
167		.data = {1, 211, 212, 213},
168	};
169	u64 regs[64];
170	const u64 raw_data[] = {0x123456780a0b0c0dULL, 0x1102030405060708ULL};
171	const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL};
172	struct perf_sample sample = {
173		.ip		= 101,
174		.pid		= 102,
175		.tid		= 103,
176		.time		= 104,
177		.addr		= 105,
178		.id		= 106,
179		.stream_id	= 107,
180		.period		= 108,
181		.weight		= 109,
182		.cpu		= 110,
183		.raw_size	= sizeof(raw_data),
184		.data_src	= 111,
185		.transaction	= 112,
186		.raw_data	= (void *)raw_data,
187		.callchain	= &callchain.callchain,
188		.branch_stack	= &branch_stack.branch_stack,
189		.user_regs	= {
190			.abi	= PERF_SAMPLE_REGS_ABI_64,
191			.mask	= sample_regs,
192			.regs	= regs,
193		},
194		.user_stack	= {
195			.size	= sizeof(data),
196			.data	= (void *)data,
197		},
198		.read		= {
199			.time_enabled = 0x030a59d664fca7deULL,
200			.time_running = 0x011b6ae553eb98edULL,
201		},
202		.intr_regs	= {
203			.abi	= PERF_SAMPLE_REGS_ABI_64,
204			.mask	= sample_regs,
205			.regs	= regs,
206		},
207	};
208	struct sample_read_value values[] = {{1, 5}, {9, 3}, {2, 7}, {6, 4},};
209	struct perf_sample sample_out;
210	size_t i, sz, bufsz;
211	int err, ret = -1;
212
213	if (sample_type & PERF_SAMPLE_REGS_USER)
214		evsel.attr.sample_regs_user = sample_regs;
215
216	if (sample_type & PERF_SAMPLE_REGS_INTR)
217		evsel.attr.sample_regs_intr = sample_regs;
218
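	/*
	 * Fill regs with a byte pattern (i & 0xfe) that can never produce
	 * 0xff; 0xff is used further down to detect how far the synthesized
	 * event extends into the buffer.
	 */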
219	for (i = 0; i < sizeof(regs); i++)
220		*(i + (u8 *)regs) = i & 0xfe;
221
222	if (read_format & PERF_FORMAT_GROUP) {
223		sample.read.group.nr     = 4;
224		sample.read.group.values = values;
225	} else {
226		sample.read.one.value = 0x08789faeb786aa87ULL;
227		sample.read.one.id    = 99;
228	}
229
230	sz = perf_event__sample_event_size(&sample, sample_type, read_format);
231	bufsz = sz + 4096; /* Add a bit for overrun checking */
232	event = malloc(bufsz);
233	if (!event) {
234		pr_debug("malloc failed\n");
235		return -1;
236	}
237
238	memset(event, 0xff, bufsz);
239	event->header.type = PERF_RECORD_SAMPLE;
240	event->header.misc = 0;
241	event->header.size = sz;
242
243	err = perf_event__synthesize_sample(event, sample_type, read_format,
244					    &sample, false);
245	if (err) {
246		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
247			 "perf_event__synthesize_sample", sample_type, err);
248		goto out_free;
249	}
250
251	/* The data does not contain 0xff so we use that to check the size */
252	for (i = bufsz; i > 0; i--) {
253		if (*(i - 1 + (u8 *)event) != 0xff)
254			break;
255	}
256	if (i != sz) {
257		pr_debug("Event size mismatch: actual %zu vs expected %zu\n",
258			 i, sz);
259		goto out_free;
260	}
261
262	evsel.sample_size = __perf_evsel__sample_size(sample_type);
263
264	err = perf_evsel__parse_sample(&evsel, event, &sample_out);
265	if (err) {
266		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
267			 "perf_evsel__parse_sample", sample_type, err);
268		goto out_free;
269	}
270
271	if (!samples_same(&sample, &sample_out, sample_type, read_format)) {
272		pr_debug("parsing failed for sample_type %#"PRIx64"\n",
273			 sample_type);
274		goto out_free;
275	}
276
277	ret = 0;
278out_free:
279	free(event);
280	if (ret && read_format)
281		pr_debug("read_format %#"PRIx64"\n", read_format);
282	return ret;
283}
284
285/**
286 * test__sample_parsing - test sample parsing.
287 *
288 * This function implements a test that synthesizes a sample event, parses it
289 * and then checks that the parsed sample matches the original sample.  The test
290 * checks sample format bits separately and together.  If the test passes %0 is
291 * returned, otherwise %-1 is returned.
292 */
293int test__sample_parsing(int subtest __maybe_unused)
294{
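	/*
	 * read_format values to exercise: PERF_FORMAT_ID (0x4) is set in all
	 * of them, combined with PERF_FORMAT_TOTAL_TIME_ENABLED (0x1) and
	 * PERF_FORMAT_TOTAL_TIME_RUNNING (0x2); the values 12-15 also set
	 * PERF_FORMAT_GROUP (0x8).
	 */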
295	const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15};
296	u64 sample_type;
297	u64 sample_regs;
298	size_t i;
299	int err;
300
301	/*
302	 * Fail the test if it has not been updated when new sample format bits
303	 * were added.  Please actually update the test rather than just change
304	 * the condition below.
305	 */
306	if (PERF_SAMPLE_MAX > PERF_SAMPLE_REGS_INTR << 1) {
307		pr_debug("sample format has changed, some new PERF_SAMPLE_ bit was introduced - test needs updating\n");
308		return -1;
309	}
310
311	/* Test each sample format bit separately */
312	for (sample_type = 1; sample_type != PERF_SAMPLE_MAX;
313	     sample_type <<= 1) {
314		/* Test read_format variations */
315		if (sample_type == PERF_SAMPLE_READ) {
316			for (i = 0; i < ARRAY_SIZE(rf); i++) {
317				err = do_test(sample_type, 0, rf[i]);
318				if (err)
319					return err;
320			}
321			continue;
322		}
323		sample_regs = 0;
324
325		if (sample_type == PERF_SAMPLE_REGS_USER)
326			sample_regs = 0x3fff;
327
328		if (sample_type == PERF_SAMPLE_REGS_INTR)
329			sample_regs = 0xff0fff;
330
331		err = do_test(sample_type, sample_regs, 0);
332		if (err)
333			return err;
334	}
335
336	/* Test all sample format bits together */
337	sample_type = PERF_SAMPLE_MAX - 1;
338	sample_regs = 0x3fff; /* shared by intr and user regs */
339	for (i = 0; i < ARRAY_SIZE(rf); i++) {
340		err = do_test(sample_type, sample_regs, rf[i]);
341		if (err)
342			return err;
343	}
344
345	return 0;
346}
v5.9
  1// SPDX-License-Identifier: GPL-2.0
  2#include <stdbool.h>
  3#include <inttypes.h>
  4#include <stdlib.h>
  5#include <string.h>
  6#include <linux/bitops.h>
  7#include <linux/kernel.h>
  8#include <linux/types.h>
  9
 10#include "map_symbol.h"
 11#include "branch.h"
 12#include "event.h"
 13#include "evsel.h"
 14#include "debug.h"
 15#include "util/synthetic-events.h"
 16
 17#include "tests.h"
 18
 19#define COMP(m) do {					\
 20	if (s1->m != s2->m) {				\
 21		pr_debug("Samples differ at '"#m"'\n");	\
 22		return false;				\
 23	}						\
 24} while (0)
 25
 26#define MCOMP(m) do {					\
 27	if (memcmp(&s1->m, &s2->m, sizeof(s1->m))) {	\
 28		pr_debug("Samples differ at '"#m"'\n");	\
 29		return false;				\
 30	}						\
 31} while (0)
 32
 33static bool samples_same(const struct perf_sample *s1,
 34			 const struct perf_sample *s2,
 35			 u64 type, u64 read_format)
 36{
 37	size_t i;
 38
 39	if (type & PERF_SAMPLE_IDENTIFIER)
 40		COMP(id);
 41
 42	if (type & PERF_SAMPLE_IP)
 43		COMP(ip);
 44
 45	if (type & PERF_SAMPLE_TID) {
 46		COMP(pid);
 47		COMP(tid);
 48	}
 49
 50	if (type & PERF_SAMPLE_TIME)
 51		COMP(time);
 52
 53	if (type & PERF_SAMPLE_ADDR)
 54		COMP(addr);
 55
 56	if (type & PERF_SAMPLE_ID)
 57		COMP(id);
 58
 59	if (type & PERF_SAMPLE_STREAM_ID)
 60		COMP(stream_id);
 61
 62	if (type & PERF_SAMPLE_CPU)
 63		COMP(cpu);
 64
 65	if (type & PERF_SAMPLE_PERIOD)
 66		COMP(period);
 67
 68	if (type & PERF_SAMPLE_READ) {
 69		if (read_format & PERF_FORMAT_GROUP)
 70			COMP(read.group.nr);
 71		else
 72			COMP(read.one.value);
 73		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
 74			COMP(read.time_enabled);
 75		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
 76			COMP(read.time_running);
 77		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
 78		if (read_format & PERF_FORMAT_GROUP) {
 79			for (i = 0; i < s1->read.group.nr; i++)
 80				MCOMP(read.group.values[i]);
 81		} else {
 82			COMP(read.one.id);
 83		}
 84	}
 85
 86	if (type & PERF_SAMPLE_CALLCHAIN) {
 87		COMP(callchain->nr);
 88		for (i = 0; i < s1->callchain->nr; i++)
 89			COMP(callchain->ips[i]);
 90	}
 91
 92	if (type & PERF_SAMPLE_RAW) {
 93		COMP(raw_size);
 94		if (memcmp(s1->raw_data, s2->raw_data, s1->raw_size)) {
 95			pr_debug("Samples differ at 'raw_data'\n");
 96			return false;
 97		}
 98	}
 99
100	if (type & PERF_SAMPLE_BRANCH_STACK) {
101		COMP(branch_stack->nr);
102		COMP(branch_stack->hw_idx);
103		for (i = 0; i < s1->branch_stack->nr; i++)
104			MCOMP(branch_stack->entries[i]);
105	}
106
107	if (type & PERF_SAMPLE_REGS_USER) {
108		size_t sz = hweight_long(s1->user_regs.mask) * sizeof(u64);
109
110		COMP(user_regs.mask);
111		COMP(user_regs.abi);
112		if (s1->user_regs.abi &&
113		    (!s1->user_regs.regs || !s2->user_regs.regs ||
114		     memcmp(s1->user_regs.regs, s2->user_regs.regs, sz))) {
115			pr_debug("Samples differ at 'user_regs'\n");
116			return false;
117		}
118	}
119
120	if (type & PERF_SAMPLE_STACK_USER) {
121		COMP(user_stack.size);
122		if (memcmp(s1->user_stack.data, s2->user_stack.data,
123			   s1->user_stack.size)) {
124			pr_debug("Samples differ at 'user_stack'\n");
125			return false;
126		}
127	}
128
129	if (type & PERF_SAMPLE_WEIGHT)
130		COMP(weight);
131
132	if (type & PERF_SAMPLE_DATA_SRC)
133		COMP(data_src);
134
135	if (type & PERF_SAMPLE_TRANSACTION)
136		COMP(transaction);
137
138	if (type & PERF_SAMPLE_REGS_INTR) {
139		size_t sz = hweight_long(s1->intr_regs.mask) * sizeof(u64);
140
141		COMP(intr_regs.mask);
142		COMP(intr_regs.abi);
143		if (s1->intr_regs.abi &&
144		    (!s1->intr_regs.regs || !s2->intr_regs.regs ||
145		     memcmp(s1->intr_regs.regs, s2->intr_regs.regs, sz))) {
146			pr_debug("Samples differ at 'intr_regs'\n");
147			return false;
148		}
149	}
150
151	if (type & PERF_SAMPLE_PHYS_ADDR)
152		COMP(phys_addr);
153
154	if (type & PERF_SAMPLE_CGROUP)
155		COMP(cgroup);
156
157	if (type & PERF_SAMPLE_AUX) {
158		COMP(aux_sample.size);
159		if (memcmp(s1->aux_sample.data, s2->aux_sample.data,
160			   s1->aux_sample.size)) {
161			pr_debug("Samples differ at 'aux_sample'\n");
162			return false;
163		}
164	}
165
166	return true;
167}
168
169static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
170{
171	struct evsel evsel = {
172		.needs_swap = false,
173		.core = {
174			.attr = {
175				.sample_type = sample_type,
176				.read_format = read_format,
177			},
178		},
179	};
180	union perf_event *event;
181	union {
182		struct ip_callchain callchain;
183		u64 data[64];
184	} callchain = {
185		/* 3 ips */
186		.data = {3, 201, 202, 203},
187	};
188	union {
189		struct branch_stack branch_stack;
190		u64 data[64];
191	} branch_stack = {
192		/* 1 branch_entry */
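		/* the second u64 (-1ULL) is branch_stack->hw_idx, which is
		 * compared when PERF_SAMPLE_BRANCH_HW_INDEX is requested */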
193		.data = {1, -1ULL, 211, 212, 213},
194	};
195	u64 regs[64];
196	const u64 raw_data[] = {0x123456780a0b0c0dULL, 0x1102030405060708ULL};
197	const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL};
198	const u64 aux_data[] = {0xa55a, 0, 0xeeddee, 0x0282028202820282};
199	struct perf_sample sample = {
200		.ip		= 101,
201		.pid		= 102,
202		.tid		= 103,
203		.time		= 104,
204		.addr		= 105,
205		.id		= 106,
206		.stream_id	= 107,
207		.period		= 108,
208		.weight		= 109,
209		.cpu		= 110,
210		.raw_size	= sizeof(raw_data),
211		.data_src	= 111,
212		.transaction	= 112,
213		.raw_data	= (void *)raw_data,
214		.callchain	= &callchain.callchain,
215		.no_hw_idx      = false,
216		.branch_stack	= &branch_stack.branch_stack,
217		.user_regs	= {
218			.abi	= PERF_SAMPLE_REGS_ABI_64,
219			.mask	= sample_regs,
220			.regs	= regs,
221		},
222		.user_stack	= {
223			.size	= sizeof(data),
224			.data	= (void *)data,
225		},
226		.read		= {
227			.time_enabled = 0x030a59d664fca7deULL,
228			.time_running = 0x011b6ae553eb98edULL,
229		},
230		.intr_regs	= {
231			.abi	= PERF_SAMPLE_REGS_ABI_64,
232			.mask	= sample_regs,
233			.regs	= regs,
234		},
235		.phys_addr	= 113,
236		.cgroup		= 114,
237		.aux_sample	= {
238			.size	= sizeof(aux_data),
239			.data	= (void *)aux_data,
240		},
241	};
242	struct sample_read_value values[] = {{1, 5}, {9, 3}, {2, 7}, {6, 4},};
243	struct perf_sample sample_out;
244	size_t i, sz, bufsz;
245	int err, ret = -1;
246
247	if (sample_type & PERF_SAMPLE_REGS_USER)
248		evsel.core.attr.sample_regs_user = sample_regs;
249
250	if (sample_type & PERF_SAMPLE_REGS_INTR)
251		evsel.core.attr.sample_regs_intr = sample_regs;
252
253	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
254		evsel.core.attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
255
256	for (i = 0; i < sizeof(regs); i++)
257		*(i + (u8 *)regs) = i & 0xfe;
258
259	if (read_format & PERF_FORMAT_GROUP) {
260		sample.read.group.nr     = 4;
261		sample.read.group.values = values;
262	} else {
263		sample.read.one.value = 0x08789faeb786aa87ULL;
264		sample.read.one.id    = 99;
265	}
266
267	sz = perf_event__sample_event_size(&sample, sample_type, read_format);
268	bufsz = sz + 4096; /* Add a bit for overrun checking */
269	event = malloc(bufsz);
270	if (!event) {
271		pr_debug("malloc failed\n");
272		return -1;
273	}
274
275	memset(event, 0xff, bufsz);
276	event->header.type = PERF_RECORD_SAMPLE;
277	event->header.misc = 0;
278	event->header.size = sz;
279
280	err = perf_event__synthesize_sample(event, sample_type, read_format,
281					    &sample);
282	if (err) {
283		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
284			 "perf_event__synthesize_sample", sample_type, err);
285		goto out_free;
286	}
287
288	/* The data does not contain 0xff so we use that to check the size */
289	for (i = bufsz; i > 0; i--) {
290		if (*(i - 1 + (u8 *)event) != 0xff)
291			break;
292	}
293	if (i != sz) {
294		pr_debug("Event size mismatch: actual %zu vs expected %zu\n",
295			 i, sz);
296		goto out_free;
297	}
298
299	evsel.sample_size = __evsel__sample_size(sample_type);
300
301	err = evsel__parse_sample(&evsel, event, &sample_out);
302	if (err) {
303		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
304			 "evsel__parse_sample", sample_type, err);
305		goto out_free;
306	}
307
308	if (!samples_same(&sample, &sample_out, sample_type, read_format)) {
309		pr_debug("parsing failed for sample_type %#"PRIx64"\n",
310			 sample_type);
311		goto out_free;
312	}
313
314	ret = 0;
315out_free:
316	free(event);
317	if (ret && read_format)
318		pr_debug("read_format %#"PRIx64"\n", read_format);
319	return ret;
320}
321
322/**
323 * test__sample_parsing - test sample parsing.
324 *
325 * This function implements a test that synthesizes a sample event, parses it
326 * and then checks that the parsed sample matches the original sample.  The test
327 * checks sample format bits separately and together.  If the test passes %0 is
328 * returned, otherwise %-1 is returned.
329 */
330int test__sample_parsing(struct test *test __maybe_unused, int subtest __maybe_unused)
331{
332	const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15};
333	u64 sample_type;
334	u64 sample_regs;
335	size_t i;
336	int err;
337
338	/*
339	 * Fail the test if it has not been updated when new sample format bits
340	 * were added.  Please actually update the test rather than just change
341	 * the condition below.
342	 */
343	if (PERF_SAMPLE_MAX > PERF_SAMPLE_CGROUP << 1) {
344		pr_debug("sample format has changed, some new PERF_SAMPLE_ bit was introduced - test needs updating\n");
345		return -1;
346	}
347
348	/* Test each sample format bit separately */
349	for (sample_type = 1; sample_type != PERF_SAMPLE_MAX;
350	     sample_type <<= 1) {
351		/* Test read_format variations */
352		if (sample_type == PERF_SAMPLE_READ) {
353			for (i = 0; i < ARRAY_SIZE(rf); i++) {
354				err = do_test(sample_type, 0, rf[i]);
355				if (err)
356					return err;
357			}
358			continue;
359		}
360		sample_regs = 0;
361
362		if (sample_type == PERF_SAMPLE_REGS_USER)
363			sample_regs = 0x3fff;
364
365		if (sample_type == PERF_SAMPLE_REGS_INTR)
366			sample_regs = 0xff0fff;
367
368		err = do_test(sample_type, sample_regs, 0);
369		if (err)
370			return err;
371	}
372
373	/* Test all sample format bits together */
374	sample_type = PERF_SAMPLE_MAX - 1;
375	sample_regs = 0x3fff; /* shared by intr and user regs */
376	for (i = 0; i < ARRAY_SIZE(rf); i++) {
377		err = do_test(sample_type, sample_regs, rf[i]);
378		if (err)
379			return err;
380	}
381
382	return 0;
383}