// SPDX-License-Identifier: GPL-2.0
#include <stdbool.h>
#include <inttypes.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include "util.h"
#include "event.h"
#include "evsel.h"
#include "debug.h"

#include "tests.h"

#define COMP(m) do {					\
	if (s1->m != s2->m) {				\
		pr_debug("Samples differ at '"#m"'\n");	\
		return false;				\
	}						\
} while (0)

#define MCOMP(m) do {					\
	if (memcmp(&s1->m, &s2->m, sizeof(s1->m))) {	\
		pr_debug("Samples differ at '"#m"'\n");	\
		return false;				\
	}						\
} while (0)

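/*
 * Compare two samples field by field, but only the fields selected by
 * @type (PERF_SAMPLE_* bits) and @read_format, i.e. the fields that are
 * actually synthesized and parsed for that configuration.
 */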
static bool samples_same(const struct perf_sample *s1,
			 const struct perf_sample *s2,
			 u64 type, u64 read_format)
{
	size_t i;

	if (type & PERF_SAMPLE_IDENTIFIER)
		COMP(id);

	if (type & PERF_SAMPLE_IP)
		COMP(ip);

	if (type & PERF_SAMPLE_TID) {
		COMP(pid);
		COMP(tid);
	}

	if (type & PERF_SAMPLE_TIME)
		COMP(time);

	if (type & PERF_SAMPLE_ADDR)
		COMP(addr);

	if (type & PERF_SAMPLE_ID)
		COMP(id);

	if (type & PERF_SAMPLE_STREAM_ID)
		COMP(stream_id);

	if (type & PERF_SAMPLE_CPU)
		COMP(cpu);

	if (type & PERF_SAMPLE_PERIOD)
		COMP(period);

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			COMP(read.group.nr);
		else
			COMP(read.one.value);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			COMP(read.time_enabled);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			COMP(read.time_running);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			for (i = 0; i < s1->read.group.nr; i++)
				MCOMP(read.group.values[i]);
		} else {
			COMP(read.one.id);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		COMP(callchain->nr);
		for (i = 0; i < s1->callchain->nr; i++)
			COMP(callchain->ips[i]);
	}

	if (type & PERF_SAMPLE_RAW) {
		COMP(raw_size);
		if (memcmp(s1->raw_data, s2->raw_data, s1->raw_size)) {
			pr_debug("Samples differ at 'raw_data'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		COMP(branch_stack->nr);
		for (i = 0; i < s1->branch_stack->nr; i++)
			MCOMP(branch_stack->entries[i]);
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		size_t sz = hweight_long(s1->user_regs.mask) * sizeof(u64);

		COMP(user_regs.mask);
		COMP(user_regs.abi);
		if (s1->user_regs.abi &&
		    (!s1->user_regs.regs || !s2->user_regs.regs ||
		     memcmp(s1->user_regs.regs, s2->user_regs.regs, sz))) {
			pr_debug("Samples differ at 'user_regs'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		COMP(user_stack.size);
		if (memcmp(s1->user_stack.data, s2->user_stack.data,
			   s1->user_stack.size)) {
			pr_debug("Samples differ at 'user_stack'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		COMP(weight);

	if (type & PERF_SAMPLE_DATA_SRC)
		COMP(data_src);

	if (type & PERF_SAMPLE_TRANSACTION)
		COMP(transaction);

	if (type & PERF_SAMPLE_REGS_INTR) {
		size_t sz = hweight_long(s1->intr_regs.mask) * sizeof(u64);

		COMP(intr_regs.mask);
		COMP(intr_regs.abi);
		if (s1->intr_regs.abi &&
		    (!s1->intr_regs.regs || !s2->intr_regs.regs ||
		     memcmp(s1->intr_regs.regs, s2->intr_regs.regs, sz))) {
			pr_debug("Samples differ at 'intr_regs'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR)
		COMP(phys_addr);

	return true;
}

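/*
 * Fill in a known sample, synthesize it into a raw event buffer, check the
 * written size against the expected size, parse the event back and verify
 * that the parsed sample matches the original.
 */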
static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
{
	struct perf_evsel evsel = {
		.needs_swap = false,
		.attr = {
			.sample_type = sample_type,
			.read_format = read_format,
		},
	};
	union perf_event *event;
	union {
		struct ip_callchain callchain;
		u64 data[64];
	} callchain = {
		/* 3 ips */
		.data = {3, 201, 202, 203},
	};
	union {
		struct branch_stack branch_stack;
		u64 data[64];
	} branch_stack = {
		/* 1 branch_entry */
		.data = {1, 211, 212, 213},
	};
	u64 regs[64];
	/* The trailing zero keeps the u32 size prefix + raw data 64-bit aligned */
	const u32 raw_data[] = {0x12345678, 0x0a0b0c0d, 0x11020304, 0x05060708, 0 };
	const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL};
	struct perf_sample sample = {
		.ip = 101,
		.pid = 102,
		.tid = 103,
		.time = 104,
		.addr = 105,
		.id = 106,
		.stream_id = 107,
		.period = 108,
		.weight = 109,
		.cpu = 110,
		.raw_size = sizeof(raw_data),
		.data_src = 111,
		.transaction = 112,
		.raw_data = (void *)raw_data,
		.callchain = &callchain.callchain,
		.branch_stack = &branch_stack.branch_stack,
		.user_regs = {
			.abi = PERF_SAMPLE_REGS_ABI_64,
			.mask = sample_regs,
			.regs = regs,
		},
		.user_stack = {
			.size = sizeof(data),
			.data = (void *)data,
		},
		.read = {
			.time_enabled = 0x030a59d664fca7deULL,
			.time_running = 0x011b6ae553eb98edULL,
		},
		.intr_regs = {
			.abi = PERF_SAMPLE_REGS_ABI_64,
			.mask = sample_regs,
			.regs = regs,
		},
		.phys_addr = 113,
	};
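	/* {value, id} pairs used for the read group when PERF_FORMAT_GROUP is set */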
	struct sample_read_value values[] = {{1, 5}, {9, 3}, {2, 7}, {6, 4},};
	struct perf_sample sample_out;
	size_t i, sz, bufsz;
	int err, ret = -1;

	if (sample_type & PERF_SAMPLE_REGS_USER)
		evsel.attr.sample_regs_user = sample_regs;

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		evsel.attr.sample_regs_intr = sample_regs;

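	/* Fill the registers with a pattern that avoids 0xff, the fill byte used for the size check below */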
	for (i = 0; i < sizeof(regs); i++)
		*(i + (u8 *)regs) = i & 0xfe;

	if (read_format & PERF_FORMAT_GROUP) {
		sample.read.group.nr = 4;
		sample.read.group.values = values;
	} else {
		sample.read.one.value = 0x08789faeb786aa87ULL;
		sample.read.one.id = 99;
	}

	sz = perf_event__sample_event_size(&sample, sample_type, read_format);
	bufsz = sz + 4096; /* Add a bit for overrun checking */
	event = malloc(bufsz);
	if (!event) {
		pr_debug("malloc failed\n");
		return -1;
	}

	memset(event, 0xff, bufsz);
	event->header.type = PERF_RECORD_SAMPLE;
	event->header.misc = 0;
	event->header.size = sz;

	err = perf_event__synthesize_sample(event, sample_type, read_format,
					    &sample);
	if (err) {
		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
			 "perf_event__synthesize_sample", sample_type, err);
		goto out_free;
	}

	/* The data does not contain 0xff so we use that to check the size */
	for (i = bufsz; i > 0; i--) {
		if (*(i - 1 + (u8 *)event) != 0xff)
			break;
	}
	if (i != sz) {
		pr_debug("Event size mismatch: actual %zu vs expected %zu\n",
			 i, sz);
		goto out_free;
	}

	evsel.sample_size = __perf_evsel__sample_size(sample_type);

	err = perf_evsel__parse_sample(&evsel, event, &sample_out);
	if (err) {
		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
			 "perf_evsel__parse_sample", sample_type, err);
		goto out_free;
	}

	if (!samples_same(&sample, &sample_out, sample_type, read_format)) {
		pr_debug("parsing failed for sample_type %#"PRIx64"\n",
			 sample_type);
		goto out_free;
	}

	ret = 0;
out_free:
	free(event);
	if (ret && read_format)
		pr_debug("read_format %#"PRIx64"\n", read_format);
	return ret;
}

/**
 * test__sample_parsing - test sample parsing.
 *
 * This function implements a test that synthesizes a sample event, parses it
 * and then checks that the parsed sample matches the original sample. The test
 * checks sample format bits separately and together. If the test passes %0 is
 * returned, otherwise %-1 is returned.
 */
int test__sample_parsing(struct test *test __maybe_unused, int subtest __maybe_unused)
{
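	/*
	 * read_format values to test: 4-7 are PERF_FORMAT_ID with every
	 * combination of PERF_FORMAT_TOTAL_TIME_ENABLED/RUNNING, and 12-15
	 * add PERF_FORMAT_GROUP on top of those.
	 */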
	const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15};
	u64 sample_type;
	u64 sample_regs;
	size_t i;
	int err;

	/*
	 * Fail the test if it has not been updated when new sample format bits
	 * were added. Please actually update the test rather than just change
	 * the condition below.
	 */
	if (PERF_SAMPLE_MAX > PERF_SAMPLE_PHYS_ADDR << 1) {
		pr_debug("sample format has changed, some new PERF_SAMPLE_ bit was introduced - test needs updating\n");
		return -1;
	}

	/* Test each sample format bit separately */
	for (sample_type = 1; sample_type != PERF_SAMPLE_MAX;
	     sample_type <<= 1) {
		/* Test read_format variations */
		if (sample_type == PERF_SAMPLE_READ) {
			for (i = 0; i < ARRAY_SIZE(rf); i++) {
				err = do_test(sample_type, 0, rf[i]);
				if (err)
					return err;
			}
			continue;
		}
		sample_regs = 0;

		if (sample_type == PERF_SAMPLE_REGS_USER)
			sample_regs = 0x3fff;

		if (sample_type == PERF_SAMPLE_REGS_INTR)
			sample_regs = 0xff0fff;

		err = do_test(sample_type, sample_regs, 0);
		if (err)
			return err;
	}

	/* Test all sample format bits together */
	sample_type = PERF_SAMPLE_MAX - 1;
	sample_regs = 0x3fff; /* shared by intr and user regs */
	for (i = 0; i < ARRAY_SIZE(rf); i++) {
		err = do_test(sample_type, sample_regs, rf[i]);
		if (err)
			return err;
	}

	return 0;
}
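
/*
 * Below is a later revision of the same test.  It additionally exercises the
 * branch stack hw_idx field, PERF_SAMPLE_CGROUP, PERF_SAMPLE_DATA_PAGE_SIZE,
 * PERF_SAMPLE_CODE_PAGE_SIZE and PERF_SAMPLE_AUX, and uses the reworked
 * struct evsel / evsel__*() interfaces.
 */
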
// SPDX-License-Identifier: GPL-2.0
#include <stdbool.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include "map_symbol.h"
#include "branch.h"
#include "event.h"
#include "evsel.h"
#include "debug.h"
#include "util/synthetic-events.h"

#include "tests.h"

#define COMP(m) do {					\
	if (s1->m != s2->m) {				\
		pr_debug("Samples differ at '"#m"'\n");	\
		return false;				\
	}						\
} while (0)

#define MCOMP(m) do {					\
	if (memcmp(&s1->m, &s2->m, sizeof(s1->m))) {	\
		pr_debug("Samples differ at '"#m"'\n");	\
		return false;				\
	}						\
} while (0)

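/*
 * Compare two samples field by field, but only the fields selected by
 * @type (PERF_SAMPLE_* bits) and @read_format, i.e. the fields that are
 * actually synthesized and parsed for that configuration.
 */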
static bool samples_same(const struct perf_sample *s1,
			 const struct perf_sample *s2,
			 u64 type, u64 read_format)
{
	size_t i;

	if (type & PERF_SAMPLE_IDENTIFIER)
		COMP(id);

	if (type & PERF_SAMPLE_IP)
		COMP(ip);

	if (type & PERF_SAMPLE_TID) {
		COMP(pid);
		COMP(tid);
	}

	if (type & PERF_SAMPLE_TIME)
		COMP(time);

	if (type & PERF_SAMPLE_ADDR)
		COMP(addr);

	if (type & PERF_SAMPLE_ID)
		COMP(id);

	if (type & PERF_SAMPLE_STREAM_ID)
		COMP(stream_id);

	if (type & PERF_SAMPLE_CPU)
		COMP(cpu);

	if (type & PERF_SAMPLE_PERIOD)
		COMP(period);

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			COMP(read.group.nr);
		else
			COMP(read.one.value);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			COMP(read.time_enabled);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			COMP(read.time_running);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			for (i = 0; i < s1->read.group.nr; i++)
				MCOMP(read.group.values[i]);
		} else {
			COMP(read.one.id);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		COMP(callchain->nr);
		for (i = 0; i < s1->callchain->nr; i++)
			COMP(callchain->ips[i]);
	}

	if (type & PERF_SAMPLE_RAW) {
		COMP(raw_size);
		if (memcmp(s1->raw_data, s2->raw_data, s1->raw_size)) {
			pr_debug("Samples differ at 'raw_data'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		COMP(branch_stack->nr);
		COMP(branch_stack->hw_idx);
		for (i = 0; i < s1->branch_stack->nr; i++)
			MCOMP(branch_stack->entries[i]);
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		size_t sz = hweight_long(s1->user_regs.mask) * sizeof(u64);

		COMP(user_regs.mask);
		COMP(user_regs.abi);
		if (s1->user_regs.abi &&
		    (!s1->user_regs.regs || !s2->user_regs.regs ||
		     memcmp(s1->user_regs.regs, s2->user_regs.regs, sz))) {
			pr_debug("Samples differ at 'user_regs'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		COMP(user_stack.size);
		if (memcmp(s1->user_stack.data, s2->user_stack.data,
			   s1->user_stack.size)) {
			pr_debug("Samples differ at 'user_stack'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		COMP(weight);

	if (type & PERF_SAMPLE_DATA_SRC)
		COMP(data_src);

	if (type & PERF_SAMPLE_TRANSACTION)
		COMP(transaction);

	if (type & PERF_SAMPLE_REGS_INTR) {
		size_t sz = hweight_long(s1->intr_regs.mask) * sizeof(u64);

		COMP(intr_regs.mask);
		COMP(intr_regs.abi);
		if (s1->intr_regs.abi &&
		    (!s1->intr_regs.regs || !s2->intr_regs.regs ||
		     memcmp(s1->intr_regs.regs, s2->intr_regs.regs, sz))) {
			pr_debug("Samples differ at 'intr_regs'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR)
		COMP(phys_addr);

	if (type & PERF_SAMPLE_CGROUP)
		COMP(cgroup);

	if (type & PERF_SAMPLE_DATA_PAGE_SIZE)
		COMP(data_page_size);

	if (type & PERF_SAMPLE_CODE_PAGE_SIZE)
		COMP(code_page_size);

	if (type & PERF_SAMPLE_AUX) {
		COMP(aux_sample.size);
		if (memcmp(s1->aux_sample.data, s2->aux_sample.data,
			   s1->aux_sample.size)) {
			pr_debug("Samples differ at 'aux_sample'\n");
			return false;
		}
	}

	return true;
}

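/*
 * Fill in a known sample, synthesize it into a raw event buffer, check the
 * written size against the expected size, parse the event back and verify
 * that the parsed sample matches the original.
 */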
static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
{
	struct evsel evsel = {
		.needs_swap = false,
		.core = {
			.attr = {
				.sample_type = sample_type,
				.read_format = read_format,
			},
		},
	};
	union perf_event *event;
	union {
		struct ip_callchain callchain;
		u64 data[64];
	} callchain = {
		/* 3 ips */
		.data = {3, 201, 202, 203},
	};
	union {
		struct branch_stack branch_stack;
		u64 data[64];
	} branch_stack = {
		/* 1 branch_entry plus the hw_idx field (-1 = not captured) */
		.data = {1, -1ULL, 211, 212, 213},
	};
	u64 regs[64];
	const u32 raw_data[] = {0x12345678, 0x0a0b0c0d, 0x11020304, 0x05060708, 0 };
	const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL};
	const u64 aux_data[] = {0xa55a, 0, 0xeeddee, 0x0282028202820282};
	struct perf_sample sample = {
		.ip = 101,
		.pid = 102,
		.tid = 103,
		.time = 104,
		.addr = 105,
		.id = 106,
		.stream_id = 107,
		.period = 108,
		.weight = 109,
		.cpu = 110,
		.raw_size = sizeof(raw_data),
		.data_src = 111,
		.transaction = 112,
		.raw_data = (void *)raw_data,
		.callchain = &callchain.callchain,
		.no_hw_idx = false,
		.branch_stack = &branch_stack.branch_stack,
		.user_regs = {
			.abi = PERF_SAMPLE_REGS_ABI_64,
			.mask = sample_regs,
			.regs = regs,
		},
		.user_stack = {
			.size = sizeof(data),
			.data = (void *)data,
		},
		.read = {
			.time_enabled = 0x030a59d664fca7deULL,
			.time_running = 0x011b6ae553eb98edULL,
		},
		.intr_regs = {
			.abi = PERF_SAMPLE_REGS_ABI_64,
			.mask = sample_regs,
			.regs = regs,
		},
		.phys_addr = 113,
		.cgroup = 114,
		.data_page_size = 115,
		.code_page_size = 116,
		.aux_sample = {
			.size = sizeof(aux_data),
			.data = (void *)aux_data,
		},
	};
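	/* {value, id} pairs used for the read group when PERF_FORMAT_GROUP is set */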
	struct sample_read_value values[] = {{1, 5}, {9, 3}, {2, 7}, {6, 4},};
	struct perf_sample sample_out;
	size_t i, sz, bufsz;
	int err, ret = -1;

	if (sample_type & PERF_SAMPLE_REGS_USER)
		evsel.core.attr.sample_regs_user = sample_regs;

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		evsel.core.attr.sample_regs_intr = sample_regs;

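	/*
	 * The test sample's branch stack carries a hw_idx value, so tell the
	 * parser to expect that field as well.
	 */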
	if (sample_type & PERF_SAMPLE_BRANCH_STACK)
		evsel.core.attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;

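	/* Fill the registers with a pattern that avoids 0xff, the fill byte used for the size check below */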
	for (i = 0; i < sizeof(regs); i++)
		*(i + (u8 *)regs) = i & 0xfe;

	if (read_format & PERF_FORMAT_GROUP) {
		sample.read.group.nr = 4;
		sample.read.group.values = values;
	} else {
		sample.read.one.value = 0x08789faeb786aa87ULL;
		sample.read.one.id = 99;
	}

	sz = perf_event__sample_event_size(&sample, sample_type, read_format);
	bufsz = sz + 4096; /* Add a bit for overrun checking */
	event = malloc(bufsz);
	if (!event) {
		pr_debug("malloc failed\n");
		return -1;
	}

	memset(event, 0xff, bufsz);
	event->header.type = PERF_RECORD_SAMPLE;
	event->header.misc = 0;
	event->header.size = sz;

	err = perf_event__synthesize_sample(event, sample_type, read_format,
					    &sample);
	if (err) {
		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
			 "perf_event__synthesize_sample", sample_type, err);
		goto out_free;
	}

	/* The data does not contain 0xff so we use that to check the size */
	for (i = bufsz; i > 0; i--) {
		if (*(i - 1 + (u8 *)event) != 0xff)
			break;
	}
	if (i != sz) {
		pr_debug("Event size mismatch: actual %zu vs expected %zu\n",
			 i, sz);
		goto out_free;
	}

	evsel.sample_size = __evsel__sample_size(sample_type);

	err = evsel__parse_sample(&evsel, event, &sample_out);
	if (err) {
		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
			 "evsel__parse_sample", sample_type, err);
		goto out_free;
	}

	if (!samples_same(&sample, &sample_out, sample_type, read_format)) {
		pr_debug("parsing failed for sample_type %#"PRIx64"\n",
			 sample_type);
		goto out_free;
	}

	ret = 0;
out_free:
	free(event);
	if (ret && read_format)
		pr_debug("read_format %#"PRIx64"\n", read_format);
	return ret;
}

/**
 * test__sample_parsing - test sample parsing.
 *
 * This function implements a test that synthesizes a sample event, parses it
 * and then checks that the parsed sample matches the original sample. The test
 * checks sample format bits separately and together. If the test passes %0 is
 * returned, otherwise %-1 is returned.
 */
int test__sample_parsing(struct test *test __maybe_unused, int subtest __maybe_unused)
{
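	/*
	 * read_format values to test: 4-7 are PERF_FORMAT_ID with every
	 * combination of PERF_FORMAT_TOTAL_TIME_ENABLED/RUNNING, and 12-15
	 * add PERF_FORMAT_GROUP on top of those.
	 */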
	const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15};
	u64 sample_type;
	u64 sample_regs;
	size_t i;
	int err;

	/*
	 * Fail the test if it has not been updated when new sample format bits
	 * were added. Please actually update the test rather than just change
	 * the condition below.
	 */
	if (PERF_SAMPLE_MAX > PERF_SAMPLE_WEIGHT_STRUCT << 1) {
		pr_debug("sample format has changed, some new PERF_SAMPLE_ bit was introduced - test needs updating\n");
		return -1;
	}

	/* Test each sample format bit separately */
	for (sample_type = 1; sample_type != PERF_SAMPLE_MAX;
	     sample_type <<= 1) {
		/* Test read_format variations */
		if (sample_type == PERF_SAMPLE_READ) {
			for (i = 0; i < ARRAY_SIZE(rf); i++) {
				err = do_test(sample_type, 0, rf[i]);
				if (err)
					return err;
			}
			continue;
		}
		sample_regs = 0;

		if (sample_type == PERF_SAMPLE_REGS_USER)
			sample_regs = 0x3fff;

		if (sample_type == PERF_SAMPLE_REGS_INTR)
			sample_regs = 0xff0fff;

		err = do_test(sample_type, sample_regs, 0);
		if (err)
			return err;
	}

	/*
	 * Test all sample format bits together
	 * Note: PERF_SAMPLE_WEIGHT and PERF_SAMPLE_WEIGHT_STRUCT cannot
	 * be set simultaneously.
	 */
	sample_type = (PERF_SAMPLE_MAX - 1) & ~PERF_SAMPLE_WEIGHT;
	sample_regs = 0x3fff; /* shared by intr and user regs */
	for (i = 0; i < ARRAY_SIZE(rf); i++) {
		err = do_test(sample_type, sample_regs, rf[i]);
		if (err)
			return err;
	}

	return 0;
}