// SPDX-License-Identifier: GPL-2.0
#include <stdbool.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include "map_symbol.h"
#include "branch.h"
#include "event.h"
#include "evsel.h"
#include "debug.h"
#include "util/synthetic-events.h"

#include "tests.h"

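/*
 * COMP() compares a single scalar field of the two samples; MCOMP() compares
 * a field byte-wise with memcmp(). Both log the name of the differing field
 * and make samples_same() return false.
 */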
#define COMP(m) do { \
	if (s1->m != s2->m) { \
		pr_debug("Samples differ at '"#m"'\n"); \
		return false; \
	} \
} while (0)

#define MCOMP(m) do { \
	if (memcmp(&s1->m, &s2->m, sizeof(s1->m))) { \
		pr_debug("Samples differ at '"#m"'\n"); \
		return false; \
	} \
} while (0)

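/*
 * Compare two parsed samples field by field, but only the fields selected by
 * @type (PERF_SAMPLE_* bits) and @read_format, since only those fields are
 * round-tripped through the synthesized event.
 */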
static bool samples_same(const struct perf_sample *s1,
			 const struct perf_sample *s2,
			 u64 type, u64 read_format)
{
	size_t i;

	if (type & PERF_SAMPLE_IDENTIFIER)
		COMP(id);

	if (type & PERF_SAMPLE_IP)
		COMP(ip);

	if (type & PERF_SAMPLE_TID) {
		COMP(pid);
		COMP(tid);
	}

	if (type & PERF_SAMPLE_TIME)
		COMP(time);

	if (type & PERF_SAMPLE_ADDR)
		COMP(addr);

	if (type & PERF_SAMPLE_ID)
		COMP(id);

	if (type & PERF_SAMPLE_STREAM_ID)
		COMP(stream_id);

	if (type & PERF_SAMPLE_CPU)
		COMP(cpu);

	if (type & PERF_SAMPLE_PERIOD)
		COMP(period);

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			COMP(read.group.nr);
		else
			COMP(read.one.value);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			COMP(read.time_enabled);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			COMP(read.time_running);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			for (i = 0; i < s1->read.group.nr; i++)
				MCOMP(read.group.values[i]);
		} else {
			COMP(read.one.id);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		COMP(callchain->nr);
		for (i = 0; i < s1->callchain->nr; i++)
			COMP(callchain->ips[i]);
	}

	if (type & PERF_SAMPLE_RAW) {
		COMP(raw_size);
		if (memcmp(s1->raw_data, s2->raw_data, s1->raw_size)) {
			pr_debug("Samples differ at 'raw_data'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		COMP(branch_stack->nr);
		for (i = 0; i < s1->branch_stack->nr; i++)
			MCOMP(branch_stack->entries[i]);
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		size_t sz = hweight_long(s1->user_regs.mask) * sizeof(u64);

		COMP(user_regs.mask);
		COMP(user_regs.abi);
		if (s1->user_regs.abi &&
		    (!s1->user_regs.regs || !s2->user_regs.regs ||
		     memcmp(s1->user_regs.regs, s2->user_regs.regs, sz))) {
			pr_debug("Samples differ at 'user_regs'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		COMP(user_stack.size);
		if (memcmp(s1->user_stack.data, s2->user_stack.data,
			   s1->user_stack.size)) {
			pr_debug("Samples differ at 'user_stack'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		COMP(weight);

	if (type & PERF_SAMPLE_DATA_SRC)
		COMP(data_src);

	if (type & PERF_SAMPLE_TRANSACTION)
		COMP(transaction);

	if (type & PERF_SAMPLE_REGS_INTR) {
		size_t sz = hweight_long(s1->intr_regs.mask) * sizeof(u64);

		COMP(intr_regs.mask);
		COMP(intr_regs.abi);
		if (s1->intr_regs.abi &&
		    (!s1->intr_regs.regs || !s2->intr_regs.regs ||
		     memcmp(s1->intr_regs.regs, s2->intr_regs.regs, sz))) {
			pr_debug("Samples differ at 'intr_regs'\n");
			return false;
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR)
		COMP(phys_addr);

	return true;
}

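/*
 * Synthesize a PERF_RECORD_SAMPLE from a known struct perf_sample, verify that
 * the number of bytes written matches perf_event__sample_event_size(), parse
 * the event back and check the result matches the original sample.
 */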
static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
{
	struct evsel evsel = {
		.needs_swap = false,
		.core = {
			.attr = {
				.sample_type = sample_type,
				.read_format = read_format,
			},
		},
	};
	union perf_event *event;
	union {
		struct ip_callchain callchain;
		u64 data[64];
	} callchain = {
		/* 3 ips */
		.data = {3, 201, 202, 203},
	};
	union {
		struct branch_stack branch_stack;
		u64 data[64];
	} branch_stack = {
		/* 1 branch_entry */
		.data = {1, 211, 212, 213},
	};
	u64 regs[64];
	const u64 raw_data[] = {0x123456780a0b0c0dULL, 0x1102030405060708ULL};
	const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL};
	struct perf_sample sample = {
		.ip = 101,
		.pid = 102,
		.tid = 103,
		.time = 104,
		.addr = 105,
		.id = 106,
		.stream_id = 107,
		.period = 108,
		.weight = 109,
		.cpu = 110,
		.raw_size = sizeof(raw_data),
		.data_src = 111,
		.transaction = 112,
		.raw_data = (void *)raw_data,
		.callchain = &callchain.callchain,
		.branch_stack = &branch_stack.branch_stack,
		.user_regs = {
			.abi = PERF_SAMPLE_REGS_ABI_64,
			.mask = sample_regs,
			.regs = regs,
		},
		.user_stack = {
			.size = sizeof(data),
			.data = (void *)data,
		},
		.read = {
			.time_enabled = 0x030a59d664fca7deULL,
			.time_running = 0x011b6ae553eb98edULL,
		},
		.intr_regs = {
			.abi = PERF_SAMPLE_REGS_ABI_64,
			.mask = sample_regs,
			.regs = regs,
		},
		.phys_addr = 113,
	};
	struct sample_read_value values[] = {{1, 5}, {9, 3}, {2, 7}, {6, 4},};
	struct perf_sample sample_out;
	size_t i, sz, bufsz;
	int err, ret = -1;

	if (sample_type & PERF_SAMPLE_REGS_USER)
		evsel.core.attr.sample_regs_user = sample_regs;

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		evsel.core.attr.sample_regs_intr = sample_regs;

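	/*
	 * Fill the register values with a recognizable pattern that never
	 * contains 0xff, since 0xff is used below to detect how much of the
	 * event buffer was actually written.
	 */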
	for (i = 0; i < sizeof(regs); i++)
		*(i + (u8 *)regs) = i & 0xfe;

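	/* Populate the read values in the layout selected by read_format */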
	if (read_format & PERF_FORMAT_GROUP) {
		sample.read.group.nr = 4;
		sample.read.group.values = values;
	} else {
		sample.read.one.value = 0x08789faeb786aa87ULL;
		sample.read.one.id = 99;
	}

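	/* Size the synthesized event should occupy for this sample_type/read_format */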
	sz = perf_event__sample_event_size(&sample, sample_type, read_format);
	bufsz = sz + 4096; /* Add a bit for overrun checking */
	event = malloc(bufsz);
	if (!event) {
		pr_debug("malloc failed\n");
		return -1;
	}

	memset(event, 0xff, bufsz);
	event->header.type = PERF_RECORD_SAMPLE;
	event->header.misc = 0;
	event->header.size = sz;

	err = perf_event__synthesize_sample(event, sample_type, read_format,
					    &sample);
	if (err) {
		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
			 "perf_event__synthesize_sample", sample_type, err);
		goto out_free;
	}

	/* The data does not contain 0xff so we use that to check the size */
	for (i = bufsz; i > 0; i--) {
		if (*(i - 1 + (u8 *)event) != 0xff)
			break;
	}
	if (i != sz) {
		pr_debug("Event size mismatch: actual %zu vs expected %zu\n",
			 i, sz);
		goto out_free;
	}

	evsel.sample_size = __perf_evsel__sample_size(sample_type);

	err = perf_evsel__parse_sample(&evsel, event, &sample_out);
	if (err) {
		pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
			 "perf_evsel__parse_sample", sample_type, err);
		goto out_free;
	}

	if (!samples_same(&sample, &sample_out, sample_type, read_format)) {
		pr_debug("parsing failed for sample_type %#"PRIx64"\n",
			 sample_type);
		goto out_free;
	}

	ret = 0;
out_free:
	free(event);
	if (ret && read_format)
		pr_debug("read_format %#"PRIx64"\n", read_format);
	return ret;
}

/**
 * test__sample_parsing - test sample parsing.
 *
 * This function implements a test that synthesizes a sample event, parses it
 * and then checks that the parsed sample matches the original sample. The test
 * checks sample format bits separately and together. If the test passes %0 is
 * returned, otherwise %-1 is returned.
 */
int test__sample_parsing(struct test *test __maybe_unused, int subtest __maybe_unused)
{
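	/*
	 * read_format values to exercise: every combination of
	 * PERF_FORMAT_TOTAL_TIME_ENABLED/RUNNING and PERF_FORMAT_GROUP,
	 * always combined with PERF_FORMAT_ID, which is forced for
	 * PERF_SAMPLE_READ.
	 */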
	const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15};
	u64 sample_type;
	u64 sample_regs;
	size_t i;
	int err;

	/*
	 * Fail the test if it has not been updated when new sample format bits
	 * were added. Please actually update the test rather than just change
	 * the condition below.
	 */
	if (PERF_SAMPLE_MAX > PERF_SAMPLE_PHYS_ADDR << 1) {
		pr_debug("sample format has changed, some new PERF_SAMPLE_ bit was introduced - test needs updating\n");
		return -1;
	}

	/* Test each sample format bit separately */
	for (sample_type = 1; sample_type != PERF_SAMPLE_MAX;
	     sample_type <<= 1) {
		/* Test read_format variations */
		if (sample_type == PERF_SAMPLE_READ) {
			for (i = 0; i < ARRAY_SIZE(rf); i++) {
				err = do_test(sample_type, 0, rf[i]);
				if (err)
					return err;
			}
			continue;
		}
		sample_regs = 0;

		if (sample_type == PERF_SAMPLE_REGS_USER)
			sample_regs = 0x3fff;

		if (sample_type == PERF_SAMPLE_REGS_INTR)
			sample_regs = 0xff0fff;

		err = do_test(sample_type, sample_regs, 0);
		if (err)
			return err;
	}

	/* Test all sample format bits together */
	sample_type = PERF_SAMPLE_MAX - 1;
	sample_regs = 0x3fff; /* shared by intr and user regs */
	for (i = 0; i < ARRAY_SIZE(rf); i++) {
		err = do_test(sample_type, sample_regs, rf[i]);
		if (err)
			return err;
	}

	return 0;
}