1// SPDX-License-Identifier: GPL-2.0
2#include <errno.h>
3#include <unistd.h>
4#include <sys/syscall.h>
5#include <perf/evsel.h>
6#include <perf/cpumap.h>
7#include <perf/threadmap.h>
8#include <linux/list.h>
9#include <internal/evsel.h>
10#include <linux/zalloc.h>
11#include <stdlib.h>
12#include <internal/xyarray.h>
13#include <internal/cpumap.h>
14#include <internal/mmap.h>
15#include <internal/threadmap.h>
16#include <internal/lib.h>
17#include <linux/string.h>
18#include <sys/ioctl.h>
19#include <sys/mman.h>
20#include <asm/bug.h>
21
/*
 * Removed: everything that was here duplicated, definition for definition,
 * the copy of this file that follows below (the copy below is the newer
 * revision: it adds per_stream_periods handling and uses
 * perf_cpu_map__new_any_cpu()).  Keeping both copies redefines every
 * function (perf_evsel__init, perf_evsel__open, ...) in the same
 * translation unit, which is a compile error in C.
 */
1// SPDX-License-Identifier: GPL-2.0
2#include <errno.h>
3#include <unistd.h>
4#include <sys/syscall.h>
5#include <perf/evsel.h>
6#include <perf/cpumap.h>
7#include <perf/threadmap.h>
8#include <linux/hash.h>
9#include <linux/list.h>
10#include <internal/evsel.h>
11#include <linux/zalloc.h>
12#include <stdlib.h>
13#include <internal/xyarray.h>
14#include <internal/cpumap.h>
15#include <internal/mmap.h>
16#include <internal/threadmap.h>
17#include <internal/lib.h>
18#include <linux/string.h>
19#include <sys/ioctl.h>
20#include <sys/mman.h>
21#include <asm/bug.h>
22
23void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr,
24 int idx)
25{
26 INIT_LIST_HEAD(&evsel->node);
27 INIT_LIST_HEAD(&evsel->per_stream_periods);
28 evsel->attr = *attr;
29 evsel->idx = idx;
30 evsel->leader = evsel;
31}
32
33struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
34{
35 struct perf_evsel *evsel = zalloc(sizeof(*evsel));
36
37 if (evsel != NULL)
38 perf_evsel__init(evsel, attr, 0);
39
40 return evsel;
41}
42
/*
 * Free an evsel allocated with perf_evsel__new().
 *
 * NOTE(review): only the evsel struct itself is freed here; the fd/mmap/id
 * tables are expected to have been released already via perf_evsel__close()
 * and friends — confirm at call sites.
 */
void perf_evsel__delete(struct perf_evsel *evsel)
{
	free(evsel);
}
47
/* Pointer to the fd slot for one (cpu map index, thread) pair. */
#define FD(_evsel, _cpu_map_idx, _thread) \
	((int *)xyarray__entry(_evsel->fd, _cpu_map_idx, _thread))
/* Pointer to the mmap slot for the pair, or NULL when no mmap table exists. */
#define MMAP(_evsel, _cpu_map_idx, _thread) \
	(_evsel->mmap ? ((struct perf_mmap *) xyarray__entry(_evsel->mmap, _cpu_map_idx, _thread)) \
		      : NULL)
53
54int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
55{
56 evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
57
58 if (evsel->fd) {
59 int idx, thread;
60
61 for (idx = 0; idx < ncpus; idx++) {
62 for (thread = 0; thread < nthreads; thread++) {
63 int *fd = FD(evsel, idx, thread);
64
65 if (fd)
66 *fd = -1;
67 }
68 }
69 }
70
71 return evsel->fd != NULL ? 0 : -ENOMEM;
72}
73
74static int perf_evsel__alloc_mmap(struct perf_evsel *evsel, int ncpus, int nthreads)
75{
76 evsel->mmap = xyarray__new(ncpus, nthreads, sizeof(struct perf_mmap));
77
78 return evsel->mmap != NULL ? 0 : -ENOMEM;
79}
80
/*
 * Thin wrapper around the perf_event_open(2) syscall.  The kernel takes
 * the raw cpu number, so the struct perf_cpu wrapper is unwrapped here.
 */
static int
sys_perf_event_open(struct perf_event_attr *attr,
		    pid_t pid, struct perf_cpu cpu, int group_fd,
		    unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu.cpu, group_fd, flags);
}
88
/*
 * Resolve the group-leader fd to pass to perf_event_open() for one
 * (cpu map index, thread) slot, stored via @group_fd.
 *
 * A leader opens itself with group_fd == -1.  A sibling requires its
 * leader to be open already: -ENOTCONN if the leader has no fd table at
 * all (events were opened out of order), -EBADF if the leader's slot is
 * missing or still closed.
 */
static int get_group_fd(struct perf_evsel *evsel, int cpu_map_idx, int thread, int *group_fd)
{
	struct perf_evsel *leader = evsel->leader;
	int *fd;

	if (evsel == leader) {
		*group_fd = -1;
		return 0;
	}

	/*
	 * Leader must be already processed/open,
	 * if not it's a bug.
	 */
	if (!leader->fd)
		return -ENOTCONN;

	fd = FD(leader, cpu_map_idx, thread);
	if (fd == NULL || *fd == -1)
		return -EBADF;

	*group_fd = *fd;

	return 0;
}
114
/*
 * Open the event on every (cpu, thread) combination via perf_event_open(2).
 *
 * NULL @cpus / @threads fall back to lazily created dummy maps ("any cpu" /
 * current process), cached in function-local statics across calls.  The fd
 * table is allocated on first use.  On the first failure every fd opened so
 * far is closed and a negative error is returned (-errno from the syscall,
 * or the get_group_fd()/-EINVAL bookkeeping errors); 0 on full success.
 */
int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
		     struct perf_thread_map *threads)
{
	struct perf_cpu cpu;
	int idx, thread, err = 0;

	if (cpus == NULL) {
		static struct perf_cpu_map *empty_cpu_map;

		if (empty_cpu_map == NULL) {
			empty_cpu_map = perf_cpu_map__new_any_cpu();
			if (empty_cpu_map == NULL)
				return -ENOMEM;
		}

		cpus = empty_cpu_map;
	}

	if (threads == NULL) {
		static struct perf_thread_map *empty_thread_map;

		if (empty_thread_map == NULL) {
			empty_thread_map = perf_thread_map__new_dummy();
			if (empty_thread_map == NULL)
				return -ENOMEM;
		}

		threads = empty_thread_map;
	}

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
		return -ENOMEM;

	perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
		for (thread = 0; thread < threads->nr; thread++) {
			int fd, group_fd, *evsel_fd;

			evsel_fd = FD(evsel, idx, thread);
			if (evsel_fd == NULL) {
				err = -EINVAL;
				goto out;
			}

			/* Siblings need their leader's fd; leaders get -1. */
			err = get_group_fd(evsel, idx, thread, &group_fd);
			if (err < 0)
				goto out;

			fd = sys_perf_event_open(&evsel->attr,
						 threads->map[thread].pid,
						 cpu, group_fd, 0);

			if (fd < 0) {
				err = -errno;
				goto out;
			}

			*evsel_fd = fd;
		}
	}
out:
	/* Partial opens are rolled back so callers see all-or-nothing. */
	if (err)
		perf_evsel__close(evsel);

	return err;
}
181
182static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu_map_idx)
183{
184 int thread;
185
186 for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
187 int *fd = FD(evsel, cpu_map_idx, thread);
188
189 if (fd && *fd >= 0) {
190 close(*fd);
191 *fd = -1;
192 }
193 }
194}
195
196void perf_evsel__close_fd(struct perf_evsel *evsel)
197{
198 for (int idx = 0; idx < xyarray__max_x(evsel->fd); idx++)
199 perf_evsel__close_fd_cpu(evsel, idx);
200}
201
/*
 * Release the fd table itself; the fds are expected to be closed already
 * (see perf_evsel__close(), which does both).
 */
void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

/* Close all fds and free the fd table; no-op if the event was never opened. */
void perf_evsel__close(struct perf_evsel *evsel)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel);
	perf_evsel__free_fd(evsel);
}

/* Close the fds of a single cpu map index, keeping the fd table allocated. */
void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd_cpu(evsel, cpu_map_idx);
}
224
/*
 * Unmap the ring buffer of every open fd and release the mmap table.
 * No-op when the event was never opened or never mmapped.
 */
void perf_evsel__munmap(struct perf_evsel *evsel)
{
	int idx, thread;

	if (evsel->fd == NULL || evsel->mmap == NULL)
		return;

	for (idx = 0; idx < xyarray__max_x(evsel->fd); idx++) {
		for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
			int *fd = FD(evsel, idx, thread);

			/* Slots that were never opened have nothing mapped. */
			if (fd == NULL || *fd < 0)
				continue;

			perf_mmap__munmap(MMAP(evsel, idx, thread));
		}
	}

	xyarray__delete(evsel->mmap);
	evsel->mmap = NULL;
}
246
/*
 * Map a (pages * page_size)-byte ring buffer for every open fd of the
 * event.
 *
 * Requires the event to be open and not already mmapped (-EINVAL
 * otherwise); -ENOMEM if the mmap table cannot be allocated.  On any
 * perf_mmap__mmap() failure everything mapped so far is torn down and
 * that error is returned.
 *
 * NOTE(review): @pages is presumably a power of two so that
 * pages * page_size - 1 forms a valid ring mask — not validated here.
 */
int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
{
	int ret, idx, thread;
	struct perf_mmap_param mp = {
		.prot = PROT_READ | PROT_WRITE,
		.mask = (pages * page_size) - 1,
	};

	if (evsel->fd == NULL || evsel->mmap)
		return -EINVAL;

	if (perf_evsel__alloc_mmap(evsel, xyarray__max_x(evsel->fd), xyarray__max_y(evsel->fd)) < 0)
		return -ENOMEM;

	for (idx = 0; idx < xyarray__max_x(evsel->fd); idx++) {
		for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
			int *fd = FD(evsel, idx, thread);
			struct perf_mmap *map;
			struct perf_cpu cpu = perf_cpu_map__cpu(evsel->cpus, idx);

			/* Only open slots get a ring buffer. */
			if (fd == NULL || *fd < 0)
				continue;

			map = MMAP(evsel, idx, thread);
			perf_mmap__init(map, NULL, false, NULL);

			ret = perf_mmap__mmap(map, &mp, *fd, cpu);
			if (ret) {
				/* Roll back everything mapped so far. */
				perf_evsel__munmap(evsel);
				return ret;
			}
		}
	}

	return 0;
}
283
284void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu_map_idx, int thread)
285{
286 int *fd = FD(evsel, cpu_map_idx, thread);
287
288 if (fd == NULL || *fd < 0 || MMAP(evsel, cpu_map_idx, thread) == NULL)
289 return NULL;
290
291 return MMAP(evsel, cpu_map_idx, thread)->base;
292}
293
294int perf_evsel__read_size(struct perf_evsel *evsel)
295{
296 u64 read_format = evsel->attr.read_format;
297 int entry = sizeof(u64); /* value */
298 int size = 0;
299 int nr = 1;
300
301 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
302 size += sizeof(u64);
303
304 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
305 size += sizeof(u64);
306
307 if (read_format & PERF_FORMAT_ID)
308 entry += sizeof(u64);
309
310 if (read_format & PERF_FORMAT_LOST)
311 entry += sizeof(u64);
312
313 if (read_format & PERF_FORMAT_GROUP) {
314 nr = evsel->nr_members;
315 size += sizeof(u64);
316 }
317
318 size += entry * nr;
319 return size;
320}
321
/* This only reads values for the leader */
static int perf_evsel__read_group(struct perf_evsel *evsel, int cpu_map_idx,
				  int thread, struct perf_counts_values *count)
{
	size_t size = perf_evsel__read_size(evsel);
	int *fd = FD(evsel, cpu_map_idx, thread);
	u64 read_format = evsel->attr.read_format;
	u64 *data;
	/* data[0] is the group's 'nr' word; the payload starts at 1. */
	int idx = 1;

	if (fd == NULL || *fd < 0)
		return -EINVAL;

	data = calloc(1, size);
	if (data == NULL)
		return -ENOMEM;

	if (readn(*fd, data, size) <= 0) {
		free(data);
		return -errno;
	}

	/*
	 * This reads only the leader event intentionally since we don't have
	 * perf counts values for sibling events.
	 */
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		count->ena = data[idx++];
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		count->run = data[idx++];

	/* value is always available */
	count->val = data[idx++];
	if (read_format & PERF_FORMAT_ID)
		count->id = data[idx++];
	if (read_format & PERF_FORMAT_LOST)
		count->lost = data[idx++];

	free(data);
	return 0;
}
363
/*
 * The perf read format is very flexible. It needs to set the proper
 * values according to the read format.
 *
 * Non-group layout: the value comes first, followed by the optional
 * fields in the order the kernel emits them (ena, run, id, lost).
 */
static void perf_evsel__adjust_values(struct perf_evsel *evsel, u64 *buf,
				      struct perf_counts_values *count)
{
	u64 read_format = evsel->attr.read_format;
	int n = 0;

	count->val = buf[n++];

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		count->ena = buf[n++];

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		count->run = buf[n++];

	if (read_format & PERF_FORMAT_ID)
		count->id = buf[n++];

	if (read_format & PERF_FORMAT_LOST)
		count->lost = buf[n++];
}
388
/*
 * Read the current counter value of one (cpu map index, thread) slot
 * into @count (zeroed first).
 *
 * Group events delegate to perf_evsel__read_group() (leader values
 * only).  Otherwise, when the event is mmapped and the read format
 * carries no ID/LOST words, perf_mmap__read_self() is tried first and
 * read(2) is the fallback.  Returns 0 on success, -EINVAL when the slot
 * is not open, -errno on a failed read.
 */
int perf_evsel__read(struct perf_evsel *evsel, int cpu_map_idx, int thread,
		     struct perf_counts_values *count)
{
	size_t size = perf_evsel__read_size(evsel);
	int *fd = FD(evsel, cpu_map_idx, thread);
	u64 read_format = evsel->attr.read_format;
	struct perf_counts_values buf;

	memset(count, 0, sizeof(*count));

	if (fd == NULL || *fd < 0)
		return -EINVAL;

	if (read_format & PERF_FORMAT_GROUP)
		return perf_evsel__read_group(evsel, cpu_map_idx, thread, count);

	if (MMAP(evsel, cpu_map_idx, thread) &&
	    !(read_format & (PERF_FORMAT_ID | PERF_FORMAT_LOST)) &&
	    !perf_mmap__read_self(MMAP(evsel, cpu_map_idx, thread), count))
		return 0;

	if (readn(*fd, buf.values, size) <= 0)
		return -errno;

	perf_evsel__adjust_values(evsel, buf.values, count);
	return 0;
}
416
417static int perf_evsel__ioctl(struct perf_evsel *evsel, int ioc, void *arg,
418 int cpu_map_idx, int thread)
419{
420 int *fd = FD(evsel, cpu_map_idx, thread);
421
422 if (fd == NULL || *fd < 0)
423 return -1;
424
425 return ioctl(*fd, ioc, arg);
426}
427
428static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
429 int ioc, void *arg,
430 int cpu_map_idx)
431{
432 int thread;
433
434 for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
435 int err = perf_evsel__ioctl(evsel, ioc, arg, cpu_map_idx, thread);
436
437 if (err)
438 return err;
439 }
440
441 return 0;
442}
443
/* Enable the event on every thread of one cpu map index. */
int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
	return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu_map_idx);
}

/* Enable one thread of the event across all of its cpus; stops at the
 * first ioctl failure. */
int perf_evsel__enable_thread(struct perf_evsel *evsel, int thread)
{
	struct perf_cpu cpu __maybe_unused;
	int idx;
	int err;

	perf_cpu_map__for_each_cpu(cpu, idx, evsel->cpus) {
		err = perf_evsel__ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, idx, thread);
		if (err)
			return err;
	}

	return 0;
}

/* Enable the event on every (cpu, thread) slot; first error wins. */
int perf_evsel__enable(struct perf_evsel *evsel)
{
	int i;
	int err = 0;

	for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
		err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, i);
	return err;
}

/* Disable the event on every thread of one cpu map index. */
int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
	return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu_map_idx);
}

/* Disable the event on every (cpu, thread) slot; first error wins. */
int perf_evsel__disable(struct perf_evsel *evsel)
{
	int i;
	int err = 0;

	for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++)
		err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, i);
	return err;
}
488
/*
 * Attach a filter string to every fd of the event.  Note this iterates
 * the evsel's own cpu map (unlike enable/disable, which walk the fd
 * table dimensions) and stops at the first ioctl failure.
 */
int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
{
	int err = 0, i;

	for (i = 0; i < perf_cpu_map__nr(evsel->cpus) && !err; i++)
		err = perf_evsel__run_ioctl(evsel,
				     PERF_EVENT_IOC_SET_FILTER,
				     (void *)filter, i);
	return err;
}
499
/* Accessors returning pointers into the evsel; callers must not free them. */
struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
{
	return evsel->cpus;
}

struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel)
{
	return evsel->threads;
}

struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel)
{
	return &evsel->attr;
}
514
/*
 * Allocate the sample_id table and the flat id[] array used to map
 * kernel sample IDs back to this evsel.  A zero dimension is a
 * successful no-op.  On -ENOMEM nothing is left allocated.
 */
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (ncpus == 0 || nthreads == 0)
		return 0;

	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		/* Keep the evsel consistent: undo the first allocation. */
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}
533
/*
 * Release everything perf_evsel__alloc_id() set up, plus any per-thread
 * period entries accumulated via perf_sample_id__get_period_storage().
 */
void perf_evsel__free_id(struct perf_evsel *evsel)
{
	struct perf_sample_id_period *pos, *n;

	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
	evsel->ids = 0;

	perf_evsel_for_each_per_thread_period_safe(evsel, n, pos) {
		list_del_init(&pos->node);
		free(pos);
	}
}
548
/*
 * True when this event's samples carry per-thread period values: an
 * inherited event sampling with both PERF_SAMPLE_READ and
 * PERF_SAMPLE_TID set reports counts per child thread.
 */
bool perf_evsel__attr_has_per_thread_sample_period(struct perf_evsel *evsel)
{
	return (evsel->attr.sample_type & PERF_SAMPLE_READ) &&
	       (evsel->attr.sample_type & PERF_SAMPLE_TID) &&
	       evsel->attr.inherit;
}
555
/*
 * Return the storage slot for a sample's period value.
 *
 * Without per-thread tracking this is simply &sid->period.  Otherwise the
 * tid is looked up in the sid's hash table, lazily allocating an entry on
 * first sight of a tid.  New entries are also linked onto the owning
 * evsel's per_stream_periods list so perf_evsel__free_id() can reclaim
 * them; when the sid has no owning evsel the allocation is refused and
 * NULL is returned (as it also is on OOM).
 */
u64 *perf_sample_id__get_period_storage(struct perf_sample_id *sid, u32 tid, bool per_thread)
{
	struct hlist_head *head;
	struct perf_sample_id_period *res;
	int hash;

	if (!per_thread)
		return &sid->period;

	hash = hash_32(tid, PERF_SAMPLE_ID__HLIST_BITS);
	head = &sid->periods[hash];

	hlist_for_each_entry(res, head, hnode)
		if (res->tid == tid)
			return &res->period;

	if (sid->evsel == NULL)
		return NULL;

	res = zalloc(sizeof(struct perf_sample_id_period));
	if (res == NULL)
		return NULL;

	INIT_LIST_HEAD(&res->node);
	res->tid = tid;

	list_add_tail(&res->node, &sid->evsel->per_stream_periods);
	hlist_add_head(&res->hnode, &sid->periods[hash]);

	return &res->period;
}
587
588void perf_counts_values__scale(struct perf_counts_values *count,
589 bool scale, __s8 *pscaled)
590{
591 s8 scaled = 0;
592
593 if (scale) {
594 if (count->run == 0) {
595 scaled = -1;
596 count->val = 0;
597 } else if (count->run < count->ena) {
598 scaled = 1;
599 count->val = (u64)((double)count->val * count->ena / count->run);
600 }
601 }
602
603 if (pscaled)
604 *pscaled = scaled;
605}