/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LIBPERF_INTERNAL_EVSEL_H
#define __LIBPERF_INTERNAL_EVSEL_H

#include <linux/types.h>
#include <linux/perf_event.h>
#include <stdbool.h>
#include <sys/types.h>
#include <internal/cpumap.h>

struct perf_thread_map;
struct xyarray;

/**
 * The per-thread accumulated period storage node.
 */
struct perf_sample_id_period {
	struct list_head node;
	struct hlist_node hnode;
	/* Holds total ID period value for PERF_SAMPLE_READ processing. */
	u64 period;
	/* The TID that the values belong to */
	u32 tid;
};

/**
 * perf_evsel_for_each_per_thread_period_safe - safely iterate through all the
 * per_stream_periods
 * @evsel: perf_evsel instance to iterate
 * @tmp: struct perf_sample_id_period temporary iterator
 * @item: struct perf_sample_id_period iterator
 */
#define perf_evsel_for_each_per_thread_period_safe(evsel, tmp, item) \
	list_for_each_entry_safe(item, tmp, &(evsel)->per_stream_periods, node)
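
/*
 * Example (a sketch, not taken from the implementation): releasing every
 * per-thread period node when tearing an evsel down. perf_evsel__free_id()
 * presumably does something equivalent.
 *
 *	struct perf_sample_id_period *pos, *tmp;
 *
 *	perf_evsel_for_each_per_thread_period_safe(evsel, tmp, pos) {
 *		list_del_init(&pos->node);
 *		free(pos);
 *	}
 */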
#define PERF_SAMPLE_ID__HLIST_BITS 4
#define PERF_SAMPLE_ID__HLIST_SIZE (1 << PERF_SAMPLE_ID__HLIST_BITS)
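
/*
 * The per-thread period nodes hang off a small fixed-size hash table in
 * struct perf_sample_id below, bucketed by TID. A minimal sketch of the
 * bucket selection, assuming hash_32() from <linux/hash.h> is the hash
 * in use:
 *
 *	struct hlist_head *bucket =
 *		&sid->periods[hash_32(tid, PERF_SAMPLE_ID__HLIST_BITS)];
 */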

/*
 * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there is
 * more than one entry in the evlist.
 */
struct perf_sample_id {
	struct hlist_node node;
	u64 id;
	struct perf_evsel *evsel;
	/*
	 * 'idx' will be used for AUX area sampling. A sample will have AUX area
	 * data that will be queued for decoding, where there are separate
	 * queues for each CPU (per-cpu tracing) or task (per-thread tracing).
	 * The sample ID can be used to look up 'idx', which is effectively the
	 * queue number.
	 */
	int idx;
	struct perf_cpu cpu;
	pid_t tid;

	/* Guest machine pid and VCPU, valid only if machine_pid is non-zero */
	pid_t machine_pid;
	struct perf_cpu vcpu;

	/*
	 * Per-thread and global event counts are mutually exclusive:
	 * Whilst it is possible to combine events into a group with differing
	 * values of PERF_SAMPLE_READ, it is not valid to have inconsistent
	 * values for `inherit`. Therefore it is not possible to have a
	 * situation where a per-thread event is sampled as a global event;
	 * all !inherit groups are global, and all groups where the sampling
	 * event is inherit + PERF_SAMPLE_READ will be per-thread. Any event
	 * that is part of such a group that is inherit but not PERF_SAMPLE_READ
	 * will be read as per-thread. If such an event can also trigger a
	 * sample (such as with sample_period > 0) then it will not cause
	 * `read_format` to be included in its PERF_RECORD_SAMPLE, and
	 * therefore will not expose the per-thread group members as global.
	 */
	union {
		/*
		 * Holds total ID period value for PERF_SAMPLE_READ processing
		 * (when the period is not per-thread).
		 */
		u64 period;
		/*
		 * Hash table, keyed by TID, holding the per-thread total ID
		 * period values for PERF_SAMPLE_READ processing (when the
		 * period is per-thread).
		 */
		struct hlist_head periods[PERF_SAMPLE_ID__HLIST_SIZE];
	};
};
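
/*
 * Which union member is live depends on whether the event's period is
 * per-thread. A sketch of the dispatch (the accessor that hides this
 * choice from callers is perf_sample_id__get_period_storage(), declared
 * below):
 *
 *	if (per_thread)
 *		... look up the sample's TID in sid->periods[] ...
 *	else
 *		sid->period += sample_period;
 */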

struct perf_evsel {
	struct list_head node;
	struct perf_event_attr attr;
	/** The commonly used cpu map of CPUs the event should be opened upon, etc. */
	struct perf_cpu_map *cpus;
	/**
	 * The cpu map read from the PMU. For core PMUs this is the list of all
	 * CPUs the event can be opened upon. For other PMUs this is the default
	 * cpu map for opening the event on, for example, the first CPU on a
	 * socket for an uncore event.
	 */
	struct perf_cpu_map *own_cpus;
	struct perf_thread_map *threads;
	struct xyarray *fd;
	struct xyarray *mmap;
	struct xyarray *sample_id;
	u64 *id;
	u32 ids;
	struct perf_evsel *leader;

	/*
	 * For events where the read_format value is per-thread rather than
	 * global, stores the per-thread cumulative period.
	 */
	struct list_head per_stream_periods;

	/* parse modifier helper */
	int nr_members;
	/*
	 * system_wide is for events that need to be on every CPU, irrespective
	 * of user requested CPUs or threads. The main example of this is the
	 * dummy event. Map propagation will set cpus for this event to all CPUs
	 * as software PMU events, like dummy, have an empty CPU map.
	 */
	bool system_wide;
	/*
	 * Some events, for example uncore events, require a CPU.
	 * That is, the CPU cannot be the 'any CPU' value of -1.
	 */
	bool requires_cpu;
	/** Is the PMU for the event a core one? Affects the handling of own_cpus. */
	bool is_pmu_core;
	int idx;
};
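
/*
 * A minimal sketch of setting up an evsel. perf_evsel__init() below is
 * expected to copy *attr, record idx and initialize the list heads
 * (including per_stream_periods); the attr values here are only an
 * illustration:
 *
 *	struct perf_evsel evsel;
 *	struct perf_event_attr attr = {
 *		.type = PERF_TYPE_HARDWARE,
 *		.config = PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *
 *	perf_evsel__init(&evsel, &attr, 0);
 */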

void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr,
		      int idx);
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
void perf_evsel__close_fd(struct perf_evsel *evsel);
void perf_evsel__free_fd(struct perf_evsel *evsel);
int perf_evsel__read_size(struct perf_evsel *evsel);
int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter);

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
void perf_evsel__free_id(struct perf_evsel *evsel);

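/*
 * Does the event's attr describe a per-thread PERF_SAMPLE_READ count?
 * Per the struct perf_sample_id comment above, this is presumably the
 * case when attr.inherit is set and attr.sample_type contains
 * PERF_SAMPLE_READ.
 */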
bool perf_evsel__attr_has_per_thread_sample_period(struct perf_evsel *evsel);

u64 *perf_sample_id__get_period_storage(struct perf_sample_id *sid, u32 tid,
					bool per_thread);
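
/*
 * Example (a sketch): accumulating a sample's period into the right
 * storage. 'tid' and 'period' stand in for values parsed from a
 * PERF_RECORD_SAMPLE, and 'per_thread' for the result of
 * perf_evsel__attr_has_per_thread_sample_period():
 *
 *	u64 *storage = perf_sample_id__get_period_storage(sid, tid,
 *							  per_thread);
 *	if (storage)
 *		*storage += period;
 *
 * For !per_thread this presumably returns &sid->period; otherwise it
 * finds (or allocates) the TID's perf_sample_id_period node in
 * sid->periods[].
 */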

#endif /* __LIBPERF_INTERNAL_EVSEL_H */