#ifndef __PERF_EVSEL_H
#define __PERF_EVSEL_H 1

#include <linux/list.h>
#include <stdbool.h>
#include <stddef.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include "xyarray.h"
#include "symbol.h"
#include "cpumap.h"
#include "counts.h"

struct perf_evsel;

/*
 * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there is
 * more than one entry in the evlist.
 */
struct perf_sample_id {
        struct hlist_node node;
        u64 id;
        struct perf_evsel *evsel;
        int idx;
        int cpu;
        pid_t tid;

        /* Holds total ID period value for PERF_SAMPLE_READ processing. */
        u64 period;
};

struct cgroup_sel;

/*
 * A 'struct perf_evsel_config_term' is used to pass event-specific
 * configuration data to the perf_evsel__config() routine.  It is
 * allocated during event parsing and attached to the
 * perf_evsel::config_terms list head.
 */
enum {
        PERF_EVSEL__CONFIG_TERM_PERIOD,
        PERF_EVSEL__CONFIG_TERM_FREQ,
        PERF_EVSEL__CONFIG_TERM_TIME,
        PERF_EVSEL__CONFIG_TERM_CALLGRAPH,
        PERF_EVSEL__CONFIG_TERM_STACK_USER,
        PERF_EVSEL__CONFIG_TERM_INHERIT,
        PERF_EVSEL__CONFIG_TERM_MAX,
};

struct perf_evsel_config_term {
        struct list_head list;
        int type;
        union {
                u64 period;
                u64 freq;
                bool time;
                char *callgraph;
                u64 stack_user;
                bool inherit;
        } val;
};

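/*
 * Example (illustrative sketch, not part of this header): how an
 * event-parsing helper might attach a sampling-period term to an evsel.
 * Assumes zalloc() from "util.h"; the variable names are hypothetical.
 *
 *      struct perf_evsel_config_term *term = zalloc(sizeof(*term));
 *
 *      if (term == NULL)
 *              return -ENOMEM;
 *      term->type = PERF_EVSEL__CONFIG_TERM_PERIOD;
 *      term->val.period = 100000;
 *      list_add_tail(&term->list, &evsel->config_terms);
 *
 * perf_evsel__config() later walks evsel->config_terms and applies each
 * term to the perf_event_attr it is setting up.
 */
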
/** struct perf_evsel - event selector
 *
 * @evlist: evlist this evsel is in, if it is in one.
 * @node: To insert it into evlist->entries or in other list_heads, say in
 *        the event parsing routines.
 * @name: Can be set to retain the original event name passed by the user,
 *        so that when showing results in tools such as 'perf stat', we
 *        show the name used, not some alias.
 * @id_pos: the position of the event id (PERF_SAMPLE_ID or
 *          PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of
 *          struct sample_event
 * @is_pos: the position (counting backwards) of the event id (PERF_SAMPLE_ID or
 *          PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if sample_id_all
 *          is used there is an id sample appended to non-sample events
 * @priv: tool-specific private data
 */
struct perf_evsel {
        struct list_head node;
        struct perf_evlist *evlist;
        struct perf_event_attr attr;
        char *filter;
        struct xyarray *fd;
        struct xyarray *sample_id;
        u64 *id;
        struct perf_counts *counts;
        struct perf_counts *prev_raw_counts;
        int idx;
        u32 ids;
        char *name;
        double scale;
        const char *unit;
        struct event_format *tp_format;
        off_t id_offset;
        void *priv;
        u64 db_id;
        struct cgroup_sel *cgrp;
        void *handler;
        struct cpu_map *cpus;
        struct cpu_map *own_cpus;
        struct thread_map *threads;
        unsigned int sample_size;
        int id_pos;
        int is_pos;
        bool snapshot;
        bool supported;
        bool needs_swap;
        bool no_aux_samples;
        bool immediate;
        bool system_wide;
        bool tracking;
        bool per_pkg;
        bool precise_max;
        /* parse modifier helper */
        int exclude_GH;
        int nr_members;
        int sample_read;
        unsigned long *per_pkg_mask;
        struct perf_evsel *leader;
        char *group_name;
        bool cmdline_group_boundary;
        struct list_head config_terms;
        int bpf_fd;
};

union u64_swap {
        u64 val64;
        u32 val32[2];
};

struct cpu_map;
struct target;
struct thread_map;
struct record_opts;

static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
{
        return evsel->cpus;
}

static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel)
{
        return perf_evsel__cpus(evsel)->nr;
}

void perf_counts_values__scale(struct perf_counts_values *count,
                               bool scale, s8 *pscaled);

void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread,
                                struct perf_counts_values *count);

int perf_evsel__object_config(size_t object_size,
                              int (*init)(struct perf_evsel *evsel),
                              void (*fini)(struct perf_evsel *evsel));

struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx);

static inline struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
{
        return perf_evsel__new_idx(attr, 0);
}

struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx);

/*
 * Returns pointer with encoded error via <linux/err.h> interface.
 */
static inline struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name)
{
        return perf_evsel__newtp_idx(sys, name, 0);
}

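/*
 * Example (illustrative sketch): checking the error-encoded return value
 * of perf_evsel__newtp().  The tracepoint name is just an example.
 *
 *      struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
 *
 *      if (IS_ERR(evsel))
 *              return PTR_ERR(evsel);
 */
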
struct event_format *event_format__new(const char *sys, const char *name);

void perf_evsel__init(struct perf_evsel *evsel,
                      struct perf_event_attr *attr, int idx);
void perf_evsel__exit(struct perf_evsel *evsel);
void perf_evsel__delete(struct perf_evsel *evsel);

void perf_evsel__config(struct perf_evsel *evsel,
                        struct record_opts *opts);

int __perf_evsel__sample_size(u64 sample_type);
void perf_evsel__calc_id_pos(struct perf_evsel *evsel);

bool perf_evsel__is_cache_op_valid(u8 type, u8 op);

#define PERF_EVSEL__MAX_ALIASES 8

extern const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
                                       [PERF_EVSEL__MAX_ALIASES];
extern const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
                                          [PERF_EVSEL__MAX_ALIASES];
extern const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
                                              [PERF_EVSEL__MAX_ALIASES];
extern const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX];
extern const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX];
int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
                                            char *bf, size_t size);
const char *perf_evsel__name(struct perf_evsel *evsel);

const char *perf_evsel__group_name(struct perf_evsel *evsel);
int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size);

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);

void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
                                  enum perf_event_sample_format bit);
void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
                                    enum perf_event_sample_format bit);

#define perf_evsel__set_sample_bit(evsel, bit) \
        __perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit)

#define perf_evsel__reset_sample_bit(evsel, bit) \
        __perf_evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit)

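/*
 * Example (illustrative sketch): the sample bit helpers take the suffix of
 * a PERF_SAMPLE_* flag, toggle it in evsel->attr.sample_type, and keep
 * evsel->sample_size and the id positions in sync with the new layout.
 *
 *      perf_evsel__set_sample_bit(evsel, CPU);      passes PERF_SAMPLE_CPU
 *      perf_evsel__reset_sample_bit(evsel, TIME);   passes PERF_SAMPLE_TIME
 */
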
void perf_evsel__set_sample_id(struct perf_evsel *evsel,
                               bool use_sample_identifier);

int perf_evsel__set_filter(struct perf_evsel *evsel, const char *filter);
int perf_evsel__append_filter(struct perf_evsel *evsel,
                              const char *op, const char *filter);
int perf_evsel__apply_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
                             const char *filter);
int perf_evsel__enable(struct perf_evsel *evsel);
int perf_evsel__disable(struct perf_evsel *evsel);

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
                             struct cpu_map *cpus);
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
                                struct thread_map *threads);
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
                     struct thread_map *threads);
void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads);

struct perf_sample;

void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
                         const char *name);
u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
                       const char *name);

static inline char *perf_evsel__strval(struct perf_evsel *evsel,
                                       struct perf_sample *sample,
                                       const char *name)
{
        return perf_evsel__rawptr(evsel, sample, name);
}

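/*
 * Example (illustrative sketch): pulling fields out of a parsed tracepoint
 * sample by name, where 'sample' is a struct perf_sample * filled in by
 * perf_evsel__parse_sample().  The field names below are the ones a
 * sched:sched_switch tracepoint carries; substitute your own event's fields.
 *
 *      pid_t next_pid = perf_evsel__intval(evsel, sample, "next_pid");
 *      const char *next_comm = perf_evsel__strval(evsel, sample, "next_comm");
 */
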
struct format_field;

struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name);

#define perf_evsel__match(evsel, t, c) \
        (evsel->attr.type == PERF_TYPE_##t && \
         evsel->attr.config == PERF_COUNT_##c)

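/*
 * Example (illustrative sketch): perf_evsel__match() pastes the PERF_TYPE_
 * and PERF_COUNT_ prefixes on for you, so matching the cycles event reads:
 *
 *      if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES))
 *              ...
 */
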
static inline bool perf_evsel__match2(struct perf_evsel *e1,
                                      struct perf_evsel *e2)
{
        return (e1->attr.type == e2->attr.type) &&
               (e1->attr.config == e2->attr.config);
}

#define perf_evsel__cmp(a, b) \
        ((a) && \
         (b) && \
         (a)->attr.type == (b)->attr.type && \
         (a)->attr.config == (b)->attr.config)

int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
                     struct perf_counts_values *count);

int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
                              int cpu, int thread, bool scale);

/**
 * perf_evsel__read_on_cpu - Read out the results on a CPU and thread
 *
 * @evsel - event selector to read value
 * @cpu - CPU of interest
 * @thread - thread of interest
 */
static inline int perf_evsel__read_on_cpu(struct perf_evsel *evsel,
                                          int cpu, int thread)
{
        return __perf_evsel__read_on_cpu(evsel, cpu, thread, false);
}

/**
 * perf_evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
 *
 * @evsel - event selector to read value
 * @cpu - CPU of interest
 * @thread - thread of interest
 */
static inline int perf_evsel__read_on_cpu_scaled(struct perf_evsel *evsel,
                                                 int cpu, int thread)
{
        return __perf_evsel__read_on_cpu(evsel, cpu, thread, true);
}

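/*
 * Example (illustrative sketch): counting an event on the current thread.
 * Assumes thread_map__new_by_tid() and thread_map__nr() from "thread_map.h"
 * and perf_counts() from "counts.h"; error handling is trimmed down to the
 * essentials.
 *
 *      struct perf_event_attr attr = {
 *              .type   = PERF_TYPE_HARDWARE,
 *              .config = PERF_COUNT_HW_CPU_CYCLES,
 *      };
 *      struct thread_map *threads = thread_map__new_by_tid(getpid());
 *      struct perf_evsel *evsel = perf_evsel__new(&attr);
 *
 *      if (evsel == NULL || threads == NULL)
 *              return -ENOMEM;
 *      if (perf_evsel__open_per_thread(evsel, threads) < 0)
 *              return -1;
 *      ... run the workload ...
 *      if (perf_evsel__read_on_cpu(evsel, 0, 0) == 0)
 *              printf("cycles: %" PRIu64 "\n",
 *                     perf_counts(evsel->counts, 0, 0)->val);
 *      perf_evsel__close(evsel, 1, thread_map__nr(threads));
 *      perf_evsel__delete(evsel);
 */
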
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
                             struct perf_sample *sample);

static inline struct perf_evsel *perf_evsel__next(struct perf_evsel *evsel)
{
        return list_entry(evsel->node.next, struct perf_evsel, node);
}

static inline struct perf_evsel *perf_evsel__prev(struct perf_evsel *evsel)
{
        return list_entry(evsel->node.prev, struct perf_evsel, node);
}

/**
 * perf_evsel__is_group_leader - Return whether given evsel is a leader event
 *
 * @evsel - evsel selector to be tested
 *
 * Return %true if @evsel is a group leader or a stand-alone event
 */
static inline bool perf_evsel__is_group_leader(const struct perf_evsel *evsel)
{
        return evsel->leader == evsel;
}

/**
 * perf_evsel__is_group_event - Return whether given evsel is a group event
 *
 * @evsel - evsel selector to be tested
 *
 * Return %true iff event group view is enabled and @evsel is an actual group
 * leader which has other members in the group
 */
static inline bool perf_evsel__is_group_event(struct perf_evsel *evsel)
{
        if (!symbol_conf.event_group)
                return false;

        return perf_evsel__is_group_leader(evsel) && evsel->nr_members > 1;
}

/**
 * perf_evsel__is_function_event - Return whether given evsel is a function
 * trace event
 *
 * @evsel - evsel selector to be tested
 *
 * Return %true if event is function trace event
 */
static inline bool perf_evsel__is_function_event(struct perf_evsel *evsel)
{
#define FUNCTION_EVENT "ftrace:function"

        return evsel->name &&
               !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));

#undef FUNCTION_EVENT
}

static inline bool perf_evsel__is_bpf_output(struct perf_evsel *evsel)
{
        struct perf_event_attr *attr = &evsel->attr;

        return (attr->config == PERF_COUNT_SW_BPF_OUTPUT) &&
               (attr->type == PERF_TYPE_SOFTWARE);
}

struct perf_attr_details {
        bool freq;
        bool verbose;
        bool event_group;
        bool force;
        bool trace_fields;
};

int perf_evsel__fprintf(struct perf_evsel *evsel,
                        struct perf_attr_details *details, FILE *fp);

bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
                          char *msg, size_t msgsize);
int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
                              int err, char *msg, size_t size);

static inline int perf_evsel__group_idx(struct perf_evsel *evsel)
{
        return evsel->idx - evsel->leader->idx;
}

#define for_each_group_member(_evsel, _leader) \
for ((_evsel) = list_entry((_leader)->node.next, struct perf_evsel, node); \
     (_evsel) && (_evsel)->leader == (_leader); \
     (_evsel) = list_entry((_evsel)->node.next, struct perf_evsel, node))

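/*
 * Example (illustrative sketch): walking the non-leader members of an event
 * group.  'leader' stands for any group-leader evsel; the iteration relies
 * on group members following their leader on the evlist.
 *
 *      struct perf_evsel *pos;
 *
 *      if (perf_evsel__is_group_leader(leader) && leader->nr_members > 1) {
 *              for_each_group_member(pos, leader)
 *                      printf("  member: %s\n", perf_evsel__name(pos));
 *      }
 */
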
static inline bool has_branch_callstack(struct perf_evsel *evsel)
{
        return evsel->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
}

typedef int (*attr__fprintf_f)(FILE *, const char *, const char *, void *);

int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
                             attr__fprintf_f attr__fprintf, void *priv);

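/*
 * Example (illustrative sketch): a minimal attr__fprintf_f callback.  It is
 * invoked once per set perf_event_attr field with the field name and its
 * value already formatted as strings; the callback name is hypothetical.
 *
 *      static int example_attr__fprintf(FILE *fp, const char *name,
 *                                       const char *val, void *priv)
 *      {
 *              return fprintf(fp, "  %s: %s\n", name, val);
 *      }
 *
 *      perf_event_attr__fprintf(stdout, &evsel->attr, example_attr__fprintf, NULL);
 */
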
#endif /* __PERF_EVSEL_H */