/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_EVLIST_H
#define __PERF_EVLIST_H 1

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/refcount.h>
#include <linux/list.h>
#include <api/fd/array.h>
#include <stdio.h>
#include "../perf.h"
#include "event.h"
#include "evsel.h"
#include "mmap.h"
#include "util.h"
#include <signal.h>
#include <unistd.h>

struct pollfd;
struct thread_map;
struct cpu_map;
struct record_opts;

#define PERF_EVLIST__HLIST_BITS 8
#define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
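/*
 * Illustrative sketch (not part of this header): the heads[] table below is
 * sized by these defines, and sample IDs are expected to be hashed into one
 * of its 2^PERF_EVLIST__HLIST_BITS buckets, e.g.:
 *
 *	#include <linux/hash.h>
 *
 *	struct hlist_head *bucket;
 *
 *	bucket = &evlist->heads[hash_64(id, PERF_EVLIST__HLIST_BITS)];
 *	hlist_add_head(&sid->node, bucket);
 *
 * hash_64() is the generic kernel hash helper; the exact insertion performed
 * by perf_evlist__id_add() may differ.
 */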

struct perf_evlist {
	struct list_head entries;
	struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
	int nr_entries;
	int nr_groups;
	int nr_mmaps;
	bool enabled;
	bool has_user_cpus;
	size_t mmap_len;
	int id_pos;
	int is_pos;
	u64 combined_sample_type;
	enum bkw_mmap_state bkw_mmap_state;
	struct {
		int cork_fd;
		pid_t pid;
	} workload;
	struct fdarray pollfd;
	struct perf_mmap *mmap;
	struct perf_mmap *overwrite_mmap;
	struct thread_map *threads;
	struct cpu_map *cpus;
	struct perf_evsel *selected;
	struct events_stats stats;
	struct perf_env *env;
	u64 first_sample_time;
	u64 last_sample_time;
};

struct perf_evsel_str_handler {
	const char *name;
	void *handler;
};

struct perf_evlist *perf_evlist__new(void);
struct perf_evlist *perf_evlist__new_default(void);
struct perf_evlist *perf_evlist__new_dummy(void);
void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads);
void perf_evlist__exit(struct perf_evlist *evlist);
void perf_evlist__delete(struct perf_evlist *evlist);

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry);
void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel);

int __perf_evlist__add_default(struct perf_evlist *evlist, bool precise);

static inline int perf_evlist__add_default(struct perf_evlist *evlist)
{
	return __perf_evlist__add_default(evlist, true);
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs);

#define perf_evlist__add_default_attrs(evlist, array) \
	__perf_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array))
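/*
 * Illustrative sketch (not from this header): a typical way to use the macro
 * above is with a static attribute array, letting ARRAY_SIZE() supply
 * nr_attrs. The attribute choices here are just an example:
 *
 *	struct perf_event_attr default_attrs[] = {
 *		{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
 *		{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
 *	};
 *
 *	if (perf_evlist__add_default_attrs(evlist, default_attrs) < 0)
 *		return -1;
 */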

int perf_evlist__add_dummy(struct perf_evlist *evlist);

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler);

void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
				   enum perf_event_sample_format bit);
void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
				     enum perf_event_sample_format bit);

#define perf_evlist__set_sample_bit(evlist, bit) \
	__perf_evlist__set_sample_bit(evlist, PERF_SAMPLE_##bit)

#define perf_evlist__reset_sample_bit(evlist, bit) \
	__perf_evlist__reset_sample_bit(evlist, PERF_SAMPLE_##bit)
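/*
 * Illustrative sketch (not from this header): because of the token pasting
 * above, callers pass only the PERF_SAMPLE_* suffix, e.g.:
 *
 *	perf_evlist__set_sample_bit(evlist, CPU);
 *		// expands to __perf_evlist__set_sample_bit(evlist, PERF_SAMPLE_CPU)
 *	perf_evlist__reset_sample_bit(evlist, TIME);
 *		// expands to __perf_evlist__reset_sample_bit(evlist, PERF_SAMPLE_TIME)
 */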

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter);
int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid);
int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids);

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id);

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name);

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id);
int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu, int thread, int fd);

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask);

int perf_evlist__poll(struct perf_evlist *evlist, int timeout);

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
						u64 id);

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);

void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist, enum bkw_mmap_state state);

void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);

int perf_evlist__open(struct perf_evlist *evlist);
void perf_evlist__close(struct perf_evlist *evlist);

struct callchain_param;

void perf_evlist__set_id_pos(struct perf_evlist *evlist);
bool perf_can_sample_identifier(void);
bool perf_can_record_switch_events(void);
bool perf_can_record_cpu_wide(void);
void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
			 struct callchain_param *callchain);
int record_opts__config(struct record_opts *opts);

int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct target *target,
				  const char *argv[], bool pipe_output,
				  void (*exec_error)(int signo, siginfo_t *info,
						     void *ucontext));
int perf_evlist__start_workload(struct perf_evlist *evlist);

struct option;

int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str);
int perf_evlist__parse_mmap_pages(const struct option *opt,
				  const char *str,
				  int unset);

unsigned long perf_event_mlock_kb_in_pages(void);

int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
			 unsigned int auxtrace_pages,
			 bool auxtrace_overwrite);
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages);
void perf_evlist__munmap(struct perf_evlist *evlist);

size_t perf_evlist__mmap_size(unsigned long pages);

void perf_evlist__disable(struct perf_evlist *evlist);
void perf_evlist__enable(struct perf_evlist *evlist);
void perf_evlist__toggle_enable(struct perf_evlist *evlist);

int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
				  struct perf_evsel *evsel, int idx);

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel);

void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
			   struct thread_map *threads);
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target);
int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel);

void __perf_evlist__set_leader(struct list_head *list);
void perf_evlist__set_leader(struct perf_evlist *evlist);

u64 perf_evlist__read_format(struct perf_evlist *evlist);
u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist);
u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist);
u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist);
bool perf_evlist__sample_id_all(struct perf_evlist *evlist);
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist);

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample);

int perf_evlist__parse_sample_timestamp(struct perf_evlist *evlist,
					union perf_event *event,
					u64 *timestamp);

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist);
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist);
bool perf_evlist__valid_read_format(struct perf_evlist *evlist);

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list);

static inline bool perf_evlist__empty(struct perf_evlist *evlist)
{
	return list_empty(&evlist->entries);
}

static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist)
{
	return list_entry(evlist->entries.next, struct perf_evsel, node);
}

static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist)
{
	return list_entry(evlist->entries.prev, struct perf_evsel, node);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp);

int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size);
int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size);

bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str);
void perf_evlist__to_front(struct perf_evlist *evlist,
			   struct perf_evsel *move_evsel);

/**
 * __evlist__for_each_entry - iterate thru all the evsels
 * @list: list_head instance to iterate
 * @evsel: struct evsel iterator
 */
#define __evlist__for_each_entry(list, evsel) \
	list_for_each_entry(evsel, list, node)

/**
 * evlist__for_each_entry - iterate thru all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct evsel iterator
 */
#define evlist__for_each_entry(evlist, evsel) \
	__evlist__for_each_entry(&(evlist)->entries, evsel)

/**
 * __evlist__for_each_entry_continue - continue iteration thru all the evsels
 * @list: list_head instance to iterate
 * @evsel: struct evsel iterator
 */
#define __evlist__for_each_entry_continue(list, evsel) \
	list_for_each_entry_continue(evsel, list, node)

/**
 * evlist__for_each_entry_continue - continue iteration thru all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct evsel iterator
 */
#define evlist__for_each_entry_continue(evlist, evsel) \
	__evlist__for_each_entry_continue(&(evlist)->entries, evsel)

/**
 * __evlist__for_each_entry_reverse - iterate thru all the evsels in reverse order
 * @list: list_head instance to iterate
 * @evsel: struct evsel iterator
 */
#define __evlist__for_each_entry_reverse(list, evsel) \
	list_for_each_entry_reverse(evsel, list, node)

/**
 * evlist__for_each_entry_reverse - iterate thru all the evsels in reverse order
 * @evlist: evlist instance to iterate
 * @evsel: struct evsel iterator
 */
#define evlist__for_each_entry_reverse(evlist, evsel) \
	__evlist__for_each_entry_reverse(&(evlist)->entries, evsel)

/**
 * __evlist__for_each_entry_safe - safely iterate thru all the evsels
 * @list: list_head instance to iterate
 * @tmp: struct evsel temp iterator
 * @evsel: struct evsel iterator
 */
#define __evlist__for_each_entry_safe(list, tmp, evsel) \
	list_for_each_entry_safe(evsel, tmp, list, node)

/**
 * evlist__for_each_entry_safe - safely iterate thru all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct evsel iterator
 * @tmp: struct evsel temp iterator
 */
#define evlist__for_each_entry_safe(evlist, tmp, evsel) \
	__evlist__for_each_entry_safe(&(evlist)->entries, tmp, evsel)

void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
				     struct perf_evsel *tracking_evsel);

void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr);

struct perf_evsel *
perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str);

struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
					    union perf_event *event);

bool perf_evlist__exclude_kernel(struct perf_evlist *evlist);
#endif /* __PERF_EVLIST_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_EVLIST_H
#define __PERF_EVLIST_H 1

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/refcount.h>
#include <linux/list.h>
#include <api/fd/array.h>
#include <internal/evlist.h>
#include <internal/evsel.h>
#include <perf/evlist.h>
#include "events_stats.h"
#include "evsel.h"
#include <pthread.h>
#include <signal.h>
#include <unistd.h>

struct pollfd;
struct thread_map;
struct perf_cpu_map;
struct record_opts;
struct target;

/*
 * State machine of bkw_mmap_state:
 *
 *                     .________________(forbid)_____________.
 *                     |                                      V
 *  NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY
 *                     ^  ^              |   ^               |
 *                     |  |__(forbid)____/   |___(forbid)___/|
 *                     |                                     |
 *                      \_________________(3)_______________/
 *
 * NOTREADY     : Backward ring buffers are not ready
 * RUNNING      : Backward ring buffers are recording
 * DATA_PENDING : We are required to collect data from backward ring buffers
 * EMPTY        : We have collected data from backward ring buffers.
 *
 * (0): Setup backward ring buffer
 * (1): Pause ring buffers for reading
 * (2): Read from ring buffers
 * (3): Resume ring buffers for recording
 */
enum bkw_mmap_state {
	BKW_MMAP_NOTREADY,
	BKW_MMAP_RUNNING,
	BKW_MMAP_DATA_PENDING,
	BKW_MMAP_EMPTY,
};
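/*
 * Illustrative sketch (not part of this header), following the documented
 * transitions above: a reader pauses the backward/overwrite buffers before
 * draining them and resumes them afterwards. Error handling is omitted:
 *
 *	evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING); // (1) pause for reading
 *	// ... drain events from the overwrite mmaps ...         // (2) DATA_PENDING -> EMPTY
 *	evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);       // (3) resume recording
 */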

struct event_enable_timer;

struct evlist {
	struct perf_evlist core;
	bool enabled;
	int id_pos;
	int is_pos;
	int nr_br_cntr;
	u64 combined_sample_type;
	enum bkw_mmap_state bkw_mmap_state;
	struct {
		int cork_fd;
		pid_t pid;
	} workload;
	struct mmap *mmap;
	struct mmap *overwrite_mmap;
	struct evsel *selected;
	struct events_stats stats;
	struct perf_env *env;
	void (*trace_event_sample_raw)(struct evlist *evlist,
				       union perf_event *event,
				       struct perf_sample *sample);
	u64 first_sample_time;
	u64 last_sample_time;
	struct {
		pthread_t th;
		volatile int done;
	} thread;
	struct {
		int fd;		/* control file descriptor */
		int ack;	/* ack file descriptor for control commands */
		int pos;	/* index at evlist core object to check signals */
	} ctl_fd;
	struct event_enable_timer *eet;
};

struct evsel_str_handler {
	const char *name;
	void *handler;
};

struct evlist *evlist__new(void);
struct evlist *evlist__new_default(void);
struct evlist *evlist__new_dummy(void);
void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
		  struct perf_thread_map *threads);
void evlist__exit(struct evlist *evlist);
void evlist__delete(struct evlist *evlist);

void evlist__add(struct evlist *evlist, struct evsel *entry);
void evlist__remove(struct evlist *evlist, struct evsel *evsel);

int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs);

int evlist__add_dummy(struct evlist *evlist);
struct evsel *evlist__add_aux_dummy(struct evlist *evlist, bool system_wide);
static inline struct evsel *evlist__add_dummy_on_all_cpus(struct evlist *evlist)
{
	return evlist__add_aux_dummy(evlist, true);
}
#ifdef HAVE_LIBTRACEEVENT
struct evsel *evlist__add_sched_switch(struct evlist *evlist, bool system_wide);
#endif

int evlist__add_sb_event(struct evlist *evlist, struct perf_event_attr *attr,
			 evsel__sb_cb_t cb, void *data);
void evlist__set_cb(struct evlist *evlist, evsel__sb_cb_t cb, void *data);
int evlist__start_sb_thread(struct evlist *evlist, struct target *target);
void evlist__stop_sb_thread(struct evlist *evlist);
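/*
 * Illustrative sketch (not part of this header): the side-band thread is
 * typically driven from a dedicated evlist. The callback type evsel__sb_cb_t
 * comes from evsel.h; attr, my_sb_cb, my_data and target below are
 * placeholders and error handling is omitted:
 *
 *	struct evlist *sb_evlist = evlist__new();
 *
 *	evlist__add_sb_event(sb_evlist, &attr, my_sb_cb, my_data);
 *	evlist__start_sb_thread(sb_evlist, target);
 *	// ... main work ...
 *	evlist__stop_sb_thread(sb_evlist);
 */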

#ifdef HAVE_LIBTRACEEVENT
int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name, void *handler);
#endif

int __evlist__set_tracepoints_handlers(struct evlist *evlist,
				       const struct evsel_str_handler *assocs,
				       size_t nr_assocs);

#define evlist__set_tracepoints_handlers(evlist, array) \
	__evlist__set_tracepoints_handlers(evlist, array, ARRAY_SIZE(array))
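/*
 * Illustrative sketch (not part of this header): associating handlers with
 * already-added tracepoint evsels by name. The tracepoint names and handler
 * functions are hypothetical:
 *
 *	static const struct evsel_str_handler handlers[] = {
 *		{ "sched:sched_switch", handle_sched_switch },
 *		{ "sched:sched_wakeup", handle_sched_wakeup },
 *	};
 *
 *	if (evlist__set_tracepoints_handlers(evlist, handlers) < 0)
 *		return -1;
 */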

int evlist__set_tp_filter(struct evlist *evlist, const char *filter);
int evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids);

int evlist__append_tp_filter(struct evlist *evlist, const char *filter);

int evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid);
int evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids);

struct evsel *evlist__find_tracepoint_by_name(struct evlist *evlist, const char *name);

int evlist__add_pollfd(struct evlist *evlist, int fd);
int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask);

#ifdef HAVE_EVENTFD_SUPPORT
int evlist__add_wakeup_eventfd(struct evlist *evlist, int fd);
#endif

int evlist__poll(struct evlist *evlist, int timeout);

struct evsel *evlist__id2evsel(struct evlist *evlist, u64 id);
struct evsel *evlist__id2evsel_strict(struct evlist *evlist, u64 id);

struct perf_sample_id *evlist__id2sid(struct evlist *evlist, u64 id);

void evlist__toggle_bkw_mmap(struct evlist *evlist, enum bkw_mmap_state state);

void evlist__mmap_consume(struct evlist *evlist, int idx);

int evlist__open(struct evlist *evlist);
void evlist__close(struct evlist *evlist);

struct callchain_param;

void evlist__set_id_pos(struct evlist *evlist);
void evlist__config(struct evlist *evlist, struct record_opts *opts, struct callchain_param *callchain);
int record_opts__config(struct record_opts *opts);

int evlist__prepare_workload(struct evlist *evlist, struct target *target,
			     const char *argv[], bool pipe_output,
			     void (*exec_error)(int signo, siginfo_t *info, void *ucontext));
int evlist__start_workload(struct evlist *evlist);
void evlist__cancel_workload(struct evlist *evlist);
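/*
 * Illustrative sketch (not part of this header): a rough record-style call
 * sequence. The workload is forked but held until evlist__start_workload()
 * releases it; opts, target, argv, callchain and sig_handler are placeholders
 * and error handling is omitted:
 *
 *	evlist__create_maps(evlist, target);
 *	evlist__prepare_workload(evlist, target, argv, false, sig_handler);
 *	evlist__config(evlist, opts, callchain);
 *	evlist__open(evlist);
 *	evlist__mmap(evlist, opts->mmap_pages);
 *	evlist__enable(evlist);
 *	evlist__start_workload(evlist);
 *	// ... poll and consume the ring buffers ...
 */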

struct option;

int __evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str);
int evlist__parse_mmap_pages(const struct option *opt, const char *str, int unset);

unsigned long perf_event_mlock_kb_in_pages(void);

int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
		    unsigned int auxtrace_pages,
		    bool auxtrace_overwrite, int nr_cblocks,
		    int affinity, int flush, int comp_level);
int evlist__mmap(struct evlist *evlist, unsigned int pages);
void evlist__munmap(struct evlist *evlist);

size_t evlist__mmap_size(unsigned long pages);

void evlist__disable(struct evlist *evlist);
void evlist__enable(struct evlist *evlist);
void evlist__toggle_enable(struct evlist *evlist);
void evlist__disable_evsel(struct evlist *evlist, char *evsel_name);
void evlist__enable_evsel(struct evlist *evlist, char *evsel_name);
void evlist__disable_non_dummy(struct evlist *evlist);
void evlist__enable_non_dummy(struct evlist *evlist);

void evlist__set_selected(struct evlist *evlist, struct evsel *evsel);

int evlist__create_maps(struct evlist *evlist, struct target *target);
int evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel,
			  struct target *target);

u64 __evlist__combined_sample_type(struct evlist *evlist);
u64 evlist__combined_sample_type(struct evlist *evlist);
u64 evlist__combined_branch_type(struct evlist *evlist);
void evlist__update_br_cntr(struct evlist *evlist);
bool evlist__sample_id_all(struct evlist *evlist);
u16 evlist__id_hdr_size(struct evlist *evlist);

int evlist__parse_sample(struct evlist *evlist, union perf_event *event, struct perf_sample *sample);
int evlist__parse_sample_timestamp(struct evlist *evlist, union perf_event *event, u64 *timestamp);

bool evlist__valid_sample_type(struct evlist *evlist);
bool evlist__valid_sample_id_all(struct evlist *evlist);
bool evlist__valid_read_format(struct evlist *evlist);

void evlist__splice_list_tail(struct evlist *evlist, struct list_head *list);

static inline bool evlist__empty(struct evlist *evlist)
{
	return list_empty(&evlist->core.entries);
}

static inline struct evsel *evlist__first(struct evlist *evlist)
{
	struct perf_evsel *evsel = perf_evlist__first(&evlist->core);

	return container_of(evsel, struct evsel, core);
}

static inline struct evsel *evlist__last(struct evlist *evlist)
{
	struct perf_evsel *evsel = perf_evlist__last(&evlist->core);

	return container_of(evsel, struct evsel, core);
}

static inline int evlist__nr_groups(struct evlist *evlist)
{
	return perf_evlist__nr_groups(&evlist->core);
}

int evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size);
int evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size);

bool evlist__can_select_event(struct evlist *evlist, const char *str);
void evlist__to_front(struct evlist *evlist, struct evsel *move_evsel);

/**
 * __evlist__for_each_entry - iterate thru all the evsels
 * @list: list_head instance to iterate
 * @evsel: struct evsel iterator
 */
#define __evlist__for_each_entry(list, evsel) \
	list_for_each_entry(evsel, list, core.node)

/**
 * evlist__for_each_entry - iterate thru all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct evsel iterator
 */
#define evlist__for_each_entry(evlist, evsel) \
	__evlist__for_each_entry(&(evlist)->core.entries, evsel)
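/*
 * Illustrative sketch (not part of this header): evsel__name() here is just
 * one thing a caller might do with each evsel:
 *
 *	struct evsel *evsel;
 *
 *	evlist__for_each_entry(evlist, evsel) {
 *		pr_debug("event: %s\n", evsel__name(evsel));
 *	}
 */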

/**
 * __evlist__for_each_entry_continue - continue iteration thru all the evsels
 * @list: list_head instance to iterate
 * @evsel: struct evsel iterator
 */
#define __evlist__for_each_entry_continue(list, evsel) \
	list_for_each_entry_continue(evsel, list, core.node)

/**
 * evlist__for_each_entry_continue - continue iteration thru all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct evsel iterator
 */
#define evlist__for_each_entry_continue(evlist, evsel) \
	__evlist__for_each_entry_continue(&(evlist)->core.entries, evsel)

/**
 * __evlist__for_each_entry_from - continue iteration from @evsel (included)
 * @list: list_head instance to iterate
 * @evsel: struct evsel iterator
 */
#define __evlist__for_each_entry_from(list, evsel) \
	list_for_each_entry_from(evsel, list, core.node)

/**
 * evlist__for_each_entry_from - continue iteration from @evsel (included)
 * @evlist: evlist instance to iterate
 * @evsel: struct evsel iterator
 */
#define evlist__for_each_entry_from(evlist, evsel) \
	__evlist__for_each_entry_from(&(evlist)->core.entries, evsel)

/**
 * __evlist__for_each_entry_reverse - iterate thru all the evsels in reverse order
 * @list: list_head instance to iterate
 * @evsel: struct evsel iterator
 */
#define __evlist__for_each_entry_reverse(list, evsel) \
	list_for_each_entry_reverse(evsel, list, core.node)

/**
 * evlist__for_each_entry_reverse - iterate thru all the evsels in reverse order
 * @evlist: evlist instance to iterate
 * @evsel: struct evsel iterator
 */
#define evlist__for_each_entry_reverse(evlist, evsel) \
	__evlist__for_each_entry_reverse(&(evlist)->core.entries, evsel)

/**
 * __evlist__for_each_entry_safe - safely iterate thru all the evsels
 * @list: list_head instance to iterate
 * @tmp: struct evsel temp iterator
 * @evsel: struct evsel iterator
 */
#define __evlist__for_each_entry_safe(list, tmp, evsel) \
	list_for_each_entry_safe(evsel, tmp, list, core.node)

/**
 * evlist__for_each_entry_safe - safely iterate thru all the evsels
 * @evlist: evlist instance to iterate
 * @evsel: struct evsel iterator
 * @tmp: struct evsel temp iterator
 */
#define evlist__for_each_entry_safe(evlist, tmp, evsel) \
	__evlist__for_each_entry_safe(&(evlist)->core.entries, tmp, evsel)

/** Iterator state for evlist__for_each_cpu */
struct evlist_cpu_iterator {
	/** The list being iterated through. */
	struct evlist *container;
	/** The current evsel of the iterator. */
	struct evsel *evsel;
	/** The CPU map index corresponding to the evsel->core.cpus for the current CPU. */
	int cpu_map_idx;
	/**
	 * The CPU map index corresponding to evlist->core.all_cpus for the
	 * current CPU. Distinct from cpu_map_idx as the evsel's cpu map may
	 * contain fewer entries.
	 */
	int evlist_cpu_map_idx;
	/** The number of CPU map entries in evlist->core.all_cpus. */
	int evlist_cpu_map_nr;
	/** The current CPU of the iterator. */
	struct perf_cpu cpu;
	/** If present, used to set the affinity when switching between CPUs. */
	struct affinity *affinity;
};

/**
 * evlist__for_each_cpu - without affinity, iterate over the evlist. With
 *                        affinity, iterate over all CPUs and then the evlist
 *                        for each evsel on that CPU. When switching between
 *                        CPUs the affinity is set to the CPU to avoid IPIs
 *                        during syscalls.
 * @evlist_cpu_itr: the iterator instance.
 * @evlist: evlist instance to iterate.
 * @affinity: NULL or used to set the affinity to the current CPU.
 */
#define evlist__for_each_cpu(evlist_cpu_itr, evlist, affinity) \
	for ((evlist_cpu_itr) = evlist__cpu_begin(evlist, affinity); \
	     !evlist_cpu_iterator__end(&evlist_cpu_itr); \
	     evlist_cpu_iterator__next(&evlist_cpu_itr))

/** Returns an iterator set to the first CPU/evsel of evlist. */
struct evlist_cpu_iterator evlist__cpu_begin(struct evlist *evlist, struct affinity *affinity);
/** Move to next element in iterator, updating CPU, evsel and the affinity. */
void evlist_cpu_iterator__next(struct evlist_cpu_iterator *evlist_cpu_itr);
/** Returns true when iterator is at the end of the CPUs and evlist. */
bool evlist_cpu_iterator__end(const struct evlist_cpu_iterator *evlist_cpu_itr);
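/*
 * Illustrative sketch (not part of this header), modelled on how per-CPU
 * operations are typically structured; affinity__setup()/affinity__cleanup()
 * come from util/affinity.h and do_something_on_cpu() is a placeholder:
 *
 *	struct evlist_cpu_iterator evlist_cpu_itr;
 *	struct affinity affinity;
 *
 *	if (affinity__setup(&affinity) < 0)
 *		return -1;
 *
 *	evlist__for_each_cpu(evlist_cpu_itr, evlist, &affinity) {
 *		struct evsel *evsel = evlist_cpu_itr.evsel;
 *
 *		do_something_on_cpu(evsel, evlist_cpu_itr.cpu_map_idx);
 *	}
 *	affinity__cleanup(&affinity);
 */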

struct evsel *evlist__get_tracking_event(struct evlist *evlist);
void evlist__set_tracking_event(struct evlist *evlist, struct evsel *tracking_evsel);
struct evsel *evlist__findnew_tracking_event(struct evlist *evlist, bool system_wide);

struct evsel *evlist__find_evsel_by_str(struct evlist *evlist, const char *str);

struct evsel *evlist__event2evsel(struct evlist *evlist, union perf_event *event);

bool evlist__exclude_kernel(struct evlist *evlist);

void evlist__force_leader(struct evlist *evlist);

struct evsel *evlist__reset_weak_group(struct evlist *evlist, struct evsel *evsel, bool close);

#define EVLIST_CTL_CMD_ENABLE_TAG   "enable"
#define EVLIST_CTL_CMD_DISABLE_TAG  "disable"
#define EVLIST_CTL_CMD_ACK_TAG      "ack\n"
#define EVLIST_CTL_CMD_SNAPSHOT_TAG "snapshot"
#define EVLIST_CTL_CMD_EVLIST_TAG   "evlist"
#define EVLIST_CTL_CMD_STOP_TAG     "stop"
#define EVLIST_CTL_CMD_PING_TAG     "ping"

#define EVLIST_CTL_CMD_MAX_LEN 64

enum evlist_ctl_cmd {
	EVLIST_CTL_CMD_UNSUPPORTED = 0,
	EVLIST_CTL_CMD_ENABLE,
	EVLIST_CTL_CMD_DISABLE,
	EVLIST_CTL_CMD_ACK,
	EVLIST_CTL_CMD_SNAPSHOT,
	EVLIST_CTL_CMD_EVLIST,
	EVLIST_CTL_CMD_STOP,
	EVLIST_CTL_CMD_PING,
};

int evlist__parse_control(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close);
void evlist__close_control(int ctl_fd, int ctl_fd_ack, bool *ctl_fd_close);
int evlist__initialize_ctlfd(struct evlist *evlist, int ctl_fd, int ctl_fd_ack);
int evlist__finalize_ctlfd(struct evlist *evlist);
bool evlist__ctlfd_initialized(struct evlist *evlist);
int evlist__ctlfd_process(struct evlist *evlist, enum evlist_ctl_cmd *cmd);
int evlist__ctlfd_ack(struct evlist *evlist);
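/*
 * Illustrative sketch (not part of this header), loosely following how the
 * control fd is handled in the perf tools; ctl_fd_str is a placeholder for
 * the user-supplied control specification string and error handling is
 * omitted:
 *
 *	int ctl_fd = -1, ctl_fd_ack = -1;
 *	bool ctl_fd_close = false;
 *	enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;
 *
 *	evlist__parse_control(ctl_fd_str, &ctl_fd, &ctl_fd_ack, &ctl_fd_close);
 *	evlist__initialize_ctlfd(evlist, ctl_fd, ctl_fd_ack);
 *
 *	// in the event loop, after evlist__poll():
 *	if (evlist__ctlfd_process(evlist, &cmd) > 0 &&
 *	    cmd == EVLIST_CTL_CMD_ENABLE)
 *		evlist__enable(evlist);
 *
 *	evlist__finalize_ctlfd(evlist);
 *	evlist__close_control(ctl_fd, ctl_fd_ack, &ctl_fd_close);
 */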

#define EVLIST_ENABLED_MSG "Events enabled\n"
#define EVLIST_DISABLED_MSG "Events disabled\n"

int evlist__parse_event_enable_time(struct evlist *evlist, struct record_opts *opts,
				    const char *str, int unset);
int event_enable_timer__start(struct event_enable_timer *eet);
void event_enable_timer__exit(struct event_enable_timer **ep);
int event_enable_timer__process(struct event_enable_timer *eet);
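/*
 * Illustrative sketch (not part of this header), showing only a plausible
 * call order for the declarations above; delay_str is a placeholder for the
 * user-supplied enable-time specification:
 *
 *	evlist__parse_event_enable_time(evlist, opts, delay_str, 0); // may set evlist->eet
 *	...
 *	event_enable_timer__start(evlist->eet);
 *	// periodically, from the main loop:
 *	event_enable_timer__process(evlist->eet);
 *	...
 *	event_enable_timer__exit(&evlist->eet);
 */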

struct evsel *evlist__find_evsel(struct evlist *evlist, int idx);

int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf);
void evlist__check_mem_load_aux(struct evlist *evlist);
void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_list);
void evlist__uniquify_name(struct evlist *evlist);
bool evlist__has_bpf_output(struct evlist *evlist);

#endif /* __PERF_EVLIST_H */