/* Histogram support for perf report/top (tools/perf/util/hist.h). */
1#ifndef __PERF_HIST_H
2#define __PERF_HIST_H
3
4#include <linux/types.h>
5#include <pthread.h>
6#include "callchain.h"
7#include "header.h"
8#include "color.h"
9#include "ui/progress.h"
10
11extern struct callchain_param callchain_param;
12
13struct hist_entry;
14struct addr_location;
15struct symbol;
16
/*
 * Reasons a hist_entry may be hidden from the output.  The values are
 * used as bit positions in a per-entry filter mask (NOTE(review): the
 * mask field lives in struct hist_entry, defined elsewhere — confirm
 * against sort.h).
 */
enum hist_filter {
	HIST_FILTER__DSO,	/* see hists__filter_by_dso() below */
	HIST_FILTER__THREAD,	/* see hists__filter_by_thread() below */
	HIST_FILTER__PARENT,
	HIST_FILTER__SYMBOL,	/* see hists__filter_by_symbol() below */
	HIST_FILTER__GUEST,
	HIST_FILTER__HOST,
};
25
26/*
27 * The kernel collects the number of events it couldn't send in a stretch and
28 * when possible sends this number in a PERF_RECORD_LOST event. The number of
29 * such "chunks" of lost events is stored in .nr_events[PERF_EVENT_LOST] while
30 * total_lost tells exactly how many events the kernel in fact lost, i.e. it is
31 * the sum of all struct lost_event.lost fields reported.
32 *
33 * The total_period is needed because by default auto-freq is used, so
 * multiplying nr_events[PERF_EVENT_SAMPLE] by a frequency isn't possible to get
 * the total number of low level events, it is necessary to sum all struct
36 * sample_event.period and stash the result in total_period.
37 */
/*
 * Aggregate event statistics; see the comment above for the exact
 * semantics of total_period and total_lost.  Updated via
 * events_stats__inc()/hists__inc_nr_events() and printed by
 * events_stats__fprintf() (all declared below).
 */
struct events_stats {
	u64 total_period;	/* sum of all sample_event.period fields */
	u64 total_lost;		/* sum of all lost_event.lost fields */
	u64 total_invalid_chains;	/* presumably period attributed to samples with broken callchains — confirm in hist.c */
	u32 nr_events[PERF_RECORD_HEADER_MAX];	/* count per PERF_RECORD_* type */
	u32 nr_lost_warned;	/* NOTE(review): looks like a "warning already issued" latch — confirm */
	u32 nr_unknown_events;
	u32 nr_invalid_chains;
	u32 nr_unknown_id;
	u32 nr_unprocessable_samples;
};
49
/*
 * One value per displayable column.  Used to index hists->col_len[],
 * which tracks the widest string seen in each column (see
 * hists__col_len() and friends below).
 */
enum hist_column {
	HISTC_SYMBOL,
	HISTC_DSO,
	HISTC_THREAD,
	HISTC_COMM,
	HISTC_PARENT,
	HISTC_CPU,
	HISTC_SRCLINE,
	HISTC_MISPREDICT,
	HISTC_IN_TX,
	HISTC_ABORT,
	/* branch-stack columns (from/to sides of a branch sample) */
	HISTC_SYMBOL_FROM,
	HISTC_SYMBOL_TO,
	HISTC_DSO_FROM,
	HISTC_DSO_TO,
	HISTC_LOCAL_WEIGHT,
	HISTC_GLOBAL_WEIGHT,
	/* memory-sample columns (data address, TLB/level/snoop info) */
	HISTC_MEM_DADDR_SYMBOL,
	HISTC_MEM_DADDR_DSO,
	HISTC_MEM_LOCKED,
	HISTC_MEM_TLB,
	HISTC_MEM_LVL,
	HISTC_MEM_SNOOP,
	HISTC_TRANSACTION,
	HISTC_NR_COLS, /* Last entry */
};
76
77struct thread;
78struct dso;
79
struct hists {
	/*
	 * Incoming entries land in *entries_in, one of the two trees in
	 * entries_in_array (NOTE(review): presumably double-buffered so
	 * collapsing can run against one tree while new samples go into
	 * the other — confirm against hist.c).
	 */
	struct rb_root entries_in_array[2];
	struct rb_root *entries_in;
	struct rb_root entries;		/* display tree; rebuilt by hists__output_resort() */
	struct rb_root entries_collapsed; /* merged tree; built by hists__collapse_resort() */
	u64 nr_entries;
	/* Active display filters; see hists__filter_by_*() below. */
	const struct thread *thread_filter;
	const struct dso *dso_filter;
	const char *uid_filter_str;
	const char *symbol_filter_str;
	pthread_mutex_t lock;		/* NOTE(review): protected scope not visible in this header — confirm */
	struct events_stats stats;
	u64 event_stream;
	u16 col_len[HISTC_NR_COLS];	/* widest string seen per column */
};
95
/*
 * Find-or-create the hist_entry for @al, accumulating period/weight;
 * bi/mi attach branch-stack or memory-sample details when present
 * (either may be NULL).
 */
struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *parent,
				      struct branch_info *bi,
				      struct mem_info *mi, u64 period,
				      u64 weight, u64 transaction);
/* Orderings: __cmp for the output sort, __collapse for merging duplicates. */
int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right);
int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right);
int hist_entry__transaction_len(void);
int hist_entry__sort_snprintf(struct hist_entry *he, char *bf, size_t size,
			      struct hists *hists);
void hist_entry__free(struct hist_entry *);

/* Rebuild hists->entries for display / hists->entries_collapsed (prog,
 * when non-NULL, presumably drives a UI progress bar — confirm). */
void hists__output_resort(struct hists *hists);
void hists__collapse_resort(struct hists *hists, struct ui_progress *prog);

/* Age out entries over time; zap_* drop user/kernel entries outright
 * (NOTE(review): decay semantics live in hist.c). */
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel);
void hists__output_recalc_col_len(struct hists *hists, int max_rows);

/* Statistics bookkeeping (see struct events_stats above). */
void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h);
void hists__inc_nr_events(struct hists *hists, u32 type);
void events_stats__inc(struct events_stats *stats, u32 type);
size_t events_stats__fprintf(struct events_stats *stats, FILE *fp);

/* Text (stdio) output of the whole histogram; min_pcnt presumably sets
 * the minimum overhead an entry needs to be printed — confirm. */
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
		      int max_cols, float min_pcnt, FILE *fp);

/* Re-apply the corresponding *_filter fields of struct hists. */
void hists__filter_by_dso(struct hists *hists);
void hists__filter_by_thread(struct hists *hists);
void hists__filter_by_symbol(struct hists *hists);

/* Per-column width tracking, indexed by enum hist_column. */
u16 hists__col_len(struct hists *hists, enum hist_column col);
void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len);
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len);
void hists__reset_col_len(struct hists *hists);
void hists__calc_col_len(struct hists *hists, struct hist_entry *he);

/* Pair up / link matching entries between two hists (NOTE(review):
 * used for cross-event comparisons — confirm callers). */
void hists__match(struct hists *leader, struct hists *other);
int hists__link(struct hists *leader, struct hists *other);
135
/*
 * Output cursor handed to perf_hpp_fmt callbacks: formatted column text
 * is written at buf, which has size bytes left; advance_hpp() moves the
 * cursor after each write.
 */
struct perf_hpp {
	char *buf;		/* current write position */
	size_t size;		/* bytes remaining at buf */
	const char *sep;	/* field separator (NOTE(review): presumably mirrors symbol_conf.field_sep — confirm) */
	void *ptr;		/* callback-private data */
};
142
/*
 * One output column: callbacks to emit its header line, report its
 * width, and print a single entry's cell (color = colored variant,
 * entry = plain; inferred from the callback names).  Registered
 * columns are linked through .list into perf_hpp__list.
 */
struct perf_hpp_fmt {
	int (*header)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		      struct perf_evsel *evsel);
	int (*width)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		     struct perf_evsel *evsel);
	int (*color)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		     struct hist_entry *he);
	int (*entry)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		     struct hist_entry *he);

	struct list_head list;	/* node on perf_hpp__list */
};
155
/* Global, ordered list of enabled output columns (struct perf_hpp_fmt). */
extern struct list_head perf_hpp__list;

/* Iterate over every enabled column format, in list (output) order. */
#define perf_hpp__for_each_format(format) \
	list_for_each_entry(format, &perf_hpp__list, list)

/* Built-in column formats, indexed by the PERF_HPP__* enum. */
extern struct perf_hpp_fmt perf_hpp__format[];
162
/*
 * Indexes of the built-in columns in perf_hpp__format[]; pass one of
 * these to perf_hpp__column_enable().
 */
enum {
	/* Matches perf_hpp__format array. */
	PERF_HPP__OVERHEAD,
	PERF_HPP__OVERHEAD_SYS,
	PERF_HPP__OVERHEAD_US,
	PERF_HPP__OVERHEAD_GUEST_SYS,
	PERF_HPP__OVERHEAD_GUEST_US,
	PERF_HPP__SAMPLES,
	PERF_HPP__PERIOD,

	PERF_HPP__MAX_INDEX	/* not a column; array size / iteration bound */
};
175
void perf_hpp__init(void);
void perf_hpp__column_register(struct perf_hpp_fmt *format);
void perf_hpp__column_enable(unsigned col);	/* col: a PERF_HPP__* index */

/* Extracts the numeric value to print from an entry. */
typedef u64 (*hpp_field_fn)(struct hist_entry *he);
/* Hook around value printing; NOTE(review): 'front' presumably
 * distinguishes before/after the value — confirm against hist.c. */
typedef int (*hpp_callback_fn)(struct perf_hpp *hpp, bool front);
/* printf-style writer into hpp->buf (plain or colored variant). */
typedef int (*hpp_snprint_fn)(struct perf_hpp *hpp, const char *fmt, ...);

/*
 * Common helper for perf_hpp_fmt implementations: fetches the value via
 * get_field and prints it with print_fn using format string fmt,
 * presumably as a percentage when fmt_percent is true — confirm.
 */
int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
	       hpp_field_fn get_field, hpp_callback_fn callback,
	       const char *fmt, hpp_snprint_fn print_fn, bool fmt_percent);
187
188static inline void advance_hpp(struct perf_hpp *hpp, int inc)
189{
190 hpp->buf += inc;
191 hpp->size -= inc;
192}
193
194static inline size_t perf_hpp__use_color(void)
195{
196 return !symbol_conf.field_sep;
197}
198
199static inline size_t perf_hpp__color_overhead(void)
200{
201 return perf_hpp__use_color() ?
202 (COLOR_MAXLEN + sizeof(PERF_COLOR_RESET)) * PERF_HPP__MAX_INDEX
203 : 0;
204}
205
206struct perf_evlist;
207
/*
 * Periodic-refresh hook for the browser UIs: timer(arg) is the
 * callback, refresh its interval (NOTE(review): units not visible in
 * this header — presumably seconds, confirm against the browsers).
 */
struct hist_browser_timer {
	void (*timer)(void *arg);
	void *arg;
	int refresh;
};
213
#ifdef HAVE_SLANG_SUPPORT
#include "../ui/keysyms.h"
int hist_entry__tui_annotate(struct hist_entry *he, struct perf_evsel *evsel,
			     struct hist_browser_timer *hbt);

int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
				  struct hist_browser_timer *hbt,
				  float min_pcnt,
				  struct perf_session_env *env);
int script_browse(const char *script_opt);
#else
/*
 * Slang TUI compiled out: provide no-op stubs returning 0 so callers
 * need no HAVE_SLANG_SUPPORT conditionals of their own.
 */
static inline
int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __maybe_unused,
				  const char *help __maybe_unused,
				  struct hist_browser_timer *hbt __maybe_unused,
				  float min_pcnt __maybe_unused,
				  struct perf_session_env *env __maybe_unused)
{
	return 0;
}

static inline int hist_entry__tui_annotate(struct hist_entry *he __maybe_unused,
					   struct perf_evsel *evsel __maybe_unused,
					   struct hist_browser_timer *hbt __maybe_unused)
{
	return 0;
}

static inline int script_browse(const char *script_opt __maybe_unused)
{
	return 0;
}

/* Stand-ins for the key codes normally supplied by ui/keysyms.h; the
 * values only need to be distinct and outside the real key range. */
#define K_LEFT -1000
#define K_RIGHT -2000
#define K_SWITCH_INPUT_DATA -3000
#endif
251
252unsigned int hists__sort_list_width(struct hists *hists);
253#endif /* __PERF_HIST_H */
1#ifndef __PERF_HIST_H
2#define __PERF_HIST_H
3
4#include <linux/types.h>
5#include <pthread.h>
6#include "callchain.h"
7
8extern struct callchain_param callchain_param;
9
10struct hist_entry;
11struct addr_location;
12struct symbol;
13
14/*
15 * The kernel collects the number of events it couldn't send in a stretch and
16 * when possible sends this number in a PERF_RECORD_LOST event. The number of
17 * such "chunks" of lost events is stored in .nr_events[PERF_EVENT_LOST] while
18 * total_lost tells exactly how many events the kernel in fact lost, i.e. it is
19 * the sum of all struct lost_event.lost fields reported.
20 *
21 * The total_period is needed because by default auto-freq is used, so
 * multiplying nr_events[PERF_EVENT_SAMPLE] by a frequency isn't possible to get
 * the total number of low level events, it is necessary to sum all struct
24 * sample_event.period and stash the result in total_period.
25 */
/*
 * NOTE(review): older duplicate of struct events_stats — the include
 * guard defined at the top of the file makes this whole second copy of
 * the header preprocess away.  Field semantics are described in the
 * comment block just above.
 */
struct events_stats {
	u64 total_period;	/* sum of all sample_event.period fields */
	u64 total_lost;		/* sum of all lost_event.lost fields */
	u64 total_invalid_chains;
	u32 nr_events[PERF_RECORD_HEADER_MAX];	/* count per PERF_RECORD_* type */
	u32 nr_lost_warned;
	u32 nr_unknown_events;
	u32 nr_invalid_chains;
	u32 nr_unknown_id;
	u32 nr_unprocessable_samples;
};
37
/*
 * NOTE(review): older duplicate of enum hist_column (dead code — the
 * include guard at the top of the file excludes this second copy).
 * Indexes hists->col_len[] for per-column width tracking.
 */
enum hist_column {
	HISTC_SYMBOL,
	HISTC_DSO,
	HISTC_THREAD,
	HISTC_COMM,
	HISTC_PARENT,
	HISTC_CPU,
	HISTC_MISPREDICT,
	HISTC_SYMBOL_FROM,
	HISTC_SYMBOL_TO,
	HISTC_DSO_FROM,
	HISTC_DSO_TO,
	HISTC_NR_COLS, /* Last entry */
};
52
53struct thread;
54struct dso;
55
/*
 * NOTE(review): older duplicate of struct hists — the include guard at
 * the top of the file compiles this second copy out.
 */
struct hists {
	struct rb_root entries_in_array[2];
	struct rb_root *entries_in;	/* active insertion tree */
	struct rb_root entries;		/* display tree; see hists__output_resort() */
	struct rb_root entries_collapsed;
	u64 nr_entries;
	/* Active display filters; see hists__filter_by_*(). */
	const struct thread *thread_filter;
	const struct dso *dso_filter;
	const char *uid_filter_str;
	const char *symbol_filter_str;
	pthread_mutex_t lock;
	struct events_stats stats;
	u64 event_stream;
	u16 col_len[HISTC_NR_COLS];	/* widest string seen per column */
};
71
72struct hist_entry *__hists__add_entry(struct hists *self,
73 struct addr_location *al,
74 struct symbol *parent, u64 period);
75int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right);
76int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right);
77int hist_entry__snprintf(struct hist_entry *self, char *bf, size_t size,
78 struct hists *hists);
79void hist_entry__free(struct hist_entry *);
80
81struct hist_entry *__hists__add_branch_entry(struct hists *self,
82 struct addr_location *al,
83 struct symbol *sym_parent,
84 struct branch_info *bi,
85 u64 period);
86
87void hists__output_resort(struct hists *self);
88void hists__output_resort_threaded(struct hists *hists);
89void hists__collapse_resort(struct hists *self);
90void hists__collapse_resort_threaded(struct hists *hists);
91
92void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel);
93void hists__decay_entries_threaded(struct hists *hists, bool zap_user,
94 bool zap_kernel);
95void hists__output_recalc_col_len(struct hists *hists, int max_rows);
96
97void hists__inc_nr_events(struct hists *self, u32 type);
98size_t hists__fprintf_nr_events(struct hists *self, FILE *fp);
99
100size_t hists__fprintf(struct hists *self, struct hists *pair,
101 bool show_displacement, bool show_header,
102 int max_rows, int max_cols, FILE *fp);
103
104int hist_entry__inc_addr_samples(struct hist_entry *self, int evidx, u64 addr);
105int hist_entry__annotate(struct hist_entry *self, size_t privsize);
106
107void hists__filter_by_dso(struct hists *hists);
108void hists__filter_by_thread(struct hists *hists);
109void hists__filter_by_symbol(struct hists *hists);
110
111u16 hists__col_len(struct hists *self, enum hist_column col);
112void hists__set_col_len(struct hists *self, enum hist_column col, u16 len);
113bool hists__new_col_len(struct hists *self, enum hist_column col, u16 len);
114
115struct perf_evlist;
116
/*
 * NOTE(review): part of the older duplicate copy of this header; the
 * include guard defined at the top of the file compiles it out.
 */
#ifdef NO_NEWT_SUPPORT
/* TUI compiled out: no-op stubs so callers need no conditionals. */
static inline
int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __used,
				  const char *help __used,
				  void(*timer)(void *arg) __used,
				  void *arg __used,
				  int refresh __used)
{
	return 0;
}

static inline int hist_entry__tui_annotate(struct hist_entry *self __used,
					   int evidx __used,
					   void(*timer)(void *arg) __used,
					   void *arg __used,
					   int delay_secs __used)
{
	return 0;
}
/* Stand-ins for the key codes normally supplied by ui/keysyms.h. */
#define K_LEFT -1
#define K_RIGHT -2
#else
#include "../ui/keysyms.h"
int hist_entry__tui_annotate(struct hist_entry *he, int evidx,
			     void(*timer)(void *arg), void *arg, int delay_secs);

int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
				  void(*timer)(void *arg), void *arg,
				  int refresh);
#endif
147
/*
 * NOTE(review): older duplicate section, compiled out by the include
 * guard at the top of the file.  GTK2 report browser entry point,
 * stubbed to a 0-returning no-op when GTK2 support is absent.
 */
#ifdef NO_GTK2_SUPPORT
static inline
int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist __used,
				  const char *help __used,
				  void(*timer)(void *arg) __used,
				  void *arg __used,
				  int refresh __used)
{
	return 0;
}

#else
int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, const char *help,
				  void(*timer)(void *arg), void *arg,
				  int refresh);
#endif
164
165unsigned int hists__sort_list_width(struct hists *self);
166
167#endif /* __PERF_HIST_H */