// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <math.h>
#include "stat.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"

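/*
 * Running mean/variance bookkeeping using Welford's online algorithm:
 * each sample updates the mean and M2 (the sum of squared deltas from
 * the current mean) in O(1), without storing the samples themselves.
 */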
void update_stats(struct stats *stats, u64 val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;
	stats->M2 += delta*(val - stats->mean);

	if (val > stats->max)
		stats->max = val;

	if (val < stats->min)
		stats->min = val;
}

double avg_stats(struct stats *stats)
{
	return stats->mean;
}

/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 *
 */
double stddev_stats(struct stats *stats)
{
	double variance, variance_mean;

	if (stats->n < 2)
		return 0.0;

	variance = stats->M2 / (stats->n - 1);
	variance_mean = variance / stats->n;

	return sqrt(variance_mean);
}
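
/*
 * Example: samples 10, 20 and 30 give mean = 20 and M2 = 200, so the
 * variance is 200 / 2 = 100 and the std dev of the mean is
 * sqrt(100 / 3) ~= 5.77, i.e. a relative std dev of ~28.9%.
 */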

double rel_stddev_stats(double stddev, double avg)
{
	double pct = 0.0;

	if (avg)
		pct = 100.0 * stddev/avg;

	return pct;
}

bool __perf_evsel_stat__is(struct perf_evsel *evsel,
			   enum perf_stat_evsel_id id)
{
	struct perf_stat_evsel *ps = evsel->stats;

	return ps->id == id;
}

#define ID(id, name) [PERF_STAT_EVSEL_ID__##id] = #name
static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
	ID(NONE, x),
	ID(CYCLES_IN_TX, cpu/cycles-t/),
	ID(TRANSACTION_START, cpu/tx-start/),
	ID(ELISION_START, cpu/el-start/),
	ID(CYCLES_IN_TX_CP, cpu/cycles-ct/),
	ID(TOPDOWN_TOTAL_SLOTS, topdown-total-slots),
	ID(TOPDOWN_SLOTS_ISSUED, topdown-slots-issued),
	ID(TOPDOWN_SLOTS_RETIRED, topdown-slots-retired),
	ID(TOPDOWN_FETCH_BUBBLES, topdown-fetch-bubbles),
	ID(TOPDOWN_RECOVERY_BUBBLES, topdown-recovery-bubbles),
	ID(SMI_NUM, msr/smi/),
	ID(APERF, msr/aperf/),
};
#undef ID

static void perf_stat_evsel_id_init(struct perf_evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;
	int i;

	/* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */

	for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) {
		if (!strcmp(perf_evsel__name(evsel), id_str[i])) {
			ps->id = i;
			break;
		}
	}
}

static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
{
	int i;
	struct perf_stat_evsel *ps = evsel->stats;

	for (i = 0; i < 3; i++)
		init_stats(&ps->res_stats[i]);

	perf_stat_evsel_id_init(evsel);
}

static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
{
	evsel->stats = zalloc(sizeof(struct perf_stat_evsel));
	if (evsel->stats == NULL)
		return -ENOMEM;
	perf_evsel__reset_stat_priv(evsel);
	return 0;
}

static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;

	if (ps)
		free(ps->group_data);
	zfree(&evsel->stats);
}

static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel,
					     int ncpus, int nthreads)
{
	struct perf_counts *counts;

	counts = perf_counts__new(ncpus, nthreads);
	if (counts)
		evsel->prev_raw_counts = counts;

	return counts ? 0 : -ENOMEM;
}

static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
{
	perf_counts__delete(evsel->prev_raw_counts);
	evsel->prev_raw_counts = NULL;
}

static int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
{
	int ncpus = perf_evsel__nr_cpus(evsel);
	int nthreads = thread_map__nr(evsel->threads);

	if (perf_evsel__alloc_stat_priv(evsel) < 0 ||
	    perf_evsel__alloc_counts(evsel, ncpus, nthreads) < 0 ||
	    (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0))
		return -ENOMEM;

	return 0;
}

int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (perf_evsel__alloc_stats(evsel, alloc_raw))
			goto out_free;
	}

	return 0;

out_free:
	perf_evlist__free_stats(evlist);
	return -1;
}

void perf_evlist__free_stats(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		perf_evsel__free_stat_priv(evsel);
		perf_evsel__free_counts(evsel);
		perf_evsel__free_prev_raw_counts(evsel);
	}
}

void perf_evlist__reset_stats(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		perf_evsel__reset_stat_priv(evsel);
		perf_evsel__reset_counts(evsel);
	}
}

static void zero_per_pkg(struct perf_evsel *counter)
{
	if (counter->per_pkg_mask)
		memset(counter->per_pkg_mask, 0, MAX_NR_CPUS);
}

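/*
 * Per-package events count once per package: only the first CPU of a
 * package that actually ran the event contributes a value, the
 * remaining CPUs of that package are skipped.
 */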
static int check_per_pkg(struct perf_evsel *counter,
			 struct perf_counts_values *vals, int cpu, bool *skip)
{
	unsigned long *mask = counter->per_pkg_mask;
	struct cpu_map *cpus = perf_evsel__cpus(counter);
	int s;

	*skip = false;

	if (!counter->per_pkg)
		return 0;

	if (cpu_map__empty(cpus))
		return 0;

	if (!mask) {
		mask = zalloc(MAX_NR_CPUS);
		if (!mask)
			return -ENOMEM;

		counter->per_pkg_mask = mask;
	}

	/*
	 * We do not consider an event that has not run to be a good
	 * instance to mark a package as used (skip=1). Otherwise we may
	 * run into a situation where the first CPU in a package is not
	 * running anything, yet the second is, and this function would
	 * mark the package as used after the first CPU and would not
	 * read the values from the second CPU.
	 */
	if (!(vals->run && vals->ena))
		return 0;

	s = cpu_map__get_socket(cpus, cpu, NULL);
	if (s < 0)
		return -1;

	*skip = test_and_set_bit(s, mask) == 1;
	return 0;
}

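/*
 * Fold one (cpu, thread) counter value into the evsel's stats
 * according to the aggregation mode; AGGR_GLOBAL accumulates
 * everything into the single evsel->counts->aggr entry.
 */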
static int
process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel,
		       int cpu, int thread,
		       struct perf_counts_values *count)
{
	struct perf_counts_values *aggr = &evsel->counts->aggr;
	static struct perf_counts_values zero;
	bool skip = false;

	if (check_per_pkg(evsel, count, cpu, &skip)) {
		pr_err("failed to read per-pkg counter\n");
		return -1;
	}

	if (skip)
		count = &zero;

	switch (config->aggr_mode) {
	case AGGR_THREAD:
	case AGGR_CORE:
	case AGGR_SOCKET:
	case AGGR_NONE:
		if (!evsel->snapshot)
			perf_evsel__compute_deltas(evsel, cpu, thread, count);
		perf_counts_values__scale(count, config->scale, NULL);
		if (config->aggr_mode == AGGR_NONE)
			perf_stat__update_shadow_stats(evsel, count->val, cpu,
						       &rt_stat);
		if (config->aggr_mode == AGGR_THREAD) {
			if (config->stats)
				perf_stat__update_shadow_stats(evsel,
					count->val, 0, &config->stats[thread]);
			else
				perf_stat__update_shadow_stats(evsel,
					count->val, 0, &rt_stat);
		}
		break;
	case AGGR_GLOBAL:
		aggr->val += count->val;
		if (config->scale) {
			aggr->ena += count->ena;
			aggr->run += count->run;
		}
		break;
	case AGGR_UNSET:
	default:
		break;
	}

	return 0;
}

static int process_counter_maps(struct perf_stat_config *config,
				struct perf_evsel *counter)
{
	int nthreads = thread_map__nr(counter->threads);
	int ncpus = perf_evsel__nr_cpus(counter);
	int cpu, thread;

	if (counter->system_wide)
		nthreads = 1;

	for (thread = 0; thread < nthreads; thread++) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			if (process_counter_values(config, counter, cpu, thread,
						   perf_counts(counter->counts, cpu, thread)))
				return -1;
		}
	}

	return 0;
}

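/*
 * Aggregate the per-cpu/per-thread values of one counter after a
 * read; for AGGR_GLOBAL this also scales the total and feeds the
 * running res_stats used for the printed average.
 */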
int perf_stat_process_counter(struct perf_stat_config *config,
			      struct perf_evsel *counter)
{
	struct perf_counts_values *aggr = &counter->counts->aggr;
	struct perf_stat_evsel *ps = counter->stats;
	u64 *count = counter->counts->aggr.values;
	int i, ret;

	aggr->val = aggr->ena = aggr->run = 0;

	/*
	 * The counter's data is recomputed every interval, and the
	 * display code shows the average of ps->res_stats. The stats
	 * must be zeroed in interval mode, otherwise each interval
	 * would show a running average over all previous intervals.
	 */
	if (config->interval) {
		for (i = 0; i < 3; i++)
			init_stats(&ps->res_stats[i]);
	}

	if (counter->per_pkg)
		zero_per_pkg(counter);

	ret = process_counter_maps(config, counter);
	if (ret)
		return ret;

	if (config->aggr_mode != AGGR_GLOBAL)
		return 0;

	if (!counter->snapshot)
		perf_evsel__compute_deltas(counter, -1, -1, aggr);
	perf_counts_values__scale(aggr, config->scale, &counter->counts->scaled);

	for (i = 0; i < 3; i++)
		update_stats(&ps->res_stats[i], count[i]);

	if (verbose > 0) {
		fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			perf_evsel__name(counter), count[0], count[1], count[2]);
	}

	/*
	 * Save the full run time, to allow normalization during printout:
	 */
	perf_stat__update_shadow_stats(counter, *count, 0, &rt_stat);

	return 0;
}

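/*
 * The handlers below consume stat records from a perf session (as
 * written by 'perf stat record') and dump them in readable form.
 */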
int perf_event__process_stat_event(struct perf_tool *tool __maybe_unused,
				   union perf_event *event,
				   struct perf_session *session)
{
	struct perf_counts_values count;
	struct stat_event *st = &event->stat;
	struct perf_evsel *counter;

	count.val = st->val;
	count.ena = st->ena;
	count.run = st->run;

	counter = perf_evlist__id2evsel(session->evlist, st->id);
	if (!counter) {
		pr_err("Failed to resolve counter for stat event.\n");
		return -EINVAL;
	}

	*perf_counts(counter->counts, st->cpu, st->thread) = count;
	counter->supported = true;
	return 0;
}

size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp)
{
	struct stat_event *st = (struct stat_event *)event;
	size_t ret;

	ret = fprintf(fp, "\n... id %" PRIu64 ", cpu %d, thread %d\n",
		      st->id, st->cpu, st->thread);
	ret += fprintf(fp, "... value %" PRIu64 ", enabled %" PRIu64 ", running %" PRIu64 "\n",
		       st->val, st->ena, st->run);

	return ret;
}

size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp)
{
	struct stat_round_event *rd = (struct stat_round_event *)event;
	size_t ret;

	ret = fprintf(fp, "\n... time %" PRIu64 ", type %s\n", rd->time,
		      rd->type == PERF_STAT_ROUND_TYPE__FINAL ? "FINAL" : "INTERVAL");

	return ret;
}

size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
{
	struct perf_stat_config sc;
	size_t ret;

	perf_event__read_stat_config(&sc, &event->stat_config);

	ret = fprintf(fp, "\n");
	ret += fprintf(fp, "... aggr_mode %d\n", sc.aggr_mode);
	ret += fprintf(fp, "... scale %d\n", sc.scale);
	ret += fprintf(fp, "... interval %u\n", sc.interval);

	return ret;
}

// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <linux/err.h>
#include <inttypes.h>
#include <math.h>
#include <string.h>
#include "counts.h"
#include "cpumap.h"
#include "debug.h"
#include "header.h"
#include "stat.h"
#include "session.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "util/hashmap.h"
#include <linux/zalloc.h>

void update_stats(struct stats *stats, u64 val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;
	stats->M2 += delta*(val - stats->mean);

	if (val > stats->max)
		stats->max = val;

	if (val < stats->min)
		stats->min = val;
}

double avg_stats(struct stats *stats)
{
	return stats->mean;
}

/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 *
 */
double stddev_stats(struct stats *stats)
{
	double variance, variance_mean;

	if (stats->n < 2)
		return 0.0;

	variance = stats->M2 / (stats->n - 1);
	variance_mean = variance / stats->n;

	return sqrt(variance_mean);
}

double rel_stddev_stats(double stddev, double avg)
{
	double pct = 0.0;

	if (avg)
		pct = 100.0 * stddev/avg;

	return pct;
}
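
/*
 * Typical usage of the stats helpers above (a sketch, not lifted from
 * a real caller):
 *
 *	struct stats st;
 *
 *	init_stats(&st);
 *	update_stats(&st, val);	// once per measurement
 *	printf("%f +- %f\n", avg_stats(&st), stddev_stats(&st));
 */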

static void evsel__reset_aggr_stats(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;
	struct perf_stat_aggr *aggr = ps->aggr;

	if (aggr)
		memset(aggr, 0, sizeof(*aggr) * ps->nr_aggr);
}

static void evsel__reset_stat_priv(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;

	init_stats(&ps->res_stats);
	evsel__reset_aggr_stats(evsel);
}

static int evsel__alloc_aggr_stats(struct evsel *evsel, int nr_aggr)
{
	struct perf_stat_evsel *ps = evsel->stats;

	if (ps == NULL)
		return 0;

	ps->nr_aggr = nr_aggr;
	ps->aggr = calloc(nr_aggr, sizeof(*ps->aggr));
	if (ps->aggr == NULL)
		return -ENOMEM;

	return 0;
}

int evlist__alloc_aggr_stats(struct evlist *evlist, int nr_aggr)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__alloc_aggr_stats(evsel, nr_aggr) < 0)
			return -1;
	}
	return 0;
}

static int evsel__alloc_stat_priv(struct evsel *evsel, int nr_aggr)
{
	struct perf_stat_evsel *ps;

	ps = zalloc(sizeof(*ps));
	if (ps == NULL)
		return -ENOMEM;

	evsel->stats = ps;

	if (nr_aggr && evsel__alloc_aggr_stats(evsel, nr_aggr) < 0) {
		evsel->stats = NULL;
		free(ps);
		return -ENOMEM;
	}

	evsel__reset_stat_priv(evsel);
	return 0;
}

static void evsel__free_stat_priv(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;

	if (ps) {
		zfree(&ps->aggr);
		zfree(&ps->group_data);
	}
	zfree(&evsel->stats);
}

static int evsel__alloc_prev_raw_counts(struct evsel *evsel)
{
	int cpu_map_nr = evsel__nr_cpus(evsel);
	int nthreads = perf_thread_map__nr(evsel->core.threads);
	struct perf_counts *counts;

	counts = perf_counts__new(cpu_map_nr, nthreads);
	if (counts)
		evsel->prev_raw_counts = counts;

	return counts ? 0 : -ENOMEM;
}

static void evsel__free_prev_raw_counts(struct evsel *evsel)
{
	perf_counts__delete(evsel->prev_raw_counts);
	evsel->prev_raw_counts = NULL;
}

static void evsel__reset_prev_raw_counts(struct evsel *evsel)
{
	if (evsel->prev_raw_counts)
		perf_counts__reset(evsel->prev_raw_counts);
}

static int evsel__alloc_stats(struct evsel *evsel, int nr_aggr, bool alloc_raw)
{
	if (evsel__alloc_stat_priv(evsel, nr_aggr) < 0 ||
	    evsel__alloc_counts(evsel) < 0 ||
	    (alloc_raw && evsel__alloc_prev_raw_counts(evsel) < 0))
		return -ENOMEM;

	return 0;
}

int evlist__alloc_stats(struct perf_stat_config *config,
			struct evlist *evlist, bool alloc_raw)
{
	struct evsel *evsel;
	int nr_aggr = 0;

	if (config && config->aggr_map)
		nr_aggr = config->aggr_map->nr;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__alloc_stats(evsel, nr_aggr, alloc_raw))
			goto out_free;
	}

	return 0;

out_free:
	evlist__free_stats(evlist);
	return -1;
}

void evlist__free_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		evsel__free_stat_priv(evsel);
		evsel__free_counts(evsel);
		evsel__free_prev_raw_counts(evsel);
	}
}

void evlist__reset_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		evsel__reset_stat_priv(evsel);
		evsel__reset_counts(evsel);
	}
}

void evlist__reset_aggr_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__reset_aggr_stats(evsel);
}

void evlist__reset_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__reset_prev_raw_counts(evsel);
}

static void evsel__copy_prev_raw_counts(struct evsel *evsel)
{
	int idx, nthreads = perf_thread_map__nr(evsel->core.threads);

	for (int thread = 0; thread < nthreads; thread++) {
		perf_cpu_map__for_each_idx(idx, evsel__cpus(evsel)) {
			*perf_counts(evsel->counts, idx, thread) =
				*perf_counts(evsel->prev_raw_counts, idx, thread);
		}
	}
}

void evlist__copy_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__copy_prev_raw_counts(evsel);
}

static void evsel__copy_res_stats(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;

	/*
	 * In GLOBAL aggregation mode, the counts for each run are
	 * accumulated in evsel->stats.res_stats; see
	 * perf_stat_process_counter().
	 */
	*ps->aggr[0].counts.values = avg_stats(&ps->res_stats);
}

void evlist__copy_res_stats(struct perf_stat_config *config, struct evlist *evlist)
{
	struct evsel *evsel;

	if (config->aggr_mode != AGGR_GLOBAL)
		return;

	evlist__for_each_entry(evlist, evsel)
		evsel__copy_res_stats(evsel);
}

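/*
 * The per-pkg mask is a hashmap keyed by a (die, socket) pair packed
 * into a u64; these callbacks hash and compare the keys that the map
 * entries point to.
 */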
static size_t pkg_id_hash(long __key, void *ctx __maybe_unused)
{
	uint64_t *key = (uint64_t *) __key;

	return *key & 0xffffffff;
}

static bool pkg_id_equal(long __key1, long __key2, void *ctx __maybe_unused)
{
	uint64_t *key1 = (uint64_t *) __key1;
	uint64_t *key2 = (uint64_t *) __key2;

	return *key1 == *key2;
}

static int check_per_pkg(struct evsel *counter, struct perf_counts_values *vals,
			 int cpu_map_idx, bool *skip)
{
	struct hashmap *mask = counter->per_pkg_mask;
	struct perf_cpu_map *cpus = evsel__cpus(counter);
	struct perf_cpu cpu = perf_cpu_map__cpu(cpus, cpu_map_idx);
	int s, d, ret = 0;
	uint64_t *key;

	*skip = false;

	if (!counter->per_pkg)
		return 0;

	if (perf_cpu_map__is_any_cpu_or_is_empty(cpus))
		return 0;

	if (!mask) {
		mask = hashmap__new(pkg_id_hash, pkg_id_equal, NULL);
		if (IS_ERR(mask))
			return -ENOMEM;

		counter->per_pkg_mask = mask;
	}

	/*
	 * We do not consider an event that has not run to be a good
	 * instance to mark a package as used (skip=1). Otherwise we may
	 * run into a situation where the first CPU in a package is not
	 * running anything, yet the second is, and this function would
	 * mark the package as used after the first CPU and would not
	 * read the values from the second CPU.
	 */
	if (!(vals->run && vals->ena))
		return 0;

	s = cpu__get_socket_id(cpu);
	if (s < 0)
		return -1;

	/*
	 * On a multi-die system die_id > 0, on a no-die system die_id = 0.
	 * Use a (socket, die) hashmap to track the socket+die pairs that
	 * have already been used.
	 */
	d = cpu__get_die_id(cpu);
	if (d < 0)
		return -1;

	key = malloc(sizeof(*key));
	if (!key)
		return -ENOMEM;

	*key = (uint64_t)d << 32 | s;
	if (hashmap__find(mask, key, NULL)) {
		*skip = true;
		free(key);
	} else
		ret = hashmap__add(mask, key, 1);

	return ret;
}

static bool evsel__count_has_error(struct evsel *evsel,
				   struct perf_counts_values *count,
				   struct perf_stat_config *config)
{
	/* the evsel has already failed */
	if (evsel->err || evsel->counts->scaled == -1)
		return true;

	/* this is meaningful for CPU aggregation modes only */
	if (config->aggr_mode == AGGR_GLOBAL)
		return false;

	/* it is considered ok when it actually ran */
	if (count->ena != 0 && count->run != 0)
		return false;

	return true;
}

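/*
 * Fold one (cpu_map_idx, thread) counter value into the evsel's
 * aggregate counts: AGGR_THREAD accumulates into ps->aggr[thread],
 * the CPU aggregation modes into the ps->aggr entry whose
 * aggr_cpu_id matches the CPU being processed.
 */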
static int
process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
		       int cpu_map_idx, int thread,
		       struct perf_counts_values *count)
{
	struct perf_stat_evsel *ps = evsel->stats;
	static struct perf_counts_values zero;
	bool skip = false;

	if (check_per_pkg(evsel, count, cpu_map_idx, &skip)) {
		pr_err("failed to read per-pkg counter\n");
		return -1;
	}

	if (skip)
		count = &zero;

	if (!evsel->snapshot)
		evsel__compute_deltas(evsel, cpu_map_idx, thread, count);
	perf_counts_values__scale(count, config->scale, NULL);

	if (config->aggr_mode == AGGR_THREAD) {
		struct perf_counts_values *aggr_counts = &ps->aggr[thread].counts;

		/*
		 * Skip zero counts when --per-thread is enabled globally,
		 * otherwise the output is flooded with zero lines.
		 */
		if (count->val == 0 && config->system_wide)
			return 0;

		ps->aggr[thread].nr++;

		aggr_counts->val += count->val;
		aggr_counts->ena += count->ena;
		aggr_counts->run += count->run;
		return 0;
	}

	if (ps->aggr) {
		struct perf_cpu cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx);
		struct aggr_cpu_id aggr_id = config->aggr_get_id(config, cpu);
		struct perf_stat_aggr *ps_aggr;
		int i;

		for (i = 0; i < ps->nr_aggr; i++) {
			if (!aggr_cpu_id__equal(&aggr_id, &config->aggr_map->map[i]))
				continue;

			ps_aggr = &ps->aggr[i];
			ps_aggr->nr++;

			/*
			 * When any result is bad, mark them all failed to
			 * give consistent output in interval mode. Per-task
			 * counters, though, can have 0 enabled time while
			 * some tasks are idle.
			 */
			if (evsel__count_has_error(evsel, count, config) && !ps_aggr->failed) {
				ps_aggr->counts.val = 0;
				ps_aggr->counts.ena = 0;
				ps_aggr->counts.run = 0;
				ps_aggr->failed = true;
			}

			if (!ps_aggr->failed) {
				ps_aggr->counts.val += count->val;
				ps_aggr->counts.ena += count->ena;
				ps_aggr->counts.run += count->run;
			}
			break;
		}
	}

	return 0;
}

static int process_counter_maps(struct perf_stat_config *config,
				struct evsel *counter)
{
	int nthreads = perf_thread_map__nr(counter->core.threads);
	int ncpus = evsel__nr_cpus(counter);
	int idx, thread;

	for (thread = 0; thread < nthreads; thread++) {
		for (idx = 0; idx < ncpus; idx++) {
			if (process_counter_values(config, counter, idx, thread,
						   perf_counts(counter->counts, idx, thread)))
				return -1;
		}
	}

	return 0;
}

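/*
 * Fold the per-cpu/per-thread values of one counter after a read; in
 * GLOBAL aggregation mode this also feeds the running res_stats used
 * for the printed average.
 */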
int perf_stat_process_counter(struct perf_stat_config *config,
			      struct evsel *counter)
{
	struct perf_stat_evsel *ps = counter->stats;
	u64 *count;
	int ret;

	if (counter->per_pkg)
		evsel__zero_per_pkg(counter);

	ret = process_counter_maps(config, counter);
	if (ret)
		return ret;

	if (config->aggr_mode != AGGR_GLOBAL)
		return 0;

	/*
	 * GLOBAL aggregation mode has only a single set of aggr counts,
	 * so ps->aggr[0] can be used as the actual output.
	 */
	count = ps->aggr[0].counts.values;
	update_stats(&ps->res_stats, *count);

	if (verbose > 0) {
		fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			evsel__name(counter), count[0], count[1], count[2]);
	}

	return 0;
}

static int evsel__merge_aggr_counters(struct evsel *evsel, struct evsel *alias)
{
	struct perf_stat_evsel *ps_a = evsel->stats;
	struct perf_stat_evsel *ps_b = alias->stats;
	int i;

	if (ps_a->aggr == NULL && ps_b->aggr == NULL)
		return 0;

	if (ps_a->nr_aggr != ps_b->nr_aggr) {
		pr_err("Unmatched aggregation mode between aliases\n");
		return -1;
	}

	for (i = 0; i < ps_a->nr_aggr; i++) {
		struct perf_counts_values *aggr_counts_a = &ps_a->aggr[i].counts;
		struct perf_counts_values *aggr_counts_b = &ps_b->aggr[i].counts;

		/* NB: don't increase aggr.nr for aliases */

		aggr_counts_a->val += aggr_counts_b->val;
		aggr_counts_a->ena += aggr_counts_b->ena;
		aggr_counts_a->run += aggr_counts_b->run;
	}

	return 0;
}

/* events should have the same name, scale, unit, cgroup but on different PMUs */
static bool evsel__is_alias(struct evsel *evsel_a, struct evsel *evsel_b)
{
	if (strcmp(evsel__name(evsel_a), evsel__name(evsel_b)))
		return false;

	if (evsel_a->scale != evsel_b->scale)
		return false;

	if (evsel_a->cgrp != evsel_b->cgrp)
		return false;

	if (strcmp(evsel_a->unit, evsel_b->unit))
		return false;

	if (evsel__is_clock(evsel_a) != evsel__is_clock(evsel_b))
		return false;

	return evsel_a->pmu != evsel_b->pmu;
}

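/*
 * Walk the evlist entries after this evsel and fold any alias (the
 * same event on another PMU) into it, marking the alias as merged so
 * it is not printed separately.
 */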
static void evsel__merge_aliases(struct evsel *evsel)
{
	struct evlist *evlist = evsel->evlist;
	struct evsel *alias;

	alias = list_prepare_entry(evsel, &(evlist->core.entries), core.node);
	list_for_each_entry_continue(alias, &evlist->core.entries, core.node) {
		/* Merge the same events on different PMUs. */
		if (evsel__is_alias(evsel, alias)) {
			evsel__merge_aggr_counters(evsel, alias);
			alias->merged_stat = true;
		}
	}
}

static bool evsel__should_merge_hybrid(const struct evsel *evsel,
				       const struct perf_stat_config *config)
{
	return config->hybrid_merge && evsel__is_hybrid(evsel);
}

static void evsel__merge_stats(struct evsel *evsel, struct perf_stat_config *config)
{
	/* this evsel is already merged */
	if (evsel->merged_stat)
		return;

	if (evsel->auto_merge_stats || evsel__should_merge_hybrid(evsel, config))
		evsel__merge_aliases(evsel);
}

/* merge the same uncore and hybrid events if requested */
void perf_stat_merge_counters(struct perf_stat_config *config, struct evlist *evlist)
{
	struct evsel *evsel;

	if (config->aggr_mode == AGGR_NONE)
		return;

	evlist__for_each_entry(evlist, evsel)
		evsel__merge_stats(evsel, config);
}

static void evsel__update_percore_stats(struct evsel *evsel, struct aggr_cpu_id *core_id)
{
	struct perf_stat_evsel *ps = evsel->stats;
	struct perf_counts_values counts = { 0, };
	struct aggr_cpu_id id;
	struct perf_cpu cpu;
	int idx;

	/* collect per-core counts */
	perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
		struct perf_stat_aggr *aggr = &ps->aggr[idx];

		id = aggr_cpu_id__core(cpu, NULL);
		if (!aggr_cpu_id__equal(core_id, &id))
			continue;

		counts.val += aggr->counts.val;
		counts.ena += aggr->counts.ena;
		counts.run += aggr->counts.run;
	}

	/* update aggregated per-core counts for each CPU */
	perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
		struct perf_stat_aggr *aggr = &ps->aggr[idx];

		id = aggr_cpu_id__core(cpu, NULL);
		if (!aggr_cpu_id__equal(core_id, &id))
			continue;

		aggr->counts.val = counts.val;
		aggr->counts.ena = counts.ena;
		aggr->counts.run = counts.run;

		aggr->used = true;
	}
}

/* we have an aggr_map for cpu, but want to aggregate the counters per-core */
static void evsel__process_percore(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;
	struct aggr_cpu_id core_id;
	struct perf_cpu cpu;
	int idx;

	if (!evsel->percore)
		return;

	perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
		struct perf_stat_aggr *aggr = &ps->aggr[idx];

		if (aggr->used)
			continue;

		core_id = aggr_cpu_id__core(cpu, NULL);
		evsel__update_percore_stats(evsel, &core_id);
	}
}

/* process cpu stats on per-core events */
void perf_stat_process_percore(struct perf_stat_config *config, struct evlist *evlist)
{
	struct evsel *evsel;

	if (config->aggr_mode != AGGR_NONE)
		return;

	evlist__for_each_entry(evlist, evsel)
		evsel__process_percore(evsel);
}

int perf_event__process_stat_event(struct perf_session *session,
				   union perf_event *event)
{
	struct perf_counts_values count, *ptr;
	struct perf_record_stat *st = &event->stat;
	struct evsel *counter;
	int cpu_map_idx;

	count.val = st->val;
	count.ena = st->ena;
	count.run = st->run;

	counter = evlist__id2evsel(session->evlist, st->id);
	if (!counter) {
		pr_err("Failed to resolve counter for stat event.\n");
		return -EINVAL;
	}
	cpu_map_idx = perf_cpu_map__idx(evsel__cpus(counter), (struct perf_cpu){.cpu = st->cpu});
	if (cpu_map_idx == -1) {
		pr_err("Invalid CPU %d for event %s.\n", st->cpu, evsel__name(counter));
		return -EINVAL;
	}
	ptr = perf_counts(counter->counts, cpu_map_idx, st->thread);
	if (ptr == NULL) {
		pr_err("Failed to find perf count for CPU %d thread %d on event %s.\n",
		       st->cpu, st->thread, evsel__name(counter));
		return -EINVAL;
	}
	*ptr = count;
	counter->supported = true;
	return 0;
}

size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp)
{
	struct perf_record_stat *st = (struct perf_record_stat *)event;
	size_t ret;

	ret = fprintf(fp, "\n... id %" PRI_lu64 ", cpu %d, thread %d\n",
		      st->id, st->cpu, st->thread);
	ret += fprintf(fp, "... value %" PRI_lu64 ", enabled %" PRI_lu64 ", running %" PRI_lu64 "\n",
		       st->val, st->ena, st->run);

	return ret;
}

size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp)
{
	struct perf_record_stat_round *rd = (struct perf_record_stat_round *)event;
	size_t ret;

	ret = fprintf(fp, "\n... time %" PRI_lu64 ", type %s\n", rd->time,
		      rd->type == PERF_STAT_ROUND_TYPE__FINAL ? "FINAL" : "INTERVAL");

	return ret;
}

size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
{
	struct perf_stat_config sc = {};
	size_t ret;

	perf_event__read_stat_config(&sc, &event->stat_config);

	ret = fprintf(fp, "\n");
	ret += fprintf(fp, "... aggr_mode %d\n", sc.aggr_mode);
	ret += fprintf(fp, "... scale %d\n", sc.scale);
	ret += fprintf(fp, "... interval %u\n", sc.interval);

	return ret;
}

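/*
 * Open one counter for 'perf stat': counting mode (no sampling), with
 * enabled/running times in the read format so that scaling can detect
 * multiplexing.
 */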
int create_perf_stat_counter(struct evsel *evsel,
			     struct perf_stat_config *config,
			     struct target *target,
			     int cpu_map_idx)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	struct evsel *leader = evsel__leader(evsel);

	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			    PERF_FORMAT_TOTAL_TIME_RUNNING;

	/*
	 * The event is part of a non-trivial group, so enable group
	 * reads (for the leader) and ID retrieval for all members.
	 */
	if (leader->core.nr_members > 1)
		attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;

	attr->inherit = !config->no_inherit && list_empty(&evsel->bpf_counter_list);

	/*
	 * Some events get initialized with sample_(period/type) set,
	 * like tracepoints. Clear these up for counting.
	 */
	attr->sample_period = 0;

	if (config->identifier)
		attr->sample_type = PERF_SAMPLE_IDENTIFIER;

	if (config->all_user) {
		attr->exclude_kernel = 1;
		attr->exclude_user = 0;
	}

	if (config->all_kernel) {
		attr->exclude_kernel = 0;
		attr->exclude_user = 1;
	}

	/*
	 * Disable all counters initially; they will be enabled either
	 * manually by us or by the kernel via enable_on_exec, set
	 * later.
	 */
	if (evsel__is_group_leader(evsel)) {
		attr->disabled = 1;

		if (target__enable_on_exec(target))
			attr->enable_on_exec = 1;
	}

	if (target__has_cpu(target) && !target__has_per_thread(target))
		return evsel__open_per_cpu(evsel, evsel__cpus(evsel), cpu_map_idx);

	return evsel__open_per_thread(evsel, evsel->core.threads);
}