// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <linux/err.h>
#include <inttypes.h>
#include <math.h>
#include <string.h>
#include "counts.h"
#include "cpumap.h"
#include "debug.h"
#include "header.h"
#include "stat.h"
#include "session.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "util/hashmap.h"
#include <linux/zalloc.h>

void update_stats(struct stats *stats, u64 val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;
	stats->M2 += delta*(val - stats->mean);

	if (val > stats->max)
		stats->max = val;

	if (val < stats->min)
		stats->min = val;
}

double avg_stats(struct stats *stats)
{
	return stats->mean;
}

/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 *
 */
double stddev_stats(struct stats *stats)
{
	double variance, variance_mean;

	if (stats->n < 2)
		return 0.0;

	variance = stats->M2 / (stats->n - 1);
	variance_mean = variance / stats->n;

	return sqrt(variance_mean);
}

double rel_stddev_stats(double stddev, double avg)
{
	double pct = 0.0;

	if (avg)
		pct = 100.0 * stddev/avg;

	return pct;
}
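
/*
 * Illustrative sketch (not part of the original file): update_stats()
 * implements Welford's online algorithm, so the mean and stddev can be
 * read at any point without storing the samples.  init_stats() is
 * assumed to be the initializer declared in stat.h.
 *
 *	struct stats st;
 *	u64 samples[] = { 100, 102, 98 };
 *
 *	init_stats(&st);
 *	for (size_t i = 0; i < ARRAY_SIZE(samples); i++)
 *		update_stats(&st, samples[i]);
 *
 *	// avg_stats(&st) == 100.0; M2 == 8, so stddev_stats(&st) returns
 *	// sqrt((8 / 2) / 3) ~= 1.15, i.e. ~1.15% via rel_stddev_stats().
 */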

bool __perf_stat_evsel__is(struct evsel *evsel, enum perf_stat_evsel_id id)
{
	struct perf_stat_evsel *ps = evsel->stats;

	return ps->id == id;
}

#define ID(id, name) [PERF_STAT_EVSEL_ID__##id] = #name
static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
	ID(NONE,		x),
	ID(CYCLES_IN_TX,	cpu/cycles-t/),
	ID(TRANSACTION_START,	cpu/tx-start/),
	ID(ELISION_START,	cpu/el-start/),
	ID(CYCLES_IN_TX_CP,	cpu/cycles-ct/),
	ID(TOPDOWN_TOTAL_SLOTS, topdown-total-slots),
	ID(TOPDOWN_SLOTS_ISSUED, topdown-slots-issued),
	ID(TOPDOWN_SLOTS_RETIRED, topdown-slots-retired),
	ID(TOPDOWN_FETCH_BUBBLES, topdown-fetch-bubbles),
	ID(TOPDOWN_RECOVERY_BUBBLES, topdown-recovery-bubbles),
	ID(TOPDOWN_RETIRING, topdown-retiring),
	ID(TOPDOWN_BAD_SPEC, topdown-bad-spec),
	ID(TOPDOWN_FE_BOUND, topdown-fe-bound),
	ID(TOPDOWN_BE_BOUND, topdown-be-bound),
	ID(TOPDOWN_HEAVY_OPS, topdown-heavy-ops),
	ID(TOPDOWN_BR_MISPREDICT, topdown-br-mispredict),
	ID(TOPDOWN_FETCH_LAT, topdown-fetch-lat),
	ID(TOPDOWN_MEM_BOUND, topdown-mem-bound),
	ID(SMI_NUM, msr/smi/),
	ID(APERF, msr/aperf/),
};
#undef ID

static void perf_stat_evsel_id_init(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;
	int i;

	/* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */

	for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) {
		if (!strcmp(evsel__name(evsel), id_str[i]) ||
		    (strstr(evsel__name(evsel), id_str[i]) && evsel->pmu_name
		     && strstr(evsel__name(evsel), evsel->pmu_name))) {
			ps->id = i;
			break;
		}
	}
}
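
/*
 * Illustrative example (not part of the original file): the strcmp()
 * above matches an exact name such as "msr/smi/".  The strstr() pair
 * covers PMU-qualified spellings: a hypothetical hybrid event named
 * "cpu_core/topdown-retiring/" with pmu_name "cpu_core" contains both
 * the id_str entry "topdown-retiring" and its own PMU name, so it is
 * still classified as PERF_STAT_EVSEL_ID__TOPDOWN_RETIRING.
 */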

static void evsel__reset_aggr_stats(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;
	struct perf_stat_aggr *aggr = ps->aggr;

	if (aggr)
		memset(aggr, 0, sizeof(*aggr) * ps->nr_aggr);
}

static void evsel__reset_stat_priv(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;

	init_stats(&ps->res_stats);
	evsel__reset_aggr_stats(evsel);
}

static int evsel__alloc_aggr_stats(struct evsel *evsel, int nr_aggr)
{
	struct perf_stat_evsel *ps = evsel->stats;

	if (ps == NULL)
		return 0;

	ps->nr_aggr = nr_aggr;
	ps->aggr = calloc(nr_aggr, sizeof(*ps->aggr));
	if (ps->aggr == NULL)
		return -ENOMEM;

	return 0;
}

int evlist__alloc_aggr_stats(struct evlist *evlist, int nr_aggr)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__alloc_aggr_stats(evsel, nr_aggr) < 0)
			return -1;
	}
	return 0;
}

static int evsel__alloc_stat_priv(struct evsel *evsel, int nr_aggr)
{
	struct perf_stat_evsel *ps;

	ps = zalloc(sizeof(*ps));
	if (ps == NULL)
		return -ENOMEM;

	evsel->stats = ps;

	if (nr_aggr && evsel__alloc_aggr_stats(evsel, nr_aggr) < 0) {
		evsel->stats = NULL;
		free(ps);
		return -ENOMEM;
	}

	perf_stat_evsel_id_init(evsel);
	evsel__reset_stat_priv(evsel);
	return 0;
}

static void evsel__free_stat_priv(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;

	if (ps) {
		zfree(&ps->aggr);
		zfree(&ps->group_data);
	}
	zfree(&evsel->stats);
}

static int evsel__alloc_prev_raw_counts(struct evsel *evsel)
{
	int cpu_map_nr = evsel__nr_cpus(evsel);
	int nthreads = perf_thread_map__nr(evsel->core.threads);
	struct perf_counts *counts;

	counts = perf_counts__new(cpu_map_nr, nthreads);
	if (counts)
		evsel->prev_raw_counts = counts;

	return counts ? 0 : -ENOMEM;
}

static void evsel__free_prev_raw_counts(struct evsel *evsel)
{
	perf_counts__delete(evsel->prev_raw_counts);
	evsel->prev_raw_counts = NULL;
}

static void evsel__reset_prev_raw_counts(struct evsel *evsel)
{
	if (evsel->prev_raw_counts)
		perf_counts__reset(evsel->prev_raw_counts);
}

static int evsel__alloc_stats(struct evsel *evsel, int nr_aggr, bool alloc_raw)
{
	if (evsel__alloc_stat_priv(evsel, nr_aggr) < 0 ||
	    evsel__alloc_counts(evsel) < 0 ||
	    (alloc_raw && evsel__alloc_prev_raw_counts(evsel) < 0))
		return -ENOMEM;

	return 0;
}

int evlist__alloc_stats(struct perf_stat_config *config,
			struct evlist *evlist, bool alloc_raw)
{
	struct evsel *evsel;
	int nr_aggr = 0;

	if (config && config->aggr_map)
		nr_aggr = config->aggr_map->nr;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel__alloc_stats(evsel, nr_aggr, alloc_raw))
			goto out_free;
	}

	return 0;

out_free:
	evlist__free_stats(evlist);
	return -1;
}
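
/*
 * Illustrative sketch (not part of the original file) of the expected
 * lifecycle in a perf-stat style caller, assuming stat_config and evlist
 * were set up elsewhere:
 *
 *	if (evlist__alloc_stats(&stat_config, evlist, false) < 0)
 *		return -ENOMEM;
 *	... open counters, read and process them ...
 *	evlist__reset_stats(evlist);	// e.g. between measurement runs
 *	evlist__free_stats(evlist);	// once, at teardown
 */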

void evlist__free_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		evsel__free_stat_priv(evsel);
		evsel__free_counts(evsel);
		evsel__free_prev_raw_counts(evsel);
	}
}

void evlist__reset_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		evsel__reset_stat_priv(evsel);
		evsel__reset_counts(evsel);
	}
}

void evlist__reset_aggr_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__reset_aggr_stats(evsel);
}

void evlist__reset_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__reset_prev_raw_counts(evsel);
}

static void evsel__copy_prev_raw_counts(struct evsel *evsel)
{
	int idx, nthreads = perf_thread_map__nr(evsel->core.threads);

	for (int thread = 0; thread < nthreads; thread++) {
		perf_cpu_map__for_each_idx(idx, evsel__cpus(evsel)) {
			*perf_counts(evsel->counts, idx, thread) =
				*perf_counts(evsel->prev_raw_counts, idx, thread);
		}
	}
}

void evlist__copy_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__copy_prev_raw_counts(evsel);
}

static size_t pkg_id_hash(long __key, void *ctx __maybe_unused)
{
	uint64_t *key = (uint64_t *) __key;

	return *key & 0xffffffff;
}

static bool pkg_id_equal(long __key1, long __key2, void *ctx __maybe_unused)
{
	uint64_t *key1 = (uint64_t *) __key1;
	uint64_t *key2 = (uint64_t *) __key2;

	return *key1 == *key2;
}

static int check_per_pkg(struct evsel *counter, struct perf_counts_values *vals,
			 int cpu_map_idx, bool *skip)
{
	struct hashmap *mask = counter->per_pkg_mask;
	struct perf_cpu_map *cpus = evsel__cpus(counter);
	struct perf_cpu cpu = perf_cpu_map__cpu(cpus, cpu_map_idx);
	int s, d, ret = 0;
	uint64_t *key;

	*skip = false;

	if (!counter->per_pkg)
		return 0;

	if (perf_cpu_map__empty(cpus))
		return 0;

	if (!mask) {
		mask = hashmap__new(pkg_id_hash, pkg_id_equal, NULL);
		if (IS_ERR(mask))
			return -ENOMEM;

		counter->per_pkg_mask = mask;
	}

	/*
	 * An event that has not run is not a good instance to mark a
	 * package as used (skip=1).  Otherwise we may run into a situation
	 * where the first CPU in a package is not running anything but the
	 * second one is; this function would then mark the package as used
	 * after seeing the first CPU and would never read the values from
	 * the second one.
	 */
	if (!(vals->run && vals->ena))
		return 0;

	s = cpu__get_socket_id(cpu);
	if (s < 0)
		return -1;

	/*
	 * On a multi-die system, die_id > 0; on a single-die system,
	 * die_id = 0.  Hash the (socket, die) pair to track which
	 * socket+die combinations have already been counted.
	 */
	d = cpu__get_die_id(cpu);
	if (d < 0)
		return -1;

	key = malloc(sizeof(*key));
	if (!key)
		return -ENOMEM;

	*key = (uint64_t)d << 32 | s;
	if (hashmap__find(mask, key, NULL)) {
		*skip = true;
		free(key);
	} else
		ret = hashmap__add(mask, key, 1);

	return ret;
}
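
/*
 * Worked example (not part of the original file): for a per-package event
 * on a hypothetical two-socket, two-die-per-socket machine, a CPU on
 * socket 1, die 1 packs into key = (1ULL << 32) | 1 = 0x100000001.  The
 * first counted CPU inserts that key; every other CPU on the same
 * socket+die pair then finds it and gets *skip = true, so the
 * package-wide value is only added once.
 */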

static bool evsel__count_has_error(struct evsel *evsel,
				   struct perf_counts_values *count,
				   struct perf_stat_config *config)
{
	/* the evsel already failed */
	if (evsel->err || evsel->counts->scaled == -1)
		return true;

	/* this is meaningful for CPU aggregation modes only */
	if (config->aggr_mode == AGGR_GLOBAL)
		return false;

	/* it's considered ok when it actually ran */
	if (count->ena != 0 && count->run != 0)
		return false;

	return true;
}

static int
process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
		       int cpu_map_idx, int thread,
		       struct perf_counts_values *count)
{
	struct perf_stat_evsel *ps = evsel->stats;
	static struct perf_counts_values zero;
	bool skip = false;

	if (check_per_pkg(evsel, count, cpu_map_idx, &skip)) {
		pr_err("failed to read per-pkg counter\n");
		return -1;
	}

	if (skip)
		count = &zero;

	if (!evsel->snapshot)
		evsel__compute_deltas(evsel, cpu_map_idx, thread, count);
	perf_counts_values__scale(count, config->scale, NULL);

	if (config->aggr_mode == AGGR_THREAD) {
		struct perf_counts_values *aggr_counts = &ps->aggr[thread].counts;

		/*
		 * Skip zero counts when --per-thread is enabled system-wide,
		 * otherwise the output is flooded with zeros.
		 */
		if (count->val == 0 && config->system_wide)
			return 0;

		ps->aggr[thread].nr++;

		aggr_counts->val += count->val;
		aggr_counts->ena += count->ena;
		aggr_counts->run += count->run;
		return 0;
	}

	if (ps->aggr) {
		struct perf_cpu cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx);
		struct aggr_cpu_id aggr_id = config->aggr_get_id(config, cpu);
		struct perf_stat_aggr *ps_aggr;
		int i;

		for (i = 0; i < ps->nr_aggr; i++) {
			if (!aggr_cpu_id__equal(&aggr_id, &config->aggr_map->map[i]))
				continue;

			ps_aggr = &ps->aggr[i];
			ps_aggr->nr++;

			/*
			 * When any result is bad, mark them all bad to give
			 * consistent output in interval mode.  But per-task
			 * counters can have 0 enabled time when some tasks
			 * are idle.
			 */
			if (evsel__count_has_error(evsel, count, config) && !ps_aggr->failed) {
				ps_aggr->counts.val = 0;
				ps_aggr->counts.ena = 0;
				ps_aggr->counts.run = 0;
				ps_aggr->failed = true;
			}

			if (!ps_aggr->failed) {
				ps_aggr->counts.val += count->val;
				ps_aggr->counts.ena += count->ena;
				ps_aggr->counts.run += count->run;
			}
			break;
		}
	}

	return 0;
}
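
/*
 * Illustrative example (not part of the original file): with --per-socket
 * on a hypothetical two-socket box, config->aggr_map holds two entries and
 * aggr_get_id() maps each CPU to its socket.  A count read on a CPU of
 * socket 1 is matched against the map and accumulated into ps->aggr[1];
 * ps->aggr[1].nr ends up recording how many CPUs contributed to that
 * socket's total.
 */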

static int process_counter_maps(struct perf_stat_config *config,
				struct evsel *counter)
{
	int nthreads = perf_thread_map__nr(counter->core.threads);
	int ncpus = evsel__nr_cpus(counter);
	int idx, thread;

	for (thread = 0; thread < nthreads; thread++) {
		for (idx = 0; idx < ncpus; idx++) {
			if (process_counter_values(config, counter, idx, thread,
						   perf_counts(counter->counts, idx, thread)))
				return -1;
		}
	}

	return 0;
}

int perf_stat_process_counter(struct perf_stat_config *config,
			      struct evsel *counter)
{
	struct perf_stat_evsel *ps = counter->stats;
	u64 *count;
	int ret;

	if (counter->per_pkg)
		evsel__zero_per_pkg(counter);

	ret = process_counter_maps(config, counter);
	if (ret)
		return ret;

	if (config->aggr_mode != AGGR_GLOBAL)
		return 0;

	/*
	 * GLOBAL aggregation mode has only a single aggregate count,
	 * so we can use ps->aggr[0] as the actual output.
	 */
	count = ps->aggr[0].counts.values;
	update_stats(&ps->res_stats, *count);

	if (verbose > 0) {
		fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			evsel__name(counter), count[0], count[1], count[2]);
	}

	return 0;
}

static int evsel__merge_aggr_counters(struct evsel *evsel, struct evsel *alias)
{
	struct perf_stat_evsel *ps_a = evsel->stats;
	struct perf_stat_evsel *ps_b = alias->stats;
	int i;

	if (ps_a->aggr == NULL && ps_b->aggr == NULL)
		return 0;

	if (ps_a->nr_aggr != ps_b->nr_aggr) {
		pr_err("Unmatched aggregation mode between aliases\n");
		return -1;
	}

	for (i = 0; i < ps_a->nr_aggr; i++) {
		struct perf_counts_values *aggr_counts_a = &ps_a->aggr[i].counts;
		struct perf_counts_values *aggr_counts_b = &ps_b->aggr[i].counts;

		/* NB: don't increase aggr.nr for aliases */

		aggr_counts_a->val += aggr_counts_b->val;
		aggr_counts_a->ena += aggr_counts_b->ena;
		aggr_counts_a->run += aggr_counts_b->run;
	}

	return 0;
}
/* events should have the same name, scale, unit, cgroup but be on different PMUs */
static bool evsel__is_alias(struct evsel *evsel_a, struct evsel *evsel_b)
{
	if (strcmp(evsel__name(evsel_a), evsel__name(evsel_b)))
		return false;

	if (evsel_a->scale != evsel_b->scale)
		return false;

	if (evsel_a->cgrp != evsel_b->cgrp)
		return false;

	if (strcmp(evsel_a->unit, evsel_b->unit))
		return false;

	if (evsel__is_clock(evsel_a) != evsel__is_clock(evsel_b))
		return false;

	return !!strcmp(evsel_a->pmu_name, evsel_b->pmu_name);
}
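
/*
 * Illustrative example (not part of the original file): two evsels both
 * named "uncore_imc/data_reads/" but instantiated on hypothetical PMUs
 * "uncore_imc_0" and "uncore_imc_1" pass every check above and differ
 * only in pmu_name, so they count as aliases and get folded together by
 * evsel__merge_aliases() below.
 */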

static void evsel__merge_aliases(struct evsel *evsel)
{
	struct evlist *evlist = evsel->evlist;
	struct evsel *alias;

	alias = list_prepare_entry(evsel, &(evlist->core.entries), core.node);
	list_for_each_entry_continue(alias, &evlist->core.entries, core.node) {
		/* Merge the same events on different PMUs. */
		if (evsel__is_alias(evsel, alias)) {
			evsel__merge_aggr_counters(evsel, alias);
			alias->merged_stat = true;
		}
	}
}

static bool evsel__should_merge_hybrid(const struct evsel *evsel,
				       const struct perf_stat_config *config)
{
	return config->hybrid_merge && evsel__is_hybrid(evsel);
}

static void evsel__merge_stats(struct evsel *evsel, struct perf_stat_config *config)
{
	/* this evsel is already merged */
	if (evsel->merged_stat)
		return;

	if (evsel->auto_merge_stats || evsel__should_merge_hybrid(evsel, config))
		evsel__merge_aliases(evsel);
}

/* merge the same uncore and hybrid events if requested */
void perf_stat_merge_counters(struct perf_stat_config *config, struct evlist *evlist)
{
	struct evsel *evsel;

	if (config->no_merge)
		return;

	evlist__for_each_entry(evlist, evsel)
		evsel__merge_stats(evsel, config);
}

static void evsel__update_percore_stats(struct evsel *evsel, struct aggr_cpu_id *core_id)
{
	struct perf_stat_evsel *ps = evsel->stats;
	struct perf_counts_values counts = { 0, };
	struct aggr_cpu_id id;
	struct perf_cpu cpu;
	int idx;

	/* collect per-core counts */
	perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
		struct perf_stat_aggr *aggr = &ps->aggr[idx];

		id = aggr_cpu_id__core(cpu, NULL);
		if (!aggr_cpu_id__equal(core_id, &id))
			continue;

		counts.val += aggr->counts.val;
		counts.ena += aggr->counts.ena;
		counts.run += aggr->counts.run;
	}

	/* update aggregated per-core counts for each CPU */
	perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
		struct perf_stat_aggr *aggr = &ps->aggr[idx];

		id = aggr_cpu_id__core(cpu, NULL);
		if (!aggr_cpu_id__equal(core_id, &id))
			continue;

		aggr->counts.val = counts.val;
		aggr->counts.ena = counts.ena;
		aggr->counts.run = counts.run;

		aggr->used = true;
	}
}

/* we have an aggr_map for cpu, but want to aggregate the counters per-core */
static void evsel__process_percore(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;
	struct aggr_cpu_id core_id;
	struct perf_cpu cpu;
	int idx;

	if (!evsel->percore)
		return;

	perf_cpu_map__for_each_cpu(cpu, idx, evsel->core.cpus) {
		struct perf_stat_aggr *aggr = &ps->aggr[idx];

		if (aggr->used)
			continue;

		core_id = aggr_cpu_id__core(cpu, NULL);
		evsel__update_percore_stats(evsel, &core_id);
	}
}

/* process cpu stats on per-core events */
void perf_stat_process_percore(struct perf_stat_config *config, struct evlist *evlist)
{
	struct evsel *evsel;

	if (config->aggr_mode != AGGR_NONE)
		return;

	evlist__for_each_entry(evlist, evsel)
		evsel__process_percore(evsel);
}
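
/*
 * Illustrative example (not part of the original file): on a hypothetical
 * SMT-2 machine where CPU0 and CPU4 are siblings on core 0, a --percore
 * event with per-CPU values 100 (CPU0) and 50 (CPU4) leaves both
 * ps->aggr slots holding the core total 150, so whichever CPU entry is
 * printed shows the per-core sum.
 */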

static void evsel__update_shadow_stats(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;
	int i;

	if (ps->aggr == NULL)
		return;

	for (i = 0; i < ps->nr_aggr; i++) {
		struct perf_counts_values *aggr_counts = &ps->aggr[i].counts;

		perf_stat__update_shadow_stats(evsel, aggr_counts->val, i, &rt_stat);
	}
}

void perf_stat_process_shadow_stats(struct perf_stat_config *config __maybe_unused,
				    struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__update_shadow_stats(evsel);
}

int perf_event__process_stat_event(struct perf_session *session,
				   union perf_event *event)
{
	struct perf_counts_values count, *ptr;
	struct perf_record_stat *st = &event->stat;
	struct evsel *counter;
	int cpu_map_idx;

	count.val = st->val;
	count.ena = st->ena;
	count.run = st->run;

	counter = evlist__id2evsel(session->evlist, st->id);
	if (!counter) {
		pr_err("Failed to resolve counter for stat event.\n");
		return -EINVAL;
	}
	cpu_map_idx = perf_cpu_map__idx(evsel__cpus(counter), (struct perf_cpu){.cpu = st->cpu});
	if (cpu_map_idx == -1) {
		pr_err("Invalid CPU %d for event %s.\n", st->cpu, evsel__name(counter));
		return -EINVAL;
	}
	ptr = perf_counts(counter->counts, cpu_map_idx, st->thread);
	if (ptr == NULL) {
		pr_err("Failed to find perf count for CPU %d thread %d on event %s.\n",
			st->cpu, st->thread, evsel__name(counter));
		return -EINVAL;
	}
	*ptr = count;
	counter->supported = true;
	return 0;
}

size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp)
{
	struct perf_record_stat *st = (struct perf_record_stat *)event;
	size_t ret;

	ret  = fprintf(fp, "\n... id %" PRI_lu64 ", cpu %d, thread %d\n",
		       st->id, st->cpu, st->thread);
	ret += fprintf(fp, "... value %" PRI_lu64 ", enabled %" PRI_lu64 ", running %" PRI_lu64 "\n",
		       st->val, st->ena, st->run);

	return ret;
}

size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp)
{
	struct perf_record_stat_round *rd = (struct perf_record_stat_round *)event;
	size_t ret;

	ret = fprintf(fp, "\n... time %" PRI_lu64 ", type %s\n", rd->time,
		      rd->type == PERF_STAT_ROUND_TYPE__FINAL ? "FINAL" : "INTERVAL");

	return ret;
}

size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
{
	struct perf_stat_config sc;
	size_t ret;

	perf_event__read_stat_config(&sc, &event->stat_config);

	ret  = fprintf(fp, "\n");
	ret += fprintf(fp, "... aggr_mode %d\n", sc.aggr_mode);
	ret += fprintf(fp, "... scale     %d\n", sc.scale);
	ret += fprintf(fp, "... interval  %u\n", sc.interval);

	return ret;
}
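
/*
 * Illustrative output (not part of the original file): for a stat event
 * carrying hypothetical values (id 42, CPU 0, thread 0, fully enabled),
 * perf_event__fprintf_stat() above prints:
 *
 *	... id 42, cpu 0, thread 0
 *	... value 123456, enabled 1000000, running 1000000
 */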

int create_perf_stat_counter(struct evsel *evsel,
			     struct perf_stat_config *config,
			     struct target *target,
			     int cpu_map_idx)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	struct evsel *leader = evsel__leader(evsel);

	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			    PERF_FORMAT_TOTAL_TIME_RUNNING;

	/*
	 * If the event is part of a non-trivial group, enable the group
	 * read (for the leader) and ID retrieval for all members.
	 */
	if (leader->core.nr_members > 1)
		attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;

	attr->inherit = !config->no_inherit && list_empty(&evsel->bpf_counter_list);

	/*
	 * Some events get initialized with sample_(period/type) set,
	 * like tracepoints.  Clear them for counting.
	 */
	attr->sample_period = 0;

	if (config->identifier)
		attr->sample_type = PERF_SAMPLE_IDENTIFIER;

	if (config->all_user) {
		attr->exclude_kernel = 1;
		attr->exclude_user   = 0;
	}

	if (config->all_kernel) {
		attr->exclude_kernel = 0;
		attr->exclude_user   = 1;
	}

	/*
	 * Disable all counters initially; they will be enabled either
	 * manually by us or by the kernel via enable_on_exec set later.
	 */
	if (evsel__is_group_leader(evsel)) {
		attr->disabled = 1;

		/*
		 * In case of initial_delay we enable tracee
		 * events manually.
		 */
		if (target__none(target) && !config->initial_delay)
			attr->enable_on_exec = 1;
	}

	if (target__has_cpu(target) && !target__has_per_thread(target))
		return evsel__open_per_cpu(evsel, evsel__cpus(evsel), cpu_map_idx);

	return evsel__open_per_thread(evsel, evsel->core.threads);
}
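
/*
 * Illustrative sketch (not part of the original file): a perf-stat style
 * caller opens one counter per evsel and CPU-map index and lets the attr
 * setup above choose between enable_on_exec and manual enabling:
 *
 *	evlist__for_each_entry(evlist, evsel) {
 *		for (int i = 0; i < evsel__nr_cpus(evsel); i++) {
 *			if (create_perf_stat_counter(evsel, &stat_config,
 *						     &target, i) < 0)
 *				return -1;
 *		}
 *	}
 */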