v4.17
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <math.h>
#include "stat.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"

void update_stats(struct stats *stats, u64 val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;
	stats->M2 += delta*(val - stats->mean);

	if (val > stats->max)
		stats->max = val;

	if (val < stats->min)
		stats->min = val;
}

double avg_stats(struct stats *stats)
{
	return stats->mean;
}

/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 *
 */
double stddev_stats(struct stats *stats)
{
	double variance, variance_mean;

	if (stats->n < 2)
		return 0.0;

	variance = stats->M2 / (stats->n - 1);
	variance_mean = variance / stats->n;

	return sqrt(variance_mean);
}
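
/*
 * Hedged worked example (not part of stat.c): feeding three samples
 * through the Welford update in update_stats() above and reading back
 * the mean and the stddev of the mean.  Assumes <stdio.h> and that
 * init_stats() zeroes the struct and sets min to (u64)-1, as the
 * stat.h helper does.
 */
static void stats_example(void)
{
	struct stats st;
	u64 vals[] = { 10, 20, 30 };
	int i;

	init_stats(&st);
	for (i = 0; i < 3; i++)
		update_stats(&st, vals[i]);

	/* mean = 20, M2 = 200, s^2 = 100, s_mean = sqrt(100/3) ~= 5.77 */
	printf("avg %.2f +- %.2f\n", avg_stats(&st), stddev_stats(&st));
}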

double rel_stddev_stats(double stddev, double avg)
{
	double pct = 0.0;

	if (avg)
		pct = 100.0 * stddev/avg;

	return pct;
}

bool __perf_evsel_stat__is(struct perf_evsel *evsel,
			   enum perf_stat_evsel_id id)
{
	struct perf_stat_evsel *ps = evsel->stats;

	return ps->id == id;
}

#define ID(id, name) [PERF_STAT_EVSEL_ID__##id] = #name
static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
	ID(NONE,		x),
	ID(CYCLES_IN_TX,	cpu/cycles-t/),
	ID(TRANSACTION_START,	cpu/tx-start/),
	ID(ELISION_START,	cpu/el-start/),
	ID(CYCLES_IN_TX_CP,	cpu/cycles-ct/),
	ID(TOPDOWN_TOTAL_SLOTS, topdown-total-slots),
	ID(TOPDOWN_SLOTS_ISSUED, topdown-slots-issued),
	ID(TOPDOWN_SLOTS_RETIRED, topdown-slots-retired),
	ID(TOPDOWN_FETCH_BUBBLES, topdown-fetch-bubbles),
	ID(TOPDOWN_RECOVERY_BUBBLES, topdown-recovery-bubbles),
	ID(SMI_NUM, msr/smi/),
	ID(APERF, msr/aperf/),
};
#undef ID

static void perf_stat_evsel_id_init(struct perf_evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;
	int i;

	/* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */

	for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) {
		if (!strcmp(perf_evsel__name(evsel), id_str[i])) {
			ps->id = i;
			break;
		}
	}
}

static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
{
	int i;
	struct perf_stat_evsel *ps = evsel->stats;

	for (i = 0; i < 3; i++)
		init_stats(&ps->res_stats[i]);

	perf_stat_evsel_id_init(evsel);
}

static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
{
	evsel->stats = zalloc(sizeof(struct perf_stat_evsel));
	if (evsel->stats == NULL)
		return -ENOMEM;
	perf_evsel__reset_stat_priv(evsel);
	return 0;
}

static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;

	if (ps)
		free(ps->group_data);
	zfree(&evsel->stats);
}

static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel,
					     int ncpus, int nthreads)
{
	struct perf_counts *counts;

	counts = perf_counts__new(ncpus, nthreads);
	if (counts)
		evsel->prev_raw_counts = counts;

	return counts ? 0 : -ENOMEM;
}

static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
{
	perf_counts__delete(evsel->prev_raw_counts);
	evsel->prev_raw_counts = NULL;
}

static int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
{
	int ncpus = perf_evsel__nr_cpus(evsel);
	int nthreads = thread_map__nr(evsel->threads);

	if (perf_evsel__alloc_stat_priv(evsel) < 0 ||
	    perf_evsel__alloc_counts(evsel, ncpus, nthreads) < 0 ||
	    (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0))
		return -ENOMEM;

	return 0;
}

int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (perf_evsel__alloc_stats(evsel, alloc_raw))
			goto out_free;
	}

	return 0;

out_free:
	perf_evlist__free_stats(evlist);
	return -1;
}

void perf_evlist__free_stats(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		perf_evsel__free_stat_priv(evsel);
		perf_evsel__free_counts(evsel);
		perf_evsel__free_prev_raw_counts(evsel);
	}
}

void perf_evlist__reset_stats(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		perf_evsel__reset_stat_priv(evsel);
		perf_evsel__reset_counts(evsel);
	}
}

static void zero_per_pkg(struct perf_evsel *counter)
{
	if (counter->per_pkg_mask)
		memset(counter->per_pkg_mask, 0, MAX_NR_CPUS);
}

static int check_per_pkg(struct perf_evsel *counter,
			 struct perf_counts_values *vals, int cpu, bool *skip)
{
	unsigned long *mask = counter->per_pkg_mask;
	struct cpu_map *cpus = perf_evsel__cpus(counter);
	int s;

	*skip = false;

	if (!counter->per_pkg)
		return 0;

	if (cpu_map__empty(cpus))
		return 0;

	if (!mask) {
		mask = zalloc(MAX_NR_CPUS);
		if (!mask)
			return -ENOMEM;

		counter->per_pkg_mask = mask;
	}

	/*
	 * we do not consider an event that has not run as a good
	 * instance to mark a package as used (skip=1). Otherwise
	 * we may run into a situation where the first CPU in a package
	 * is not running anything, yet the second is, and this function
	 * would mark the package as used after the first CPU and would
	 * not read the values from the second CPU.
	 */
	if (!(vals->run && vals->ena))
		return 0;

	s = cpu_map__get_socket(cpus, cpu, NULL);
	if (s < 0)
		return -1;

	*skip = test_and_set_bit(s, mask) == 1;
	return 0;
}
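
/*
 * Hedged illustration (not from the tree): what the per-package mask
 * buys us.  The toy topology and socket_of() below are assumptions
 * made up for the example; test_and_set() stands in for the kernel's
 * test_and_set_bit().
 */
static bool test_and_set(unsigned long *mask, int bit)
{
	bool was_set = *mask & (1UL << bit);

	*mask |= 1UL << bit;
	return was_set;
}

/* toy topology: cpus 0,1 on socket 0; cpus 2,3 on socket 1 */
static int socket_of(int cpu) { return cpu / 2; }

static bool skip_pkg_reading(unsigned long *mask, int cpu)
{
	/* only the first counted CPU seen on each socket contributes */
	return test_and_set(mask, socket_of(cpu));
}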

static int
process_counter_values(struct perf_stat_config *config, struct perf_evsel *evsel,
		       int cpu, int thread,
		       struct perf_counts_values *count)
{
	struct perf_counts_values *aggr = &evsel->counts->aggr;
	static struct perf_counts_values zero;
	bool skip = false;

	if (check_per_pkg(evsel, count, cpu, &skip)) {
		pr_err("failed to read per-pkg counter\n");
		return -1;
	}

	if (skip)
		count = &zero;

	switch (config->aggr_mode) {
	case AGGR_THREAD:
	case AGGR_CORE:
	case AGGR_SOCKET:
	case AGGR_NONE:
		if (!evsel->snapshot)
			perf_evsel__compute_deltas(evsel, cpu, thread, count);
		perf_counts_values__scale(count, config->scale, NULL);
		if (config->aggr_mode == AGGR_NONE)
			perf_stat__update_shadow_stats(evsel, count->val, cpu,
						       &rt_stat);
		if (config->aggr_mode == AGGR_THREAD) {
			if (config->stats)
				perf_stat__update_shadow_stats(evsel,
					count->val, 0, &config->stats[thread]);
			else
				perf_stat__update_shadow_stats(evsel,
					count->val, 0, &rt_stat);
		}
		break;
	case AGGR_GLOBAL:
		aggr->val += count->val;
		if (config->scale) {
			aggr->ena += count->ena;
			aggr->run += count->run;
		}
	case AGGR_UNSET:
	default:
		break;
	}

	return 0;
}
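
/*
 * Hedged sketch (not the real helper): the correction that
 * perf_counts_values__scale() is expected to apply when an event was
 * multiplexed, i.e. it ran for only part of its enabled time.
 */
static u64 scale_multiplexed(u64 val, u64 ena, u64 run)
{
	if (!run)
		return 0;	/* never scheduled: nothing to estimate */
	if (run < ena)		/* extrapolate to the full enabled time */
		return (u64)((double)val * ena / run + 0.5);
	return val;		/* ran the whole time: value is exact */
}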

static int process_counter_maps(struct perf_stat_config *config,
				struct perf_evsel *counter)
{
	int nthreads = thread_map__nr(counter->threads);
	int ncpus = perf_evsel__nr_cpus(counter);
	int cpu, thread;

	if (counter->system_wide)
		nthreads = 1;

	for (thread = 0; thread < nthreads; thread++) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			if (process_counter_values(config, counter, cpu, thread,
						   perf_counts(counter->counts, cpu, thread)))
				return -1;
		}
	}

	return 0;
}

int perf_stat_process_counter(struct perf_stat_config *config,
			      struct perf_evsel *counter)
{
	struct perf_counts_values *aggr = &counter->counts->aggr;
	struct perf_stat_evsel *ps = counter->stats;
	u64 *count = counter->counts->aggr.values;
	int i, ret;

	aggr->val = aggr->ena = aggr->run = 0;

	/*
	 * We calculate counter's data every interval,
	 * and the display code shows ps->res_stats
	 * avg value. We need to zero the stats for
	 * interval mode, otherwise overall avg running
	 * averages will be shown for each interval.
	 */
	if (config->interval)
		init_stats(ps->res_stats);

	if (counter->per_pkg)
		zero_per_pkg(counter);

	ret = process_counter_maps(config, counter);
	if (ret)
		return ret;

	if (config->aggr_mode != AGGR_GLOBAL)
		return 0;

	if (!counter->snapshot)
		perf_evsel__compute_deltas(counter, -1, -1, aggr);
	perf_counts_values__scale(aggr, config->scale, &counter->counts->scaled);

	for (i = 0; i < 3; i++)
		update_stats(&ps->res_stats[i], count[i]);

	if (verbose > 0) {
		fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			perf_evsel__name(counter), count[0], count[1], count[2]);
	}

	/*
	 * Save the full runtime - to allow normalization during printout:
	 */
	perf_stat__update_shadow_stats(counter, *count, 0, &rt_stat);

	return 0;
}
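
/*
 * Note (hedged): count[] above aliases the aggregate's {val, ena, run}
 * triple; struct perf_counts_values lays them out as a union, roughly:
 *
 *	struct perf_counts_values {
 *		union {
 *			struct { u64 val, ena, run; };
 *			u64 values[3];
 *		};
 *	};
 *
 * so res_stats[0..2] track running averages of the value, the enabled
 * time and the running time across repeated runs.
 */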

int perf_event__process_stat_event(struct perf_tool *tool __maybe_unused,
				   union perf_event *event,
				   struct perf_session *session)
{
	struct perf_counts_values count;
	struct stat_event *st = &event->stat;
	struct perf_evsel *counter;

	count.val = st->val;
	count.ena = st->ena;
	count.run = st->run;

	counter = perf_evlist__id2evsel(session->evlist, st->id);
	if (!counter) {
		pr_err("Failed to resolve counter for stat event.\n");
		return -EINVAL;
	}

	*perf_counts(counter->counts, st->cpu, st->thread) = count;
	counter->supported = true;
	return 0;
}

size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp)
{
	struct stat_event *st = (struct stat_event *) event;
	size_t ret;

	ret  = fprintf(fp, "\n... id %" PRIu64 ", cpu %d, thread %d\n",
		       st->id, st->cpu, st->thread);
	ret += fprintf(fp, "... value %" PRIu64 ", enabled %" PRIu64 ", running %" PRIu64 "\n",
		       st->val, st->ena, st->run);

	return ret;
}

size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp)
{
	struct stat_round_event *rd = (struct stat_round_event *)event;
	size_t ret;

	ret = fprintf(fp, "\n... time %" PRIu64 ", type %s\n", rd->time,
		      rd->type == PERF_STAT_ROUND_TYPE__FINAL ? "FINAL" : "INTERVAL");

	return ret;
}

size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
{
	struct perf_stat_config sc;
	size_t ret;

	perf_event__read_stat_config(&sc, &event->stat_config);

	ret  = fprintf(fp, "\n");
	ret += fprintf(fp, "... aggr_mode %d\n", sc.aggr_mode);
	ret += fprintf(fp, "... scale     %d\n", sc.scale);
	ret += fprintf(fp, "... interval  %u\n", sc.interval);

	return ret;
}
v5.4
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <math.h>
#include <string.h>
#include "counts.h"
#include "cpumap.h"
#include "debug.h"
#include "header.h"
#include "stat.h"
#include "session.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include <linux/zalloc.h>

void update_stats(struct stats *stats, u64 val)
{
	double delta;

	stats->n++;
	delta = val - stats->mean;
	stats->mean += delta / stats->n;
	stats->M2 += delta*(val - stats->mean);

	if (val > stats->max)
		stats->max = val;

	if (val < stats->min)
		stats->min = val;
}

double avg_stats(struct stats *stats)
{
	return stats->mean;
}

/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                  n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 *
 */
double stddev_stats(struct stats *stats)
{
	double variance, variance_mean;

	if (stats->n < 2)
		return 0.0;

	variance = stats->M2 / (stats->n - 1);
	variance_mean = variance / stats->n;

	return sqrt(variance_mean);
}

double rel_stddev_stats(double stddev, double avg)
{
	double pct = 0.0;

	if (avg)
		pct = 100.0 * stddev/avg;

	return pct;
}

bool __perf_evsel_stat__is(struct evsel *evsel,
			   enum perf_stat_evsel_id id)
{
	struct perf_stat_evsel *ps = evsel->stats;

	return ps->id == id;
}

#define ID(id, name) [PERF_STAT_EVSEL_ID__##id] = #name
static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
	ID(NONE,		x),
	ID(CYCLES_IN_TX,	cpu/cycles-t/),
	ID(TRANSACTION_START,	cpu/tx-start/),
	ID(ELISION_START,	cpu/el-start/),
	ID(CYCLES_IN_TX_CP,	cpu/cycles-ct/),
	ID(TOPDOWN_TOTAL_SLOTS, topdown-total-slots),
	ID(TOPDOWN_SLOTS_ISSUED, topdown-slots-issued),
	ID(TOPDOWN_SLOTS_RETIRED, topdown-slots-retired),
	ID(TOPDOWN_FETCH_BUBBLES, topdown-fetch-bubbles),
	ID(TOPDOWN_RECOVERY_BUBBLES, topdown-recovery-bubbles),
	ID(SMI_NUM, msr/smi/),
	ID(APERF, msr/aperf/),
};
#undef ID

static void perf_stat_evsel_id_init(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;
	int i;

	/* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */

	for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) {
		if (!strcmp(perf_evsel__name(evsel), id_str[i])) {
			ps->id = i;
			break;
		}
	}
}

static void perf_evsel__reset_stat_priv(struct evsel *evsel)
{
	int i;
	struct perf_stat_evsel *ps = evsel->stats;

	for (i = 0; i < 3; i++)
		init_stats(&ps->res_stats[i]);

	perf_stat_evsel_id_init(evsel);
}

static int perf_evsel__alloc_stat_priv(struct evsel *evsel)
{
	evsel->stats = zalloc(sizeof(struct perf_stat_evsel));
	if (evsel->stats == NULL)
		return -ENOMEM;
	perf_evsel__reset_stat_priv(evsel);
	return 0;
}

static void perf_evsel__free_stat_priv(struct evsel *evsel)
{
	struct perf_stat_evsel *ps = evsel->stats;

	if (ps)
		zfree(&ps->group_data);
	zfree(&evsel->stats);
}

static int perf_evsel__alloc_prev_raw_counts(struct evsel *evsel,
					     int ncpus, int nthreads)
{
	struct perf_counts *counts;

	counts = perf_counts__new(ncpus, nthreads);
	if (counts)
		evsel->prev_raw_counts = counts;

	return counts ? 0 : -ENOMEM;
}

static void perf_evsel__free_prev_raw_counts(struct evsel *evsel)
{
	perf_counts__delete(evsel->prev_raw_counts);
	evsel->prev_raw_counts = NULL;
}

static void perf_evsel__reset_prev_raw_counts(struct evsel *evsel)
{
	if (evsel->prev_raw_counts) {
		evsel->prev_raw_counts->aggr.val = 0;
		evsel->prev_raw_counts->aggr.ena = 0;
		evsel->prev_raw_counts->aggr.run = 0;
	}
}

static int perf_evsel__alloc_stats(struct evsel *evsel, bool alloc_raw)
{
	int ncpus = perf_evsel__nr_cpus(evsel);
	int nthreads = perf_thread_map__nr(evsel->core.threads);

	if (perf_evsel__alloc_stat_priv(evsel) < 0 ||
	    perf_evsel__alloc_counts(evsel, ncpus, nthreads) < 0 ||
	    (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0))
		return -ENOMEM;

	return 0;
}

int perf_evlist__alloc_stats(struct evlist *evlist, bool alloc_raw)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (perf_evsel__alloc_stats(evsel, alloc_raw))
			goto out_free;
	}

	return 0;

out_free:
	perf_evlist__free_stats(evlist);
	return -1;
}

void perf_evlist__free_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		perf_evsel__free_stat_priv(evsel);
		perf_evsel__free_counts(evsel);
		perf_evsel__free_prev_raw_counts(evsel);
	}
}

void perf_evlist__reset_stats(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		perf_evsel__reset_stat_priv(evsel);
		perf_evsel__reset_counts(evsel);
	}
}

void perf_evlist__reset_prev_raw_counts(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		perf_evsel__reset_prev_raw_counts(evsel);
}

static void zero_per_pkg(struct evsel *counter)
{
	if (counter->per_pkg_mask)
		memset(counter->per_pkg_mask, 0, cpu__max_cpu());
}

static int check_per_pkg(struct evsel *counter,
			 struct perf_counts_values *vals, int cpu, bool *skip)
{
	unsigned long *mask = counter->per_pkg_mask;
	struct perf_cpu_map *cpus = evsel__cpus(counter);
	int s;

	*skip = false;

	if (!counter->per_pkg)
		return 0;

	if (perf_cpu_map__empty(cpus))
		return 0;

	if (!mask) {
		mask = zalloc(cpu__max_cpu());
		if (!mask)
			return -ENOMEM;

		counter->per_pkg_mask = mask;
	}

	/*
	 * we do not consider an event that has not run as a good
	 * instance to mark a package as used (skip=1). Otherwise
	 * we may run into a situation where the first CPU in a package
	 * is not running anything, yet the second is, and this function
	 * would mark the package as used after the first CPU and would
	 * not read the values from the second CPU.
	 */
	if (!(vals->run && vals->ena))
		return 0;

	s = cpu_map__get_socket(cpus, cpu, NULL);
	if (s < 0)
		return -1;

	*skip = test_and_set_bit(s, mask) == 1;
	return 0;
}

static int
process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
		       int cpu, int thread,
		       struct perf_counts_values *count)
{
	struct perf_counts_values *aggr = &evsel->counts->aggr;
	static struct perf_counts_values zero;
	bool skip = false;

	if (check_per_pkg(evsel, count, cpu, &skip)) {
		pr_err("failed to read per-pkg counter\n");
		return -1;
	}

	if (skip)
		count = &zero;

	switch (config->aggr_mode) {
	case AGGR_THREAD:
	case AGGR_CORE:
	case AGGR_DIE:
	case AGGR_SOCKET:
	case AGGR_NONE:
		if (!evsel->snapshot)
			perf_evsel__compute_deltas(evsel, cpu, thread, count);
		perf_counts_values__scale(count, config->scale, NULL);
		if ((config->aggr_mode == AGGR_NONE) && (!evsel->percore)) {
			perf_stat__update_shadow_stats(evsel, count->val,
						       cpu, &rt_stat);
		}

		if (config->aggr_mode == AGGR_THREAD) {
			if (config->stats)
				perf_stat__update_shadow_stats(evsel,
					count->val, 0, &config->stats[thread]);
			else
				perf_stat__update_shadow_stats(evsel,
					count->val, 0, &rt_stat);
		}
		break;
	case AGGR_GLOBAL:
		aggr->val += count->val;
		aggr->ena += count->ena;
		aggr->run += count->run;
	case AGGR_UNSET:
	default:
		break;
	}

	return 0;
}

static int process_counter_maps(struct perf_stat_config *config,
				struct evsel *counter)
{
	int nthreads = perf_thread_map__nr(counter->core.threads);
	int ncpus = perf_evsel__nr_cpus(counter);
	int cpu, thread;

	if (counter->core.system_wide)
		nthreads = 1;

	for (thread = 0; thread < nthreads; thread++) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			if (process_counter_values(config, counter, cpu, thread,
						   perf_counts(counter->counts, cpu, thread)))
				return -1;
		}
	}

	return 0;
}

int perf_stat_process_counter(struct perf_stat_config *config,
			      struct evsel *counter)
{
	struct perf_counts_values *aggr = &counter->counts->aggr;
	struct perf_stat_evsel *ps = counter->stats;
	u64 *count = counter->counts->aggr.values;
	int i, ret;

	aggr->val = aggr->ena = aggr->run = 0;

	/*
	 * We calculate counter's data every interval,
	 * and the display code shows ps->res_stats
	 * avg value. We need to zero the stats for
	 * interval mode, otherwise overall avg running
	 * averages will be shown for each interval.
	 */
	if (config->interval)
		init_stats(ps->res_stats);

	if (counter->per_pkg)
		zero_per_pkg(counter);

	ret = process_counter_maps(config, counter);
	if (ret)
		return ret;

	if (config->aggr_mode != AGGR_GLOBAL)
		return 0;

	if (!counter->snapshot)
		perf_evsel__compute_deltas(counter, -1, -1, aggr);
	perf_counts_values__scale(aggr, config->scale, &counter->counts->scaled);

	for (i = 0; i < 3; i++)
		update_stats(&ps->res_stats[i], count[i]);

	if (verbose > 0) {
		fprintf(config->output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			perf_evsel__name(counter), count[0], count[1], count[2]);
	}

	/*
	 * Save the full runtime - to allow normalization during printout:
	 */
	perf_stat__update_shadow_stats(counter, *count, 0, &rt_stat);

	return 0;
}

int perf_event__process_stat_event(struct perf_session *session,
				   union perf_event *event)
{
	struct perf_counts_values count;
	struct perf_record_stat *st = &event->stat;
	struct evsel *counter;

	count.val = st->val;
	count.ena = st->ena;
	count.run = st->run;

	counter = perf_evlist__id2evsel(session->evlist, st->id);
	if (!counter) {
		pr_err("Failed to resolve counter for stat event.\n");
		return -EINVAL;
	}

	*perf_counts(counter->counts, st->cpu, st->thread) = count;
	counter->supported = true;
	return 0;
}

size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp)
{
	struct perf_record_stat *st = (struct perf_record_stat *)event;
	size_t ret;

	ret  = fprintf(fp, "\n... id %" PRI_lu64 ", cpu %d, thread %d\n",
		       st->id, st->cpu, st->thread);
	ret += fprintf(fp, "... value %" PRI_lu64 ", enabled %" PRI_lu64 ", running %" PRI_lu64 "\n",
		       st->val, st->ena, st->run);

	return ret;
}

size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp)
{
	struct perf_record_stat_round *rd = (struct perf_record_stat_round *)event;
	size_t ret;

	ret = fprintf(fp, "\n... time %" PRI_lu64 ", type %s\n", rd->time,
		      rd->type == PERF_STAT_ROUND_TYPE__FINAL ? "FINAL" : "INTERVAL");

	return ret;
}

size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp)
{
	struct perf_stat_config sc;
	size_t ret;

	perf_event__read_stat_config(&sc, &event->stat_config);

	ret  = fprintf(fp, "\n");
	ret += fprintf(fp, "... aggr_mode %d\n", sc.aggr_mode);
	ret += fprintf(fp, "... scale     %d\n", sc.scale);
	ret += fprintf(fp, "... interval  %u\n", sc.interval);

	return ret;
}

int create_perf_stat_counter(struct evsel *evsel,
			     struct perf_stat_config *config,
			     struct target *target)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	struct evsel *leader = evsel->leader;

	attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			    PERF_FORMAT_TOTAL_TIME_RUNNING;

	/*
	 * The event is part of non trivial group, let's enable
	 * the group read (for leader) and ID retrieval for all
	 * members.
	 */
	if (leader->core.nr_members > 1)
		attr->read_format |= PERF_FORMAT_ID|PERF_FORMAT_GROUP;

	attr->inherit = !config->no_inherit;

	/*
	 * Some events get initialized with sample_(period/type) set,
	 * like tracepoints. Clear it up for counting.
	 */
	attr->sample_period = 0;

	if (config->identifier)
		attr->sample_type = PERF_SAMPLE_IDENTIFIER;

	/*
	 * Disabling all counters initially, they will be enabled
	 * either manually by us or by kernel via enable_on_exec
	 * set later.
	 */
	if (perf_evsel__is_group_leader(evsel)) {
		attr->disabled = 1;

		/*
		 * In case of initial_delay we enable tracee
		 * events manually.
		 */
		if (target__none(target) && !config->initial_delay)
			attr->enable_on_exec = 1;
	}

	if (target__has_cpu(target) && !target__has_per_thread(target))
		return perf_evsel__open_per_cpu(evsel, evsel__cpus(evsel));

	return perf_evsel__open_per_thread(evsel, evsel->core.threads);
}
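
/*
 * Hedged sketch, after perf_event_open(2): with the read_format set up
 * above (TOTAL_TIME_ENABLED | TOTAL_TIME_RUNNING, plus ID | GROUP for
 * non-trivial groups), a read() on the group leader fills a buffer
 * shaped roughly like:
 */
struct group_read_buf {
	u64 nr;			/* number of events in the group */
	u64 time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
	u64 time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING */
	struct {
		u64 value;	/* raw counter value */
		u64 id;		/* PERF_FORMAT_ID: maps back to an evsel */
	} cnt[];		/* one entry per group member */
};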