Linux Audio

Check our new training course

Loading...
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2017, Intel Corporation.
   4 */
   5
   6/* Manage metrics and groups of metrics from JSON files */
   7
   8#include "metricgroup.h"
   9#include "debug.h"
  10#include "evlist.h"
  11#include "evsel.h"
  12#include "strbuf.h"
  13#include "pmu.h"
  14#include "pmus.h"
  15#include "print-events.h"
  16#include "smt.h"
  17#include "expr.h"
  18#include "rblist.h"
  19#include <string.h>
  20#include <errno.h>
  21#include "strlist.h"
  22#include <assert.h>
  23#include <linux/ctype.h>
  24#include <linux/list_sort.h>
  25#include <linux/string.h>
  26#include <linux/zalloc.h>
  27#include <perf/cpumap.h>
  28#include <subcmd/parse-options.h>
  29#include <api/fs/fs.h>
  30#include "util.h"
  31#include <asm/bug.h>
  32#include "cgroup.h"
  33#include "util/hashmap.h"
  34
  35struct metric_event *metricgroup__lookup(struct rblist *metric_events,
  36					 struct evsel *evsel,
  37					 bool create)
  38{
  39	struct rb_node *nd;
  40	struct metric_event me = {
  41		.evsel = evsel
  42	};
  43
  44	if (!metric_events)
  45		return NULL;
  46
  47	nd = rblist__find(metric_events, &me);
  48	if (nd)
  49		return container_of(nd, struct metric_event, nd);
  50	if (create) {
  51		rblist__add_node(metric_events, &me);
  52		nd = rblist__find(metric_events, &me);
  53		if (nd)
  54			return container_of(nd, struct metric_event, nd);
  55	}
  56	return NULL;
  57}
  58
  59static int metric_event_cmp(struct rb_node *rb_node, const void *entry)
  60{
  61	struct metric_event *a = container_of(rb_node,
  62					      struct metric_event,
  63					      nd);
  64	const struct metric_event *b = entry;
  65
  66	if (a->evsel == b->evsel)
  67		return 0;
  68	if ((char *)a->evsel < (char *)b->evsel)
  69		return -1;
  70	return +1;
  71}
  72
  73static struct rb_node *metric_event_new(struct rblist *rblist __maybe_unused,
  74					const void *entry)
  75{
  76	struct metric_event *me = malloc(sizeof(struct metric_event));
  77
  78	if (!me)
  79		return NULL;
  80	memcpy(me, entry, sizeof(struct metric_event));
  81	me->evsel = ((struct metric_event *)entry)->evsel;
  82	me->is_default = false;
  83	INIT_LIST_HEAD(&me->head);
  84	return &me->nd;
  85}
  86
  87static void metric_event_delete(struct rblist *rblist __maybe_unused,
  88				struct rb_node *rb_node)
  89{
  90	struct metric_event *me = container_of(rb_node, struct metric_event, nd);
  91	struct metric_expr *expr, *tmp;
  92
  93	list_for_each_entry_safe(expr, tmp, &me->head, nd) {
  94		zfree(&expr->metric_name);
  95		zfree(&expr->metric_refs);
  96		zfree(&expr->metric_events);
  97		free(expr);
  98	}
  99
 100	free(me);
 101}
 102
 103static void metricgroup__rblist_init(struct rblist *metric_events)
 104{
 105	rblist__init(metric_events);
 106	metric_events->node_cmp = metric_event_cmp;
 107	metric_events->node_new = metric_event_new;
 108	metric_events->node_delete = metric_event_delete;
 109}
 110
 111void metricgroup__rblist_exit(struct rblist *metric_events)
 112{
 113	rblist__exit(metric_events);
 114}
 115
 116/**
 117 * The metric under construction. The data held here will be placed in a
 118 * metric_expr.
 119 */
 120struct metric {
 121	struct list_head nd;
 122	/**
 123	 * The expression parse context importantly holding the IDs contained
 124	 * within the expression.
 125	 */
 126	struct expr_parse_ctx *pctx;
 127	const char *pmu;
 128	/** The name of the metric such as "IPC". */
 129	const char *metric_name;
 130	/** Modifier on the metric such as "u" or NULL for none. */
 131	const char *modifier;
 132	/** The expression to parse, for example, "instructions/cycles". */
 133	const char *metric_expr;
 134	/** Optional threshold expression where zero value is green, otherwise red. */
 135	const char *metric_threshold;
 136	/**
 137	 * The "ScaleUnit" that scales and adds a unit to the metric during
 138	 * output.
 139	 */
 140	const char *metric_unit;
 141	/**
 142	 * Optional name of the metric group reported
 143	 * if the Default metric group is being processed.
 144	 */
 145	const char *default_metricgroup_name;
 146	/** Optional null terminated array of referenced metrics. */
 147	struct metric_ref *metric_refs;
 148	/**
 149	 * Should events of the metric be grouped?
 
 150	 */
 151	bool group_events;
 152	/**
 153	 * Parsed events for the metric. Optional as events may be taken from a
 154	 * different metric whose group contains all the IDs necessary for this
 155	 * one.
 156	 */
 157	struct evlist *evlist;
 158};
 159
 160static void metric__watchdog_constraint_hint(const char *name, bool foot)
 161{
 162	static bool violate_nmi_constraint;
 163
 164	if (!foot) {
 165		pr_warning("Not grouping metric %s's events.\n", name);
 166		violate_nmi_constraint = true;
 167		return;
 168	}
 169
 170	if (!violate_nmi_constraint)
 171		return;
 172
 173	pr_warning("Try disabling the NMI watchdog to comply NO_NMI_WATCHDOG metric constraint:\n"
 174		   "    echo 0 > /proc/sys/kernel/nmi_watchdog\n"
 175		   "    perf stat ...\n"
 176		   "    echo 1 > /proc/sys/kernel/nmi_watchdog\n");
 177}
 178
 179static bool metric__group_events(const struct pmu_metric *pm)
 180{
 181	switch (pm->event_grouping) {
 182	case MetricNoGroupEvents:
 183		return false;
 184	case MetricNoGroupEventsNmi:
 185		if (!sysctl__nmi_watchdog_enabled())
 186			return true;
 187		metric__watchdog_constraint_hint(pm->metric_name, /*foot=*/false);
 188		return false;
 189	case MetricNoGroupEventsSmt:
 190		return !smt_on();
 191	case MetricGroupEvents:
 192	default:
 193		return true;
 194	}
 
 
 195}
 196
 197static void metric__free(struct metric *m)
 198{
 199	if (!m)
 200		return;
 201
 202	zfree(&m->metric_refs);
 203	expr__ctx_free(m->pctx);
 204	zfree(&m->modifier);
 205	evlist__delete(m->evlist);
 206	free(m);
 207}
 208
 209static struct metric *metric__new(const struct pmu_metric *pm,
 210				  const char *modifier,
 211				  bool metric_no_group,
 212				  int runtime,
 213				  const char *user_requested_cpu_list,
 214				  bool system_wide)
 215{
 216	struct metric *m;
 217
 218	m = zalloc(sizeof(*m));
 219	if (!m)
 220		return NULL;
 221
 222	m->pctx = expr__ctx_new();
 223	if (!m->pctx)
 224		goto out_err;
 225
 226	m->pmu = pm->pmu ?: "cpu";
 227	m->metric_name = pm->metric_name;
 228	m->default_metricgroup_name = pm->default_metricgroup_name ?: "";
 229	m->modifier = NULL;
 230	if (modifier) {
 231		m->modifier = strdup(modifier);
 232		if (!m->modifier)
 233			goto out_err;
 234	}
 235	m->metric_expr = pm->metric_expr;
 236	m->metric_threshold = pm->metric_threshold;
 237	m->metric_unit = pm->unit;
 238	m->pctx->sctx.user_requested_cpu_list = NULL;
 239	if (user_requested_cpu_list) {
 240		m->pctx->sctx.user_requested_cpu_list = strdup(user_requested_cpu_list);
 241		if (!m->pctx->sctx.user_requested_cpu_list)
 242			goto out_err;
 243	}
 244	m->pctx->sctx.runtime = runtime;
 245	m->pctx->sctx.system_wide = system_wide;
 246	m->group_events = !metric_no_group && metric__group_events(pm);
 247	m->metric_refs = NULL;
 248	m->evlist = NULL;
 249
 250	return m;
 251out_err:
 252	metric__free(m);
 253	return NULL;
 254}
 255
 256static bool contains_metric_id(struct evsel **metric_events, int num_events,
 257			       const char *metric_id)
 258{
 259	int i;
 260
 261	for (i = 0; i < num_events; i++) {
 262		if (!strcmp(evsel__metric_id(metric_events[i]), metric_id))
 263			return true;
 264	}
 265	return false;
 266}
 267
 268/**
 269 * setup_metric_events - Find a group of events in metric_evlist that correspond
 270 *                       to the IDs from a parsed metric expression.
 271 * @pmu: The PMU for the IDs.
 272 * @ids: the metric IDs to match.
 273 * @metric_evlist: the list of perf events.
 274 * @out_metric_events: holds the created metric events array.
 275 */
 276static int setup_metric_events(const char *pmu, struct hashmap *ids,
 277			       struct evlist *metric_evlist,
 278			       struct evsel ***out_metric_events)
 279{
 280	struct evsel **metric_events;
 281	const char *metric_id;
 282	struct evsel *ev;
 283	size_t ids_size, matched_events, i;
 284	bool all_pmus = !strcmp(pmu, "all") || perf_pmus__num_core_pmus() == 1 || !is_pmu_core(pmu);
 285
 286	*out_metric_events = NULL;
 287	ids_size = hashmap__size(ids);
 288
 289	metric_events = calloc(ids_size + 1, sizeof(void *));
 290	if (!metric_events)
 291		return -ENOMEM;
 292
 293	matched_events = 0;
 294	evlist__for_each_entry(metric_evlist, ev) {
 295		struct expr_id_data *val_ptr;
 296
 297		/* Don't match events for the wrong hybrid PMU. */
 298		if (!all_pmus && ev->pmu_name && evsel__is_hybrid(ev) &&
 299		    strcmp(ev->pmu_name, pmu))
 300			continue;
 301		/*
 302		 * Check for duplicate events with the same name. For
 303		 * example, uncore_imc/cas_count_read/ will turn into 6
 304		 * events per socket on skylakex. Only the first such
 305		 * event is placed in metric_events.
 306		 */
 307		metric_id = evsel__metric_id(ev);
 308		if (contains_metric_id(metric_events, matched_events, metric_id))
 309			continue;
 310		/*
 311		 * Does this event belong to the parse context? For
 312		 * combined or shared groups, this metric may not care
 313		 * about this event.
 314		 */
 315		if (hashmap__find(ids, metric_id, &val_ptr)) {
 316			pr_debug("Matched metric-id %s to %s\n", metric_id, evsel__name(ev));
 317			metric_events[matched_events++] = ev;
 318
 319			if (matched_events >= ids_size)
 320				break;
 321		}
 322	}
 323	if (matched_events < ids_size) {
 324		free(metric_events);
 325		return -EINVAL;
 326	}
 327	for (i = 0; i < ids_size; i++) {
 328		ev = metric_events[i];
 329		ev->collect_stat = true;
 330
 331		/*
 332		 * The metric leader points to the identically named
 333		 * event in metric_events.
 334		 */
 335		ev->metric_leader = ev;
 336		/*
 337		 * Mark two events with identical names in the same
 338		 * group (or globally) as being in use as uncore events
 339		 * may be duplicated for each pmu. Set the metric leader
 340		 * of such events to be the event that appears in
 341		 * metric_events.
 342		 */
 343		metric_id = evsel__metric_id(ev);
 344		evlist__for_each_entry_continue(metric_evlist, ev) {
 345			if (!strcmp(evsel__metric_id(ev), metric_id))
 346				ev->metric_leader = metric_events[i];
 347		}
 348	}
 349	*out_metric_events = metric_events;
 350	return 0;
 351}
 352
 353static bool match_metric(const char *n, const char *list)
 354{
 355	int len;
 356	char *m;
 357
 358	if (!list)
 359		return false;
 360	if (!strcmp(list, "all"))
 361		return true;
 362	if (!n)
 363		return !strcasecmp(list, "No_group");
 364	len = strlen(list);
 365	m = strcasestr(n, list);
 366	if (!m)
 367		return false;
 368	if ((m == n || m[-1] == ';' || m[-1] == ' ') &&
 369	    (m[len] == 0 || m[len] == ';'))
 370		return true;
 371	return false;
 372}
 373
 374static bool match_pm_metric(const struct pmu_metric *pm, const char *pmu, const char *metric)
 375{
 376	const char *pm_pmu = pm->pmu ?: "cpu";
 377
 378	if (strcmp(pmu, "all") && strcmp(pm_pmu, pmu))
 379		return false;
 380
 381	return match_metric(pm->metric_group, metric) ||
 382	       match_metric(pm->metric_name, metric);
 383}
 384
 385/** struct mep - RB-tree node for building printing information. */
 386struct mep {
 387	/** nd - RB-tree element. */
 388	struct rb_node nd;
 389	/** @metric_group: Owned metric group name, separated others with ';'. */
 390	char *metric_group;
 391	const char *metric_name;
 392	const char *metric_desc;
 393	const char *metric_long_desc;
 394	const char *metric_expr;
 395	const char *metric_threshold;
 396	const char *metric_unit;
 397};
 398
 399static int mep_cmp(struct rb_node *rb_node, const void *entry)
 400{
 401	struct mep *a = container_of(rb_node, struct mep, nd);
 402	struct mep *b = (struct mep *)entry;
 403	int ret;
 404
 405	ret = strcmp(a->metric_group, b->metric_group);
 406	if (ret)
 407		return ret;
 408
 409	return strcmp(a->metric_name, b->metric_name);
 410}
 411
 412static struct rb_node *mep_new(struct rblist *rl __maybe_unused, const void *entry)
 413{
 414	struct mep *me = malloc(sizeof(struct mep));
 415
 416	if (!me)
 417		return NULL;
 418
 419	memcpy(me, entry, sizeof(struct mep));
 420	return &me->nd;
 421}
 422
 423static void mep_delete(struct rblist *rl __maybe_unused,
 424		       struct rb_node *nd)
 425{
 426	struct mep *me = container_of(nd, struct mep, nd);
 427
 428	zfree(&me->metric_group);
 429	free(me);
 430}
 431
 432static struct mep *mep_lookup(struct rblist *groups, const char *metric_group,
 433			      const char *metric_name)
 434{
 435	struct rb_node *nd;
 436	struct mep me = {
 437		.metric_group = strdup(metric_group),
 438		.metric_name = metric_name,
 439	};
 440	nd = rblist__find(groups, &me);
 441	if (nd) {
 442		free(me.metric_group);
 443		return container_of(nd, struct mep, nd);
 444	}
 445	rblist__add_node(groups, &me);
 446	nd = rblist__find(groups, &me);
 447	if (nd)
 448		return container_of(nd, struct mep, nd);
 449	return NULL;
 450}
 451
 452static int metricgroup__add_to_mep_groups(const struct pmu_metric *pm,
 453					struct rblist *groups)
 454{
 455	const char *g;
 456	char *omg, *mg;
 457
 458	mg = strdup(pm->metric_group ?: "No_group");
 459	if (!mg)
 460		return -ENOMEM;
 461	omg = mg;
 462	while ((g = strsep(&mg, ";")) != NULL) {
 463		struct mep *me;
 464
 465		g = skip_spaces(g);
 466		if (strlen(g))
 467			me = mep_lookup(groups, g, pm->metric_name);
 468		else
 469			me = mep_lookup(groups, "No_group", pm->metric_name);
 470
 471		if (me) {
 472			me->metric_desc = pm->desc;
 473			me->metric_long_desc = pm->long_desc;
 474			me->metric_expr = pm->metric_expr;
 475			me->metric_threshold = pm->metric_threshold;
 476			me->metric_unit = pm->unit;
 477		}
 478	}
 479	free(omg);
 480
 481	return 0;
 482}
 483
 484struct metricgroup_iter_data {
 485	pmu_metric_iter_fn fn;
 486	void *data;
 487};
 488
 489static int metricgroup__sys_event_iter(const struct pmu_metric *pm,
 490				       const struct pmu_metrics_table *table,
 491				       void *data)
 492{
 493	struct metricgroup_iter_data *d = data;
 494	struct perf_pmu *pmu = NULL;
 495
 496	if (!pm->metric_expr || !pm->compat)
 497		return 0;
 498
 499	while ((pmu = perf_pmus__scan(pmu))) {
 500
 501		if (!pmu->id || !pmu_uncore_identifier_match(pm->compat, pmu->id))
 502			continue;
 503
 504		return d->fn(pm, table, d->data);
 505	}
 506	return 0;
 507}
 508
 509static int metricgroup__add_to_mep_groups_callback(const struct pmu_metric *pm,
 510					const struct pmu_metrics_table *table __maybe_unused,
 511					void *vdata)
 512{
 513	struct rblist *groups = vdata;
 514
 515	return metricgroup__add_to_mep_groups(pm, groups);
 
 
 
 516}
 517
 518void metricgroup__print(const struct print_callbacks *print_cb, void *print_state)
 519{
 520	struct rblist groups;
 521	const struct pmu_metrics_table *table;
 522	struct rb_node *node, *next;
 523
 524	rblist__init(&groups);
 525	groups.node_new = mep_new;
 526	groups.node_cmp = mep_cmp;
 527	groups.node_delete = mep_delete;
 528	table = pmu_metrics_table__find();
 529	if (table) {
 530		pmu_metrics_table__for_each_metric(table,
 531						 metricgroup__add_to_mep_groups_callback,
 532						 &groups);
 533	}
 534	{
 535		struct metricgroup_iter_data data = {
 536			.fn = metricgroup__add_to_mep_groups_callback,
 537			.data = &groups,
 538		};
 539		pmu_for_each_sys_metric(metricgroup__sys_event_iter, &data);
 540	}
 541
 542	for (node = rb_first_cached(&groups.entries); node; node = next) {
 543		struct mep *me = container_of(node, struct mep, nd);
 544
 545		print_cb->print_metric(print_state,
 546				me->metric_group,
 547				me->metric_name,
 548				me->metric_desc,
 549				me->metric_long_desc,
 550				me->metric_expr,
 551				me->metric_threshold,
 552				me->metric_unit);
 553		next = rb_next(node);
 554		rblist__remove_node(&groups, node);
 555	}
 556}
 557
 558static const char *code_characters = ",-=@";
 559
 560static int encode_metric_id(struct strbuf *sb, const char *x)
 561{
 562	char *c;
 563	int ret = 0;
 564
 565	for (; *x; x++) {
 566		c = strchr(code_characters, *x);
 567		if (c) {
 568			ret = strbuf_addch(sb, '!');
 569			if (ret)
 570				break;
 571
 572			ret = strbuf_addch(sb, '0' + (c - code_characters));
 573			if (ret)
 574				break;
 575		} else {
 576			ret = strbuf_addch(sb, *x);
 577			if (ret)
 578				break;
 579		}
 580	}
 581	return ret;
 582}
 583
 584static int decode_metric_id(struct strbuf *sb, const char *x)
 585{
 586	const char *orig = x;
 587	size_t i;
 588	char c;
 589	int ret;
 590
 591	for (; *x; x++) {
 592		c = *x;
 593		if (*x == '!') {
 594			x++;
 595			i = *x - '0';
 596			if (i > strlen(code_characters)) {
 597				pr_err("Bad metric-id encoding in: '%s'", orig);
 598				return -1;
 599			}
 600			c = code_characters[i];
 601		}
 602		ret = strbuf_addch(sb, c);
 603		if (ret)
 604			return ret;
 605	}
 606	return 0;
 607}
 608
 609static int decode_all_metric_ids(struct evlist *perf_evlist, const char *modifier)
 610{
 611	struct evsel *ev;
 612	struct strbuf sb = STRBUF_INIT;
 613	char *cur;
 614	int ret = 0;
 615
 616	evlist__for_each_entry(perf_evlist, ev) {
 617		if (!ev->metric_id)
 618			continue;
 619
 620		ret = strbuf_setlen(&sb, 0);
 621		if (ret)
 622			break;
 623
 624		ret = decode_metric_id(&sb, ev->metric_id);
 625		if (ret)
 626			break;
 627
 628		free((char *)ev->metric_id);
 629		ev->metric_id = strdup(sb.buf);
 630		if (!ev->metric_id) {
 631			ret = -ENOMEM;
 632			break;
 633		}
 634		/*
 635		 * If the name is just the parsed event, use the metric-id to
 636		 * give a more friendly display version.
 637		 */
 638		if (strstr(ev->name, "metric-id=")) {
 639			bool has_slash = false;
 640
 641			zfree(&ev->name);
 642			for (cur = strchr(sb.buf, '@') ; cur; cur = strchr(++cur, '@')) {
 643				*cur = '/';
 644				has_slash = true;
 645			}
 646
 647			if (modifier) {
 648				if (!has_slash && !strchr(sb.buf, ':')) {
 649					ret = strbuf_addch(&sb, ':');
 650					if (ret)
 651						break;
 652				}
 653				ret = strbuf_addstr(&sb, modifier);
 654				if (ret)
 655					break;
 656			}
 657			ev->name = strdup(sb.buf);
 658			if (!ev->name) {
 659				ret = -ENOMEM;
 660				break;
 661			}
 662		}
 663	}
 664	strbuf_release(&sb);
 665	return ret;
 666}
 667
 668static int metricgroup__build_event_string(struct strbuf *events,
 669					   const struct expr_parse_ctx *ctx,
 670					   const char *modifier,
 671					   bool group_events)
 672{
 673	struct hashmap_entry *cur;
 674	size_t bkt;
 675	bool no_group = true, has_tool_events = false;
 676	bool tool_events[PERF_TOOL_MAX] = {false};
 677	int ret = 0;
 678
 679#define RETURN_IF_NON_ZERO(x) do { if (x) return x; } while (0)
 680
 681	hashmap__for_each_entry(ctx->ids, cur, bkt) {
 682		const char *sep, *rsep, *id = cur->pkey;
 683		enum perf_tool_event ev;
 684
 685		pr_debug("found event %s\n", id);
 686
 687		/* Always move tool events outside of the group. */
 688		ev = perf_tool_event__from_str(id);
 689		if (ev != PERF_TOOL_NONE) {
 690			has_tool_events = true;
 691			tool_events[ev] = true;
 692			continue;
 693		}
 694		/* Separate events with commas and open the group if necessary. */
 695		if (no_group) {
 696			if (group_events) {
 697				ret = strbuf_addch(events, '{');
 698				RETURN_IF_NON_ZERO(ret);
 699			}
 700
 701			no_group = false;
 702		} else {
 703			ret = strbuf_addch(events, ',');
 704			RETURN_IF_NON_ZERO(ret);
 705		}
 706		/*
 707		 * Encode the ID as an event string. Add a qualifier for
 708		 * metric_id that is the original name except with characters
 709		 * that parse-events can't parse replaced. For example,
 710		 * 'msr@tsc@' gets added as msr/tsc,metric-id=msr!3tsc!3/
 711		 */
 712		sep = strchr(id, '@');
 713		if (sep != NULL) {
 714			ret = strbuf_add(events, id, sep - id);
 715			RETURN_IF_NON_ZERO(ret);
 716			ret = strbuf_addch(events, '/');
 717			RETURN_IF_NON_ZERO(ret);
 718			rsep = strrchr(sep, '@');
 719			ret = strbuf_add(events, sep + 1, rsep - sep - 1);
 720			RETURN_IF_NON_ZERO(ret);
 721			ret = strbuf_addstr(events, ",metric-id=");
 722			RETURN_IF_NON_ZERO(ret);
 723			sep = rsep;
 724		} else {
 725			sep = strchr(id, ':');
 726			if (sep != NULL) {
 727				ret = strbuf_add(events, id, sep - id);
 728				RETURN_IF_NON_ZERO(ret);
 729			} else {
 730				ret = strbuf_addstr(events, id);
 731				RETURN_IF_NON_ZERO(ret);
 732			}
 733			ret = strbuf_addstr(events, "/metric-id=");
 734			RETURN_IF_NON_ZERO(ret);
 735		}
 736		ret = encode_metric_id(events, id);
 737		RETURN_IF_NON_ZERO(ret);
 738		ret = strbuf_addstr(events, "/");
 739		RETURN_IF_NON_ZERO(ret);
 740
 741		if (sep != NULL) {
 742			ret = strbuf_addstr(events, sep + 1);
 743			RETURN_IF_NON_ZERO(ret);
 744		}
 745		if (modifier) {
 746			ret = strbuf_addstr(events, modifier);
 747			RETURN_IF_NON_ZERO(ret);
 748		}
 749	}
 750	if (!no_group && group_events) {
 751		ret = strbuf_addf(events, "}:W");
 752		RETURN_IF_NON_ZERO(ret);
 753	}
 754	if (has_tool_events) {
 755		int i;
 756
 757		perf_tool_event__for_each_event(i) {
 758			if (tool_events[i]) {
 759				if (!no_group) {
 760					ret = strbuf_addch(events, ',');
 761					RETURN_IF_NON_ZERO(ret);
 762				}
 763				no_group = false;
 764				ret = strbuf_addstr(events, perf_tool_event__to_str(i));
 765				RETURN_IF_NON_ZERO(ret);
 766			}
 767		}
 768	}
 769
 770	return ret;
 771#undef RETURN_IF_NON_ZERO
 772}
 773
 774int __weak arch_get_runtimeparam(const struct pmu_metric *pm __maybe_unused)
 775{
 776	return 1;
 777}
 778
 779/*
 780 * A singly linked list on the stack of the names of metrics being
 781 * processed. Used to identify recursion.
 782 */
 783struct visited_metric {
 784	const char *name;
 785	const struct visited_metric *parent;
 786};
 787
 788struct metricgroup_add_iter_data {
 789	struct list_head *metric_list;
 790	const char *pmu;
 791	const char *metric_name;
 792	const char *modifier;
 793	int *ret;
 794	bool *has_match;
 795	bool metric_no_group;
 796	bool metric_no_threshold;
 797	const char *user_requested_cpu_list;
 798	bool system_wide;
 799	struct metric *root_metric;
 800	const struct visited_metric *visited;
 801	const struct pmu_metrics_table *table;
 802};
 803
 804static bool metricgroup__find_metric(const char *pmu,
 805				     const char *metric,
 806				     const struct pmu_metrics_table *table,
 807				     struct pmu_metric *pm);
 808
 809static int add_metric(struct list_head *metric_list,
 810		      const struct pmu_metric *pm,
 811		      const char *modifier,
 812		      bool metric_no_group,
 813		      bool metric_no_threshold,
 814		      const char *user_requested_cpu_list,
 815		      bool system_wide,
 816		      struct metric *root_metric,
 817		      const struct visited_metric *visited,
 818		      const struct pmu_metrics_table *table);
 819
 820/**
 821 * resolve_metric - Locate metrics within the root metric and recursively add
 822 *                    references to them.
 823 * @metric_list: The list the metric is added to.
 824 * @pmu: The PMU name to resolve metrics on, or "all" for all PMUs.
 825 * @modifier: if non-null event modifiers like "u".
 826 * @metric_no_group: Should events written to events be grouped "{}" or
 827 *                   global. Grouping is the default but due to multiplexing the
 828 *                   user may override.
 829 * @user_requested_cpu_list: Command line specified CPUs to record on.
 830 * @system_wide: Are events for all processes recorded.
 831 * @root_metric: Metrics may reference other metrics to form a tree. In this
 832 *               case the root_metric holds all the IDs and a list of referenced
 833 *               metrics. When adding a root this argument is NULL.
 834 * @visited: A singly linked list of metric names being added that is used to
 835 *           detect recursion.
 836 * @table: The table that is searched for metrics, most commonly the table for the
 837 *       architecture perf is running upon.
 838 */
 839static int resolve_metric(struct list_head *metric_list,
 840			  const char *pmu,
 841			  const char *modifier,
 842			  bool metric_no_group,
 843			  bool metric_no_threshold,
 844			  const char *user_requested_cpu_list,
 845			  bool system_wide,
 846			  struct metric *root_metric,
 847			  const struct visited_metric *visited,
 848			  const struct pmu_metrics_table *table)
 849{
 850	struct hashmap_entry *cur;
 851	size_t bkt;
 852	struct to_resolve {
 853		/* The metric to resolve. */
 854		struct pmu_metric pm;
 855		/*
 856		 * The key in the IDs map, this may differ from in case,
 857		 * etc. from pm->metric_name.
 858		 */
 859		const char *key;
 860	} *pending = NULL;
 861	int i, ret = 0, pending_cnt = 0;
 862
 863	/*
 864	 * Iterate all the parsed IDs and if there's a matching metric and it to
 865	 * the pending array.
 866	 */
 867	hashmap__for_each_entry(root_metric->pctx->ids, cur, bkt) {
 868		struct pmu_metric pm;
 869
 870		if (metricgroup__find_metric(pmu, cur->pkey, table, &pm)) {
 871			pending = realloc(pending,
 872					(pending_cnt + 1) * sizeof(struct to_resolve));
 873			if (!pending)
 874				return -ENOMEM;
 875
 876			memcpy(&pending[pending_cnt].pm, &pm, sizeof(pm));
 877			pending[pending_cnt].key = cur->pkey;
 878			pending_cnt++;
 879		}
 880	}
 881
 882	/* Remove the metric IDs from the context. */
 883	for (i = 0; i < pending_cnt; i++)
 884		expr__del_id(root_metric->pctx, pending[i].key);
 885
 886	/*
 887	 * Recursively add all the metrics, IDs are added to the root metric's
 888	 * context.
 889	 */
 890	for (i = 0; i < pending_cnt; i++) {
 891		ret = add_metric(metric_list, &pending[i].pm, modifier, metric_no_group,
 892				 metric_no_threshold, user_requested_cpu_list, system_wide,
 893				 root_metric, visited, table);
 894		if (ret)
 895			break;
 896	}
 897
 898	free(pending);
 899	return ret;
 900}
 901
 902/**
 903 * __add_metric - Add a metric to metric_list.
 904 * @metric_list: The list the metric is added to.
 905 * @pm: The pmu_metric containing the metric to be added.
 906 * @modifier: if non-null event modifiers like "u".
 907 * @metric_no_group: Should events written to events be grouped "{}" or
 908 *                   global. Grouping is the default but due to multiplexing the
 909 *                   user may override.
 910 * @metric_no_threshold: Should threshold expressions be ignored?
 911 * @runtime: A special argument for the parser only known at runtime.
 912 * @user_requested_cpu_list: Command line specified CPUs to record on.
 913 * @system_wide: Are events for all processes recorded.
 914 * @root_metric: Metrics may reference other metrics to form a tree. In this
 915 *               case the root_metric holds all the IDs and a list of referenced
 916 *               metrics. When adding a root this argument is NULL.
 917 * @visited: A singly linked list of metric names being added that is used to
 918 *           detect recursion.
 919 * @table: The table that is searched for metrics, most commonly the table for the
 920 *       architecture perf is running upon.
 921 */
 922static int __add_metric(struct list_head *metric_list,
 923			const struct pmu_metric *pm,
 924			const char *modifier,
 925			bool metric_no_group,
 926			bool metric_no_threshold,
 927			int runtime,
 928			const char *user_requested_cpu_list,
 929			bool system_wide,
 930			struct metric *root_metric,
 931			const struct visited_metric *visited,
 932			const struct pmu_metrics_table *table)
 933{
 934	const struct visited_metric *vm;
 935	int ret;
 936	bool is_root = !root_metric;
 937	const char *expr;
 938	struct visited_metric visited_node = {
 939		.name = pm->metric_name,
 940		.parent = visited,
 941	};
 942
 943	for (vm = visited; vm; vm = vm->parent) {
 944		if (!strcmp(pm->metric_name, vm->name)) {
 945			pr_err("failed: recursion detected for %s\n", pm->metric_name);
 946			return -1;
 947		}
 948	}
 949
 950	if (is_root) {
 951		/*
 952		 * This metric is the root of a tree and may reference other
 953		 * metrics that are added recursively.
 954		 */
 955		root_metric = metric__new(pm, modifier, metric_no_group, runtime,
 956					  user_requested_cpu_list, system_wide);
 957		if (!root_metric)
 958			return -ENOMEM;
 959
 960	} else {
 961		int cnt = 0;
 962
 963		/*
 964		 * This metric was referenced in a metric higher in the
 965		 * tree. Check if the same metric is already resolved in the
 966		 * metric_refs list.
 967		 */
 968		if (root_metric->metric_refs) {
 969			for (; root_metric->metric_refs[cnt].metric_name; cnt++) {
 970				if (!strcmp(pm->metric_name,
 971					    root_metric->metric_refs[cnt].metric_name))
 972					return 0;
 973			}
 974		}
 975
 976		/* Create reference. Need space for the entry and the terminator. */
 977		root_metric->metric_refs = realloc(root_metric->metric_refs,
 978						(cnt + 2) * sizeof(struct metric_ref));
 979		if (!root_metric->metric_refs)
 980			return -ENOMEM;
 981
 982		/*
 983		 * Intentionally passing just const char pointers,
 984		 * from 'pe' object, so they never go away. We don't
 985		 * need to change them, so there's no need to create
 986		 * our own copy.
 987		 */
 988		root_metric->metric_refs[cnt].metric_name = pm->metric_name;
 989		root_metric->metric_refs[cnt].metric_expr = pm->metric_expr;
 990
 991		/* Null terminate array. */
 992		root_metric->metric_refs[cnt+1].metric_name = NULL;
 993		root_metric->metric_refs[cnt+1].metric_expr = NULL;
 994	}
 995
 996	/*
 997	 * For both the parent and referenced metrics, we parse
 998	 * all the metric's IDs and add it to the root context.
 999	 */
1000	ret = 0;
1001	expr = pm->metric_expr;
1002	if (is_root && pm->metric_threshold) {
1003		/*
1004		 * Threshold expressions are built off the actual metric. Switch
1005		 * to use that in case of additional necessary events. Change
1006		 * the visited node name to avoid this being flagged as
1007		 * recursion. If the threshold events are disabled, just use the
1008		 * metric's name as a reference. This allows metric threshold
1009		 * computation if there are sufficient events.
1010		 */
1011		assert(strstr(pm->metric_threshold, pm->metric_name));
1012		expr = metric_no_threshold ? pm->metric_name : pm->metric_threshold;
1013		visited_node.name = "__threshold__";
1014	}
1015	if (expr__find_ids(expr, NULL, root_metric->pctx) < 0) {
1016		/* Broken metric. */
1017		ret = -EINVAL;
1018	}
1019	if (!ret) {
1020		/* Resolve referenced metrics. */
1021		const char *pmu = pm->pmu ?: "cpu";
1022
1023		ret = resolve_metric(metric_list, pmu, modifier, metric_no_group,
1024				     metric_no_threshold, user_requested_cpu_list,
1025				     system_wide, root_metric, &visited_node,
1026				     table);
1027	}
 
1028	if (ret) {
1029		if (is_root)
1030			metric__free(root_metric);
1031
1032	} else if (is_root)
1033		list_add(&root_metric->nd, metric_list);
1034
1035	return ret;
1036}
1037
1038struct metricgroup__find_metric_data {
1039	const char *pmu;
1040	const char *metric;
1041	struct pmu_metric *pm;
1042};
1043
1044static int metricgroup__find_metric_callback(const struct pmu_metric *pm,
1045					     const struct pmu_metrics_table *table  __maybe_unused,
1046					     void *vdata)
1047{
1048	struct metricgroup__find_metric_data *data = vdata;
1049	const char *pm_pmu = pm->pmu ?: "cpu";
1050
1051	if (strcmp(data->pmu, "all") && strcmp(pm_pmu, data->pmu))
1052		return 0;
1053
1054	if (!match_metric(pm->metric_name, data->metric))
1055		return 0;
1056
1057	memcpy(data->pm, pm, sizeof(*pm));
1058	return 1;
1059}
1060
1061static bool metricgroup__find_metric(const char *pmu,
1062				     const char *metric,
1063				     const struct pmu_metrics_table *table,
1064				     struct pmu_metric *pm)
1065{
1066	struct metricgroup__find_metric_data data = {
1067		.pmu = pmu,
1068		.metric = metric,
1069		.pm = pm,
1070	};
1071
1072	return pmu_metrics_table__for_each_metric(table, metricgroup__find_metric_callback, &data)
1073		? true : false;
1074}
1075
1076static int add_metric(struct list_head *metric_list,
1077		      const struct pmu_metric *pm,
1078		      const char *modifier,
1079		      bool metric_no_group,
1080		      bool metric_no_threshold,
1081		      const char *user_requested_cpu_list,
1082		      bool system_wide,
1083		      struct metric *root_metric,
1084		      const struct visited_metric *visited,
1085		      const struct pmu_metrics_table *table)
1086{
1087	int ret = 0;
1088
1089	pr_debug("metric expr %s for %s\n", pm->metric_expr, pm->metric_name);
1090
1091	if (!strstr(pm->metric_expr, "?")) {
1092		ret = __add_metric(metric_list, pm, modifier, metric_no_group,
1093				   metric_no_threshold, 0, user_requested_cpu_list,
1094				   system_wide, root_metric, visited, table);
1095	} else {
1096		int j, count;
1097
1098		count = arch_get_runtimeparam(pm);
1099
1100		/* This loop is added to create multiple
1101		 * events depend on count value and add
1102		 * those events to metric_list.
1103		 */
1104
1105		for (j = 0; j < count && !ret; j++)
1106			ret = __add_metric(metric_list, pm, modifier, metric_no_group,
1107					   metric_no_threshold, j, user_requested_cpu_list,
1108					   system_wide, root_metric, visited, table);
1109	}
1110
1111	return ret;
1112}
1113
1114static int metricgroup__add_metric_sys_event_iter(const struct pmu_metric *pm,
1115					const struct pmu_metrics_table *table __maybe_unused,
1116					void *data)
1117{
1118	struct metricgroup_add_iter_data *d = data;
1119	int ret;
1120
1121	if (!match_pm_metric(pm, d->pmu, d->metric_name))
1122		return 0;
1123
1124	ret = add_metric(d->metric_list, pm, d->modifier, d->metric_no_group,
1125			 d->metric_no_threshold, d->user_requested_cpu_list,
1126			 d->system_wide, d->root_metric, d->visited, d->table);
1127	if (ret)
1128		goto out;
1129
1130	*(d->has_match) = true;
1131
1132out:
1133	*(d->ret) = ret;
1134	return ret;
1135}
1136
1137/**
1138 * metric_list_cmp - list_sort comparator that sorts metrics with more events to
1139 *                   the front. tool events are excluded from the count.
1140 */
1141static int metric_list_cmp(void *priv __maybe_unused, const struct list_head *l,
1142			   const struct list_head *r)
1143{
1144	const struct metric *left = container_of(l, struct metric, nd);
1145	const struct metric *right = container_of(r, struct metric, nd);
1146	struct expr_id_data *data;
1147	int i, left_count, right_count;
1148
1149	left_count = hashmap__size(left->pctx->ids);
1150	perf_tool_event__for_each_event(i) {
1151		if (!expr__get_id(left->pctx, perf_tool_event__to_str(i), &data))
1152			left_count--;
1153	}
1154
1155	right_count = hashmap__size(right->pctx->ids);
1156	perf_tool_event__for_each_event(i) {
1157		if (!expr__get_id(right->pctx, perf_tool_event__to_str(i), &data))
1158			right_count--;
1159	}
1160
1161	return right_count - left_count;
1162}
1163
1164/**
1165 * default_metricgroup_cmp - Implements complex key for the Default metricgroup
1166 *			     that first sorts by default_metricgroup_name, then
1167 *			     metric_name.
1168 */
1169static int default_metricgroup_cmp(void *priv __maybe_unused,
1170				   const struct list_head *l,
1171				   const struct list_head *r)
1172{
1173	const struct metric *left = container_of(l, struct metric, nd);
1174	const struct metric *right = container_of(r, struct metric, nd);
1175	int diff = strcmp(right->default_metricgroup_name, left->default_metricgroup_name);
1176
1177	if (diff)
1178		return diff;
1179
1180	return strcmp(right->metric_name, left->metric_name);
1181}
1182
1183struct metricgroup__add_metric_data {
1184	struct list_head *list;
1185	const char *pmu;
1186	const char *metric_name;
1187	const char *modifier;
1188	const char *user_requested_cpu_list;
1189	bool metric_no_group;
1190	bool metric_no_threshold;
1191	bool system_wide;
1192	bool has_match;
1193};
1194
1195static int metricgroup__add_metric_callback(const struct pmu_metric *pm,
1196					    const struct pmu_metrics_table *table,
1197					    void *vdata)
1198{
1199	struct metricgroup__add_metric_data *data = vdata;
1200	int ret = 0;
1201
1202	if (pm->metric_expr && match_pm_metric(pm, data->pmu, data->metric_name)) {
1203		bool metric_no_group = data->metric_no_group ||
1204			match_metric(pm->metricgroup_no_group, data->metric_name);
1205
1206		data->has_match = true;
1207		ret = add_metric(data->list, pm, data->modifier, metric_no_group,
1208				 data->metric_no_threshold, data->user_requested_cpu_list,
1209				 data->system_wide, /*root_metric=*/NULL,
1210				 /*visited_metrics=*/NULL, table);
1211	}
1212	return ret;
1213}
1214
1215/**
1216 * metricgroup__add_metric - Find and add a metric, or a metric group.
1217 * @pmu: The PMU name to search for metrics on, or "all" for all PMUs.
1218 * @metric_name: The name of the metric or metric group. For example, "IPC"
1219 *               could be the name of a metric and "TopDownL1" the name of a
1220 *               metric group.
1221 * @modifier: if non-null event modifiers like "u".
1222 * @metric_no_group: Should events written to events be grouped "{}" or
1223 *                   global. Grouping is the default but due to multiplexing the
1224 *                   user may override.
1225 * @user_requested_cpu_list: Command line specified CPUs to record on.
1226 * @system_wide: Are events for all processes recorded.
1227 * @metric_list: The list that the metric or metric group are added to.
1228 * @table: The table that is searched for metrics, most commonly the table for the
1229 *       architecture perf is running upon.
1230 */
1231static int metricgroup__add_metric(const char *pmu, const char *metric_name, const char *modifier,
1232				   bool metric_no_group, bool metric_no_threshold,
1233				   const char *user_requested_cpu_list,
1234				   bool system_wide,
1235				   struct list_head *metric_list,
1236				   const struct pmu_metrics_table *table)
1237{
1238	LIST_HEAD(list);
1239	int ret;
1240	bool has_match = false;
1241
1242	{
1243		struct metricgroup__add_metric_data data = {
1244			.list = &list,
1245			.pmu = pmu,
1246			.metric_name = metric_name,
1247			.modifier = modifier,
1248			.metric_no_group = metric_no_group,
1249			.metric_no_threshold = metric_no_threshold,
1250			.user_requested_cpu_list = user_requested_cpu_list,
1251			.system_wide = system_wide,
1252			.has_match = false,
1253		};
1254		/*
1255		 * Iterate over all metrics seeing if metric matches either the
1256		 * name or group. When it does add the metric to the list.
1257		 */
1258		ret = pmu_metrics_table__for_each_metric(table, metricgroup__add_metric_callback,
1259						       &data);
1260		if (ret)
1261			goto out;
1262
1263		has_match = data.has_match;
1264	}
1265	{
1266		struct metricgroup_iter_data data = {
1267			.fn = metricgroup__add_metric_sys_event_iter,
1268			.data = (void *) &(struct metricgroup_add_iter_data) {
1269				.metric_list = &list,
1270				.pmu = pmu,
1271				.metric_name = metric_name,
1272				.modifier = modifier,
1273				.metric_no_group = metric_no_group,
1274				.user_requested_cpu_list = user_requested_cpu_list,
1275				.system_wide = system_wide,
1276				.has_match = &has_match,
1277				.ret = &ret,
1278				.table = table,
1279			},
1280		};
1281
1282		pmu_for_each_sys_metric(metricgroup__sys_event_iter, &data);
1283	}
1284	/* End of pmu events. */
1285	if (!has_match)
1286		ret = -EINVAL;
1287
1288out:
1289	/*
1290	 * add to metric_list so that they can be released
1291	 * even if it's failed
1292	 */
1293	list_splice(&list, metric_list);
1294	return ret;
1295}
1296
1297/**
1298 * metricgroup__add_metric_list - Find and add metrics, or metric groups,
1299 *                                specified in a list.
1300 * @pmu: A pmu to restrict the metrics to, or "all" for all PMUS.
1301 * @list: the list of metrics or metric groups. For example, "IPC,CPI,TopDownL1"
1302 *        would match the IPC and CPI metrics, and TopDownL1 would match all
1303 *        the metrics in the TopDownL1 group.
1304 * @metric_no_group: Should events written to events be grouped "{}" or
1305 *                   global. Grouping is the default but due to multiplexing the
1306 *                   user may override.
1307 * @user_requested_cpu_list: Command line specified CPUs to record on.
1308 * @system_wide: Are events for all processes recorded.
1309 * @metric_list: The list that metrics are added to.
1310 * @table: The table that is searched for metrics, most commonly the table for the
1311 *       architecture perf is running upon.
1312 */
1313static int metricgroup__add_metric_list(const char *pmu, const char *list,
1314					bool metric_no_group,
1315					bool metric_no_threshold,
1316					const char *user_requested_cpu_list,
1317					bool system_wide, struct list_head *metric_list,
1318					const struct pmu_metrics_table *table)
1319{
1320	char *list_itr, *list_copy, *metric_name, *modifier;
1321	int ret, count = 0;
1322
1323	list_copy = strdup(list);
1324	if (!list_copy)
1325		return -ENOMEM;
1326	list_itr = list_copy;
1327
1328	while ((metric_name = strsep(&list_itr, ",")) != NULL) {
1329		modifier = strchr(metric_name, ':');
1330		if (modifier)
1331			*modifier++ = '\0';
1332
1333		ret = metricgroup__add_metric(pmu, metric_name, modifier,
1334					      metric_no_group, metric_no_threshold,
1335					      user_requested_cpu_list,
1336					      system_wide, metric_list, table);
1337		if (ret == -EINVAL)
1338			pr_err("Cannot find metric or group `%s'\n", metric_name);
1339
1340		if (ret)
1341			break;
1342
1343		count++;
1344	}
1345	free(list_copy);
1346
1347	if (!ret) {
1348		/*
1349		 * Warn about nmi_watchdog if any parsed metrics had the
1350		 * NO_NMI_WATCHDOG constraint.
1351		 */
1352		metric__watchdog_constraint_hint(NULL, /*foot=*/true);
1353		/* No metrics. */
1354		if (count == 0)
1355			return -EINVAL;
1356	}
1357	return ret;
1358}
1359
1360static void metricgroup__free_metrics(struct list_head *metric_list)
1361{
1362	struct metric *m, *tmp;
1363
1364	list_for_each_entry_safe (m, tmp, metric_list, nd) {
1365		list_del_init(&m->nd);
1366		metric__free(m);
1367	}
1368}
1369
1370/**
1371 * find_tool_events - Search for the pressence of tool events in metric_list.
1372 * @metric_list: List to take metrics from.
1373 * @tool_events: Array of false values, indices corresponding to tool events set
1374 *               to true if tool event is found.
1375 */
1376static void find_tool_events(const struct list_head *metric_list,
1377			     bool tool_events[PERF_TOOL_MAX])
1378{
1379	struct metric *m;
1380
1381	list_for_each_entry(m, metric_list, nd) {
1382		int i;
1383
1384		perf_tool_event__for_each_event(i) {
1385			struct expr_id_data *data;
1386
1387			if (!tool_events[i] &&
1388			    !expr__get_id(m->pctx, perf_tool_event__to_str(i), &data))
1389				tool_events[i] = true;
1390		}
1391	}
1392}
1393
1394/**
1395 * build_combined_expr_ctx - Make an expr_parse_ctx with all !group_events
1396 *                           metric IDs, as the IDs are held in a set,
1397 *                           duplicates will be removed.
1398 * @metric_list: List to take metrics from.
1399 * @combined: Out argument for result.
1400 */
1401static int build_combined_expr_ctx(const struct list_head *metric_list,
1402				   struct expr_parse_ctx **combined)
1403{
1404	struct hashmap_entry *cur;
1405	size_t bkt;
1406	struct metric *m;
1407	char *dup;
1408	int ret;
1409
1410	*combined = expr__ctx_new();
1411	if (!*combined)
1412		return -ENOMEM;
1413
1414	list_for_each_entry(m, metric_list, nd) {
1415		if (!m->group_events && !m->modifier) {
1416			hashmap__for_each_entry(m->pctx->ids, cur, bkt) {
1417				dup = strdup(cur->pkey);
1418				if (!dup) {
1419					ret = -ENOMEM;
1420					goto err_out;
1421				}
1422				ret = expr__add_id(*combined, dup);
1423				if (ret)
1424					goto err_out;
1425			}
1426		}
1427	}
1428	return 0;
1429err_out:
1430	expr__ctx_free(*combined);
1431	*combined = NULL;
1432	return ret;
1433}
1434
1435/**
1436 * parse_ids - Build the event string for the ids and parse them creating an
1437 *             evlist. The encoded metric_ids are decoded.
1438 * @metric_no_merge: is metric sharing explicitly disabled.
1439 * @fake_pmu: used when testing metrics not supported by the current CPU.
1440 * @ids: the event identifiers parsed from a metric.
1441 * @modifier: any modifiers added to the events.
1442 * @group_events: should events be placed in a weak group.
1443 * @tool_events: entries set true if the tool event of index could be present in
1444 *               the overall list of metrics.
1445 * @out_evlist: the created list of events.
1446 */
1447static int parse_ids(bool metric_no_merge, struct perf_pmu *fake_pmu,
1448		     struct expr_parse_ctx *ids, const char *modifier,
1449		     bool group_events, const bool tool_events[PERF_TOOL_MAX],
1450		     struct evlist **out_evlist)
1451{
1452	struct parse_events_error parse_error;
1453	struct evlist *parsed_evlist;
1454	struct strbuf events = STRBUF_INIT;
1455	int ret;
1456
1457	*out_evlist = NULL;
1458	if (!metric_no_merge || hashmap__size(ids->ids) == 0) {
1459		bool added_event = false;
1460		int i;
1461		/*
1462		 * We may fail to share events between metrics because a tool
1463		 * event isn't present in one metric. For example, a ratio of
1464		 * cache misses doesn't need duration_time but the same events
1465		 * may be used for a misses per second. Events without sharing
1466		 * implies multiplexing, that is best avoided, so place
1467		 * all tool events in every group.
1468		 *
1469		 * Also, there may be no ids/events in the expression parsing
1470		 * context because of constant evaluation, e.g.:
1471		 *    event1 if #smt_on else 0
1472		 * Add a tool event to avoid a parse error on an empty string.
1473		 */
1474		perf_tool_event__for_each_event(i) {
1475			if (tool_events[i]) {
1476				char *tmp = strdup(perf_tool_event__to_str(i));
1477
1478				if (!tmp)
1479					return -ENOMEM;
1480				ids__insert(ids->ids, tmp);
1481				added_event = true;
1482			}
1483		}
1484		if (!added_event && hashmap__size(ids->ids) == 0) {
1485			char *tmp = strdup("duration_time");
1486
1487			if (!tmp)
1488				return -ENOMEM;
1489			ids__insert(ids->ids, tmp);
1490		}
1491	}
1492	ret = metricgroup__build_event_string(&events, ids, modifier,
1493					      group_events);
1494	if (ret)
1495		return ret;
1496
1497	parsed_evlist = evlist__new();
1498	if (!parsed_evlist) {
1499		ret = -ENOMEM;
1500		goto err_out;
1501	}
1502	pr_debug("Parsing metric events '%s'\n", events.buf);
1503	parse_events_error__init(&parse_error);
1504	ret = __parse_events(parsed_evlist, events.buf, /*pmu_filter=*/NULL,
1505			     &parse_error, fake_pmu, /*warn_if_reordered=*/false);
1506	if (ret) {
1507		parse_events_error__print(&parse_error, events.buf);
1508		goto err_out;
1509	}
1510	ret = decode_all_metric_ids(parsed_evlist, modifier);
1511	if (ret)
1512		goto err_out;
1513
1514	*out_evlist = parsed_evlist;
1515	parsed_evlist = NULL;
1516err_out:
1517	parse_events_error__exit(&parse_error);
1518	evlist__delete(parsed_evlist);
1519	strbuf_release(&events);
1520	return ret;
1521}
1522
1523static int parse_groups(struct evlist *perf_evlist,
1524			const char *pmu, const char *str,
1525			bool metric_no_group,
1526			bool metric_no_merge,
1527			bool metric_no_threshold,
1528			const char *user_requested_cpu_list,
1529			bool system_wide,
1530			struct perf_pmu *fake_pmu,
1531			struct rblist *metric_events_list,
1532			const struct pmu_metrics_table *table)
1533{
1534	struct evlist *combined_evlist = NULL;
1535	LIST_HEAD(metric_list);
1536	struct metric *m;
1537	bool tool_events[PERF_TOOL_MAX] = {false};
1538	bool is_default = !strcmp(str, "Default");
1539	int ret;
1540
1541	if (metric_events_list->nr_entries == 0)
1542		metricgroup__rblist_init(metric_events_list);
1543	ret = metricgroup__add_metric_list(pmu, str, metric_no_group, metric_no_threshold,
1544					   user_requested_cpu_list,
1545					   system_wide, &metric_list, table);
1546	if (ret)
1547		goto out;
1548
1549	/* Sort metrics from largest to smallest. */
1550	list_sort(NULL, &metric_list, metric_list_cmp);
1551
1552	if (!metric_no_merge) {
1553		struct expr_parse_ctx *combined = NULL;
1554
1555		find_tool_events(&metric_list, tool_events);
1556
1557		ret = build_combined_expr_ctx(&metric_list, &combined);
1558
1559		if (!ret && combined && hashmap__size(combined->ids)) {
1560			ret = parse_ids(metric_no_merge, fake_pmu, combined,
1561					/*modifier=*/NULL,
1562					/*group_events=*/false,
1563					tool_events,
1564					&combined_evlist);
1565		}
1566		if (combined)
1567			expr__ctx_free(combined);
1568
1569		if (ret)
1570			goto out;
1571	}
1572
1573	if (is_default)
1574		list_sort(NULL, &metric_list, default_metricgroup_cmp);
1575
1576	list_for_each_entry(m, &metric_list, nd) {
1577		struct metric_event *me;
1578		struct evsel **metric_events;
1579		struct evlist *metric_evlist = NULL;
1580		struct metric *n;
1581		struct metric_expr *expr;
1582
1583		if (combined_evlist && !m->group_events) {
1584			metric_evlist = combined_evlist;
1585		} else if (!metric_no_merge) {
1586			/*
1587			 * See if the IDs for this metric are a subset of an
1588			 * earlier metric.
1589			 */
1590			list_for_each_entry(n, &metric_list, nd) {
1591				if (m == n)
1592					break;
1593
1594				if (n->evlist == NULL)
1595					continue;
1596
1597				if ((!m->modifier && n->modifier) ||
1598				    (m->modifier && !n->modifier) ||
1599				    (m->modifier && n->modifier &&
1600					    strcmp(m->modifier, n->modifier)))
1601					continue;
1602
1603				if ((!m->pmu && n->pmu) ||
1604				    (m->pmu && !n->pmu) ||
1605				    (m->pmu && n->pmu && strcmp(m->pmu, n->pmu)))
1606					continue;
1607
1608				if (expr__subset_of_ids(n->pctx, m->pctx)) {
1609					pr_debug("Events in '%s' fully contained within '%s'\n",
1610						 m->metric_name, n->metric_name);
1611					metric_evlist = n->evlist;
1612					break;
1613				}
1614
1615			}
1616		}
1617		if (!metric_evlist) {
1618			ret = parse_ids(metric_no_merge, fake_pmu, m->pctx, m->modifier,
1619					m->group_events, tool_events, &m->evlist);
1620			if (ret)
1621				goto out;
1622
1623			metric_evlist = m->evlist;
1624		}
1625		ret = setup_metric_events(fake_pmu ? "all" : m->pmu, m->pctx->ids,
1626					  metric_evlist, &metric_events);
1627		if (ret) {
1628			pr_err("Cannot resolve IDs for %s: %s\n",
1629				m->metric_name, m->metric_expr);
1630			goto out;
1631		}
1632
1633		me = metricgroup__lookup(metric_events_list, metric_events[0], true);
1634
1635		expr = malloc(sizeof(struct metric_expr));
1636		if (!expr) {
1637			ret = -ENOMEM;
1638			free(metric_events);
1639			goto out;
1640		}
1641
1642		expr->metric_refs = m->metric_refs;
1643		m->metric_refs = NULL;
1644		expr->metric_expr = m->metric_expr;
1645		if (m->modifier) {
1646			char *tmp;
1647
1648			if (asprintf(&tmp, "%s:%s", m->metric_name, m->modifier) < 0)
1649				expr->metric_name = NULL;
1650			else
1651				expr->metric_name = tmp;
1652		} else
1653			expr->metric_name = strdup(m->metric_name);
1654
1655		if (!expr->metric_name) {
1656			ret = -ENOMEM;
1657			free(metric_events);
1658			goto out;
1659		}
1660		expr->metric_threshold = m->metric_threshold;
1661		expr->metric_unit = m->metric_unit;
1662		expr->metric_events = metric_events;
1663		expr->runtime = m->pctx->sctx.runtime;
1664		expr->default_metricgroup_name = m->default_metricgroup_name;
1665		me->is_default = is_default;
1666		list_add(&expr->nd, &me->head);
1667	}
1668
1669
1670	if (combined_evlist) {
1671		evlist__splice_list_tail(perf_evlist, &combined_evlist->core.entries);
1672		evlist__delete(combined_evlist);
1673	}
1674
1675	list_for_each_entry(m, &metric_list, nd) {
1676		if (m->evlist)
1677			evlist__splice_list_tail(perf_evlist, &m->evlist->core.entries);
1678	}
1679
1680out:
1681	metricgroup__free_metrics(&metric_list);
1682	return ret;
1683}
1684
1685int metricgroup__parse_groups(struct evlist *perf_evlist,
1686			      const char *pmu,
1687			      const char *str,
1688			      bool metric_no_group,
1689			      bool metric_no_merge,
1690			      bool metric_no_threshold,
1691			      const char *user_requested_cpu_list,
1692			      bool system_wide,
1693			      struct rblist *metric_events)
1694{
1695	const struct pmu_metrics_table *table = pmu_metrics_table__find();
1696
1697	if (!table)
1698		return -EINVAL;
1699
1700	return parse_groups(perf_evlist, pmu, str, metric_no_group, metric_no_merge,
1701			    metric_no_threshold, user_requested_cpu_list, system_wide,
1702			    /*fake_pmu=*/NULL, metric_events, table);
1703}
1704
1705int metricgroup__parse_groups_test(struct evlist *evlist,
1706				   const struct pmu_metrics_table *table,
1707				   const char *str,
 
 
1708				   struct rblist *metric_events)
1709{
1710	return parse_groups(evlist, "all", str,
1711			    /*metric_no_group=*/false,
1712			    /*metric_no_merge=*/false,
1713			    /*metric_no_threshold=*/false,
1714			    /*user_requested_cpu_list=*/NULL,
1715			    /*system_wide=*/false,
1716			    &perf_pmu__fake, metric_events, table);
1717}
1718
1719struct metricgroup__has_metric_data {
1720	const char *pmu;
1721	const char *metric;
1722};
1723static int metricgroup__has_metric_callback(const struct pmu_metric *pm,
1724					    const struct pmu_metrics_table *table __maybe_unused,
1725					    void *vdata)
1726{
1727	struct metricgroup__has_metric_data *data = vdata;
1728
1729	return match_pm_metric(pm, data->pmu, data->metric) ? 1 : 0;
1730}
1731
1732bool metricgroup__has_metric(const char *pmu, const char *metric)
1733{
1734	const struct pmu_metrics_table *table = pmu_metrics_table__find();
1735	struct metricgroup__has_metric_data data = {
1736		.pmu = pmu,
1737		.metric = metric,
1738	};
1739
1740	if (!table)
1741		return false;
1742
1743	return pmu_metrics_table__for_each_metric(table, metricgroup__has_metric_callback, &data)
1744		? true : false;
1745}
1746
1747static int metricgroup__topdown_max_level_callback(const struct pmu_metric *pm,
1748					    const struct pmu_metrics_table *table __maybe_unused,
1749					    void *data)
1750{
1751	unsigned int *max_level = data;
1752	unsigned int level;
1753	const char *p = strstr(pm->metric_group ?: "", "TopdownL");
1754
1755	if (!p || p[8] == '\0')
1756		return 0;
1757
1758	level = p[8] - '0';
1759	if (level > *max_level)
1760		*max_level = level;
1761
1762	return 0;
1763}
1764
1765unsigned int metricgroups__topdown_max_level(void)
1766{
1767	unsigned int max_level = 0;
1768	const struct pmu_metrics_table *table = pmu_metrics_table__find();
1769
1770	if (!table)
1771		return false;
1772
1773	pmu_metrics_table__for_each_metric(table, metricgroup__topdown_max_level_callback,
1774					  &max_level);
1775	return max_level;
1776}
1777
1778int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
1779				    struct rblist *new_metric_events,
1780				    struct rblist *old_metric_events)
1781{
1782	unsigned int i;
1783
1784	for (i = 0; i < rblist__nr_entries(old_metric_events); i++) {
1785		struct rb_node *nd;
1786		struct metric_event *old_me, *new_me;
1787		struct metric_expr *old_expr, *new_expr;
1788		struct evsel *evsel;
1789		size_t alloc_size;
1790		int idx, nr;
1791
1792		nd = rblist__entry(old_metric_events, i);
1793		old_me = container_of(nd, struct metric_event, nd);
1794
1795		evsel = evlist__find_evsel(evlist, old_me->evsel->core.idx);
1796		if (!evsel)
1797			return -EINVAL;
1798		new_me = metricgroup__lookup(new_metric_events, evsel, true);
1799		if (!new_me)
1800			return -ENOMEM;
1801
1802		pr_debug("copying metric event for cgroup '%s': %s (idx=%d)\n",
1803			 cgrp ? cgrp->name : "root", evsel->name, evsel->core.idx);
1804
1805		list_for_each_entry(old_expr, &old_me->head, nd) {
1806			new_expr = malloc(sizeof(*new_expr));
1807			if (!new_expr)
1808				return -ENOMEM;
1809
1810			new_expr->metric_expr = old_expr->metric_expr;
1811			new_expr->metric_threshold = old_expr->metric_threshold;
1812			new_expr->metric_name = strdup(old_expr->metric_name);
1813			if (!new_expr->metric_name)
1814				return -ENOMEM;
1815
1816			new_expr->metric_unit = old_expr->metric_unit;
1817			new_expr->runtime = old_expr->runtime;
1818
1819			if (old_expr->metric_refs) {
1820				/* calculate number of metric_events */
1821				for (nr = 0; old_expr->metric_refs[nr].metric_name; nr++)
1822					continue;
1823				alloc_size = sizeof(*new_expr->metric_refs);
1824				new_expr->metric_refs = calloc(nr + 1, alloc_size);
1825				if (!new_expr->metric_refs) {
1826					free(new_expr);
1827					return -ENOMEM;
1828				}
1829
1830				memcpy(new_expr->metric_refs, old_expr->metric_refs,
1831				       nr * alloc_size);
1832			} else {
1833				new_expr->metric_refs = NULL;
1834			}
1835
1836			/* calculate number of metric_events */
1837			for (nr = 0; old_expr->metric_events[nr]; nr++)
1838				continue;
1839			alloc_size = sizeof(*new_expr->metric_events);
1840			new_expr->metric_events = calloc(nr + 1, alloc_size);
1841			if (!new_expr->metric_events) {
1842				zfree(&new_expr->metric_refs);
1843				free(new_expr);
1844				return -ENOMEM;
1845			}
1846
1847			/* copy evsel in the same position */
1848			for (idx = 0; idx < nr; idx++) {
1849				evsel = old_expr->metric_events[idx];
1850				evsel = evlist__find_evsel(evlist, evsel->core.idx);
1851				if (evsel == NULL) {
1852					zfree(&new_expr->metric_events);
1853					zfree(&new_expr->metric_refs);
1854					free(new_expr);
1855					return -EINVAL;
1856				}
1857				new_expr->metric_events[idx] = evsel;
1858			}
1859
1860			list_add(&new_expr->nd, &new_me->head);
1861		}
1862	}
1863	return 0;
1864}
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2017, Intel Corporation.
   4 */
   5
   6/* Manage metrics and groups of metrics from JSON files */
   7
   8#include "metricgroup.h"
   9#include "debug.h"
  10#include "evlist.h"
  11#include "evsel.h"
  12#include "strbuf.h"
  13#include "pmu.h"
  14#include "pmu-hybrid.h"
  15#include "print-events.h"
 
  16#include "expr.h"
  17#include "rblist.h"
  18#include <string.h>
  19#include <errno.h>
  20#include "strlist.h"
  21#include <assert.h>
  22#include <linux/ctype.h>
  23#include <linux/list_sort.h>
  24#include <linux/string.h>
  25#include <linux/zalloc.h>
  26#include <perf/cpumap.h>
  27#include <subcmd/parse-options.h>
  28#include <api/fs/fs.h>
  29#include "util.h"
  30#include <asm/bug.h>
  31#include "cgroup.h"
  32#include "util/hashmap.h"
  33
  34struct metric_event *metricgroup__lookup(struct rblist *metric_events,
  35					 struct evsel *evsel,
  36					 bool create)
  37{
  38	struct rb_node *nd;
  39	struct metric_event me = {
  40		.evsel = evsel
  41	};
  42
  43	if (!metric_events)
  44		return NULL;
  45
  46	nd = rblist__find(metric_events, &me);
  47	if (nd)
  48		return container_of(nd, struct metric_event, nd);
  49	if (create) {
  50		rblist__add_node(metric_events, &me);
  51		nd = rblist__find(metric_events, &me);
  52		if (nd)
  53			return container_of(nd, struct metric_event, nd);
  54	}
  55	return NULL;
  56}
  57
  58static int metric_event_cmp(struct rb_node *rb_node, const void *entry)
  59{
  60	struct metric_event *a = container_of(rb_node,
  61					      struct metric_event,
  62					      nd);
  63	const struct metric_event *b = entry;
  64
  65	if (a->evsel == b->evsel)
  66		return 0;
  67	if ((char *)a->evsel < (char *)b->evsel)
  68		return -1;
  69	return +1;
  70}
  71
  72static struct rb_node *metric_event_new(struct rblist *rblist __maybe_unused,
  73					const void *entry)
  74{
  75	struct metric_event *me = malloc(sizeof(struct metric_event));
  76
  77	if (!me)
  78		return NULL;
  79	memcpy(me, entry, sizeof(struct metric_event));
  80	me->evsel = ((struct metric_event *)entry)->evsel;
 
  81	INIT_LIST_HEAD(&me->head);
  82	return &me->nd;
  83}
  84
  85static void metric_event_delete(struct rblist *rblist __maybe_unused,
  86				struct rb_node *rb_node)
  87{
  88	struct metric_event *me = container_of(rb_node, struct metric_event, nd);
  89	struct metric_expr *expr, *tmp;
  90
  91	list_for_each_entry_safe(expr, tmp, &me->head, nd) {
  92		free((char *)expr->metric_name);
  93		free(expr->metric_refs);
  94		free(expr->metric_events);
  95		free(expr);
  96	}
  97
  98	free(me);
  99}
 100
 101static void metricgroup__rblist_init(struct rblist *metric_events)
 102{
 103	rblist__init(metric_events);
 104	metric_events->node_cmp = metric_event_cmp;
 105	metric_events->node_new = metric_event_new;
 106	metric_events->node_delete = metric_event_delete;
 107}
 108
 109void metricgroup__rblist_exit(struct rblist *metric_events)
 110{
 111	rblist__exit(metric_events);
 112}
 113
 114/**
 115 * The metric under construction. The data held here will be placed in a
 116 * metric_expr.
 117 */
 118struct metric {
 119	struct list_head nd;
 120	/**
 121	 * The expression parse context importantly holding the IDs contained
 122	 * within the expression.
 123	 */
 124	struct expr_parse_ctx *pctx;
 
 125	/** The name of the metric such as "IPC". */
 126	const char *metric_name;
 127	/** Modifier on the metric such as "u" or NULL for none. */
 128	const char *modifier;
 129	/** The expression to parse, for example, "instructions/cycles". */
 130	const char *metric_expr;
 
 
 131	/**
 132	 * The "ScaleUnit" that scales and adds a unit to the metric during
 133	 * output.
 134	 */
 135	const char *metric_unit;
 
 
 
 
 
 136	/** Optional null terminated array of referenced metrics. */
 137	struct metric_ref *metric_refs;
 138	/**
 139	 * Is there a constraint on the group of events? In which case the
 140	 * events won't be grouped.
 141	 */
 142	bool has_constraint;
 143	/**
 144	 * Parsed events for the metric. Optional as events may be taken from a
 145	 * different metric whose group contains all the IDs necessary for this
 146	 * one.
 147	 */
 148	struct evlist *evlist;
 149};
 150
 151static void metricgroup___watchdog_constraint_hint(const char *name, bool foot)
 152{
 153	static bool violate_nmi_constraint;
 154
 155	if (!foot) {
 156		pr_warning("Splitting metric group %s into standalone metrics.\n", name);
 157		violate_nmi_constraint = true;
 158		return;
 159	}
 160
 161	if (!violate_nmi_constraint)
 162		return;
 163
 164	pr_warning("Try disabling the NMI watchdog to comply NO_NMI_WATCHDOG metric constraint:\n"
 165		   "    echo 0 > /proc/sys/kernel/nmi_watchdog\n"
 166		   "    perf stat ...\n"
 167		   "    echo 1 > /proc/sys/kernel/nmi_watchdog\n");
 168}
 169
 170static bool metricgroup__has_constraint(const struct pmu_event *pe)
 171{
 172	if (!pe->metric_constraint)
 
 
 
 
 
 
 173		return false;
 174
 175	if (!strcmp(pe->metric_constraint, "NO_NMI_WATCHDOG") &&
 176	    sysctl__nmi_watchdog_enabled()) {
 177		metricgroup___watchdog_constraint_hint(pe->metric_name, false);
 178		return true;
 179	}
 180
 181	return false;
 182}
 183
 184static void metric__free(struct metric *m)
 185{
 186	if (!m)
 187		return;
 188
 189	free(m->metric_refs);
 190	expr__ctx_free(m->pctx);
 191	free((char *)m->modifier);
 192	evlist__delete(m->evlist);
 193	free(m);
 194}
 195
 196static struct metric *metric__new(const struct pmu_event *pe,
 197				  const char *modifier,
 198				  bool metric_no_group,
 199				  int runtime,
 200				  const char *user_requested_cpu_list,
 201				  bool system_wide)
 202{
 203	struct metric *m;
 204
 205	m = zalloc(sizeof(*m));
 206	if (!m)
 207		return NULL;
 208
 209	m->pctx = expr__ctx_new();
 210	if (!m->pctx)
 211		goto out_err;
 212
 213	m->metric_name = pe->metric_name;
 
 
 214	m->modifier = NULL;
 215	if (modifier) {
 216		m->modifier = strdup(modifier);
 217		if (!m->modifier)
 218			goto out_err;
 219	}
 220	m->metric_expr = pe->metric_expr;
 221	m->metric_unit = pe->unit;
 
 222	m->pctx->sctx.user_requested_cpu_list = NULL;
 223	if (user_requested_cpu_list) {
 224		m->pctx->sctx.user_requested_cpu_list = strdup(user_requested_cpu_list);
 225		if (!m->pctx->sctx.user_requested_cpu_list)
 226			goto out_err;
 227	}
 228	m->pctx->sctx.runtime = runtime;
 229	m->pctx->sctx.system_wide = system_wide;
 230	m->has_constraint = metric_no_group || metricgroup__has_constraint(pe);
 231	m->metric_refs = NULL;
 232	m->evlist = NULL;
 233
 234	return m;
 235out_err:
 236	metric__free(m);
 237	return NULL;
 238}
 239
 240static bool contains_metric_id(struct evsel **metric_events, int num_events,
 241			       const char *metric_id)
 242{
 243	int i;
 244
 245	for (i = 0; i < num_events; i++) {
 246		if (!strcmp(evsel__metric_id(metric_events[i]), metric_id))
 247			return true;
 248	}
 249	return false;
 250}
 251
 252/**
 253 * setup_metric_events - Find a group of events in metric_evlist that correspond
 254 *                       to the IDs from a parsed metric expression.
 
 255 * @ids: the metric IDs to match.
 256 * @metric_evlist: the list of perf events.
 257 * @out_metric_events: holds the created metric events array.
 258 */
 259static int setup_metric_events(struct hashmap *ids,
 260			       struct evlist *metric_evlist,
 261			       struct evsel ***out_metric_events)
 262{
 263	struct evsel **metric_events;
 264	const char *metric_id;
 265	struct evsel *ev;
 266	size_t ids_size, matched_events, i;
 
 267
 268	*out_metric_events = NULL;
 269	ids_size = hashmap__size(ids);
 270
 271	metric_events = calloc(sizeof(void *), ids_size + 1);
 272	if (!metric_events)
 273		return -ENOMEM;
 274
 275	matched_events = 0;
 276	evlist__for_each_entry(metric_evlist, ev) {
 277		struct expr_id_data *val_ptr;
 278
 
 
 
 
 279		/*
 280		 * Check for duplicate events with the same name. For
 281		 * example, uncore_imc/cas_count_read/ will turn into 6
 282		 * events per socket on skylakex. Only the first such
 283		 * event is placed in metric_events.
 284		 */
 285		metric_id = evsel__metric_id(ev);
 286		if (contains_metric_id(metric_events, matched_events, metric_id))
 287			continue;
 288		/*
 289		 * Does this event belong to the parse context? For
 290		 * combined or shared groups, this metric may not care
 291		 * about this event.
 292		 */
 293		if (hashmap__find(ids, metric_id, &val_ptr)) {
 
 294			metric_events[matched_events++] = ev;
 295
 296			if (matched_events >= ids_size)
 297				break;
 298		}
 299	}
 300	if (matched_events < ids_size) {
 301		free(metric_events);
 302		return -EINVAL;
 303	}
 304	for (i = 0; i < ids_size; i++) {
 305		ev = metric_events[i];
 306		ev->collect_stat = true;
 307
 308		/*
 309		 * The metric leader points to the identically named
 310		 * event in metric_events.
 311		 */
 312		ev->metric_leader = ev;
 313		/*
 314		 * Mark two events with identical names in the same
 315		 * group (or globally) as being in use as uncore events
 316		 * may be duplicated for each pmu. Set the metric leader
 317		 * of such events to be the event that appears in
 318		 * metric_events.
 319		 */
 320		metric_id = evsel__metric_id(ev);
 321		evlist__for_each_entry_continue(metric_evlist, ev) {
 322			if (!strcmp(evsel__metric_id(ev), metric_id))
 323				ev->metric_leader = metric_events[i];
 324		}
 325	}
 326	*out_metric_events = metric_events;
 327	return 0;
 328}
 329
 330static bool match_metric(const char *n, const char *list)
 331{
 332	int len;
 333	char *m;
 334
 335	if (!list)
 336		return false;
 337	if (!strcmp(list, "all"))
 338		return true;
 339	if (!n)
 340		return !strcasecmp(list, "No_group");
 341	len = strlen(list);
 342	m = strcasestr(n, list);
 343	if (!m)
 344		return false;
 345	if ((m == n || m[-1] == ';' || m[-1] == ' ') &&
 346	    (m[len] == 0 || m[len] == ';'))
 347		return true;
 348	return false;
 349}
 350
 351static bool match_pe_metric(const struct pmu_event *pe, const char *metric)
 352{
 353	return match_metric(pe->metric_group, metric) ||
 354	       match_metric(pe->metric_name, metric);
 
 
 
 
 
 355}
 356
 357/** struct mep - RB-tree node for building printing information. */
 358struct mep {
 359	/** nd - RB-tree element. */
 360	struct rb_node nd;
 361	/** @metric_group: Owned metric group name, separated others with ';'. */
 362	char *metric_group;
 363	const char *metric_name;
 364	const char *metric_desc;
 365	const char *metric_long_desc;
 366	const char *metric_expr;
 
 367	const char *metric_unit;
 368};
 369
 370static int mep_cmp(struct rb_node *rb_node, const void *entry)
 371{
 372	struct mep *a = container_of(rb_node, struct mep, nd);
 373	struct mep *b = (struct mep *)entry;
 374	int ret;
 375
 376	ret = strcmp(a->metric_group, b->metric_group);
 377	if (ret)
 378		return ret;
 379
 380	return strcmp(a->metric_name, b->metric_name);
 381}
 382
 383static struct rb_node *mep_new(struct rblist *rl __maybe_unused, const void *entry)
 384{
 385	struct mep *me = malloc(sizeof(struct mep));
 386
 387	if (!me)
 388		return NULL;
 389
 390	memcpy(me, entry, sizeof(struct mep));
 391	return &me->nd;
 392}
 393
 394static void mep_delete(struct rblist *rl __maybe_unused,
 395		       struct rb_node *nd)
 396{
 397	struct mep *me = container_of(nd, struct mep, nd);
 398
 399	zfree(&me->metric_group);
 400	free(me);
 401}
 402
 403static struct mep *mep_lookup(struct rblist *groups, const char *metric_group,
 404			      const char *metric_name)
 405{
 406	struct rb_node *nd;
 407	struct mep me = {
 408		.metric_group = strdup(metric_group),
 409		.metric_name = metric_name,
 410	};
 411	nd = rblist__find(groups, &me);
 412	if (nd) {
 413		free(me.metric_group);
 414		return container_of(nd, struct mep, nd);
 415	}
 416	rblist__add_node(groups, &me);
 417	nd = rblist__find(groups, &me);
 418	if (nd)
 419		return container_of(nd, struct mep, nd);
 420	return NULL;
 421}
 422
 423static int metricgroup__add_to_mep_groups(const struct pmu_event *pe,
 424					struct rblist *groups)
 425{
 426	const char *g;
 427	char *omg, *mg;
 428
 429	mg = strdup(pe->metric_group ?: "No_group");
 430	if (!mg)
 431		return -ENOMEM;
 432	omg = mg;
 433	while ((g = strsep(&mg, ";")) != NULL) {
 434		struct mep *me;
 435
 436		g = skip_spaces(g);
 437		if (strlen(g))
 438			me = mep_lookup(groups, g, pe->metric_name);
 439		else
 440			me = mep_lookup(groups, "No_group", pe->metric_name);
 441
 442		if (me) {
 443			me->metric_desc = pe->desc;
 444			me->metric_long_desc = pe->long_desc;
 445			me->metric_expr = pe->metric_expr;
 446			me->metric_unit = pe->unit;
 
 447		}
 448	}
 449	free(omg);
 450
 451	return 0;
 452}
 453
 454struct metricgroup_iter_data {
 455	pmu_event_iter_fn fn;
 456	void *data;
 457};
 458
 459static int metricgroup__sys_event_iter(const struct pmu_event *pe,
 460				       const struct pmu_events_table *table,
 461				       void *data)
 462{
 463	struct metricgroup_iter_data *d = data;
 464	struct perf_pmu *pmu = NULL;
 465
 466	if (!pe->metric_expr || !pe->compat)
 467		return 0;
 468
 469	while ((pmu = perf_pmu__scan(pmu))) {
 470
 471		if (!pmu->id || strcmp(pmu->id, pe->compat))
 472			continue;
 473
 474		return d->fn(pe, table, d->data);
 475	}
 476	return 0;
 477}
 478
 479static int metricgroup__add_to_mep_groups_callback(const struct pmu_event *pe,
 480						const struct pmu_events_table *table __maybe_unused,
 481						void *vdata)
 482{
 483	struct rblist *groups = vdata;
 484
 485	if (!pe->metric_name)
 486		return 0;
 487
 488	return metricgroup__add_to_mep_groups(pe, groups);
 489}
 490
 491void metricgroup__print(const struct print_callbacks *print_cb, void *print_state)
 492{
 493	struct rblist groups;
 494	const struct pmu_events_table *table;
 495	struct rb_node *node, *next;
 496
 497	rblist__init(&groups);
 498	groups.node_new = mep_new;
 499	groups.node_cmp = mep_cmp;
 500	groups.node_delete = mep_delete;
 501	table = pmu_events_table__find();
 502	if (table) {
 503		pmu_events_table_for_each_event(table,
 504						metricgroup__add_to_mep_groups_callback,
 505						&groups);
 506	}
 507	{
 508		struct metricgroup_iter_data data = {
 509			.fn = metricgroup__add_to_mep_groups_callback,
 510			.data = &groups,
 511		};
 512		pmu_for_each_sys_event(metricgroup__sys_event_iter, &data);
 513	}
 514
 515	for (node = rb_first_cached(&groups.entries); node; node = next) {
 516		struct mep *me = container_of(node, struct mep, nd);
 517
 518		print_cb->print_metric(print_state,
 519				me->metric_group,
 520				me->metric_name,
 521				me->metric_desc,
 522				me->metric_long_desc,
 523				me->metric_expr,
 
 524				me->metric_unit);
 525		next = rb_next(node);
 526		rblist__remove_node(&groups, node);
 527	}
 528}
 529
 530static const char *code_characters = ",-=@";
 531
 532static int encode_metric_id(struct strbuf *sb, const char *x)
 533{
 534	char *c;
 535	int ret = 0;
 536
 537	for (; *x; x++) {
 538		c = strchr(code_characters, *x);
 539		if (c) {
 540			ret = strbuf_addch(sb, '!');
 541			if (ret)
 542				break;
 543
 544			ret = strbuf_addch(sb, '0' + (c - code_characters));
 545			if (ret)
 546				break;
 547		} else {
 548			ret = strbuf_addch(sb, *x);
 549			if (ret)
 550				break;
 551		}
 552	}
 553	return ret;
 554}
 555
 556static int decode_metric_id(struct strbuf *sb, const char *x)
 557{
 558	const char *orig = x;
 559	size_t i;
 560	char c;
 561	int ret;
 562
 563	for (; *x; x++) {
 564		c = *x;
 565		if (*x == '!') {
 566			x++;
 567			i = *x - '0';
 568			if (i > strlen(code_characters)) {
 569				pr_err("Bad metric-id encoding in: '%s'", orig);
 570				return -1;
 571			}
 572			c = code_characters[i];
 573		}
 574		ret = strbuf_addch(sb, c);
 575		if (ret)
 576			return ret;
 577	}
 578	return 0;
 579}
 580
 581static int decode_all_metric_ids(struct evlist *perf_evlist, const char *modifier)
 582{
 583	struct evsel *ev;
 584	struct strbuf sb = STRBUF_INIT;
 585	char *cur;
 586	int ret = 0;
 587
 588	evlist__for_each_entry(perf_evlist, ev) {
 589		if (!ev->metric_id)
 590			continue;
 591
 592		ret = strbuf_setlen(&sb, 0);
 593		if (ret)
 594			break;
 595
 596		ret = decode_metric_id(&sb, ev->metric_id);
 597		if (ret)
 598			break;
 599
 600		free((char *)ev->metric_id);
 601		ev->metric_id = strdup(sb.buf);
 602		if (!ev->metric_id) {
 603			ret = -ENOMEM;
 604			break;
 605		}
 606		/*
 607		 * If the name is just the parsed event, use the metric-id to
 608		 * give a more friendly display version.
 609		 */
 610		if (strstr(ev->name, "metric-id=")) {
 611			bool has_slash = false;
 612
 613			free(ev->name);
 614			for (cur = strchr(sb.buf, '@') ; cur; cur = strchr(++cur, '@')) {
 615				*cur = '/';
 616				has_slash = true;
 617			}
 618
 619			if (modifier) {
 620				if (!has_slash && !strchr(sb.buf, ':')) {
 621					ret = strbuf_addch(&sb, ':');
 622					if (ret)
 623						break;
 624				}
 625				ret = strbuf_addstr(&sb, modifier);
 626				if (ret)
 627					break;
 628			}
 629			ev->name = strdup(sb.buf);
 630			if (!ev->name) {
 631				ret = -ENOMEM;
 632				break;
 633			}
 634		}
 635	}
 636	strbuf_release(&sb);
 637	return ret;
 638}
 639
 640static int metricgroup__build_event_string(struct strbuf *events,
 641					   const struct expr_parse_ctx *ctx,
 642					   const char *modifier,
 643					   bool has_constraint)
 644{
 645	struct hashmap_entry *cur;
 646	size_t bkt;
 647	bool no_group = true, has_tool_events = false;
 648	bool tool_events[PERF_TOOL_MAX] = {false};
 649	int ret = 0;
 650
 651#define RETURN_IF_NON_ZERO(x) do { if (x) return x; } while (0)
 652
 653	hashmap__for_each_entry(ctx->ids, cur, bkt) {
 654		const char *sep, *rsep, *id = cur->pkey;
 655		enum perf_tool_event ev;
 656
 657		pr_debug("found event %s\n", id);
 658
 659		/* Always move tool events outside of the group. */
 660		ev = perf_tool_event__from_str(id);
 661		if (ev != PERF_TOOL_NONE) {
 662			has_tool_events = true;
 663			tool_events[ev] = true;
 664			continue;
 665		}
 666		/* Separate events with commas and open the group if necessary. */
 667		if (no_group) {
 668			if (!has_constraint) {
 669				ret = strbuf_addch(events, '{');
 670				RETURN_IF_NON_ZERO(ret);
 671			}
 672
 673			no_group = false;
 674		} else {
 675			ret = strbuf_addch(events, ',');
 676			RETURN_IF_NON_ZERO(ret);
 677		}
 678		/*
 679		 * Encode the ID as an event string. Add a qualifier for
 680		 * metric_id that is the original name except with characters
 681		 * that parse-events can't parse replaced. For example,
 682		 * 'msr@tsc@' gets added as msr/tsc,metric-id=msr!3tsc!3/
 683		 */
 684		sep = strchr(id, '@');
 685		if (sep != NULL) {
 686			ret = strbuf_add(events, id, sep - id);
 687			RETURN_IF_NON_ZERO(ret);
 688			ret = strbuf_addch(events, '/');
 689			RETURN_IF_NON_ZERO(ret);
 690			rsep = strrchr(sep, '@');
 691			ret = strbuf_add(events, sep + 1, rsep - sep - 1);
 692			RETURN_IF_NON_ZERO(ret);
 693			ret = strbuf_addstr(events, ",metric-id=");
 694			RETURN_IF_NON_ZERO(ret);
 695			sep = rsep;
 696		} else {
 697			sep = strchr(id, ':');
 698			if (sep != NULL) {
 699				ret = strbuf_add(events, id, sep - id);
 700				RETURN_IF_NON_ZERO(ret);
 701			} else {
 702				ret = strbuf_addstr(events, id);
 703				RETURN_IF_NON_ZERO(ret);
 704			}
 705			ret = strbuf_addstr(events, "/metric-id=");
 706			RETURN_IF_NON_ZERO(ret);
 707		}
 708		ret = encode_metric_id(events, id);
 709		RETURN_IF_NON_ZERO(ret);
 710		ret = strbuf_addstr(events, "/");
 711		RETURN_IF_NON_ZERO(ret);
 712
 713		if (sep != NULL) {
 714			ret = strbuf_addstr(events, sep + 1);
 715			RETURN_IF_NON_ZERO(ret);
 716		}
 717		if (modifier) {
 718			ret = strbuf_addstr(events, modifier);
 719			RETURN_IF_NON_ZERO(ret);
 720		}
 721	}
 722	if (!no_group && !has_constraint) {
 723		ret = strbuf_addf(events, "}:W");
 724		RETURN_IF_NON_ZERO(ret);
 725	}
 726	if (has_tool_events) {
 727		int i;
 728
 729		perf_tool_event__for_each_event(i) {
 730			if (tool_events[i]) {
 731				if (!no_group) {
 732					ret = strbuf_addch(events, ',');
 733					RETURN_IF_NON_ZERO(ret);
 734				}
 735				no_group = false;
 736				ret = strbuf_addstr(events, perf_tool_event__to_str(i));
 737				RETURN_IF_NON_ZERO(ret);
 738			}
 739		}
 740	}
 741
 742	return ret;
 743#undef RETURN_IF_NON_ZERO
 744}
 745
 746int __weak arch_get_runtimeparam(const struct pmu_event *pe __maybe_unused)
 747{
 748	return 1;
 749}
 750
 751/*
 752 * A singly linked list on the stack of the names of metrics being
 753 * processed. Used to identify recursion.
 754 */
 755struct visited_metric {
 756	const char *name;
 757	const struct visited_metric *parent;
 758};
 759
 760struct metricgroup_add_iter_data {
 761	struct list_head *metric_list;
 
 762	const char *metric_name;
 763	const char *modifier;
 764	int *ret;
 765	bool *has_match;
 766	bool metric_no_group;
 
 767	const char *user_requested_cpu_list;
 768	bool system_wide;
 769	struct metric *root_metric;
 770	const struct visited_metric *visited;
 771	const struct pmu_events_table *table;
 772};
 773
 774static bool metricgroup__find_metric(const char *metric,
 775				     const struct pmu_events_table *table,
 776				     struct pmu_event *pe);
 
 777
 778static int add_metric(struct list_head *metric_list,
 779		      const struct pmu_event *pe,
 780		      const char *modifier,
 781		      bool metric_no_group,
 
 782		      const char *user_requested_cpu_list,
 783		      bool system_wide,
 784		      struct metric *root_metric,
 785		      const struct visited_metric *visited,
 786		      const struct pmu_events_table *table);
 787
 788/**
 789 * resolve_metric - Locate metrics within the root metric and recursively add
 790 *                    references to them.
 791 * @metric_list: The list the metric is added to.
 
 792 * @modifier: if non-null event modifiers like "u".
 793 * @metric_no_group: Should events written to events be grouped "{}" or
 794 *                   global. Grouping is the default but due to multiplexing the
 795 *                   user may override.
 796 * @user_requested_cpu_list: Command line specified CPUs to record on.
 797 * @system_wide: Are events for all processes recorded.
 798 * @root_metric: Metrics may reference other metrics to form a tree. In this
 799 *               case the root_metric holds all the IDs and a list of referenced
 800 *               metrics. When adding a root this argument is NULL.
 801 * @visited: A singly linked list of metric names being added that is used to
 802 *           detect recursion.
 803 * @table: The table that is searched for metrics, most commonly the table for the
 804 *       architecture perf is running upon.
 805 */
 806static int resolve_metric(struct list_head *metric_list,
 
 807			  const char *modifier,
 808			  bool metric_no_group,
 
 809			  const char *user_requested_cpu_list,
 810			  bool system_wide,
 811			  struct metric *root_metric,
 812			  const struct visited_metric *visited,
 813			  const struct pmu_events_table *table)
 814{
 815	struct hashmap_entry *cur;
 816	size_t bkt;
 817	struct to_resolve {
 818		/* The metric to resolve. */
 819		struct pmu_event pe;
 820		/*
 821		 * The key in the IDs map, this may differ from in case,
 822		 * etc. from pe->metric_name.
 823		 */
 824		const char *key;
 825	} *pending = NULL;
 826	int i, ret = 0, pending_cnt = 0;
 827
 828	/*
 829	 * Iterate all the parsed IDs and if there's a matching metric and it to
 830	 * the pending array.
 831	 */
 832	hashmap__for_each_entry(root_metric->pctx->ids, cur, bkt) {
 833		struct pmu_event pe;
 834
 835		if (metricgroup__find_metric(cur->pkey, table, &pe)) {
 836			pending = realloc(pending,
 837					(pending_cnt + 1) * sizeof(struct to_resolve));
 838			if (!pending)
 839				return -ENOMEM;
 840
 841			memcpy(&pending[pending_cnt].pe, &pe, sizeof(pe));
 842			pending[pending_cnt].key = cur->pkey;
 843			pending_cnt++;
 844		}
 845	}
 846
 847	/* Remove the metric IDs from the context. */
 848	for (i = 0; i < pending_cnt; i++)
 849		expr__del_id(root_metric->pctx, pending[i].key);
 850
 851	/*
 852	 * Recursively add all the metrics, IDs are added to the root metric's
 853	 * context.
 854	 */
 855	for (i = 0; i < pending_cnt; i++) {
 856		ret = add_metric(metric_list, &pending[i].pe, modifier, metric_no_group,
 857				 user_requested_cpu_list, system_wide, root_metric, visited,
 858				 table);
 859		if (ret)
 860			break;
 861	}
 862
 863	free(pending);
 864	return ret;
 865}
 866
 867/**
 868 * __add_metric - Add a metric to metric_list.
 869 * @metric_list: The list the metric is added to.
 870 * @pe: The pmu_event containing the metric to be added.
 871 * @modifier: if non-null event modifiers like "u".
 872 * @metric_no_group: Should events written to events be grouped "{}" or
 873 *                   global. Grouping is the default but due to multiplexing the
 874 *                   user may override.
 
 875 * @runtime: A special argument for the parser only known at runtime.
 876 * @user_requested_cpu_list: Command line specified CPUs to record on.
 877 * @system_wide: Are events for all processes recorded.
 878 * @root_metric: Metrics may reference other metrics to form a tree. In this
 879 *               case the root_metric holds all the IDs and a list of referenced
 880 *               metrics. When adding a root this argument is NULL.
 881 * @visited: A singly linked list of metric names being added that is used to
 882 *           detect recursion.
 883 * @table: The table that is searched for metrics, most commonly the table for the
 884 *       architecture perf is running upon.
 885 */
 886static int __add_metric(struct list_head *metric_list,
 887			const struct pmu_event *pe,
 888			const char *modifier,
 889			bool metric_no_group,
 
 890			int runtime,
 891			const char *user_requested_cpu_list,
 892			bool system_wide,
 893			struct metric *root_metric,
 894			const struct visited_metric *visited,
 895			const struct pmu_events_table *table)
 896{
 897	const struct visited_metric *vm;
 898	int ret;
 899	bool is_root = !root_metric;
 
 900	struct visited_metric visited_node = {
 901		.name = pe->metric_name,
 902		.parent = visited,
 903	};
 904
 905	for (vm = visited; vm; vm = vm->parent) {
 906		if (!strcmp(pe->metric_name, vm->name)) {
 907			pr_err("failed: recursion detected for %s\n", pe->metric_name);
 908			return -1;
 909		}
 910	}
 911
 912	if (is_root) {
 913		/*
 914		 * This metric is the root of a tree and may reference other
 915		 * metrics that are added recursively.
 916		 */
 917		root_metric = metric__new(pe, modifier, metric_no_group, runtime,
 918					  user_requested_cpu_list, system_wide);
 919		if (!root_metric)
 920			return -ENOMEM;
 921
 922	} else {
 923		int cnt = 0;
 924
 925		/*
 926		 * This metric was referenced in a metric higher in the
 927		 * tree. Check if the same metric is already resolved in the
 928		 * metric_refs list.
 929		 */
 930		if (root_metric->metric_refs) {
 931			for (; root_metric->metric_refs[cnt].metric_name; cnt++) {
 932				if (!strcmp(pe->metric_name,
 933					    root_metric->metric_refs[cnt].metric_name))
 934					return 0;
 935			}
 936		}
 937
 938		/* Create reference. Need space for the entry and the terminator. */
 939		root_metric->metric_refs = realloc(root_metric->metric_refs,
 940						(cnt + 2) * sizeof(struct metric_ref));
 941		if (!root_metric->metric_refs)
 942			return -ENOMEM;
 943
 944		/*
 945		 * Intentionally passing just const char pointers,
 946		 * from 'pe' object, so they never go away. We don't
 947		 * need to change them, so there's no need to create
 948		 * our own copy.
 949		 */
 950		root_metric->metric_refs[cnt].metric_name = pe->metric_name;
 951		root_metric->metric_refs[cnt].metric_expr = pe->metric_expr;
 952
 953		/* Null terminate array. */
 954		root_metric->metric_refs[cnt+1].metric_name = NULL;
 955		root_metric->metric_refs[cnt+1].metric_expr = NULL;
 956	}
 957
 958	/*
 959	 * For both the parent and referenced metrics, we parse
 960	 * all the metric's IDs and add it to the root context.
 961	 */
 962	if (expr__find_ids(pe->metric_expr, NULL, root_metric->pctx) < 0) {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 963		/* Broken metric. */
 964		ret = -EINVAL;
 965	} else {
 
 966		/* Resolve referenced metrics. */
 967		ret = resolve_metric(metric_list, modifier, metric_no_group,
 968				     user_requested_cpu_list, system_wide,
 969				     root_metric, &visited_node, table);
 
 
 
 970	}
 971
 972	if (ret) {
 973		if (is_root)
 974			metric__free(root_metric);
 975
 976	} else if (is_root)
 977		list_add(&root_metric->nd, metric_list);
 978
 979	return ret;
 980}
 981
 982struct metricgroup__find_metric_data {
 
 983	const char *metric;
 984	struct pmu_event *pe;
 985};
 986
 987static int metricgroup__find_metric_callback(const struct pmu_event *pe,
 988					     const struct pmu_events_table *table  __maybe_unused,
 989					     void *vdata)
 990{
 991	struct metricgroup__find_metric_data *data = vdata;
 
 
 
 
 992
 993	if (!match_metric(pe->metric_name, data->metric))
 994		return 0;
 995
 996	memcpy(data->pe, pe, sizeof(*pe));
 997	return 1;
 998}
 999
1000static bool metricgroup__find_metric(const char *metric,
1001				     const struct pmu_events_table *table,
1002				     struct pmu_event *pe)
 
1003{
1004	struct metricgroup__find_metric_data data = {
 
1005		.metric = metric,
1006		.pe = pe,
1007	};
1008
1009	return pmu_events_table_for_each_event(table, metricgroup__find_metric_callback, &data)
1010		? true : false;
1011}
1012
1013static int add_metric(struct list_head *metric_list,
1014		      const struct pmu_event *pe,
1015		      const char *modifier,
1016		      bool metric_no_group,
 
1017		      const char *user_requested_cpu_list,
1018		      bool system_wide,
1019		      struct metric *root_metric,
1020		      const struct visited_metric *visited,
1021		      const struct pmu_events_table *table)
1022{
1023	int ret = 0;
1024
1025	pr_debug("metric expr %s for %s\n", pe->metric_expr, pe->metric_name);
1026
1027	if (!strstr(pe->metric_expr, "?")) {
1028		ret = __add_metric(metric_list, pe, modifier, metric_no_group, 0,
1029				   user_requested_cpu_list, system_wide, root_metric,
1030				   visited, table);
1031	} else {
1032		int j, count;
1033
1034		count = arch_get_runtimeparam(pe);
1035
1036		/* This loop is added to create multiple
1037		 * events depend on count value and add
1038		 * those events to metric_list.
1039		 */
1040
1041		for (j = 0; j < count && !ret; j++)
1042			ret = __add_metric(metric_list, pe, modifier, metric_no_group, j,
1043					   user_requested_cpu_list, system_wide,
1044					   root_metric, visited, table);
1045	}
1046
1047	return ret;
1048}
1049
1050static int metricgroup__add_metric_sys_event_iter(const struct pmu_event *pe,
1051						const struct pmu_events_table *table __maybe_unused,
1052						void *data)
1053{
1054	struct metricgroup_add_iter_data *d = data;
1055	int ret;
1056
1057	if (!match_pe_metric(pe, d->metric_name))
1058		return 0;
1059
1060	ret = add_metric(d->metric_list, pe, d->modifier, d->metric_no_group,
1061			 d->user_requested_cpu_list, d->system_wide,
1062			 d->root_metric, d->visited, d->table);
1063	if (ret)
1064		goto out;
1065
1066	*(d->has_match) = true;
1067
1068out:
1069	*(d->ret) = ret;
1070	return ret;
1071}
1072
1073/**
1074 * metric_list_cmp - list_sort comparator that sorts metrics with more events to
1075 *                   the front. tool events are excluded from the count.
1076 */
1077static int metric_list_cmp(void *priv __maybe_unused, const struct list_head *l,
1078			   const struct list_head *r)
1079{
1080	const struct metric *left = container_of(l, struct metric, nd);
1081	const struct metric *right = container_of(r, struct metric, nd);
1082	struct expr_id_data *data;
1083	int i, left_count, right_count;
1084
1085	left_count = hashmap__size(left->pctx->ids);
1086	perf_tool_event__for_each_event(i) {
1087		if (!expr__get_id(left->pctx, perf_tool_event__to_str(i), &data))
1088			left_count--;
1089	}
1090
1091	right_count = hashmap__size(right->pctx->ids);
1092	perf_tool_event__for_each_event(i) {
1093		if (!expr__get_id(right->pctx, perf_tool_event__to_str(i), &data))
1094			right_count--;
1095	}
1096
1097	return right_count - left_count;
1098}
1099
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1100struct metricgroup__add_metric_data {
1101	struct list_head *list;
 
1102	const char *metric_name;
1103	const char *modifier;
1104	const char *user_requested_cpu_list;
1105	bool metric_no_group;
 
1106	bool system_wide;
1107	bool has_match;
1108};
1109
1110static int metricgroup__add_metric_callback(const struct pmu_event *pe,
1111					    const struct pmu_events_table *table,
1112					    void *vdata)
1113{
1114	struct metricgroup__add_metric_data *data = vdata;
1115	int ret = 0;
1116
1117	if (pe->metric_expr &&
1118		(match_metric(pe->metric_group, data->metric_name) ||
1119		 match_metric(pe->metric_name, data->metric_name))) {
1120
1121		data->has_match = true;
1122		ret = add_metric(data->list, pe, data->modifier, data->metric_no_group,
1123				 data->user_requested_cpu_list, data->system_wide,
1124				 /*root_metric=*/NULL, /*visited_metrics=*/NULL, table);
 
1125	}
1126	return ret;
1127}
1128
1129/**
1130 * metricgroup__add_metric - Find and add a metric, or a metric group.
 
1131 * @metric_name: The name of the metric or metric group. For example, "IPC"
1132 *               could be the name of a metric and "TopDownL1" the name of a
1133 *               metric group.
1134 * @modifier: if non-null event modifiers like "u".
1135 * @metric_no_group: Should events written to events be grouped "{}" or
1136 *                   global. Grouping is the default but due to multiplexing the
1137 *                   user may override.
1138 * @user_requested_cpu_list: Command line specified CPUs to record on.
1139 * @system_wide: Are events for all processes recorded.
1140 * @metric_list: The list that the metric or metric group are added to.
1141 * @table: The table that is searched for metrics, most commonly the table for the
1142 *       architecture perf is running upon.
1143 */
1144static int metricgroup__add_metric(const char *metric_name, const char *modifier,
1145				   bool metric_no_group,
1146				   const char *user_requested_cpu_list,
1147				   bool system_wide,
1148				   struct list_head *metric_list,
1149				   const struct pmu_events_table *table)
1150{
1151	LIST_HEAD(list);
1152	int ret;
1153	bool has_match = false;
1154
1155	{
1156		struct metricgroup__add_metric_data data = {
1157			.list = &list,
 
1158			.metric_name = metric_name,
1159			.modifier = modifier,
1160			.metric_no_group = metric_no_group,
 
1161			.user_requested_cpu_list = user_requested_cpu_list,
1162			.system_wide = system_wide,
1163			.has_match = false,
1164		};
1165		/*
1166		 * Iterate over all metrics seeing if metric matches either the
1167		 * name or group. When it does add the metric to the list.
1168		 */
1169		ret = pmu_events_table_for_each_event(table, metricgroup__add_metric_callback,
1170						      &data);
1171		if (ret)
1172			goto out;
1173
1174		has_match = data.has_match;
1175	}
1176	{
1177		struct metricgroup_iter_data data = {
1178			.fn = metricgroup__add_metric_sys_event_iter,
1179			.data = (void *) &(struct metricgroup_add_iter_data) {
1180				.metric_list = &list,
 
1181				.metric_name = metric_name,
1182				.modifier = modifier,
1183				.metric_no_group = metric_no_group,
1184				.user_requested_cpu_list = user_requested_cpu_list,
1185				.system_wide = system_wide,
1186				.has_match = &has_match,
1187				.ret = &ret,
1188				.table = table,
1189			},
1190		};
1191
1192		pmu_for_each_sys_event(metricgroup__sys_event_iter, &data);
1193	}
1194	/* End of pmu events. */
1195	if (!has_match)
1196		ret = -EINVAL;
1197
1198out:
1199	/*
1200	 * add to metric_list so that they can be released
1201	 * even if it's failed
1202	 */
1203	list_splice(&list, metric_list);
1204	return ret;
1205}
1206
1207/**
1208 * metricgroup__add_metric_list - Find and add metrics, or metric groups,
1209 *                                specified in a list.
 
1210 * @list: the list of metrics or metric groups. For example, "IPC,CPI,TopDownL1"
1211 *        would match the IPC and CPI metrics, and TopDownL1 would match all
1212 *        the metrics in the TopDownL1 group.
1213 * @metric_no_group: Should events written to events be grouped "{}" or
1214 *                   global. Grouping is the default but due to multiplexing the
1215 *                   user may override.
1216 * @user_requested_cpu_list: Command line specified CPUs to record on.
1217 * @system_wide: Are events for all processes recorded.
1218 * @metric_list: The list that metrics are added to.
1219 * @table: The table that is searched for metrics, most commonly the table for the
1220 *       architecture perf is running upon.
1221 */
1222static int metricgroup__add_metric_list(const char *list, bool metric_no_group,
 
 
1223					const char *user_requested_cpu_list,
1224					bool system_wide, struct list_head *metric_list,
1225					const struct pmu_events_table *table)
1226{
1227	char *list_itr, *list_copy, *metric_name, *modifier;
1228	int ret, count = 0;
1229
1230	list_copy = strdup(list);
1231	if (!list_copy)
1232		return -ENOMEM;
1233	list_itr = list_copy;
1234
1235	while ((metric_name = strsep(&list_itr, ",")) != NULL) {
1236		modifier = strchr(metric_name, ':');
1237		if (modifier)
1238			*modifier++ = '\0';
1239
1240		ret = metricgroup__add_metric(metric_name, modifier,
1241					      metric_no_group, user_requested_cpu_list,
 
1242					      system_wide, metric_list, table);
1243		if (ret == -EINVAL)
1244			pr_err("Cannot find metric or group `%s'\n", metric_name);
1245
1246		if (ret)
1247			break;
1248
1249		count++;
1250	}
1251	free(list_copy);
1252
1253	if (!ret) {
1254		/*
1255		 * Warn about nmi_watchdog if any parsed metrics had the
1256		 * NO_NMI_WATCHDOG constraint.
1257		 */
1258		metricgroup___watchdog_constraint_hint(NULL, true);
1259		/* No metrics. */
1260		if (count == 0)
1261			return -EINVAL;
1262	}
1263	return ret;
1264}
1265
1266static void metricgroup__free_metrics(struct list_head *metric_list)
1267{
1268	struct metric *m, *tmp;
1269
1270	list_for_each_entry_safe (m, tmp, metric_list, nd) {
1271		list_del_init(&m->nd);
1272		metric__free(m);
1273	}
1274}
1275
1276/**
1277 * find_tool_events - Search for the pressence of tool events in metric_list.
1278 * @metric_list: List to take metrics from.
1279 * @tool_events: Array of false values, indices corresponding to tool events set
1280 *               to true if tool event is found.
1281 */
1282static void find_tool_events(const struct list_head *metric_list,
1283			     bool tool_events[PERF_TOOL_MAX])
1284{
1285	struct metric *m;
1286
1287	list_for_each_entry(m, metric_list, nd) {
1288		int i;
1289
1290		perf_tool_event__for_each_event(i) {
1291			struct expr_id_data *data;
1292
1293			if (!tool_events[i] &&
1294			    !expr__get_id(m->pctx, perf_tool_event__to_str(i), &data))
1295				tool_events[i] = true;
1296		}
1297	}
1298}
1299
1300/**
1301 * build_combined_expr_ctx - Make an expr_parse_ctx with all has_constraint
1302 *                           metric IDs, as the IDs are held in a set,
1303 *                           duplicates will be removed.
1304 * @metric_list: List to take metrics from.
1305 * @combined: Out argument for result.
1306 */
1307static int build_combined_expr_ctx(const struct list_head *metric_list,
1308				   struct expr_parse_ctx **combined)
1309{
1310	struct hashmap_entry *cur;
1311	size_t bkt;
1312	struct metric *m;
1313	char *dup;
1314	int ret;
1315
1316	*combined = expr__ctx_new();
1317	if (!*combined)
1318		return -ENOMEM;
1319
1320	list_for_each_entry(m, metric_list, nd) {
1321		if (m->has_constraint && !m->modifier) {
1322			hashmap__for_each_entry(m->pctx->ids, cur, bkt) {
1323				dup = strdup(cur->pkey);
1324				if (!dup) {
1325					ret = -ENOMEM;
1326					goto err_out;
1327				}
1328				ret = expr__add_id(*combined, dup);
1329				if (ret)
1330					goto err_out;
1331			}
1332		}
1333	}
1334	return 0;
1335err_out:
1336	expr__ctx_free(*combined);
1337	*combined = NULL;
1338	return ret;
1339}
1340
1341/**
1342 * parse_ids - Build the event string for the ids and parse them creating an
1343 *             evlist. The encoded metric_ids are decoded.
1344 * @metric_no_merge: is metric sharing explicitly disabled.
1345 * @fake_pmu: used when testing metrics not supported by the current CPU.
1346 * @ids: the event identifiers parsed from a metric.
1347 * @modifier: any modifiers added to the events.
1348 * @has_constraint: false if events should be placed in a weak group.
1349 * @tool_events: entries set true if the tool event of index could be present in
1350 *               the overall list of metrics.
1351 * @out_evlist: the created list of events.
1352 */
1353static int parse_ids(bool metric_no_merge, struct perf_pmu *fake_pmu,
1354		     struct expr_parse_ctx *ids, const char *modifier,
1355		     bool has_constraint, const bool tool_events[PERF_TOOL_MAX],
1356		     struct evlist **out_evlist)
1357{
1358	struct parse_events_error parse_error;
1359	struct evlist *parsed_evlist;
1360	struct strbuf events = STRBUF_INIT;
1361	int ret;
1362
1363	*out_evlist = NULL;
1364	if (!metric_no_merge || hashmap__size(ids->ids) == 0) {
1365		bool added_event = false;
1366		int i;
1367		/*
1368		 * We may fail to share events between metrics because a tool
1369		 * event isn't present in one metric. For example, a ratio of
1370		 * cache misses doesn't need duration_time but the same events
1371		 * may be used for a misses per second. Events without sharing
1372		 * implies multiplexing, that is best avoided, so place
1373		 * all tool events in every group.
1374		 *
1375		 * Also, there may be no ids/events in the expression parsing
1376		 * context because of constant evaluation, e.g.:
1377		 *    event1 if #smt_on else 0
1378		 * Add a tool event to avoid a parse error on an empty string.
1379		 */
1380		perf_tool_event__for_each_event(i) {
1381			if (tool_events[i]) {
1382				char *tmp = strdup(perf_tool_event__to_str(i));
1383
1384				if (!tmp)
1385					return -ENOMEM;
1386				ids__insert(ids->ids, tmp);
1387				added_event = true;
1388			}
1389		}
1390		if (!added_event && hashmap__size(ids->ids) == 0) {
1391			char *tmp = strdup("duration_time");
1392
1393			if (!tmp)
1394				return -ENOMEM;
1395			ids__insert(ids->ids, tmp);
1396		}
1397	}
1398	ret = metricgroup__build_event_string(&events, ids, modifier,
1399					      has_constraint);
1400	if (ret)
1401		return ret;
1402
1403	parsed_evlist = evlist__new();
1404	if (!parsed_evlist) {
1405		ret = -ENOMEM;
1406		goto err_out;
1407	}
1408	pr_debug("Parsing metric events '%s'\n", events.buf);
1409	parse_events_error__init(&parse_error);
1410	ret = __parse_events(parsed_evlist, events.buf, &parse_error, fake_pmu);
 
1411	if (ret) {
1412		parse_events_error__print(&parse_error, events.buf);
1413		goto err_out;
1414	}
1415	ret = decode_all_metric_ids(parsed_evlist, modifier);
1416	if (ret)
1417		goto err_out;
1418
1419	*out_evlist = parsed_evlist;
1420	parsed_evlist = NULL;
1421err_out:
1422	parse_events_error__exit(&parse_error);
1423	evlist__delete(parsed_evlist);
1424	strbuf_release(&events);
1425	return ret;
1426}
1427
1428static int parse_groups(struct evlist *perf_evlist, const char *str,
 
1429			bool metric_no_group,
1430			bool metric_no_merge,
 
1431			const char *user_requested_cpu_list,
1432			bool system_wide,
1433			struct perf_pmu *fake_pmu,
1434			struct rblist *metric_events_list,
1435			const struct pmu_events_table *table)
1436{
1437	struct evlist *combined_evlist = NULL;
1438	LIST_HEAD(metric_list);
1439	struct metric *m;
1440	bool tool_events[PERF_TOOL_MAX] = {false};
 
1441	int ret;
1442
1443	if (metric_events_list->nr_entries == 0)
1444		metricgroup__rblist_init(metric_events_list);
1445	ret = metricgroup__add_metric_list(str, metric_no_group,
1446					   user_requested_cpu_list,
1447					   system_wide, &metric_list, table);
1448	if (ret)
1449		goto out;
1450
1451	/* Sort metrics from largest to smallest. */
1452	list_sort(NULL, &metric_list, metric_list_cmp);
1453
1454	if (!metric_no_merge) {
1455		struct expr_parse_ctx *combined = NULL;
1456
1457		find_tool_events(&metric_list, tool_events);
1458
1459		ret = build_combined_expr_ctx(&metric_list, &combined);
1460
1461		if (!ret && combined && hashmap__size(combined->ids)) {
1462			ret = parse_ids(metric_no_merge, fake_pmu, combined,
1463					/*modifier=*/NULL,
1464					/*has_constraint=*/true,
1465					tool_events,
1466					&combined_evlist);
1467		}
1468		if (combined)
1469			expr__ctx_free(combined);
1470
1471		if (ret)
1472			goto out;
1473	}
1474
 
 
 
1475	list_for_each_entry(m, &metric_list, nd) {
1476		struct metric_event *me;
1477		struct evsel **metric_events;
1478		struct evlist *metric_evlist = NULL;
1479		struct metric *n;
1480		struct metric_expr *expr;
1481
1482		if (combined_evlist && m->has_constraint) {
1483			metric_evlist = combined_evlist;
1484		} else if (!metric_no_merge) {
1485			/*
1486			 * See if the IDs for this metric are a subset of an
1487			 * earlier metric.
1488			 */
1489			list_for_each_entry(n, &metric_list, nd) {
1490				if (m == n)
1491					break;
1492
1493				if (n->evlist == NULL)
1494					continue;
1495
1496				if ((!m->modifier && n->modifier) ||
1497				    (m->modifier && !n->modifier) ||
1498				    (m->modifier && n->modifier &&
1499					    strcmp(m->modifier, n->modifier)))
1500					continue;
1501
 
 
 
 
 
1502				if (expr__subset_of_ids(n->pctx, m->pctx)) {
1503					pr_debug("Events in '%s' fully contained within '%s'\n",
1504						 m->metric_name, n->metric_name);
1505					metric_evlist = n->evlist;
1506					break;
1507				}
1508
1509			}
1510		}
1511		if (!metric_evlist) {
1512			ret = parse_ids(metric_no_merge, fake_pmu, m->pctx, m->modifier,
1513					m->has_constraint, tool_events, &m->evlist);
1514			if (ret)
1515				goto out;
1516
1517			metric_evlist = m->evlist;
1518		}
1519		ret = setup_metric_events(m->pctx->ids, metric_evlist, &metric_events);
 
1520		if (ret) {
1521			pr_debug("Cannot resolve IDs for %s: %s\n",
1522				m->metric_name, m->metric_expr);
1523			goto out;
1524		}
1525
1526		me = metricgroup__lookup(metric_events_list, metric_events[0], true);
1527
1528		expr = malloc(sizeof(struct metric_expr));
1529		if (!expr) {
1530			ret = -ENOMEM;
1531			free(metric_events);
1532			goto out;
1533		}
1534
1535		expr->metric_refs = m->metric_refs;
1536		m->metric_refs = NULL;
1537		expr->metric_expr = m->metric_expr;
1538		if (m->modifier) {
1539			char *tmp;
1540
1541			if (asprintf(&tmp, "%s:%s", m->metric_name, m->modifier) < 0)
1542				expr->metric_name = NULL;
1543			else
1544				expr->metric_name = tmp;
1545		} else
1546			expr->metric_name = strdup(m->metric_name);
1547
1548		if (!expr->metric_name) {
1549			ret = -ENOMEM;
1550			free(metric_events);
1551			goto out;
1552		}
 
1553		expr->metric_unit = m->metric_unit;
1554		expr->metric_events = metric_events;
1555		expr->runtime = m->pctx->sctx.runtime;
 
 
1556		list_add(&expr->nd, &me->head);
1557	}
1558
1559
1560	if (combined_evlist) {
1561		evlist__splice_list_tail(perf_evlist, &combined_evlist->core.entries);
1562		evlist__delete(combined_evlist);
1563	}
1564
1565	list_for_each_entry(m, &metric_list, nd) {
1566		if (m->evlist)
1567			evlist__splice_list_tail(perf_evlist, &m->evlist->core.entries);
1568	}
1569
1570out:
1571	metricgroup__free_metrics(&metric_list);
1572	return ret;
1573}
1574
1575int metricgroup__parse_groups(struct evlist *perf_evlist,
 
1576			      const char *str,
1577			      bool metric_no_group,
1578			      bool metric_no_merge,
 
1579			      const char *user_requested_cpu_list,
1580			      bool system_wide,
1581			      struct rblist *metric_events)
1582{
1583	const struct pmu_events_table *table = pmu_events_table__find();
1584
1585	if (!table)
1586		return -EINVAL;
1587
1588	return parse_groups(perf_evlist, str, metric_no_group, metric_no_merge,
1589			    user_requested_cpu_list, system_wide,
1590			    /*fake_pmu=*/NULL, metric_events, table);
1591}
1592
1593int metricgroup__parse_groups_test(struct evlist *evlist,
1594				   const struct pmu_events_table *table,
1595				   const char *str,
1596				   bool metric_no_group,
1597				   bool metric_no_merge,
1598				   struct rblist *metric_events)
1599{
1600	return parse_groups(evlist, str, metric_no_group, metric_no_merge,
 
 
 
1601			    /*user_requested_cpu_list=*/NULL,
1602			    /*system_wide=*/false,
1603			    &perf_pmu__fake, metric_events, table);
1604}
1605
1606static int metricgroup__has_metric_callback(const struct pmu_event *pe,
1607					    const struct pmu_events_table *table __maybe_unused,
 
 
 
 
1608					    void *vdata)
1609{
1610	const char *metric = vdata;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1611
1612	if (!pe->metric_expr)
1613		return 0;
1614
1615	if (match_metric(pe->metric_name, metric))
1616		return 1;
 
1617
1618	return 0;
1619}
1620
1621bool metricgroup__has_metric(const char *metric)
1622{
1623	const struct pmu_events_table *table = pmu_events_table__find();
 
1624
1625	if (!table)
1626		return false;
1627
1628	return pmu_events_table_for_each_event(table, metricgroup__has_metric_callback,
1629					       (void *)metric) ? true : false;
 
1630}
1631
1632int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
1633				    struct rblist *new_metric_events,
1634				    struct rblist *old_metric_events)
1635{
1636	unsigned int i;
1637
1638	for (i = 0; i < rblist__nr_entries(old_metric_events); i++) {
1639		struct rb_node *nd;
1640		struct metric_event *old_me, *new_me;
1641		struct metric_expr *old_expr, *new_expr;
1642		struct evsel *evsel;
1643		size_t alloc_size;
1644		int idx, nr;
1645
1646		nd = rblist__entry(old_metric_events, i);
1647		old_me = container_of(nd, struct metric_event, nd);
1648
1649		evsel = evlist__find_evsel(evlist, old_me->evsel->core.idx);
1650		if (!evsel)
1651			return -EINVAL;
1652		new_me = metricgroup__lookup(new_metric_events, evsel, true);
1653		if (!new_me)
1654			return -ENOMEM;
1655
1656		pr_debug("copying metric event for cgroup '%s': %s (idx=%d)\n",
1657			 cgrp ? cgrp->name : "root", evsel->name, evsel->core.idx);
1658
1659		list_for_each_entry(old_expr, &old_me->head, nd) {
1660			new_expr = malloc(sizeof(*new_expr));
1661			if (!new_expr)
1662				return -ENOMEM;
1663
1664			new_expr->metric_expr = old_expr->metric_expr;
 
1665			new_expr->metric_name = strdup(old_expr->metric_name);
1666			if (!new_expr->metric_name)
1667				return -ENOMEM;
1668
1669			new_expr->metric_unit = old_expr->metric_unit;
1670			new_expr->runtime = old_expr->runtime;
1671
1672			if (old_expr->metric_refs) {
1673				/* calculate number of metric_events */
1674				for (nr = 0; old_expr->metric_refs[nr].metric_name; nr++)
1675					continue;
1676				alloc_size = sizeof(*new_expr->metric_refs);
1677				new_expr->metric_refs = calloc(nr + 1, alloc_size);
1678				if (!new_expr->metric_refs) {
1679					free(new_expr);
1680					return -ENOMEM;
1681				}
1682
1683				memcpy(new_expr->metric_refs, old_expr->metric_refs,
1684				       nr * alloc_size);
1685			} else {
1686				new_expr->metric_refs = NULL;
1687			}
1688
1689			/* calculate number of metric_events */
1690			for (nr = 0; old_expr->metric_events[nr]; nr++)
1691				continue;
1692			alloc_size = sizeof(*new_expr->metric_events);
1693			new_expr->metric_events = calloc(nr + 1, alloc_size);
1694			if (!new_expr->metric_events) {
1695				free(new_expr->metric_refs);
1696				free(new_expr);
1697				return -ENOMEM;
1698			}
1699
1700			/* copy evsel in the same position */
1701			for (idx = 0; idx < nr; idx++) {
1702				evsel = old_expr->metric_events[idx];
1703				evsel = evlist__find_evsel(evlist, evsel->core.idx);
1704				if (evsel == NULL) {
1705					free(new_expr->metric_events);
1706					free(new_expr->metric_refs);
1707					free(new_expr);
1708					return -EINVAL;
1709				}
1710				new_expr->metric_events[idx] = evsel;
1711			}
1712
1713			list_add(&new_expr->nd, &new_me->head);
1714		}
1715	}
1716	return 0;
1717}