1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (c) 2017, Intel Corporation.
4 */
5
6/* Manage metrics and groups of metrics from JSON files */
7
8#include "metricgroup.h"
9#include "debug.h"
10#include "evlist.h"
11#include "evsel.h"
12#include "strbuf.h"
13#include "pmu.h"
14#include "pmu-hybrid.h"
15#include "print-events.h"
16#include "expr.h"
17#include "rblist.h"
18#include <string.h>
19#include <errno.h>
20#include "strlist.h"
21#include <assert.h>
22#include <linux/ctype.h>
23#include <linux/list_sort.h>
24#include <linux/string.h>
25#include <linux/zalloc.h>
26#include <perf/cpumap.h>
27#include <subcmd/parse-options.h>
28#include <api/fs/fs.h>
29#include "util.h"
30#include <asm/bug.h>
31#include "cgroup.h"
32#include "util/hashmap.h"
33
34struct metric_event *metricgroup__lookup(struct rblist *metric_events,
35 struct evsel *evsel,
36 bool create)
37{
38 struct rb_node *nd;
39 struct metric_event me = {
40 .evsel = evsel
41 };
42
43 if (!metric_events)
44 return NULL;
45
46 nd = rblist__find(metric_events, &me);
47 if (nd)
48 return container_of(nd, struct metric_event, nd);
49 if (create) {
50 rblist__add_node(metric_events, &me);
51 nd = rblist__find(metric_events, &me);
52 if (nd)
53 return container_of(nd, struct metric_event, nd);
54 }
55 return NULL;
56}
57
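/* rblist comparison function: metric_event nodes are ordered by their evsel pointer. */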
58static int metric_event_cmp(struct rb_node *rb_node, const void *entry)
59{
60 struct metric_event *a = container_of(rb_node,
61 struct metric_event,
62 nd);
63 const struct metric_event *b = entry;
64
65 if (a->evsel == b->evsel)
66 return 0;
67 if ((char *)a->evsel < (char *)b->evsel)
68 return -1;
69 return +1;
70}
71
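/* rblist allocation function: copy the lookup key and initialize the list of metric expressions. */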
72static struct rb_node *metric_event_new(struct rblist *rblist __maybe_unused,
73 const void *entry)
74{
75 struct metric_event *me = malloc(sizeof(struct metric_event));
76
77 if (!me)
78 return NULL;
79 memcpy(me, entry, sizeof(struct metric_event));
80 me->evsel = ((struct metric_event *)entry)->evsel;
81 INIT_LIST_HEAD(&me->head);
82 return &me->nd;
83}
84
85static void metric_event_delete(struct rblist *rblist __maybe_unused,
86 struct rb_node *rb_node)
87{
88 struct metric_event *me = container_of(rb_node, struct metric_event, nd);
89 struct metric_expr *expr, *tmp;
90
91 list_for_each_entry_safe(expr, tmp, &me->head, nd) {
92 free((char *)expr->metric_name);
93 free(expr->metric_refs);
94 free(expr->metric_events);
95 free(expr);
96 }
97
98 free(me);
99}
100
101static void metricgroup__rblist_init(struct rblist *metric_events)
102{
103 rblist__init(metric_events);
104 metric_events->node_cmp = metric_event_cmp;
105 metric_events->node_new = metric_event_new;
106 metric_events->node_delete = metric_event_delete;
107}
108
109void metricgroup__rblist_exit(struct rblist *metric_events)
110{
111 rblist__exit(metric_events);
112}
113
114/**
115 * The metric under construction. The data held here will be placed in a
116 * metric_expr.
117 */
118struct metric {
119 struct list_head nd;
120 /**
	 * The expression parse context, importantly holding the IDs contained
	 * within the expression.
123 */
124 struct expr_parse_ctx *pctx;
125 /** The name of the metric such as "IPC". */
126 const char *metric_name;
127 /** Modifier on the metric such as "u" or NULL for none. */
128 const char *modifier;
129 /** The expression to parse, for example, "instructions/cycles". */
130 const char *metric_expr;
131 /**
132 * The "ScaleUnit" that scales and adds a unit to the metric during
133 * output.
134 */
135 const char *metric_unit;
136 /** Optional null terminated array of referenced metrics. */
137 struct metric_ref *metric_refs;
138 /**
	 * Is there a constraint on the group of events? If so, the events
	 * won't be grouped.
141 */
142 bool has_constraint;
143 /**
144 * Parsed events for the metric. Optional as events may be taken from a
145 * different metric whose group contains all the IDs necessary for this
146 * one.
147 */
148 struct evlist *evlist;
149};
150
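/*
 * Print hints for the NO_NMI_WATCHDOG constraint: a per-group warning when a
 * constrained metric group is split into standalone metrics (@foot == false)
 * and, if any group was split, a closing hint on how to disable the NMI
 * watchdog (@foot == true).
 */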
151static void metricgroup___watchdog_constraint_hint(const char *name, bool foot)
152{
153 static bool violate_nmi_constraint;
154
155 if (!foot) {
156 pr_warning("Splitting metric group %s into standalone metrics.\n", name);
157 violate_nmi_constraint = true;
158 return;
159 }
160
161 if (!violate_nmi_constraint)
162 return;
163
	pr_warning("Try disabling the NMI watchdog to comply with the NO_NMI_WATCHDOG metric constraint:\n"
165 " echo 0 > /proc/sys/kernel/nmi_watchdog\n"
166 " perf stat ...\n"
167 " echo 1 > /proc/sys/kernel/nmi_watchdog\n");
168}
169
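/*
 * Does the metric have a constraint that prevents its events from being
 * grouped? Currently the only constraint is NO_NMI_WATCHDOG, which applies
 * while the NMI watchdog is enabled.
 */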
170static bool metricgroup__has_constraint(const struct pmu_event *pe)
171{
172 if (!pe->metric_constraint)
173 return false;
174
175 if (!strcmp(pe->metric_constraint, "NO_NMI_WATCHDOG") &&
176 sysctl__nmi_watchdog_enabled()) {
177 metricgroup___watchdog_constraint_hint(pe->metric_name, false);
178 return true;
179 }
180
181 return false;
182}
183
184static void metric__free(struct metric *m)
185{
186 if (!m)
187 return;
188
189 free(m->metric_refs);
190 expr__ctx_free(m->pctx);
191 free((char *)m->modifier);
192 evlist__delete(m->evlist);
193 free(m);
194}
195
196static struct metric *metric__new(const struct pmu_event *pe,
197 const char *modifier,
198 bool metric_no_group,
199 int runtime,
200 const char *user_requested_cpu_list,
201 bool system_wide)
202{
203 struct metric *m;
204
205 m = zalloc(sizeof(*m));
206 if (!m)
207 return NULL;
208
209 m->pctx = expr__ctx_new();
210 if (!m->pctx)
211 goto out_err;
212
213 m->metric_name = pe->metric_name;
214 m->modifier = NULL;
215 if (modifier) {
216 m->modifier = strdup(modifier);
217 if (!m->modifier)
218 goto out_err;
219 }
220 m->metric_expr = pe->metric_expr;
221 m->metric_unit = pe->unit;
222 m->pctx->sctx.user_requested_cpu_list = NULL;
223 if (user_requested_cpu_list) {
224 m->pctx->sctx.user_requested_cpu_list = strdup(user_requested_cpu_list);
225 if (!m->pctx->sctx.user_requested_cpu_list)
226 goto out_err;
227 }
228 m->pctx->sctx.runtime = runtime;
229 m->pctx->sctx.system_wide = system_wide;
230 m->has_constraint = metric_no_group || metricgroup__has_constraint(pe);
231 m->metric_refs = NULL;
232 m->evlist = NULL;
233
234 return m;
235out_err:
236 metric__free(m);
237 return NULL;
238}
239
240static bool contains_metric_id(struct evsel **metric_events, int num_events,
241 const char *metric_id)
242{
243 int i;
244
245 for (i = 0; i < num_events; i++) {
246 if (!strcmp(evsel__metric_id(metric_events[i]), metric_id))
247 return true;
248 }
249 return false;
250}
251
252/**
253 * setup_metric_events - Find a group of events in metric_evlist that correspond
254 * to the IDs from a parsed metric expression.
255 * @ids: the metric IDs to match.
256 * @metric_evlist: the list of perf events.
257 * @out_metric_events: holds the created metric events array.
258 */
259static int setup_metric_events(struct hashmap *ids,
260 struct evlist *metric_evlist,
261 struct evsel ***out_metric_events)
262{
263 struct evsel **metric_events;
264 const char *metric_id;
265 struct evsel *ev;
266 size_t ids_size, matched_events, i;
267
268 *out_metric_events = NULL;
269 ids_size = hashmap__size(ids);
270
	metric_events = calloc(ids_size + 1, sizeof(void *));
272 if (!metric_events)
273 return -ENOMEM;
274
275 matched_events = 0;
276 evlist__for_each_entry(metric_evlist, ev) {
277 struct expr_id_data *val_ptr;
278
279 /*
280 * Check for duplicate events with the same name. For
281 * example, uncore_imc/cas_count_read/ will turn into 6
282 * events per socket on skylakex. Only the first such
283 * event is placed in metric_events.
284 */
285 metric_id = evsel__metric_id(ev);
286 if (contains_metric_id(metric_events, matched_events, metric_id))
287 continue;
288 /*
289 * Does this event belong to the parse context? For
290 * combined or shared groups, this metric may not care
291 * about this event.
292 */
293 if (hashmap__find(ids, metric_id, &val_ptr)) {
294 metric_events[matched_events++] = ev;
295
296 if (matched_events >= ids_size)
297 break;
298 }
299 }
300 if (matched_events < ids_size) {
301 free(metric_events);
302 return -EINVAL;
303 }
304 for (i = 0; i < ids_size; i++) {
305 ev = metric_events[i];
306 ev->collect_stat = true;
307
308 /*
309 * The metric leader points to the identically named
310 * event in metric_events.
311 */
312 ev->metric_leader = ev;
313 /*
		 * Mark two events with identical names in the same
		 * group (or globally) as being in use, as uncore events
		 * may be duplicated for each PMU. Set the metric leader
317 * of such events to be the event that appears in
318 * metric_events.
319 */
320 metric_id = evsel__metric_id(ev);
321 evlist__for_each_entry_continue(metric_evlist, ev) {
322 if (!strcmp(evsel__metric_id(ev), metric_id))
323 ev->metric_leader = metric_events[i];
324 }
325 }
326 *out_metric_events = metric_events;
327 return 0;
328}
329
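/*
 * Does the metric or metric group string @n match the requested @list name?
 * "all" matches everything, a NULL @n only matches "No_group", otherwise
 * @list must appear in @n as a complete ';'-delimited entry, compared
 * case-insensitively.
 */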
330static bool match_metric(const char *n, const char *list)
331{
332 int len;
333 char *m;
334
335 if (!list)
336 return false;
337 if (!strcmp(list, "all"))
338 return true;
339 if (!n)
340 return !strcasecmp(list, "No_group");
341 len = strlen(list);
342 m = strcasestr(n, list);
343 if (!m)
344 return false;
345 if ((m == n || m[-1] == ';' || m[-1] == ' ') &&
346 (m[len] == 0 || m[len] == ';'))
347 return true;
348 return false;
349}
350
351static bool match_pe_metric(const struct pmu_event *pe, const char *metric)
352{
353 return match_metric(pe->metric_group, metric) ||
354 match_metric(pe->metric_name, metric);
355}
356
357/** struct mep - RB-tree node for building printing information. */
358struct mep {
	/** @nd: RB-tree element. */
	struct rb_node nd;
	/** @metric_group: Owned metric group name, groups separated by ';'. */
362 char *metric_group;
363 const char *metric_name;
364 const char *metric_desc;
365 const char *metric_long_desc;
366 const char *metric_expr;
367 const char *metric_unit;
368};
369
370static int mep_cmp(struct rb_node *rb_node, const void *entry)
371{
372 struct mep *a = container_of(rb_node, struct mep, nd);
373 struct mep *b = (struct mep *)entry;
374 int ret;
375
376 ret = strcmp(a->metric_group, b->metric_group);
377 if (ret)
378 return ret;
379
380 return strcmp(a->metric_name, b->metric_name);
381}
382
383static struct rb_node *mep_new(struct rblist *rl __maybe_unused, const void *entry)
384{
385 struct mep *me = malloc(sizeof(struct mep));
386
387 if (!me)
388 return NULL;
389
390 memcpy(me, entry, sizeof(struct mep));
391 return &me->nd;
392}
393
394static void mep_delete(struct rblist *rl __maybe_unused,
395 struct rb_node *nd)
396{
397 struct mep *me = container_of(nd, struct mep, nd);
398
399 zfree(&me->metric_group);
400 free(me);
401}
402
403static struct mep *mep_lookup(struct rblist *groups, const char *metric_group,
404 const char *metric_name)
405{
406 struct rb_node *nd;
407 struct mep me = {
408 .metric_group = strdup(metric_group),
409 .metric_name = metric_name,
410 };
411 nd = rblist__find(groups, &me);
412 if (nd) {
413 free(me.metric_group);
414 return container_of(nd, struct mep, nd);
415 }
416 rblist__add_node(groups, &me);
417 nd = rblist__find(groups, &me);
418 if (nd)
419 return container_of(nd, struct mep, nd);
420 return NULL;
421}
422
423static int metricgroup__add_to_mep_groups(const struct pmu_event *pe,
424 struct rblist *groups)
425{
426 const char *g;
427 char *omg, *mg;
428
429 mg = strdup(pe->metric_group ?: "No_group");
430 if (!mg)
431 return -ENOMEM;
432 omg = mg;
433 while ((g = strsep(&mg, ";")) != NULL) {
434 struct mep *me;
435
436 g = skip_spaces(g);
437 if (strlen(g))
438 me = mep_lookup(groups, g, pe->metric_name);
439 else
440 me = mep_lookup(groups, "No_group", pe->metric_name);
441
442 if (me) {
443 me->metric_desc = pe->desc;
444 me->metric_long_desc = pe->long_desc;
445 me->metric_expr = pe->metric_expr;
446 me->metric_unit = pe->unit;
447 }
448 }
449 free(omg);
450
451 return 0;
452}
453
454struct metricgroup_iter_data {
455 pmu_event_iter_fn fn;
456 void *data;
457};
458
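/*
 * Iterator wrapper for system PMU events: only forward to d->fn those entries
 * that have a metric expression and whose "compat" value matches the id of a
 * PMU present on the system.
 */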
459static int metricgroup__sys_event_iter(const struct pmu_event *pe,
460 const struct pmu_events_table *table,
461 void *data)
462{
463 struct metricgroup_iter_data *d = data;
464 struct perf_pmu *pmu = NULL;
465
466 if (!pe->metric_expr || !pe->compat)
467 return 0;
468
469 while ((pmu = perf_pmu__scan(pmu))) {
470
471 if (!pmu->id || strcmp(pmu->id, pe->compat))
472 continue;
473
474 return d->fn(pe, table, d->data);
475 }
476 return 0;
477}
478
479static int metricgroup__add_to_mep_groups_callback(const struct pmu_event *pe,
480 const struct pmu_events_table *table __maybe_unused,
481 void *vdata)
482{
483 struct rblist *groups = vdata;
484
485 if (!pe->metric_name)
486 return 0;
487
488 return metricgroup__add_to_mep_groups(pe, groups);
489}
490
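/*
 * List all metrics: gather them from the CPU's pmu-events table and from
 * matching system PMU events into an rblist keyed by (metric group, metric
 * name), then emit each entry via the print callback.
 */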
491void metricgroup__print(const struct print_callbacks *print_cb, void *print_state)
492{
493 struct rblist groups;
494 const struct pmu_events_table *table;
495 struct rb_node *node, *next;
496
497 rblist__init(&groups);
498 groups.node_new = mep_new;
499 groups.node_cmp = mep_cmp;
500 groups.node_delete = mep_delete;
501 table = pmu_events_table__find();
502 if (table) {
503 pmu_events_table_for_each_event(table,
504 metricgroup__add_to_mep_groups_callback,
505 &groups);
506 }
507 {
508 struct metricgroup_iter_data data = {
509 .fn = metricgroup__add_to_mep_groups_callback,
510 .data = &groups,
511 };
512 pmu_for_each_sys_event(metricgroup__sys_event_iter, &data);
513 }
514
515 for (node = rb_first_cached(&groups.entries); node; node = next) {
516 struct mep *me = container_of(node, struct mep, nd);
517
518 print_cb->print_metric(print_state,
519 me->metric_group,
520 me->metric_name,
521 me->metric_desc,
522 me->metric_long_desc,
523 me->metric_expr,
524 me->metric_unit);
525 next = rb_next(node);
526 rblist__remove_node(&groups, node);
527 }
528}
529
530static const char *code_characters = ",-=@";
531
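/*
 * The characters in code_characters can't be parsed in a metric-id term by
 * parse-events, so encode_metric_id() replaces each occurrence with '!'
 * followed by the character's index, e.g. "msr@tsc@" becomes "msr!3tsc!3".
 * decode_metric_id() reverses the transformation.
 */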
532static int encode_metric_id(struct strbuf *sb, const char *x)
533{
534 char *c;
535 int ret = 0;
536
537 for (; *x; x++) {
538 c = strchr(code_characters, *x);
539 if (c) {
540 ret = strbuf_addch(sb, '!');
541 if (ret)
542 break;
543
544 ret = strbuf_addch(sb, '0' + (c - code_characters));
545 if (ret)
546 break;
547 } else {
548 ret = strbuf_addch(sb, *x);
549 if (ret)
550 break;
551 }
552 }
553 return ret;
554}
555
556static int decode_metric_id(struct strbuf *sb, const char *x)
557{
558 const char *orig = x;
559 size_t i;
560 char c;
561 int ret;
562
563 for (; *x; x++) {
564 c = *x;
565 if (*x == '!') {
566 x++;
567 i = *x - '0';
			if (i >= strlen(code_characters)) {
				pr_err("Bad metric-id encoding in: '%s'\n", orig);
570 return -1;
571 }
572 c = code_characters[i];
573 }
574 ret = strbuf_addch(sb, c);
575 if (ret)
576 return ret;
577 }
578 return 0;
579}
580
581static int decode_all_metric_ids(struct evlist *perf_evlist, const char *modifier)
582{
583 struct evsel *ev;
584 struct strbuf sb = STRBUF_INIT;
585 char *cur;
586 int ret = 0;
587
588 evlist__for_each_entry(perf_evlist, ev) {
589 if (!ev->metric_id)
590 continue;
591
592 ret = strbuf_setlen(&sb, 0);
593 if (ret)
594 break;
595
596 ret = decode_metric_id(&sb, ev->metric_id);
597 if (ret)
598 break;
599
600 free((char *)ev->metric_id);
601 ev->metric_id = strdup(sb.buf);
602 if (!ev->metric_id) {
603 ret = -ENOMEM;
604 break;
605 }
606 /*
607 * If the name is just the parsed event, use the metric-id to
608 * give a more friendly display version.
609 */
610 if (strstr(ev->name, "metric-id=")) {
611 bool has_slash = false;
612
613 free(ev->name);
614 for (cur = strchr(sb.buf, '@') ; cur; cur = strchr(++cur, '@')) {
615 *cur = '/';
616 has_slash = true;
617 }
618
619 if (modifier) {
620 if (!has_slash && !strchr(sb.buf, ':')) {
621 ret = strbuf_addch(&sb, ':');
622 if (ret)
623 break;
624 }
625 ret = strbuf_addstr(&sb, modifier);
626 if (ret)
627 break;
628 }
629 ev->name = strdup(sb.buf);
630 if (!ev->name) {
631 ret = -ENOMEM;
632 break;
633 }
634 }
635 }
636 strbuf_release(&sb);
637 return ret;
638}
639
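/*
 * Build the perf events string for the IDs in @ctx into @events. Unless
 * @has_constraint, the events are placed in a single weak group, e.g.
 * "{instructions/metric-id=instructions/,cycles/metric-id=cycles/}:W".
 * Tool events such as duration_time are always emitted outside the group.
 * @modifier, if non-NULL, is appended to each non-tool event.
 */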
640static int metricgroup__build_event_string(struct strbuf *events,
641 const struct expr_parse_ctx *ctx,
642 const char *modifier,
643 bool has_constraint)
644{
645 struct hashmap_entry *cur;
646 size_t bkt;
647 bool no_group = true, has_tool_events = false;
648 bool tool_events[PERF_TOOL_MAX] = {false};
649 int ret = 0;
650
651#define RETURN_IF_NON_ZERO(x) do { if (x) return x; } while (0)
652
653 hashmap__for_each_entry(ctx->ids, cur, bkt) {
654 const char *sep, *rsep, *id = cur->pkey;
655 enum perf_tool_event ev;
656
657 pr_debug("found event %s\n", id);
658
659 /* Always move tool events outside of the group. */
660 ev = perf_tool_event__from_str(id);
661 if (ev != PERF_TOOL_NONE) {
662 has_tool_events = true;
663 tool_events[ev] = true;
664 continue;
665 }
666 /* Separate events with commas and open the group if necessary. */
667 if (no_group) {
668 if (!has_constraint) {
669 ret = strbuf_addch(events, '{');
670 RETURN_IF_NON_ZERO(ret);
671 }
672
673 no_group = false;
674 } else {
675 ret = strbuf_addch(events, ',');
676 RETURN_IF_NON_ZERO(ret);
677 }
678 /*
679 * Encode the ID as an event string. Add a qualifier for
680 * metric_id that is the original name except with characters
681 * that parse-events can't parse replaced. For example,
682 * 'msr@tsc@' gets added as msr/tsc,metric-id=msr!3tsc!3/
683 */
684 sep = strchr(id, '@');
685 if (sep != NULL) {
686 ret = strbuf_add(events, id, sep - id);
687 RETURN_IF_NON_ZERO(ret);
688 ret = strbuf_addch(events, '/');
689 RETURN_IF_NON_ZERO(ret);
690 rsep = strrchr(sep, '@');
691 ret = strbuf_add(events, sep + 1, rsep - sep - 1);
692 RETURN_IF_NON_ZERO(ret);
693 ret = strbuf_addstr(events, ",metric-id=");
694 RETURN_IF_NON_ZERO(ret);
695 sep = rsep;
696 } else {
697 sep = strchr(id, ':');
698 if (sep != NULL) {
699 ret = strbuf_add(events, id, sep - id);
700 RETURN_IF_NON_ZERO(ret);
701 } else {
702 ret = strbuf_addstr(events, id);
703 RETURN_IF_NON_ZERO(ret);
704 }
705 ret = strbuf_addstr(events, "/metric-id=");
706 RETURN_IF_NON_ZERO(ret);
707 }
708 ret = encode_metric_id(events, id);
709 RETURN_IF_NON_ZERO(ret);
710 ret = strbuf_addstr(events, "/");
711 RETURN_IF_NON_ZERO(ret);
712
713 if (sep != NULL) {
714 ret = strbuf_addstr(events, sep + 1);
715 RETURN_IF_NON_ZERO(ret);
716 }
717 if (modifier) {
718 ret = strbuf_addstr(events, modifier);
719 RETURN_IF_NON_ZERO(ret);
720 }
721 }
722 if (!no_group && !has_constraint) {
723 ret = strbuf_addf(events, "}:W");
724 RETURN_IF_NON_ZERO(ret);
725 }
726 if (has_tool_events) {
727 int i;
728
729 perf_tool_event__for_each_event(i) {
730 if (tool_events[i]) {
731 if (!no_group) {
732 ret = strbuf_addch(events, ',');
733 RETURN_IF_NON_ZERO(ret);
734 }
735 no_group = false;
736 ret = strbuf_addstr(events, perf_tool_event__to_str(i));
737 RETURN_IF_NON_ZERO(ret);
738 }
739 }
740 }
741
742 return ret;
743#undef RETURN_IF_NON_ZERO
744}
745
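/*
 * Weak default for the runtime parameter count: architectures may override
 * this to say how many copies of a '?' parameterized metric should be
 * created (see add_metric()).
 */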
746int __weak arch_get_runtimeparam(const struct pmu_event *pe __maybe_unused)
747{
748 return 1;
749}
750
751/*
752 * A singly linked list on the stack of the names of metrics being
753 * processed. Used to identify recursion.
754 */
755struct visited_metric {
756 const char *name;
757 const struct visited_metric *parent;
758};
759
760struct metricgroup_add_iter_data {
761 struct list_head *metric_list;
762 const char *metric_name;
763 const char *modifier;
764 int *ret;
765 bool *has_match;
766 bool metric_no_group;
767 const char *user_requested_cpu_list;
768 bool system_wide;
769 struct metric *root_metric;
770 const struct visited_metric *visited;
771 const struct pmu_events_table *table;
772};
773
774static bool metricgroup__find_metric(const char *metric,
775 const struct pmu_events_table *table,
776 struct pmu_event *pe);
777
778static int add_metric(struct list_head *metric_list,
779 const struct pmu_event *pe,
780 const char *modifier,
781 bool metric_no_group,
782 const char *user_requested_cpu_list,
783 bool system_wide,
784 struct metric *root_metric,
785 const struct visited_metric *visited,
786 const struct pmu_events_table *table);
787
788/**
789 * resolve_metric - Locate metrics within the root metric and recursively add
790 * references to them.
791 * @metric_list: The list the metric is added to.
 * @modifier: if non-NULL, event modifiers such as "u".
 * @metric_no_group: Should the events be placed in a "{}" group or be
 *       global. Grouping is the default, but the user may override it
 *       because of multiplexing.
 * @user_requested_cpu_list: Command line specified CPUs to record on.
 * @system_wide: Are events recorded for all processes?
798 * @root_metric: Metrics may reference other metrics to form a tree. In this
799 * case the root_metric holds all the IDs and a list of referenced
800 * metrics. When adding a root this argument is NULL.
801 * @visited: A singly linked list of metric names being added that is used to
802 * detect recursion.
803 * @table: The table that is searched for metrics, most commonly the table for the
804 * architecture perf is running upon.
805 */
806static int resolve_metric(struct list_head *metric_list,
807 const char *modifier,
808 bool metric_no_group,
809 const char *user_requested_cpu_list,
810 bool system_wide,
811 struct metric *root_metric,
812 const struct visited_metric *visited,
813 const struct pmu_events_table *table)
814{
815 struct hashmap_entry *cur;
816 size_t bkt;
817 struct to_resolve {
818 /* The metric to resolve. */
819 struct pmu_event pe;
820 /*
		 * The key in the IDs map; this may differ in case, etc.,
		 * from pe->metric_name.
823 */
824 const char *key;
825 } *pending = NULL;
826 int i, ret = 0, pending_cnt = 0;
827
828 /*
	 * Iterate over all the parsed IDs and, if there's a matching metric,
	 * add it to the pending array.
831 */
832 hashmap__for_each_entry(root_metric->pctx->ids, cur, bkt) {
833 struct pmu_event pe;
834
835 if (metricgroup__find_metric(cur->pkey, table, &pe)) {
836 pending = realloc(pending,
837 (pending_cnt + 1) * sizeof(struct to_resolve));
838 if (!pending)
839 return -ENOMEM;
840
841 memcpy(&pending[pending_cnt].pe, &pe, sizeof(pe));
842 pending[pending_cnt].key = cur->pkey;
843 pending_cnt++;
844 }
845 }
846
847 /* Remove the metric IDs from the context. */
848 for (i = 0; i < pending_cnt; i++)
849 expr__del_id(root_metric->pctx, pending[i].key);
850
851 /*
852 * Recursively add all the metrics, IDs are added to the root metric's
853 * context.
854 */
855 for (i = 0; i < pending_cnt; i++) {
856 ret = add_metric(metric_list, &pending[i].pe, modifier, metric_no_group,
857 user_requested_cpu_list, system_wide, root_metric, visited,
858 table);
859 if (ret)
860 break;
861 }
862
863 free(pending);
864 return ret;
865}
866
867/**
868 * __add_metric - Add a metric to metric_list.
869 * @metric_list: The list the metric is added to.
870 * @pe: The pmu_event containing the metric to be added.
 * @modifier: if non-NULL, event modifiers such as "u".
 * @metric_no_group: Should the events be placed in a "{}" group or be
 *       global. Grouping is the default, but the user may override it
 *       because of multiplexing.
 * @runtime: A special argument for the parser only known at runtime.
 * @user_requested_cpu_list: Command line specified CPUs to record on.
 * @system_wide: Are events recorded for all processes?
878 * @root_metric: Metrics may reference other metrics to form a tree. In this
879 * case the root_metric holds all the IDs and a list of referenced
880 * metrics. When adding a root this argument is NULL.
881 * @visited: A singly linked list of metric names being added that is used to
882 * detect recursion.
883 * @table: The table that is searched for metrics, most commonly the table for the
884 * architecture perf is running upon.
885 */
886static int __add_metric(struct list_head *metric_list,
887 const struct pmu_event *pe,
888 const char *modifier,
889 bool metric_no_group,
890 int runtime,
891 const char *user_requested_cpu_list,
892 bool system_wide,
893 struct metric *root_metric,
894 const struct visited_metric *visited,
895 const struct pmu_events_table *table)
896{
897 const struct visited_metric *vm;
898 int ret;
899 bool is_root = !root_metric;
900 struct visited_metric visited_node = {
901 .name = pe->metric_name,
902 .parent = visited,
903 };
904
905 for (vm = visited; vm; vm = vm->parent) {
906 if (!strcmp(pe->metric_name, vm->name)) {
907 pr_err("failed: recursion detected for %s\n", pe->metric_name);
908 return -1;
909 }
910 }
911
912 if (is_root) {
913 /*
914 * This metric is the root of a tree and may reference other
915 * metrics that are added recursively.
916 */
917 root_metric = metric__new(pe, modifier, metric_no_group, runtime,
918 user_requested_cpu_list, system_wide);
919 if (!root_metric)
920 return -ENOMEM;
921
922 } else {
923 int cnt = 0;
924
925 /*
926 * This metric was referenced in a metric higher in the
927 * tree. Check if the same metric is already resolved in the
928 * metric_refs list.
929 */
930 if (root_metric->metric_refs) {
931 for (; root_metric->metric_refs[cnt].metric_name; cnt++) {
932 if (!strcmp(pe->metric_name,
933 root_metric->metric_refs[cnt].metric_name))
934 return 0;
935 }
936 }
937
938 /* Create reference. Need space for the entry and the terminator. */
939 root_metric->metric_refs = realloc(root_metric->metric_refs,
940 (cnt + 2) * sizeof(struct metric_ref));
941 if (!root_metric->metric_refs)
942 return -ENOMEM;
943
944 /*
945 * Intentionally passing just const char pointers,
946 * from 'pe' object, so they never go away. We don't
947 * need to change them, so there's no need to create
948 * our own copy.
949 */
950 root_metric->metric_refs[cnt].metric_name = pe->metric_name;
951 root_metric->metric_refs[cnt].metric_expr = pe->metric_expr;
952
953 /* Null terminate array. */
954 root_metric->metric_refs[cnt+1].metric_name = NULL;
955 root_metric->metric_refs[cnt+1].metric_expr = NULL;
956 }
957
958 /*
	 * For both the parent and referenced metrics, we parse
	 * all the metric's IDs and add them to the root context.
961 */
962 if (expr__find_ids(pe->metric_expr, NULL, root_metric->pctx) < 0) {
963 /* Broken metric. */
964 ret = -EINVAL;
965 } else {
966 /* Resolve referenced metrics. */
967 ret = resolve_metric(metric_list, modifier, metric_no_group,
968 user_requested_cpu_list, system_wide,
969 root_metric, &visited_node, table);
970 }
971
972 if (ret) {
973 if (is_root)
974 metric__free(root_metric);
975
976 } else if (is_root)
977 list_add(&root_metric->nd, metric_list);
978
979 return ret;
980}
981
982struct metricgroup__find_metric_data {
983 const char *metric;
984 struct pmu_event *pe;
985};
986
987static int metricgroup__find_metric_callback(const struct pmu_event *pe,
988 const struct pmu_events_table *table __maybe_unused,
989 void *vdata)
990{
991 struct metricgroup__find_metric_data *data = vdata;
992
993 if (!match_metric(pe->metric_name, data->metric))
994 return 0;
995
996 memcpy(data->pe, pe, sizeof(*pe));
997 return 1;
998}
999
1000static bool metricgroup__find_metric(const char *metric,
1001 const struct pmu_events_table *table,
1002 struct pmu_event *pe)
1003{
1004 struct metricgroup__find_metric_data data = {
1005 .metric = metric,
1006 .pe = pe,
1007 };
1008
1009 return pmu_events_table_for_each_event(table, metricgroup__find_metric_callback, &data)
1010 ? true : false;
1011}
1012
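/*
 * Add the metric in @pe to @metric_list. Metrics whose expression contains
 * the runtime parameter '?' are added once per runtime value, with the
 * number of values taken from arch_get_runtimeparam().
 */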
1013static int add_metric(struct list_head *metric_list,
1014 const struct pmu_event *pe,
1015 const char *modifier,
1016 bool metric_no_group,
1017 const char *user_requested_cpu_list,
1018 bool system_wide,
1019 struct metric *root_metric,
1020 const struct visited_metric *visited,
1021 const struct pmu_events_table *table)
1022{
1023 int ret = 0;
1024
1025 pr_debug("metric expr %s for %s\n", pe->metric_expr, pe->metric_name);
1026
1027 if (!strstr(pe->metric_expr, "?")) {
1028 ret = __add_metric(metric_list, pe, modifier, metric_no_group, 0,
1029 user_requested_cpu_list, system_wide, root_metric,
1030 visited, table);
1031 } else {
1032 int j, count;
1033
1034 count = arch_get_runtimeparam(pe);
1035
		/*
		 * Create one copy of the metric per runtime value, 0 to
		 * count - 1, and add each copy to metric_list.
		 */
1040
1041 for (j = 0; j < count && !ret; j++)
1042 ret = __add_metric(metric_list, pe, modifier, metric_no_group, j,
1043 user_requested_cpu_list, system_wide,
1044 root_metric, visited, table);
1045 }
1046
1047 return ret;
1048}
1049
1050static int metricgroup__add_metric_sys_event_iter(const struct pmu_event *pe,
1051 const struct pmu_events_table *table __maybe_unused,
1052 void *data)
1053{
1054 struct metricgroup_add_iter_data *d = data;
1055 int ret;
1056
1057 if (!match_pe_metric(pe, d->metric_name))
1058 return 0;
1059
1060 ret = add_metric(d->metric_list, pe, d->modifier, d->metric_no_group,
1061 d->user_requested_cpu_list, d->system_wide,
1062 d->root_metric, d->visited, d->table);
1063 if (ret)
1064 goto out;
1065
1066 *(d->has_match) = true;
1067
1068out:
1069 *(d->ret) = ret;
1070 return ret;
1071}
1072
1073/**
 * metric_list_cmp - list_sort comparator that sorts metrics with more events
 *                   to the front. Tool events are excluded from the count.
1076 */
1077static int metric_list_cmp(void *priv __maybe_unused, const struct list_head *l,
1078 const struct list_head *r)
1079{
1080 const struct metric *left = container_of(l, struct metric, nd);
1081 const struct metric *right = container_of(r, struct metric, nd);
1082 struct expr_id_data *data;
1083 int i, left_count, right_count;
1084
1085 left_count = hashmap__size(left->pctx->ids);
1086 perf_tool_event__for_each_event(i) {
1087 if (!expr__get_id(left->pctx, perf_tool_event__to_str(i), &data))
1088 left_count--;
1089 }
1090
1091 right_count = hashmap__size(right->pctx->ids);
1092 perf_tool_event__for_each_event(i) {
1093 if (!expr__get_id(right->pctx, perf_tool_event__to_str(i), &data))
1094 right_count--;
1095 }
1096
1097 return right_count - left_count;
1098}
1099
1100struct metricgroup__add_metric_data {
1101 struct list_head *list;
1102 const char *metric_name;
1103 const char *modifier;
1104 const char *user_requested_cpu_list;
1105 bool metric_no_group;
1106 bool system_wide;
1107 bool has_match;
1108};
1109
1110static int metricgroup__add_metric_callback(const struct pmu_event *pe,
1111 const struct pmu_events_table *table,
1112 void *vdata)
1113{
1114 struct metricgroup__add_metric_data *data = vdata;
1115 int ret = 0;
1116
1117 if (pe->metric_expr &&
1118 (match_metric(pe->metric_group, data->metric_name) ||
1119 match_metric(pe->metric_name, data->metric_name))) {
1120
1121 data->has_match = true;
1122 ret = add_metric(data->list, pe, data->modifier, data->metric_no_group,
1123 data->user_requested_cpu_list, data->system_wide,
1124 /*root_metric=*/NULL, /*visited_metrics=*/NULL, table);
1125 }
1126 return ret;
1127}
1128
1129/**
1130 * metricgroup__add_metric - Find and add a metric, or a metric group.
1131 * @metric_name: The name of the metric or metric group. For example, "IPC"
1132 * could be the name of a metric and "TopDownL1" the name of a
1133 * metric group.
 * @modifier: if non-NULL, event modifiers such as "u".
 * @metric_no_group: Should the events be placed in a "{}" group or be
 *       global. Grouping is the default, but the user may override it
 *       because of multiplexing.
 * @user_requested_cpu_list: Command line specified CPUs to record on.
 * @system_wide: Are events recorded for all processes?
1140 * @metric_list: The list that the metric or metric group are added to.
1141 * @table: The table that is searched for metrics, most commonly the table for the
1142 * architecture perf is running upon.
1143 */
1144static int metricgroup__add_metric(const char *metric_name, const char *modifier,
1145 bool metric_no_group,
1146 const char *user_requested_cpu_list,
1147 bool system_wide,
1148 struct list_head *metric_list,
1149 const struct pmu_events_table *table)
1150{
1151 LIST_HEAD(list);
1152 int ret;
1153 bool has_match = false;
1154
1155 {
1156 struct metricgroup__add_metric_data data = {
1157 .list = &list,
1158 .metric_name = metric_name,
1159 .modifier = modifier,
1160 .metric_no_group = metric_no_group,
1161 .user_requested_cpu_list = user_requested_cpu_list,
1162 .system_wide = system_wide,
1163 .has_match = false,
1164 };
1165 /*
		 * Iterate over all metrics, seeing if the metric matches either
		 * the name or the group. When it does, add the metric to the list.
1168 */
1169 ret = pmu_events_table_for_each_event(table, metricgroup__add_metric_callback,
1170 &data);
1171 if (ret)
1172 goto out;
1173
1174 has_match = data.has_match;
1175 }
1176 {
1177 struct metricgroup_iter_data data = {
1178 .fn = metricgroup__add_metric_sys_event_iter,
1179 .data = (void *) &(struct metricgroup_add_iter_data) {
1180 .metric_list = &list,
1181 .metric_name = metric_name,
1182 .modifier = modifier,
1183 .metric_no_group = metric_no_group,
1184 .user_requested_cpu_list = user_requested_cpu_list,
1185 .system_wide = system_wide,
1186 .has_match = &has_match,
1187 .ret = &ret,
1188 .table = table,
1189 },
1190 };
1191
1192 pmu_for_each_sys_event(metricgroup__sys_event_iter, &data);
1193 }
1194 /* End of pmu events. */
1195 if (!has_match)
1196 ret = -EINVAL;
1197
1198out:
1199 /*
	 * Add to metric_list so that the metrics can be released
	 * even on failure.
1202 */
1203 list_splice(&list, metric_list);
1204 return ret;
1205}
1206
1207/**
1208 * metricgroup__add_metric_list - Find and add metrics, or metric groups,
1209 * specified in a list.
1210 * @list: the list of metrics or metric groups. For example, "IPC,CPI,TopDownL1"
1211 * would match the IPC and CPI metrics, and TopDownL1 would match all
1212 * the metrics in the TopDownL1 group.
 * @metric_no_group: Should the events be placed in a "{}" group or be
 *       global. Grouping is the default, but the user may override it
 *       because of multiplexing.
 * @user_requested_cpu_list: Command line specified CPUs to record on.
 * @system_wide: Are events recorded for all processes?
1218 * @metric_list: The list that metrics are added to.
1219 * @table: The table that is searched for metrics, most commonly the table for the
1220 * architecture perf is running upon.
1221 */
1222static int metricgroup__add_metric_list(const char *list, bool metric_no_group,
1223 const char *user_requested_cpu_list,
1224 bool system_wide, struct list_head *metric_list,
1225 const struct pmu_events_table *table)
1226{
1227 char *list_itr, *list_copy, *metric_name, *modifier;
1228 int ret, count = 0;
1229
1230 list_copy = strdup(list);
1231 if (!list_copy)
1232 return -ENOMEM;
1233 list_itr = list_copy;
1234
1235 while ((metric_name = strsep(&list_itr, ",")) != NULL) {
1236 modifier = strchr(metric_name, ':');
1237 if (modifier)
1238 *modifier++ = '\0';
1239
1240 ret = metricgroup__add_metric(metric_name, modifier,
1241 metric_no_group, user_requested_cpu_list,
1242 system_wide, metric_list, table);
1243 if (ret == -EINVAL)
1244 pr_err("Cannot find metric or group `%s'\n", metric_name);
1245
1246 if (ret)
1247 break;
1248
1249 count++;
1250 }
1251 free(list_copy);
1252
1253 if (!ret) {
1254 /*
1255 * Warn about nmi_watchdog if any parsed metrics had the
1256 * NO_NMI_WATCHDOG constraint.
1257 */
1258 metricgroup___watchdog_constraint_hint(NULL, true);
1259 /* No metrics. */
1260 if (count == 0)
1261 return -EINVAL;
1262 }
1263 return ret;
1264}
1265
1266static void metricgroup__free_metrics(struct list_head *metric_list)
1267{
1268 struct metric *m, *tmp;
1269
1270 list_for_each_entry_safe (m, tmp, metric_list, nd) {
1271 list_del_init(&m->nd);
1272 metric__free(m);
1273 }
1274}
1275
1276/**
 * find_tool_events - Search for the presence of tool events in metric_list.
 * @metric_list: List to take metrics from.
 * @tool_events: Array initialized to false; entries whose index corresponds
 *               to a tool event found in the list are set to true.
1281 */
1282static void find_tool_events(const struct list_head *metric_list,
1283 bool tool_events[PERF_TOOL_MAX])
1284{
1285 struct metric *m;
1286
1287 list_for_each_entry(m, metric_list, nd) {
1288 int i;
1289
1290 perf_tool_event__for_each_event(i) {
1291 struct expr_id_data *data;
1292
1293 if (!tool_events[i] &&
1294 !expr__get_id(m->pctx, perf_tool_event__to_str(i), &data))
1295 tool_events[i] = true;
1296 }
1297 }
1298}
1299
1300/**
1301 * build_combined_expr_ctx - Make an expr_parse_ctx with all has_constraint
 *                           metric IDs. As the IDs are held in a set,
 *                           duplicates will be removed.
1304 * @metric_list: List to take metrics from.
1305 * @combined: Out argument for result.
1306 */
1307static int build_combined_expr_ctx(const struct list_head *metric_list,
1308 struct expr_parse_ctx **combined)
1309{
1310 struct hashmap_entry *cur;
1311 size_t bkt;
1312 struct metric *m;
1313 char *dup;
1314 int ret;
1315
1316 *combined = expr__ctx_new();
1317 if (!*combined)
1318 return -ENOMEM;
1319
1320 list_for_each_entry(m, metric_list, nd) {
1321 if (m->has_constraint && !m->modifier) {
1322 hashmap__for_each_entry(m->pctx->ids, cur, bkt) {
1323 dup = strdup(cur->pkey);
1324 if (!dup) {
1325 ret = -ENOMEM;
1326 goto err_out;
1327 }
1328 ret = expr__add_id(*combined, dup);
1329 if (ret)
1330 goto err_out;
1331 }
1332 }
1333 }
1334 return 0;
1335err_out:
1336 expr__ctx_free(*combined);
1337 *combined = NULL;
1338 return ret;
1339}
1340
1341/**
1342 * parse_ids - Build the event string for the ids and parse them creating an
1343 * evlist. The encoded metric_ids are decoded.
 * @metric_no_merge: Is metric sharing explicitly disabled?
1345 * @fake_pmu: used when testing metrics not supported by the current CPU.
1346 * @ids: the event identifiers parsed from a metric.
1347 * @modifier: any modifiers added to the events.
1348 * @has_constraint: false if events should be placed in a weak group.
1349 * @tool_events: entries set true if the tool event of index could be present in
1350 * the overall list of metrics.
1351 * @out_evlist: the created list of events.
1352 */
1353static int parse_ids(bool metric_no_merge, struct perf_pmu *fake_pmu,
1354 struct expr_parse_ctx *ids, const char *modifier,
1355 bool has_constraint, const bool tool_events[PERF_TOOL_MAX],
1356 struct evlist **out_evlist)
1357{
1358 struct parse_events_error parse_error;
1359 struct evlist *parsed_evlist;
1360 struct strbuf events = STRBUF_INIT;
1361 int ret;
1362
1363 *out_evlist = NULL;
1364 if (!metric_no_merge || hashmap__size(ids->ids) == 0) {
1365 bool added_event = false;
1366 int i;
1367 /*
1368 * We may fail to share events between metrics because a tool
1369 * event isn't present in one metric. For example, a ratio of
1370 * cache misses doesn't need duration_time but the same events
		 * may be used for a misses-per-second metric. Events without
		 * sharing imply multiplexing, which is best avoided, so place
		 * all tool events in every group.
1374 *
1375 * Also, there may be no ids/events in the expression parsing
1376 * context because of constant evaluation, e.g.:
1377 * event1 if #smt_on else 0
1378 * Add a tool event to avoid a parse error on an empty string.
1379 */
1380 perf_tool_event__for_each_event(i) {
1381 if (tool_events[i]) {
1382 char *tmp = strdup(perf_tool_event__to_str(i));
1383
1384 if (!tmp)
1385 return -ENOMEM;
1386 ids__insert(ids->ids, tmp);
1387 added_event = true;
1388 }
1389 }
1390 if (!added_event && hashmap__size(ids->ids) == 0) {
1391 char *tmp = strdup("duration_time");
1392
1393 if (!tmp)
1394 return -ENOMEM;
1395 ids__insert(ids->ids, tmp);
1396 }
1397 }
1398 ret = metricgroup__build_event_string(&events, ids, modifier,
1399 has_constraint);
1400 if (ret)
1401 return ret;
1402
1403 parsed_evlist = evlist__new();
1404 if (!parsed_evlist) {
1405 ret = -ENOMEM;
1406 goto err_out;
1407 }
1408 pr_debug("Parsing metric events '%s'\n", events.buf);
1409 parse_events_error__init(&parse_error);
1410 ret = __parse_events(parsed_evlist, events.buf, &parse_error, fake_pmu);
1411 if (ret) {
1412 parse_events_error__print(&parse_error, events.buf);
1413 goto err_out;
1414 }
1415 ret = decode_all_metric_ids(parsed_evlist, modifier);
1416 if (ret)
1417 goto err_out;
1418
1419 *out_evlist = parsed_evlist;
1420 parsed_evlist = NULL;
1421err_out:
1422 parse_events_error__exit(&parse_error);
1423 evlist__delete(parsed_evlist);
1424 strbuf_release(&events);
1425 return ret;
1426}
1427
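/*
 * parse_groups - Parse the metrics and metric groups in @str, build and
 * parse their events and add them to @perf_evlist, recording the evsel to
 * metric mapping in @metric_events_list. @fake_pmu is used when testing
 * metrics not supported by the current CPU.
 */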
1428static int parse_groups(struct evlist *perf_evlist, const char *str,
1429 bool metric_no_group,
1430 bool metric_no_merge,
1431 const char *user_requested_cpu_list,
1432 bool system_wide,
1433 struct perf_pmu *fake_pmu,
1434 struct rblist *metric_events_list,
1435 const struct pmu_events_table *table)
1436{
1437 struct evlist *combined_evlist = NULL;
1438 LIST_HEAD(metric_list);
1439 struct metric *m;
1440 bool tool_events[PERF_TOOL_MAX] = {false};
1441 int ret;
1442
1443 if (metric_events_list->nr_entries == 0)
1444 metricgroup__rblist_init(metric_events_list);
1445 ret = metricgroup__add_metric_list(str, metric_no_group,
1446 user_requested_cpu_list,
1447 system_wide, &metric_list, table);
1448 if (ret)
1449 goto out;
1450
1451 /* Sort metrics from largest to smallest. */
1452 list_sort(NULL, &metric_list, metric_list_cmp);
1453
1454 if (!metric_no_merge) {
1455 struct expr_parse_ctx *combined = NULL;
1456
1457 find_tool_events(&metric_list, tool_events);
1458
1459 ret = build_combined_expr_ctx(&metric_list, &combined);
1460
1461 if (!ret && combined && hashmap__size(combined->ids)) {
1462 ret = parse_ids(metric_no_merge, fake_pmu, combined,
1463 /*modifier=*/NULL,
1464 /*has_constraint=*/true,
1465 tool_events,
1466 &combined_evlist);
1467 }
1468 if (combined)
1469 expr__ctx_free(combined);
1470
1471 if (ret)
1472 goto out;
1473 }
1474
1475 list_for_each_entry(m, &metric_list, nd) {
1476 struct metric_event *me;
1477 struct evsel **metric_events;
1478 struct evlist *metric_evlist = NULL;
1479 struct metric *n;
1480 struct metric_expr *expr;
1481
1482 if (combined_evlist && m->has_constraint) {
1483 metric_evlist = combined_evlist;
1484 } else if (!metric_no_merge) {
1485 /*
1486 * See if the IDs for this metric are a subset of an
1487 * earlier metric.
1488 */
1489 list_for_each_entry(n, &metric_list, nd) {
1490 if (m == n)
1491 break;
1492
1493 if (n->evlist == NULL)
1494 continue;
1495
1496 if ((!m->modifier && n->modifier) ||
1497 (m->modifier && !n->modifier) ||
1498 (m->modifier && n->modifier &&
1499 strcmp(m->modifier, n->modifier)))
1500 continue;
1501
1502 if (expr__subset_of_ids(n->pctx, m->pctx)) {
1503 pr_debug("Events in '%s' fully contained within '%s'\n",
1504 m->metric_name, n->metric_name);
1505 metric_evlist = n->evlist;
1506 break;
1507 }
1508
1509 }
1510 }
1511 if (!metric_evlist) {
1512 ret = parse_ids(metric_no_merge, fake_pmu, m->pctx, m->modifier,
1513 m->has_constraint, tool_events, &m->evlist);
1514 if (ret)
1515 goto out;
1516
1517 metric_evlist = m->evlist;
1518 }
1519 ret = setup_metric_events(m->pctx->ids, metric_evlist, &metric_events);
1520 if (ret) {
1521 pr_debug("Cannot resolve IDs for %s: %s\n",
1522 m->metric_name, m->metric_expr);
1523 goto out;
1524 }
1525
1526 me = metricgroup__lookup(metric_events_list, metric_events[0], true);
1527
1528 expr = malloc(sizeof(struct metric_expr));
1529 if (!expr) {
1530 ret = -ENOMEM;
1531 free(metric_events);
1532 goto out;
1533 }
1534
1535 expr->metric_refs = m->metric_refs;
1536 m->metric_refs = NULL;
1537 expr->metric_expr = m->metric_expr;
1538 if (m->modifier) {
1539 char *tmp;
1540
1541 if (asprintf(&tmp, "%s:%s", m->metric_name, m->modifier) < 0)
1542 expr->metric_name = NULL;
1543 else
1544 expr->metric_name = tmp;
1545 } else
1546 expr->metric_name = strdup(m->metric_name);
1547
1548 if (!expr->metric_name) {
1549 ret = -ENOMEM;
1550 free(metric_events);
1551 goto out;
1552 }
1553 expr->metric_unit = m->metric_unit;
1554 expr->metric_events = metric_events;
1555 expr->runtime = m->pctx->sctx.runtime;
1556 list_add(&expr->nd, &me->head);
1557 }
1558
1559
1560 if (combined_evlist) {
1561 evlist__splice_list_tail(perf_evlist, &combined_evlist->core.entries);
1562 evlist__delete(combined_evlist);
1563 }
1564
1565 list_for_each_entry(m, &metric_list, nd) {
1566 if (m->evlist)
1567 evlist__splice_list_tail(perf_evlist, &m->evlist->core.entries);
1568 }
1569
1570out:
1571 metricgroup__free_metrics(&metric_list);
1572 return ret;
1573}
1574
1575int metricgroup__parse_groups(struct evlist *perf_evlist,
1576 const char *str,
1577 bool metric_no_group,
1578 bool metric_no_merge,
1579 const char *user_requested_cpu_list,
1580 bool system_wide,
1581 struct rblist *metric_events)
1582{
1583 const struct pmu_events_table *table = pmu_events_table__find();
1584
1585 if (!table)
1586 return -EINVAL;
1587
1588 return parse_groups(perf_evlist, str, metric_no_group, metric_no_merge,
1589 user_requested_cpu_list, system_wide,
1590 /*fake_pmu=*/NULL, metric_events, table);
1591}
1592
1593int metricgroup__parse_groups_test(struct evlist *evlist,
1594 const struct pmu_events_table *table,
1595 const char *str,
1596 bool metric_no_group,
1597 bool metric_no_merge,
1598 struct rblist *metric_events)
1599{
1600 return parse_groups(evlist, str, metric_no_group, metric_no_merge,
1601 /*user_requested_cpu_list=*/NULL,
1602 /*system_wide=*/false,
1603 &perf_pmu__fake, metric_events, table);
1604}
1605
1606static int metricgroup__has_metric_callback(const struct pmu_event *pe,
1607 const struct pmu_events_table *table __maybe_unused,
1608 void *vdata)
1609{
1610 const char *metric = vdata;
1611
1612 if (!pe->metric_expr)
1613 return 0;
1614
1615 if (match_metric(pe->metric_name, metric))
1616 return 1;
1617
1618 return 0;
1619}
1620
1621bool metricgroup__has_metric(const char *metric)
1622{
1623 const struct pmu_events_table *table = pmu_events_table__find();
1624
1625 if (!table)
1626 return false;
1627
1628 return pmu_events_table_for_each_event(table, metricgroup__has_metric_callback,
1629 (void *)metric) ? true : false;
1630}
1631
1632int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
1633 struct rblist *new_metric_events,
1634 struct rblist *old_metric_events)
1635{
1636 unsigned int i;
1637
1638 for (i = 0; i < rblist__nr_entries(old_metric_events); i++) {
1639 struct rb_node *nd;
1640 struct metric_event *old_me, *new_me;
1641 struct metric_expr *old_expr, *new_expr;
1642 struct evsel *evsel;
1643 size_t alloc_size;
1644 int idx, nr;
1645
1646 nd = rblist__entry(old_metric_events, i);
1647 old_me = container_of(nd, struct metric_event, nd);
1648
1649 evsel = evlist__find_evsel(evlist, old_me->evsel->core.idx);
1650 if (!evsel)
1651 return -EINVAL;
1652 new_me = metricgroup__lookup(new_metric_events, evsel, true);
1653 if (!new_me)
1654 return -ENOMEM;
1655
1656 pr_debug("copying metric event for cgroup '%s': %s (idx=%d)\n",
1657 cgrp ? cgrp->name : "root", evsel->name, evsel->core.idx);
1658
1659 list_for_each_entry(old_expr, &old_me->head, nd) {
1660 new_expr = malloc(sizeof(*new_expr));
1661 if (!new_expr)
1662 return -ENOMEM;
1663
1664 new_expr->metric_expr = old_expr->metric_expr;
1665 new_expr->metric_name = strdup(old_expr->metric_name);
1666 if (!new_expr->metric_name)
1667 return -ENOMEM;
1668
1669 new_expr->metric_unit = old_expr->metric_unit;
1670 new_expr->runtime = old_expr->runtime;
1671
1672 if (old_expr->metric_refs) {
				/* calculate number of metric_refs */
1674 for (nr = 0; old_expr->metric_refs[nr].metric_name; nr++)
1675 continue;
1676 alloc_size = sizeof(*new_expr->metric_refs);
1677 new_expr->metric_refs = calloc(nr + 1, alloc_size);
1678 if (!new_expr->metric_refs) {
1679 free(new_expr);
1680 return -ENOMEM;
1681 }
1682
1683 memcpy(new_expr->metric_refs, old_expr->metric_refs,
1684 nr * alloc_size);
1685 } else {
1686 new_expr->metric_refs = NULL;
1687 }
1688
1689 /* calculate number of metric_events */
1690 for (nr = 0; old_expr->metric_events[nr]; nr++)
1691 continue;
1692 alloc_size = sizeof(*new_expr->metric_events);
1693 new_expr->metric_events = calloc(nr + 1, alloc_size);
1694 if (!new_expr->metric_events) {
1695 free(new_expr->metric_refs);
1696 free(new_expr);
1697 return -ENOMEM;
1698 }
1699
1700 /* copy evsel in the same position */
1701 for (idx = 0; idx < nr; idx++) {
1702 evsel = old_expr->metric_events[idx];
1703 evsel = evlist__find_evsel(evlist, evsel->core.idx);
1704 if (evsel == NULL) {
1705 free(new_expr->metric_events);
1706 free(new_expr->metric_refs);
1707 free(new_expr);
1708 return -EINVAL;
1709 }
1710 new_expr->metric_events[idx] = evsel;
1711 }
1712
1713 list_add(&new_expr->nd, &new_me->head);
1714 }
1715 }
1716 return 0;
1717}