tools/perf/util/expr.c (Linux v6.8)
// SPDX-License-Identifier: GPL-2.0
#include <stdbool.h>
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include "metricgroup.h"
#include "cpumap.h"
#include "cputopo.h"
#include "debug.h"
#include "evlist.h"
#include "expr.h"
#include <util/expr-bison.h>
#include <util/expr-flex.h>
#include "util/hashmap.h"
#include "util/header.h"
#include "util/pmu.h"
#include "smt.h"
#include "tsc.h"
#include <api/fs/fs.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <ctype.h>
#include <math.h>
#include "pmu.h"

#ifdef PARSER_DEBUG
extern int expr_debug;
#endif

struct expr_id_data {
        union {
                struct {
                        double val;
                        int source_count;
                } val;
                struct {
                        double val;
                        const char *metric_name;
                        const char *metric_expr;
                } ref;
        };

        enum {
                /* Holding a double value. */
                EXPR_ID_DATA__VALUE,
                /* Reference to another metric. */
                EXPR_ID_DATA__REF,
                /* A reference but the value has been computed. */
                EXPR_ID_DATA__REF_VALUE,
        } kind;
};

static size_t key_hash(long key, void *ctx __maybe_unused)
{
        const char *str = (const char *)key;
        size_t hash = 0;

        while (*str != '\0') {
                hash *= 31;
                hash += *str;
                str++;
        }
        return hash;
}

static bool key_equal(long key1, long key2, void *ctx __maybe_unused)
{
        return !strcmp((const char *)key1, (const char *)key2);
}

struct hashmap *ids__new(void)
{
        struct hashmap *hash;

        hash = hashmap__new(key_hash, key_equal, NULL);
        if (IS_ERR(hash))
                return NULL;
        return hash;
}

void ids__free(struct hashmap *ids)
{
        struct hashmap_entry *cur;
        size_t bkt;

        if (ids == NULL)
                return;

        hashmap__for_each_entry(ids, cur, bkt) {
                zfree(&cur->pkey);
                zfree(&cur->pvalue);
        }

        hashmap__free(ids);
}

int ids__insert(struct hashmap *ids, const char *id)
{
        struct expr_id_data *data_ptr = NULL, *old_data = NULL;
        char *old_key = NULL;
        int ret;

        ret = hashmap__set(ids, id, data_ptr, &old_key, &old_data);
        if (ret)
                free(data_ptr);
        free(old_key);
        free(old_data);
        return ret;
}

struct hashmap *ids__union(struct hashmap *ids1, struct hashmap *ids2)
{
        size_t bkt;
        struct hashmap_entry *cur;
        int ret;
        struct expr_id_data *old_data = NULL;
        char *old_key = NULL;

        if (!ids1)
                return ids2;

        if (!ids2)
                return ids1;

        if (hashmap__size(ids1) < hashmap__size(ids2)) {
                struct hashmap *tmp = ids1;

                ids1 = ids2;
                ids2 = tmp;
        }
        hashmap__for_each_entry(ids2, cur, bkt) {
                ret = hashmap__set(ids1, cur->key, cur->value, &old_key, &old_data);
                free(old_key);
                free(old_data);

                if (ret) {
                        hashmap__free(ids1);
                        hashmap__free(ids2);
                        return NULL;
                }
        }
        hashmap__free(ids2);
        return ids1;
}

/* Caller must make sure id is allocated */
int expr__add_id(struct expr_parse_ctx *ctx, const char *id)
{
        return ids__insert(ctx->ids, id);
}

/* Caller must make sure id is allocated */
int expr__add_id_val(struct expr_parse_ctx *ctx, const char *id, double val)
{
        return expr__add_id_val_source_count(ctx, id, val, /*source_count=*/1);
}

/* Caller must make sure id is allocated */
int expr__add_id_val_source_count(struct expr_parse_ctx *ctx, const char *id,
                                  double val, int source_count)
{
        struct expr_id_data *data_ptr = NULL, *old_data = NULL;
        char *old_key = NULL;
        int ret;

        data_ptr = malloc(sizeof(*data_ptr));
        if (!data_ptr)
                return -ENOMEM;
        data_ptr->val.val = val;
        data_ptr->val.source_count = source_count;
        data_ptr->kind = EXPR_ID_DATA__VALUE;

        ret = hashmap__set(ctx->ids, id, data_ptr, &old_key, &old_data);
        if (ret)
                free(data_ptr);
        free(old_key);
        free(old_data);
        return ret;
}

int expr__add_ref(struct expr_parse_ctx *ctx, struct metric_ref *ref)
{
        struct expr_id_data *data_ptr = NULL, *old_data = NULL;
        char *old_key = NULL;
        char *name;
        int ret;

        data_ptr = zalloc(sizeof(*data_ptr));
        if (!data_ptr)
                return -ENOMEM;

        name = strdup(ref->metric_name);
        if (!name) {
                free(data_ptr);
                return -ENOMEM;
        }

        /*
         * Intentionally passing just const char pointers,
         * originally from 'struct pmu_event' object.
         * We don't need to change them, so there's no
         * need to create our own copy.
         */
        data_ptr->ref.metric_name = ref->metric_name;
        data_ptr->ref.metric_expr = ref->metric_expr;
        data_ptr->kind = EXPR_ID_DATA__REF;

        ret = hashmap__set(ctx->ids, name, data_ptr, &old_key, &old_data);
        if (ret)
                free(data_ptr);

        pr_debug2("adding ref metric %s: %s\n",
                  ref->metric_name, ref->metric_expr);

        free(old_key);
        free(old_data);
        return ret;
}

int expr__get_id(struct expr_parse_ctx *ctx, const char *id,
                 struct expr_id_data **data)
{
        return hashmap__find(ctx->ids, id, data) ? 0 : -1;
}

bool expr__subset_of_ids(struct expr_parse_ctx *haystack,
                         struct expr_parse_ctx *needles)
{
        struct hashmap_entry *cur;
        size_t bkt;
        struct expr_id_data *data;

        hashmap__for_each_entry(needles->ids, cur, bkt) {
                if (expr__get_id(haystack, cur->pkey, &data))
                        return false;
        }
        return true;
}


int expr__resolve_id(struct expr_parse_ctx *ctx, const char *id,
                     struct expr_id_data **datap)
{
        struct expr_id_data *data;

        if (expr__get_id(ctx, id, datap) || !*datap) {
                pr_debug("%s not found\n", id);
                return -1;
        }

        data = *datap;

        switch (data->kind) {
        case EXPR_ID_DATA__VALUE:
                pr_debug2("lookup(%s): val %f\n", id, data->val.val);
                break;
        case EXPR_ID_DATA__REF:
                pr_debug2("lookup(%s): ref metric name %s\n", id,
                        data->ref.metric_name);
                pr_debug("processing metric: %s ENTRY\n", id);
                data->kind = EXPR_ID_DATA__REF_VALUE;
                if (expr__parse(&data->ref.val, ctx, data->ref.metric_expr)) {
                        pr_debug("%s failed to count\n", id);
                        return -1;
                }
                pr_debug("processing metric: %s EXIT: %f\n", id, data->ref.val);
                break;
        case EXPR_ID_DATA__REF_VALUE:
                pr_debug2("lookup(%s): ref val %f metric name %s\n", id,
                        data->ref.val, data->ref.metric_name);
                break;
        default:
                assert(0);  /* Unreachable. */
        }

        return 0;
}

void expr__del_id(struct expr_parse_ctx *ctx, const char *id)
{
        struct expr_id_data *old_val = NULL;
        char *old_key = NULL;

        hashmap__delete(ctx->ids, id, &old_key, &old_val);
        free(old_key);
        free(old_val);
}

struct expr_parse_ctx *expr__ctx_new(void)
{
        struct expr_parse_ctx *ctx;

        ctx = malloc(sizeof(struct expr_parse_ctx));
        if (!ctx)
                return NULL;

        ctx->ids = hashmap__new(key_hash, key_equal, NULL);
        if (IS_ERR(ctx->ids)) {
                free(ctx);
                return NULL;
        }
        ctx->sctx.user_requested_cpu_list = NULL;
        ctx->sctx.runtime = 0;
        ctx->sctx.system_wide = false;

        return ctx;
}

void expr__ctx_clear(struct expr_parse_ctx *ctx)
{
        struct hashmap_entry *cur;
        size_t bkt;

        hashmap__for_each_entry(ctx->ids, cur, bkt) {
                zfree(&cur->pkey);
                zfree(&cur->pvalue);
        }
        hashmap__clear(ctx->ids);
}

void expr__ctx_free(struct expr_parse_ctx *ctx)
{
        struct hashmap_entry *cur;
        size_t bkt;

        if (!ctx)
                return;

        zfree(&ctx->sctx.user_requested_cpu_list);
        hashmap__for_each_entry(ctx->ids, cur, bkt) {
                zfree(&cur->pkey);
                zfree(&cur->pvalue);
        }
        hashmap__free(ctx->ids);
        free(ctx);
}

static int
__expr__parse(double *val, struct expr_parse_ctx *ctx, const char *expr,
              bool compute_ids)
{
        YY_BUFFER_STATE buffer;
        void *scanner;
        int ret;

        pr_debug2("parsing metric: %s\n", expr);

        ret = expr_lex_init_extra(&ctx->sctx, &scanner);
        if (ret)
                return ret;

        buffer = expr__scan_string(expr, scanner);

#ifdef PARSER_DEBUG
        expr_debug = 1;
        expr_set_debug(1, scanner);
#endif

        ret = expr_parse(val, ctx, compute_ids, scanner);

        expr__flush_buffer(buffer, scanner);
        expr__delete_buffer(buffer, scanner);
        expr_lex_destroy(scanner);
        return ret;
}

int expr__parse(double *final_val, struct expr_parse_ctx *ctx,
                const char *expr)
{
        return __expr__parse(final_val, ctx, expr, /*compute_ids=*/false) ? -1 : 0;
}

int expr__find_ids(const char *expr, const char *one,
                   struct expr_parse_ctx *ctx)
{
        int ret = __expr__parse(NULL, ctx, expr, /*compute_ids=*/true);

        if (one)
                expr__del_id(ctx, one);

        return ret;
}

double expr_id_data__value(const struct expr_id_data *data)
{
        if (data->kind == EXPR_ID_DATA__VALUE)
                return data->val.val;
        assert(data->kind == EXPR_ID_DATA__REF_VALUE);
        return data->ref.val;
}

double expr_id_data__source_count(const struct expr_id_data *data)
{
        assert(data->kind == EXPR_ID_DATA__VALUE);
        return data->val.source_count;
}

#if !defined(__i386__) && !defined(__x86_64__)
double arch_get_tsc_freq(void)
{
        return 0.0;
}
#endif

static double has_pmem(void)
{
        static bool has_pmem, cached;
        const char *sysfs = sysfs__mountpoint();
        char path[PATH_MAX];

        if (!cached) {
                snprintf(path, sizeof(path), "%s/firmware/acpi/tables/NFIT", sysfs);
                has_pmem = access(path, F_OK) == 0;
                cached = true;
        }
        return has_pmem ? 1.0 : 0.0;
}

double expr__get_literal(const char *literal, const struct expr_scanner_ctx *ctx)
{
        const struct cpu_topology *topology;
        double result = NAN;

        if (!strcmp("#num_cpus", literal)) {
                result = cpu__max_present_cpu().cpu;
                goto out;
        }
        if (!strcmp("#num_cpus_online", literal)) {
                struct perf_cpu_map *online = cpu_map__online();

                if (online)
                        result = perf_cpu_map__nr(online);
                goto out;
        }

        if (!strcasecmp("#system_tsc_freq", literal)) {
                result = arch_get_tsc_freq();
                goto out;
        }

        /*
         * Assume that topology strings are consistent, such as CPUs "0-1"
         * wouldn't be listed as "0,1", and so after deduplication the number of
         * these strings gives an indication of the number of packages, dies,
         * etc.
         */
        if (!strcasecmp("#smt_on", literal)) {
                result = smt_on() ? 1.0 : 0.0;
                goto out;
        }
        if (!strcmp("#core_wide", literal)) {
                result = core_wide(ctx->system_wide, ctx->user_requested_cpu_list)
                        ? 1.0 : 0.0;
                goto out;
        }
        if (!strcmp("#num_packages", literal)) {
                topology = online_topology();
                result = topology->package_cpus_lists;
                goto out;
        }
        if (!strcmp("#num_dies", literal)) {
                topology = online_topology();
                result = topology->die_cpus_lists;
                goto out;
        }
        if (!strcmp("#num_cores", literal)) {
                topology = online_topology();
                result = topology->core_cpus_lists;
                goto out;
        }
        if (!strcmp("#slots", literal)) {
                result = perf_pmu__cpu_slots_per_cycle();
                goto out;
        }
        if (!strcmp("#has_pmem", literal)) {
                result = has_pmem();
                goto out;
        }

        pr_err("Unrecognized literal '%s'", literal);
out:
        pr_debug2("literal: %s = %f\n", literal, result);
        return result;
}

/* Does the event 'id' parse? Determine via ctx->ids if possible. */
double expr__has_event(const struct expr_parse_ctx *ctx, bool compute_ids, const char *id)
{
        struct evlist *tmp;
        double ret;

        if (hashmap__find(ctx->ids, id, /*value=*/NULL))
                return 1.0;

        if (!compute_ids)
                return 0.0;

        tmp = evlist__new();
        if (!tmp)
                return NAN;
        ret = parse_event(tmp, id) ? 0 : 1;
        evlist__delete(tmp);
        return ret;
}

double expr__strcmp_cpuid_str(const struct expr_parse_ctx *ctx __maybe_unused,
                       bool compute_ids __maybe_unused, const char *test_id)
{
        double ret;
        struct perf_pmu *pmu = perf_pmus__find_core_pmu();
        char *cpuid = perf_pmu__getcpuid(pmu);

        if (!cpuid)
                return NAN;

        ret = !strcmp_cpuid_str(test_id, cpuid);

        free(cpuid);
        return ret;
}
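
As a usage note, the exported entry points above (expr__ctx_new(), expr__add_id_val(), expr__parse(), expr__find_ids(), expr__ctx_free()) cover the whole lifecycle of evaluating a metric expression. The sketch below is illustrative only: it assumes it is built inside the perf tools tree so that "expr.h" resolves to this file's header, and the helper name evaluate_example_metric() as well as the event names "instructions" and "cycles" are placeholders, not anything defined in expr.c.

/*
 * Hypothetical caller (not part of expr.c). Identifiers handed to
 * expr__add_id_val() must be heap allocated, because expr__ctx_free()
 * zfree()s the hashmap keys; strdup() error handling is elided here.
 */
#include <errno.h>
#include <string.h>
#include "expr.h"

static int evaluate_example_metric(double *result)
{
        struct expr_parse_ctx *ctx = expr__ctx_new();
        int ret = -ENOMEM;

        if (!ctx)
                return ret;

        if (expr__add_id_val(ctx, strdup("instructions"), 1500000.0) ||
            expr__add_id_val(ctx, strdup("cycles"), 1000000.0))
                goto out;

        /* expr__parse() returns 0 on success; *result becomes 1.5 here. */
        ret = expr__parse(result, ctx, "instructions / cycles");
out:
        expr__ctx_free(ctx);
        return ret;
}

The same parser also runs in a discovery mode: expr__find_ids() calls __expr__parse() with compute_ids=true and a NULL value pointer, so an expression can be scanned purely to populate ctx->ids with the identifiers it references before any counter values exist.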