v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Memory bandwidth monitoring and allocation library
  4 *
  5 * Copyright (C) 2018 Intel Corporation
  6 *
  7 * Authors:
  8 *    Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
  9 *    Fenghua Yu <fenghua.yu@intel.com>
 10 */
 11#include "resctrl.h"
 12
 13#define UNCORE_IMC		"uncore_imc"
 14#define READ_FILE_NAME		"events/cas_count_read"
 15#define DYN_PMU_PATH		"/sys/bus/event_source/devices"
 16#define SCALE			0.00006103515625
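/*
 * Illustrative note: SCALE equals 64 / 2^20, which appears to mirror the
 * "scale" value the kernel exposes for the iMC cas_count events. Each
 * event counts one 64-byte cache line, so count * SCALE gives megabytes.
 */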
 17#define MAX_IMCS		20
 18#define MAX_TOKENS		5
 19
 20#define CON_MBM_LOCAL_BYTES_PATH		\
 21	"%s/%s/mon_data/mon_L3_%02d/mbm_local_bytes"
 22
 23struct membw_read_format {
 24	__u64 value;         /* The value of the event */
 25	__u64 time_enabled;  /* if PERF_FORMAT_TOTAL_TIME_ENABLED */
 26	__u64 time_running;  /* if PERF_FORMAT_TOTAL_TIME_RUNNING */
 27	__u64 id;            /* if PERF_FORMAT_ID */
 28};
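/*
 * A minimal read-side sketch (assuming an event fd opened with the
 * read_format configured below): the kernel fills the selected fields in
 * declaration order, so a plain read() into the struct works.
 *
 *	struct membw_read_format rf;
 *
 *	if (read(fd, &rf, sizeof(rf)) > 0)
 *		printf("raw %llu, enabled %llu, running %llu\n",
 *		       rf.value, rf.time_enabled, rf.time_running);
 */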
 29
 30struct imc_counter_config {
 31	__u32 type;
 32	__u64 event;
 33	__u64 umask;
 34	struct perf_event_attr pe;
 35	struct membw_read_format return_value;
 36	int fd;
 37};
 38
 39static char mbm_total_path[1024];
 40static int imcs;
 41static struct imc_counter_config imc_counters_config[MAX_IMCS];
 42static const struct resctrl_test *current_test;
 43
 44static void read_mem_bw_initialize_perf_event_attr(int i)
 45{
 46	memset(&imc_counters_config[i].pe, 0,
 47	       sizeof(struct perf_event_attr));
 48	imc_counters_config[i].pe.type = imc_counters_config[i].type;
 49	imc_counters_config[i].pe.size = sizeof(struct perf_event_attr);
 50	imc_counters_config[i].pe.disabled = 1;
 51	imc_counters_config[i].pe.inherit = 1;
 52	imc_counters_config[i].pe.exclude_guest = 0;
 53	imc_counters_config[i].pe.config =
 54		imc_counters_config[i].umask << 8 |
 55		imc_counters_config[i].event;
 56	imc_counters_config[i].pe.sample_type = PERF_SAMPLE_IDENTIFIER;
 57	imc_counters_config[i].pe.read_format =
 58		PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING;
 59}
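/*
 * Illustrative result (values are an example only): for an iMC whose
 * sysfs "type" file reads 18 and whose cas_count_read event parses to
 * event=0x04, umask=0x03, the attribute ends up as pe.type = 18 and
 * pe.config = (0x3 << 8) | 0x4 = 0x304. The event is later opened with
 * pid == -1 and a specific CPU, i.e. counting system-wide on that CPU
 * rather than per task.
 */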
 60
 61static void read_mem_bw_ioctl_perf_event_ioc_reset_enable(int i)
 62{
 63	ioctl(imc_counters_config[i].fd, PERF_EVENT_IOC_RESET, 0);
 64	ioctl(imc_counters_config[i].fd, PERF_EVENT_IOC_ENABLE, 0);
 65}
 66
 67static void read_mem_bw_ioctl_perf_event_ioc_disable(int i)
 68{
 69	ioctl(imc_counters_config[i].fd, PERF_EVENT_IOC_DISABLE, 0);
 70}
 71
 72/*
 73 * get_read_event_and_umask:	Parse config into event and umask
 74 * @cas_count_cfg:	Config
 75 * @count:		iMC number
 76 */
 77static void get_read_event_and_umask(char *cas_count_cfg, int count)
 78{
 79	char *token[MAX_TOKENS];
 80	int i = 0;
 81
 82	token[0] = strtok(cas_count_cfg, "=,");
 83
 84	for (i = 1; i < MAX_TOKENS; i++)
 85		token[i] = strtok(NULL, "=,");
 86
 87	for (i = 0; i < MAX_TOKENS - 1; i++) {
 88		if (!token[i])
 89			break;
 90		if (strcmp(token[i], "event") == 0)
 91			imc_counters_config[count].event = strtol(token[i + 1], NULL, 16);
 92		if (strcmp(token[i], "umask") == 0)
 93			imc_counters_config[count].umask = strtol(token[i + 1], NULL, 16);
 94	}
 95}
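/*
 * Worked example (hardware specific, for illustration only): if the
 * sysfs events file holds "event=0x04,umask=0x03", strtok() splits it
 * into { "event", "0x04", "umask", "0x03" }, so the loop stores
 * event = 0x4 and umask = 0x3 for this iMC.
 */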
 96
 97static int open_perf_read_event(int i, int cpu_no)
 98{
 99	imc_counters_config[i].fd =
100		perf_event_open(&imc_counters_config[i].pe, -1, cpu_no, -1,
101				PERF_FLAG_FD_CLOEXEC);
102
103	if (imc_counters_config[i].fd == -1) {
104		fprintf(stderr, "Error opening leader %llx\n",
105			imc_counters_config[i].pe.config);
106
107		return -1;
108	}
109
110	return 0;
111}
112
113/* Get type and config of an iMC counter's read event. */
114static int read_from_imc_dir(char *imc_dir, int count)
115{
116	char cas_count_cfg[1024], imc_counter_cfg[1024], imc_counter_type[1024];
117	FILE *fp;
118
119	/* Get type of iMC counter */
120	sprintf(imc_counter_type, "%s%s", imc_dir, "type");
121	fp = fopen(imc_counter_type, "r");
122	if (!fp) {
123		ksft_perror("Failed to open iMC counter type file");
124
125		return -1;
126	}
127	if (fscanf(fp, "%u", &imc_counters_config[count].type) <= 0) {
128		ksft_perror("Could not get iMC type");
129		fclose(fp);
130
131		return -1;
132	}
133	fclose(fp);
134
135	/* Get read config */
136	sprintf(imc_counter_cfg, "%s%s", imc_dir, READ_FILE_NAME);
137	fp = fopen(imc_counter_cfg, "r");
138	if (!fp) {
139		ksft_perror("Failed to open iMC config file");
140
141		return -1;
142	}
143	if (fscanf(fp, "%1023s", cas_count_cfg) <= 0) {
144		ksft_perror("Could not get iMC cas count read");
145		fclose(fp);
146
147		return -1;
148	}
149	fclose(fp);
150
151	get_read_event_and_umask(cas_count_cfg, count);
152
153	return 0;
154}
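/*
 * Illustrative sysfs layout this helper reads from (typical path names,
 * shown only as an example):
 *
 *	/sys/bus/event_source/devices/uncore_imc_0/type
 *	/sys/bus/event_source/devices/uncore_imc_0/events/cas_count_read
 *
 * "type" holds the dynamic PMU number used for perf_event_attr.type and
 * the events file holds the "event=...,umask=..." string parsed above.
 */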
155
156/*
157 * A system can have 'n' number of iMC (Integrated Memory Controller)
158 * counters, get that 'n'. Discover the properties of the available
159 * counters in support of needed performance measurement via perf.
 160 * For each iMC counter get its type and config. Also obtain each
161 * counter's event and umask for the memory read events that will be
162 * measured.
163 *
164 * Enumerate all these details into an array of structures.
165 *
166 * Return: >= 0 on success. < 0 on failure.
167 */
168static int num_of_imcs(void)
169{
170	char imc_dir[512], *temp;
171	unsigned int count = 0;
172	struct dirent *ep;
173	int ret;
174	DIR *dp;
175
176	dp = opendir(DYN_PMU_PATH);
177	if (dp) {
178		while ((ep = readdir(dp))) {
179			temp = strstr(ep->d_name, UNCORE_IMC);
180			if (!temp)
181				continue;
182
183			/*
184			 * imc counters are named as "uncore_imc_<n>", hence
185			 * increment the pointer to point to <n>. Note that
186			 * sizeof(UNCORE_IMC) would count for null character as
187			 * well and hence the last underscore character in
188			 * uncore_imc'_' need not be counted.
189			 */
190			temp = temp + sizeof(UNCORE_IMC);
191
192			/*
193			 * Some directories under "DYN_PMU_PATH" could have
194			 * names like "uncore_imc_free_running", hence, check if
195			 * first character is a numerical digit or not.
196			 */
197			if (temp[0] >= '0' && temp[0] <= '9') {
198				sprintf(imc_dir, "%s/%s/", DYN_PMU_PATH,
199					ep->d_name);
200				ret = read_from_imc_dir(imc_dir, count);
201				if (ret) {
202					closedir(dp);
203
204					return ret;
205				}
206				count++;
207			}
208		}
209		closedir(dp);
210		if (count == 0) {
211			ksft_print_msg("Unable to find iMC counters\n");
212
213			return -1;
214		}
215	} else {
216		ksft_perror("Unable to open PMU directory");
217
218		return -1;
219	}
220
221	return count;
222}
223
224int initialize_read_mem_bw_imc(void)
225{
226	int imc;
227
228	imcs = num_of_imcs();
229	if (imcs <= 0)
230		return imcs;
231
232	/* Initialize perf_event_attr structures for all iMC's */
233	for (imc = 0; imc < imcs; imc++)
234		read_mem_bw_initialize_perf_event_attr(imc);
235
236	return 0;
237}
238
239static void perf_close_imc_read_mem_bw(void)
240{
241	int mc;
242
243	for (mc = 0; mc < imcs; mc++) {
244		if (imc_counters_config[mc].fd != -1)
245			close(imc_counters_config[mc].fd);
246	}
247}
248
249/*
250 * perf_open_imc_read_mem_bw - Open perf fds for IMCs
251 * @cpu_no: CPU number that the benchmark PID is bound to
252 *
253 * Return: = 0 on success. < 0 on failure.
254 */
255static int perf_open_imc_read_mem_bw(int cpu_no)
256{
257	int imc, ret;
258
259	for (imc = 0; imc < imcs; imc++)
260		imc_counters_config[imc].fd = -1;
261
262	for (imc = 0; imc < imcs; imc++) {
263		ret = open_perf_read_event(imc, cpu_no);
264		if (ret)
265			goto close_fds;
266	}
267
268	return 0;
269
270close_fds:
271	perf_close_imc_read_mem_bw();
272	return -1;
273}
274
275/*
276 * do_imc_read_mem_bw_test - Perform memory bandwidth test
277 *
278 * Runs memory bandwidth test over one second period. Also, handles starting
279 * and stopping of the IMC perf counters around the test.
280 */
281static void do_imc_read_mem_bw_test(void)
282{
283	int imc;
284
285	for (imc = 0; imc < imcs; imc++)
286		read_mem_bw_ioctl_perf_event_ioc_reset_enable(imc);
287
288	sleep(1);
289
290	/* Stop counters after a second to get results. */
291	for (imc = 0; imc < imcs; imc++)
292		read_mem_bw_ioctl_perf_event_ioc_disable(imc);
293}
294
295/*
296 * get_read_mem_bw_imc - Memory read bandwidth as reported by iMC counters
297 *
298 * Memory read bandwidth utilized by a process on a socket can be calculated
299 * using iMC counters' read events. Perf events are used to read these
300 * counters.
301 *
302 * Return: = 0 on success. < 0 on failure.
303 */
304static int get_read_mem_bw_imc(float *bw_imc)
305{
306	float reads = 0, of_mul_read = 1;
307	int imc;
308
309	/*
310	 * Log read event values from all iMC counters into
311	 * struct imc_counter_config.
312	 * Take overflow into consideration before calculating total bandwidth.
313	 */
314	for (imc = 0; imc < imcs; imc++) {
315		struct imc_counter_config *r =
316			&imc_counters_config[imc];
317
318		if (read(r->fd, &r->return_value,
319			 sizeof(struct membw_read_format)) == -1) {
320			ksft_perror("Couldn't get read bandwidth through iMC");
321			return -1;
322		}
323
324		__u64 r_time_enabled = r->return_value.time_enabled;
325		__u64 r_time_running = r->return_value.time_running;
326
327		if (r_time_enabled != r_time_running)
328			of_mul_read = (float)r_time_enabled /
329					(float)r_time_running;
330
331		reads += r->return_value.value * of_mul_read * SCALE;
332	}
333
334	*bw_imc = reads;
335	return 0;
336}
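/*
 * Worked example (made-up numbers): with value = 1638400 cache lines and
 * time_enabled == time_running (no multiplexing, so of_mul_read stays 1),
 * this iMC contributes 1638400 * 1 * SCALE = 1638400 * 64 / 2^20 = 100 MB.
 * When the counter was multiplexed, time_enabled / time_running scales the
 * raw count back up to an estimate for the full interval.
 */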
337
338/*
339 * initialize_mem_bw_resctrl:	Appropriately populate "mbm_total_path"
340 * @param:	Parameters passed to resctrl_val()
341 * @domain_id:	Domain ID (cache ID; for MB, L3 cache ID)
342 */
343void initialize_mem_bw_resctrl(const struct resctrl_val_param *param,
344			       int domain_id)
345{
346	sprintf(mbm_total_path, CON_MBM_LOCAL_BYTES_PATH, RESCTRL_PATH,
347		param->ctrlgrp, domain_id);
348}
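/*
 * Example result (assuming RESCTRL_PATH expands to "/sys/fs/resctrl",
 * ctrlgrp is "c1" and domain_id is 0):
 *
 *	/sys/fs/resctrl/c1/mon_data/mon_L3_00/mbm_local_bytes
 */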
349
350/*
351 * Open file to read MBM local bytes from resctrl FS
352 */
353static FILE *open_mem_bw_resctrl(const char *mbm_bw_file)
354{
355	FILE *fp;
356
357	fp = fopen(mbm_bw_file, "r");
358	if (!fp)
359		ksft_perror("Failed to open total memory bandwidth file");
360
361	return fp;
362}
363
364/*
365 * Get MBM Local bytes as reported by resctrl FS
366 */
367static int get_mem_bw_resctrl(FILE *fp, unsigned long *mbm_total)
368{
369	if (fscanf(fp, "%lu\n", mbm_total) <= 0) {
370		ksft_perror("Could not get MBM local bytes");
371		return -1;
372	}
373	return 0;
374}
375
376static pid_t bm_pid;
377
378void ctrlc_handler(int signum, siginfo_t *info, void *ptr)
379{
380	/* Only kill child after bm_pid is set after fork() */
381	if (bm_pid)
382		kill(bm_pid, SIGKILL);
383	umount_resctrlfs();
384	if (current_test && current_test->cleanup)
385		current_test->cleanup();
386	ksft_print_msg("Ending\n\n");
387
388	exit(EXIT_SUCCESS);
389}
390
391/*
392 * Register CTRL-C handler for parent, as it has to kill
393 * child process before exiting.
394 */
395int signal_handler_register(const struct resctrl_test *test)
396{
397	struct sigaction sigact = {};
398	int ret = 0;
399
400	bm_pid = 0;
401
402	current_test = test;
403	sigact.sa_sigaction = ctrlc_handler;
404	sigemptyset(&sigact.sa_mask);
405	sigact.sa_flags = SA_SIGINFO;
406	if (sigaction(SIGINT, &sigact, NULL) ||
407	    sigaction(SIGTERM, &sigact, NULL) ||
408	    sigaction(SIGHUP, &sigact, NULL)) {
409		ksft_perror("sigaction");
410		ret = -1;
411	}
412	return ret;
413}
414
415/*
416 * Reset signal handler to SIG_DFL.
 417 * Void return because the caller should keep
 418 * the error code of the other path even if sigaction fails.
419 */
420void signal_handler_unregister(void)
421{
422	struct sigaction sigact = {};
423
424	current_test = NULL;
425	sigact.sa_handler = SIG_DFL;
426	sigemptyset(&sigact.sa_mask);
427	if (sigaction(SIGINT, &sigact, NULL) ||
428	    sigaction(SIGTERM, &sigact, NULL) ||
429	    sigaction(SIGHUP, &sigact, NULL)) {
430		ksft_perror("sigaction");
431	}
432}
433
434/*
435 * print_results_bw:	the memory bandwidth results are stored in a file
436 * @filename:		file that stores the results
437 * @bm_pid:		child pid that runs benchmark
438 * @bw_imc:		perf imc counter value
439 * @bw_resc:		memory bandwidth value
440 *
441 * Return:		0 on success, < 0 on error.
442 */
443static int print_results_bw(char *filename, pid_t bm_pid, float bw_imc,
444			    unsigned long bw_resc)
445{
446	unsigned long diff = fabs(bw_imc - bw_resc);
447	FILE *fp;
448
449	if (strcmp(filename, "stdio") == 0 || strcmp(filename, "stderr") == 0) {
450		printf("Pid: %d \t Mem_BW_iMC: %f \t ", (int)bm_pid, bw_imc);
451		printf("Mem_BW_resc: %lu \t Difference: %lu\n", bw_resc, diff);
452	} else {
453		fp = fopen(filename, "a");
454		if (!fp) {
455			ksft_perror("Cannot open results file");
456
457			return -1;
458		}
459		if (fprintf(fp, "Pid: %d \t Mem_BW_iMC: %f \t Mem_BW_resc: %lu \t Difference: %lu\n",
460			    (int)bm_pid, bw_imc, bw_resc, diff) <= 0) {
461			ksft_print_msg("Could not log results\n");
462			fclose(fp);
463
464			return -1;
465		}
466		fclose(fp);
467	}
468
469	return 0;
470}
471
472/*
473 * measure_read_mem_bw - Measures read memory bandwidth numbers while benchmark runs
474 * @uparams:		User supplied parameters
475 * @param:		Parameters passed to resctrl_val()
476 * @bm_pid:		PID that runs the benchmark
477 *
478 * Measure memory bandwidth from resctrl and from another source which is
479 * perf imc value or could be something else if perf imc event is not
480 * available. Compare the two values to validate resctrl value. It takes
481 * 1 sec to measure the data.
482 * resctrl does not distinguish between read and write operations so
483 * its data includes all memory operations.
484 */
485int measure_read_mem_bw(const struct user_params *uparams,
486			struct resctrl_val_param *param, pid_t bm_pid)
487{
488	unsigned long bw_resc, bw_resc_start, bw_resc_end;
489	FILE *mem_bw_fp;
490	float bw_imc;
491	int ret;
492
493	mem_bw_fp = open_mem_bw_resctrl(mbm_total_path);
494	if (!mem_bw_fp)
495		return -1;
496
497	ret = perf_open_imc_read_mem_bw(uparams->cpu);
498	if (ret < 0)
499		goto close_fp;
500
501	ret = get_mem_bw_resctrl(mem_bw_fp, &bw_resc_start);
502	if (ret < 0)
503		goto close_imc;
504
505	rewind(mem_bw_fp);
506
507	do_imc_read_mem_bw_test();
508
509	ret = get_mem_bw_resctrl(mem_bw_fp, &bw_resc_end);
510	if (ret < 0)
511		goto close_imc;
512
513	ret = get_read_mem_bw_imc(&bw_imc);
514	if (ret < 0)
515		goto close_imc;
516
517	perf_close_imc_read_mem_bw();
518	fclose(mem_bw_fp);
519
520	bw_resc = (bw_resc_end - bw_resc_start) / MB;
521
522	return print_results_bw(param->filename, bm_pid, bw_imc, bw_resc);
523
524close_imc:
525	perf_close_imc_read_mem_bw();
526close_fp:
527	fclose(mem_bw_fp);
528	return ret;
529}
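/*
 * Usage sketch (field names as used in this file; a real test fills in
 * more of struct resctrl_val_param): a test selects this function as its
 * measurement callback and resctrl_val() then calls it once per setup()
 * iteration.
 *
 *	struct resctrl_val_param param = {
 *		.ctrlgrp	= "c1",
 *		.filename	= "stdio",
 *		.init		= initialize_mem_bw_resctrl,
 *		.measure	= measure_read_mem_bw,
 *	};
 */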
530
531/*
532 * resctrl_val:	execute benchmark and measure memory bandwidth on
533 *			the benchmark
534 * @test:		test information structure
535 * @uparams:		user supplied parameters
536 * @param:		parameters passed to resctrl_val()
537 *
538 * Return:		0 when the test was run, < 0 on error.
539 */
540int resctrl_val(const struct resctrl_test *test,
541		const struct user_params *uparams,
542		struct resctrl_val_param *param)
543{
544	unsigned char *buf = NULL;
545	cpu_set_t old_affinity;
546	int domain_id;
547	int ret = 0;
548	pid_t ppid;
549
550	if (strcmp(param->filename, "") == 0)
551		sprintf(param->filename, "stdio");
552
553	ret = get_domain_id(test->resource, uparams->cpu, &domain_id);
554	if (ret < 0) {
555		ksft_print_msg("Could not get domain ID\n");
556		return ret;
557	}
558
559	ppid = getpid();
560
561	/* Taskset test to specified CPU. */
562	ret = taskset_benchmark(ppid, uparams->cpu, &old_affinity);
563	if (ret)
564		return ret;
565
566	/* Write test to specified control & monitoring group in resctrl FS. */
567	ret = write_bm_pid_to_resctrl(ppid, param->ctrlgrp, param->mongrp);
568	if (ret)
569		goto reset_affinity;
570
571	if (param->init) {
572		ret = param->init(param, domain_id);
573		if (ret)
574			goto reset_affinity;
575	}
576
577	/*
578	 * If not running user provided benchmark, run the default
579	 * "fill_buf". First phase of "fill_buf" is to prepare the
580	 * buffer that the benchmark will operate on. No measurements
581	 * are needed during this phase and prepared memory will be
582	 * passed to next part of benchmark via copy-on-write thus
583	 * no impact on the benchmark that relies on reading from
584	 * memory only.
585	 */
586	if (param->fill_buf) {
587		buf = alloc_buffer(param->fill_buf->buf_size,
588				   param->fill_buf->memflush);
589		if (!buf) {
590			ret = -ENOMEM;
591			goto reset_affinity;
592		}
593	}
594
595	fflush(stdout);
596	bm_pid = fork();
597	if (bm_pid == -1) {
598		ret = -errno;
599		ksft_perror("Unable to fork");
600		goto free_buf;
601	}
602
603	/*
604	 * What needs to be measured runs in a separate process until
605	 * terminated.
606	 */
607	if (bm_pid == 0) {
608		if (param->fill_buf)
609			fill_cache_read(buf, param->fill_buf->buf_size, false);
610		else if (uparams->benchmark_cmd[0])
611			execvp(uparams->benchmark_cmd[0], (char **)uparams->benchmark_cmd);
612		exit(EXIT_SUCCESS);
613	}
614
615	ksft_print_msg("Benchmark PID: %d\n", (int)bm_pid);
616
617	/* Give benchmark enough time to fully run. */
618	sleep(1);
619
620	/* Test runs until the callback setup() tells the test to stop. */
621	while (1) {
622		ret = param->setup(test, uparams, param);
623		if (ret == END_OF_TESTS) {
624			ret = 0;
625			break;
626		}
627		if (ret < 0)
628			break;
629
630		ret = param->measure(uparams, param, bm_pid);
631		if (ret)
632			break;
633	}
634
635	kill(bm_pid, SIGKILL);
636free_buf:
637	free(buf);
638reset_affinity:
639	taskset_restore(ppid, &old_affinity);
640	return ret;
641}
v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Memory bandwidth monitoring and allocation library
  4 *
  5 * Copyright (C) 2018 Intel Corporation
  6 *
  7 * Authors:
  8 *    Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>,
  9 *    Fenghua Yu <fenghua.yu@intel.com>
 10 */
 11#include "resctrl.h"
 12
 13#define UNCORE_IMC		"uncore_imc"
 14#define READ_FILE_NAME		"events/cas_count_read"
 15#define WRITE_FILE_NAME		"events/cas_count_write"
 16#define DYN_PMU_PATH		"/sys/bus/event_source/devices"
 17#define SCALE			0.00006103515625
 18#define MAX_IMCS		20
 19#define MAX_TOKENS		5
 20#define READ			0
 21#define WRITE			1
 22#define CON_MON_MBM_LOCAL_BYTES_PATH				\
 23	"%s/%s/mon_groups/%s/mon_data/mon_L3_%02d/mbm_local_bytes"
 24
 25#define CON_MBM_LOCAL_BYTES_PATH		\
 26	"%s/%s/mon_data/mon_L3_%02d/mbm_local_bytes"
 27
 28#define MON_MBM_LOCAL_BYTES_PATH		\
 29	"%s/mon_groups/%s/mon_data/mon_L3_%02d/mbm_local_bytes"
 30
 31#define MBM_LOCAL_BYTES_PATH			\
 32	"%s/mon_data/mon_L3_%02d/mbm_local_bytes"
 33
 34#define CON_MON_LCC_OCCUP_PATH		\
 35	"%s/%s/mon_groups/%s/mon_data/mon_L3_%02d/llc_occupancy"
 36
 37#define CON_LCC_OCCUP_PATH		\
 38	"%s/%s/mon_data/mon_L3_%02d/llc_occupancy"
 39
 40#define MON_LCC_OCCUP_PATH		\
 41	"%s/mon_groups/%s/mon_data/mon_L3_%02d/llc_occupancy"
 42
 43#define LCC_OCCUP_PATH			\
 44	"%s/mon_data/mon_L3_%02d/llc_occupancy"
 45
 46struct membw_read_format {
 47	__u64 value;         /* The value of the event */
 48	__u64 time_enabled;  /* if PERF_FORMAT_TOTAL_TIME_ENABLED */
 49	__u64 time_running;  /* if PERF_FORMAT_TOTAL_TIME_RUNNING */
 50	__u64 id;            /* if PERF_FORMAT_ID */
 51};
 52
 53struct imc_counter_config {
 54	__u32 type;
 55	__u64 event;
 56	__u64 umask;
 57	struct perf_event_attr pe;
 58	struct membw_read_format return_value;
 59	int fd;
 60};
 61
 62static char mbm_total_path[1024];
 63static int imcs;
 64static struct imc_counter_config imc_counters_config[MAX_IMCS][2];
 65
 66void membw_initialize_perf_event_attr(int i, int j)
 67{
 68	memset(&imc_counters_config[i][j].pe, 0,
 69	       sizeof(struct perf_event_attr));
 70	imc_counters_config[i][j].pe.type = imc_counters_config[i][j].type;
 71	imc_counters_config[i][j].pe.size = sizeof(struct perf_event_attr);
 72	imc_counters_config[i][j].pe.disabled = 1;
 73	imc_counters_config[i][j].pe.inherit = 1;
 74	imc_counters_config[i][j].pe.exclude_guest = 0;
 75	imc_counters_config[i][j].pe.config =
 76		imc_counters_config[i][j].umask << 8 |
 77		imc_counters_config[i][j].event;
 78	imc_counters_config[i][j].pe.sample_type = PERF_SAMPLE_IDENTIFIER;
 79	imc_counters_config[i][j].pe.read_format =
 80		PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING;
 81}
 82
 83void membw_ioctl_perf_event_ioc_reset_enable(int i, int j)
 84{
 85	ioctl(imc_counters_config[i][j].fd, PERF_EVENT_IOC_RESET, 0);
 86	ioctl(imc_counters_config[i][j].fd, PERF_EVENT_IOC_ENABLE, 0);
 87}
 88
 89void membw_ioctl_perf_event_ioc_disable(int i, int j)
 90{
 91	ioctl(imc_counters_config[i][j].fd, PERF_EVENT_IOC_DISABLE, 0);
 92}
 93
 94/*
 95 * get_event_and_umask:	Parse config into event and umask
 96 * @cas_count_cfg:	Config
 97 * @count:		iMC number
 98 * @op:			Operation (read/write)
 99 */
100void get_event_and_umask(char *cas_count_cfg, int count, bool op)
101{
102	char *token[MAX_TOKENS];
103	int i = 0;
104
105	strcat(cas_count_cfg, ",");
106	token[0] = strtok(cas_count_cfg, "=,");
107
108	for (i = 1; i < MAX_TOKENS; i++)
109		token[i] = strtok(NULL, "=,");
110
111	for (i = 0; i < MAX_TOKENS; i++) {
112		if (!token[i])
113			break;
114		if (strcmp(token[i], "event") == 0) {
115			if (op == READ)
116				imc_counters_config[count][READ].event =
117				strtol(token[i + 1], NULL, 16);
118			else
119				imc_counters_config[count][WRITE].event =
120				strtol(token[i + 1], NULL, 16);
121		}
122		if (strcmp(token[i], "umask") == 0) {
123			if (op == READ)
124				imc_counters_config[count][READ].umask =
125				strtol(token[i + 1], NULL, 16);
126			else
127				imc_counters_config[count][WRITE].umask =
128				strtol(token[i + 1], NULL, 16);
129		}
130	}
131}
132
133static int open_perf_event(int i, int cpu_no, int j)
134{
135	imc_counters_config[i][j].fd =
136		perf_event_open(&imc_counters_config[i][j].pe, -1, cpu_no, -1,
137				PERF_FLAG_FD_CLOEXEC);
138
139	if (imc_counters_config[i][j].fd == -1) {
140		fprintf(stderr, "Error opening leader %llx\n",
141			imc_counters_config[i][j].pe.config);
142
143		return -1;
144	}
145
146	return 0;
147}
148
149/* Get type and config (read and write) of an iMC counter */
150static int read_from_imc_dir(char *imc_dir, int count)
151{
152	char cas_count_cfg[1024], imc_counter_cfg[1024], imc_counter_type[1024];
153	FILE *fp;
154
155	/* Get type of iMC counter */
156	sprintf(imc_counter_type, "%s%s", imc_dir, "type");
157	fp = fopen(imc_counter_type, "r");
158	if (!fp) {
159		perror("Failed to open imc counter type file");
160
161		return -1;
162	}
163	if (fscanf(fp, "%u", &imc_counters_config[count][READ].type) <= 0) {
164		perror("Could not get imc type");
165		fclose(fp);
166
167		return -1;
168	}
169	fclose(fp);
170
171	imc_counters_config[count][WRITE].type =
172				imc_counters_config[count][READ].type;
173
174	/* Get read config */
175	sprintf(imc_counter_cfg, "%s%s", imc_dir, READ_FILE_NAME);
176	fp = fopen(imc_counter_cfg, "r");
177	if (!fp) {
178		perror("Failed to open imc config file");
179
180		return -1;
181	}
182	if (fscanf(fp, "%s", cas_count_cfg) <= 0) {
183		perror("Could not get imc cas count read");
184		fclose(fp);
185
186		return -1;
187	}
188	fclose(fp);
189
190	get_event_and_umask(cas_count_cfg, count, READ);
191
192	/* Get write config */
193	sprintf(imc_counter_cfg, "%s%s", imc_dir, WRITE_FILE_NAME);
194	fp = fopen(imc_counter_cfg, "r");
195	if (!fp) {
196		perror("Failed to open imc config file");
197
198		return -1;
199	}
200	if  (fscanf(fp, "%s", cas_count_cfg) <= 0) {
201		perror("Could not get imc cas count write");
202		fclose(fp);
203
204		return -1;
205	}
206	fclose(fp);
207
208	get_event_and_umask(cas_count_cfg, count, WRITE);
209
210	return 0;
211}
212
213/*
214 * A system can have 'n' number of iMC (Integrated Memory Controller)
215 * counters, get that 'n'. For each iMC counter get its type and config.
216 * Also, each counter has two configs, one for read and the other for write.
217 * A config again has two parts, event and umask.
218 * Enumerate all these details into an array of structures.
219 *
220 * Return: >= 0 on success. < 0 on failure.
221 */
222static int num_of_imcs(void)
223{
224	char imc_dir[512], *temp;
225	unsigned int count = 0;
226	struct dirent *ep;
227	int ret;
228	DIR *dp;
229
230	dp = opendir(DYN_PMU_PATH);
231	if (dp) {
232		while ((ep = readdir(dp))) {
233			temp = strstr(ep->d_name, UNCORE_IMC);
234			if (!temp)
235				continue;
236
237			/*
238			 * imc counters are named as "uncore_imc_<n>", hence
239			 * increment the pointer to point to <n>. Note that
240			 * sizeof(UNCORE_IMC) would count for null character as
241			 * well and hence the last underscore character in
242			 * uncore_imc'_' need not be counted.
243			 */
244			temp = temp + sizeof(UNCORE_IMC);
245
246			/*
247			 * Some directories under "DYN_PMU_PATH" could have
248			 * names like "uncore_imc_free_running", hence, check if
249			 * first character is a numerical digit or not.
250			 */
251			if (temp[0] >= '0' && temp[0] <= '9') {
252				sprintf(imc_dir, "%s/%s/", DYN_PMU_PATH,
253					ep->d_name);
254				ret = read_from_imc_dir(imc_dir, count);
255				if (ret) {
256					closedir(dp);
257
258					return ret;
259				}
260				count++;
261			}
262		}
263		closedir(dp);
264		if (count == 0) {
265			perror("Unable to find iMC counters!\n");
266
267			return -1;
268		}
269	} else {
270		perror("Unable to open PMU directory!\n");
271
272		return -1;
273	}
274
275	return count;
276}
277
278static int initialize_mem_bw_imc(void)
279{
280	int imc, j;
281
282	imcs = num_of_imcs();
283	if (imcs <= 0)
284		return imcs;
285
286	/* Initialize perf_event_attr structures for all iMC's */
287	for (imc = 0; imc < imcs; imc++) {
288		for (j = 0; j < 2; j++)
289			membw_initialize_perf_event_attr(imc, j);
290	}
291
292	return 0;
293}
294
295/*
296 * get_mem_bw_imc:	Memory bandwidth as reported by iMC counters
297 * @cpu_no:		CPU number that the benchmark PID is bound to
298 * @bw_report:		Bandwidth report type (reads, writes)
299 *
300 * Memory B/W utilized by a process on a socket can be calculated using
301 * iMC counters. Perf events are used to read these counters.
302 *
303 * Return: = 0 on success. < 0 on failure.
304 */
305static int get_mem_bw_imc(int cpu_no, char *bw_report, float *bw_imc)
306{
307	float reads, writes, of_mul_read, of_mul_write;
308	int imc, j, ret;
309
310	/* Start all iMC counters to log values (both read and write) */
311	reads = 0, writes = 0, of_mul_read = 1, of_mul_write = 1;
312	for (imc = 0; imc < imcs; imc++) {
313		for (j = 0; j < 2; j++) {
314			ret = open_perf_event(imc, cpu_no, j);
315			if (ret)
316				return -1;
317		}
318		for (j = 0; j < 2; j++)
319			membw_ioctl_perf_event_ioc_reset_enable(imc, j);
320	}
321
322	sleep(1);
323
324	/* Stop counters after a second to get results (both read and write) */
325	for (imc = 0; imc < imcs; imc++) {
326		for (j = 0; j < 2; j++)
327			membw_ioctl_perf_event_ioc_disable(imc, j);
328	}
329
330	/*
331	 * Get the results, which are stored in struct imc_counter_config.
332	 * Take overflow into consideration before calculating total bandwidth.
333	 */
334	for (imc = 0; imc < imcs; imc++) {
335		struct imc_counter_config *r =
336			&imc_counters_config[imc][READ];
337		struct imc_counter_config *w =
338			&imc_counters_config[imc][WRITE];
339
340		if (read(r->fd, &r->return_value,
341			 sizeof(struct membw_read_format)) == -1) {
342			perror("Couldn't get read b/w through iMC");
343
344			return -1;
345		}
346
347		if (read(w->fd, &w->return_value,
348			 sizeof(struct membw_read_format)) == -1) {
349			perror("Couldn't get write bw through iMC");
350
351			return -1;
352		}
353
354		__u64 r_time_enabled = r->return_value.time_enabled;
355		__u64 r_time_running = r->return_value.time_running;
356
357		if (r_time_enabled != r_time_running)
358			of_mul_read = (float)r_time_enabled /
359					(float)r_time_running;
360
361		__u64 w_time_enabled = w->return_value.time_enabled;
362		__u64 w_time_running = w->return_value.time_running;
363
364		if (w_time_enabled != w_time_running)
365			of_mul_write = (float)w_time_enabled /
366					(float)w_time_running;
367		reads += r->return_value.value * of_mul_read * SCALE;
368		writes += w->return_value.value * of_mul_write * SCALE;
369	}
370
371	for (imc = 0; imc < imcs; imc++) {
372		close(imc_counters_config[imc][READ].fd);
373		close(imc_counters_config[imc][WRITE].fd);
374	}
375
376	if (strcmp(bw_report, "reads") == 0) {
377		*bw_imc = reads;
378		return 0;
379	}
380
381	if (strcmp(bw_report, "writes") == 0) {
382		*bw_imc = writes;
383		return 0;
384	}
385
386	*bw_imc = reads + writes;
387	return 0;
388}
389
390void set_mbm_path(const char *ctrlgrp, const char *mongrp, int resource_id)
391{
392	if (ctrlgrp && mongrp)
393		sprintf(mbm_total_path, CON_MON_MBM_LOCAL_BYTES_PATH,
394			RESCTRL_PATH, ctrlgrp, mongrp, resource_id);
395	else if (!ctrlgrp && mongrp)
396		sprintf(mbm_total_path, MON_MBM_LOCAL_BYTES_PATH, RESCTRL_PATH,
397			mongrp, resource_id);
398	else if (ctrlgrp && !mongrp)
399		sprintf(mbm_total_path, CON_MBM_LOCAL_BYTES_PATH, RESCTRL_PATH,
400			ctrlgrp, resource_id);
401	else if (!ctrlgrp && !mongrp)
402		sprintf(mbm_total_path, MBM_LOCAL_BYTES_PATH, RESCTRL_PATH,
403			resource_id);
404}
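/*
 * Example expansions (assuming RESCTRL_PATH is "/sys/fs/resctrl", ctrlgrp
 * "c1", mongrp "m1" and resource_id 0):
 *
 *	both groups:	/sys/fs/resctrl/c1/mon_groups/m1/mon_data/mon_L3_00/mbm_local_bytes
 *	ctrlgrp only:	/sys/fs/resctrl/c1/mon_data/mon_L3_00/mbm_local_bytes
 *	mongrp only:	/sys/fs/resctrl/mon_groups/m1/mon_data/mon_L3_00/mbm_local_bytes
 *	neither:	/sys/fs/resctrl/mon_data/mon_L3_00/mbm_local_bytes
 */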
405
406/*
407 * initialize_mem_bw_resctrl:	Appropriately populate "mbm_total_path"
408 * @ctrlgrp:			Name of the control monitor group (con_mon grp)
409 * @mongrp:			Name of the monitor group (mon grp)
410 * @cpu_no:			CPU number that the benchmark PID is bound to
411 * @resctrl_val:		Resctrl feature (Eg: mbm, mba.. etc)
412 */
413static void initialize_mem_bw_resctrl(const char *ctrlgrp, const char *mongrp,
414				      int cpu_no, char *resctrl_val)
415{
416	int resource_id;
417
418	if (get_resource_id(cpu_no, &resource_id) < 0) {
419		perror("Could not get resource_id");
420		return;
421	}
422
423	if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)))
424		set_mbm_path(ctrlgrp, mongrp, resource_id);
425
426	if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
427		if (ctrlgrp)
428			sprintf(mbm_total_path, CON_MBM_LOCAL_BYTES_PATH,
429				RESCTRL_PATH, ctrlgrp, resource_id);
430		else
431			sprintf(mbm_total_path, MBM_LOCAL_BYTES_PATH,
432				RESCTRL_PATH, resource_id);
433	}
434}
435
436/*
437 * Get MBM Local bytes as reported by resctrl FS
438 * For MBM,
439 * 1. If con_mon grp and mon grp are given, then read from con_mon grp's mon grp
440 * 2. If only con_mon grp is given, then read from con_mon grp
441 * 3. If both are not given, then read from root con_mon grp
442 * For MBA,
443 * 1. If con_mon grp is given, then read from it
444 * 2. If con_mon grp is not given, then read from root con_mon grp
445 */
446static int get_mem_bw_resctrl(unsigned long *mbm_total)
447{
448	FILE *fp;
449
450	fp = fopen(mbm_total_path, "r");
451	if (!fp) {
452		perror("Failed to open total bw file");
453
454		return -1;
455	}
456	if (fscanf(fp, "%lu", mbm_total) <= 0) {
457		perror("Could not get mbm local bytes");
458		fclose(fp);
459
460		return -1;
461	}
462	fclose(fp);
463
464	return 0;
465}
466
467pid_t bm_pid, ppid;
468
469void ctrlc_handler(int signum, siginfo_t *info, void *ptr)
470{
471	/* Only kill child after bm_pid is set after fork() */
472	if (bm_pid)
473		kill(bm_pid, SIGKILL);
474	umount_resctrlfs();
475	tests_cleanup();
476	ksft_print_msg("Ending\n\n");
477
478	exit(EXIT_SUCCESS);
479}
480
481/*
482 * Register CTRL-C handler for parent, as it has to kill
483 * child process before exiting.
484 */
485int signal_handler_register(void)
486{
487	struct sigaction sigact = {};
488	int ret = 0;
489
490	bm_pid = 0;
491
492	sigact.sa_sigaction = ctrlc_handler;
493	sigemptyset(&sigact.sa_mask);
494	sigact.sa_flags = SA_SIGINFO;
495	if (sigaction(SIGINT, &sigact, NULL) ||
496	    sigaction(SIGTERM, &sigact, NULL) ||
497	    sigaction(SIGHUP, &sigact, NULL)) {
498		perror("# sigaction");
499		ret = -1;
500	}
501	return ret;
502}
503
504/*
505 * Reset signal handler to SIG_DFL.
506 * Void return because the caller should keep
507 * the error code of the other path even if sigaction fails.
508 */
509void signal_handler_unregister(void)
510{
511	struct sigaction sigact = {};
512
513	sigact.sa_handler = SIG_DFL;
514	sigemptyset(&sigact.sa_mask);
515	if (sigaction(SIGINT, &sigact, NULL) ||
516	    sigaction(SIGTERM, &sigact, NULL) ||
517	    sigaction(SIGHUP, &sigact, NULL)) {
518		perror("# sigaction");
519	}
520}
521
522/*
523 * print_results_bw:	the memory bandwidth results are stored in a file
524 * @filename:		file that stores the results
525 * @bm_pid:		child pid that runs benchmark
526 * @bw_imc:		perf imc counter value
527 * @bw_resc:		memory bandwidth value
528 *
529 * Return:		0 on success. non-zero on failure.
530 */
531static int print_results_bw(char *filename,  int bm_pid, float bw_imc,
532			    unsigned long bw_resc)
533{
534	unsigned long diff = fabs(bw_imc - bw_resc);
535	FILE *fp;
536
537	if (strcmp(filename, "stdio") == 0 || strcmp(filename, "stderr") == 0) {
538		printf("Pid: %d \t Mem_BW_iMC: %f \t ", bm_pid, bw_imc);
539		printf("Mem_BW_resc: %lu \t Difference: %lu\n", bw_resc, diff);
540	} else {
541		fp = fopen(filename, "a");
542		if (!fp) {
543			perror("Cannot open results file");
544
545			return errno;
546		}
547		if (fprintf(fp, "Pid: %d \t Mem_BW_iMC: %f \t Mem_BW_resc: %lu \t Difference: %lu\n",
548			    bm_pid, bw_imc, bw_resc, diff) <= 0) {
549			fclose(fp);
550			perror("Could not log results.");
551
552			return errno;
553		}
554		fclose(fp);
555	}
556
557	return 0;
558}
559
560static void set_cmt_path(const char *ctrlgrp, const char *mongrp, char sock_num)
561{
562	if (strlen(ctrlgrp) && strlen(mongrp))
563		sprintf(llc_occup_path,	CON_MON_LCC_OCCUP_PATH,	RESCTRL_PATH,
564			ctrlgrp, mongrp, sock_num);
565	else if (!strlen(ctrlgrp) && strlen(mongrp))
566		sprintf(llc_occup_path,	MON_LCC_OCCUP_PATH, RESCTRL_PATH,
567			mongrp, sock_num);
568	else if (strlen(ctrlgrp) && !strlen(mongrp))
569		sprintf(llc_occup_path,	CON_LCC_OCCUP_PATH, RESCTRL_PATH,
570			ctrlgrp, sock_num);
571	else if (!strlen(ctrlgrp) && !strlen(mongrp))
572		sprintf(llc_occup_path, LCC_OCCUP_PATH,	RESCTRL_PATH, sock_num);
573}
574
575/*
576 * initialize_llc_occu_resctrl:	Appropriately populate "llc_occup_path"
577 * @ctrlgrp:			Name of the control monitor group (con_mon grp)
578 * @mongrp:			Name of the monitor group (mon grp)
579 * @cpu_no:			CPU number that the benchmark PID is bound to
580 * @resctrl_val:		Resctrl feature (Eg: cat, cmt.. etc)
581 */
582static void initialize_llc_occu_resctrl(const char *ctrlgrp, const char *mongrp,
583					int cpu_no, char *resctrl_val)
584{
585	int resource_id;
586
587	if (get_resource_id(cpu_no, &resource_id) < 0) {
588		perror("# Unable to get resource_id");
589		return;
590	}
591
592	if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)))
593		set_cmt_path(ctrlgrp, mongrp, resource_id);
594}
595
596static int
597measure_vals(struct resctrl_val_param *param, unsigned long *bw_resc_start)
598{
599	unsigned long bw_resc, bw_resc_end;
600	float bw_imc;
601	int ret;
602
603	/*
604	 * Measure memory bandwidth from resctrl and from
605	 * another source which is perf imc value or could
606	 * be something else if perf imc event is not available.
607	 * Compare the two values to validate resctrl value.
608	 * It takes 1sec to measure the data.
609	 */
610	ret = get_mem_bw_imc(param->cpu_no, param->bw_report, &bw_imc);
611	if (ret < 0)
612		return ret;
613
614	ret = get_mem_bw_resctrl(&bw_resc_end);
615	if (ret < 0)
616		return ret;
617
618	bw_resc = (bw_resc_end - *bw_resc_start) / MB;
619	ret = print_results_bw(param->filename, bm_pid, bw_imc, bw_resc);
620	if (ret)
621		return ret;
622
623	*bw_resc_start = bw_resc_end;
624
625	return 0;
626}
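/*
 * Worked example (made-up numbers, assuming MB expands to 2^20): if
 * mbm_local_bytes advanced by 104857600 bytes over the one second window,
 * bw_resc becomes 100 and is printed next to bw_imc from the iMC counters
 * for the same interval.
 */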
627
628/*
629 * run_benchmark - Run a specified benchmark or fill_buf (the default benchmark)
630 *		   when the registered signal arrives. Direct benchmark stdio to /dev/null.
631 * @signum:	signal number
632 * @info:	signal info
633 * @ucontext:	user context in signal handling
634 */
635static void run_benchmark(int signum, siginfo_t *info, void *ucontext)
636{
637	int operation, ret, memflush;
638	char **benchmark_cmd;
639	size_t span;
640	bool once;
641	FILE *fp;
642
643	benchmark_cmd = info->si_ptr;
644
645	/*
646	 * Direct stdio of child to /dev/null, so that only parent writes to
647	 * stdio (console)
648	 */
649	fp = freopen("/dev/null", "w", stdout);
650	if (!fp)
651		PARENT_EXIT("Unable to direct benchmark status to /dev/null");
652
653	if (strcmp(benchmark_cmd[0], "fill_buf") == 0) {
654		/* Execute default fill_buf benchmark */
655		span = strtoul(benchmark_cmd[1], NULL, 10);
656		memflush =  atoi(benchmark_cmd[2]);
657		operation = atoi(benchmark_cmd[3]);
658		if (!strcmp(benchmark_cmd[4], "true"))
659			once = true;
660		else if (!strcmp(benchmark_cmd[4], "false"))
661			once = false;
662		else
663			PARENT_EXIT("Invalid once parameter");
664
665		if (run_fill_buf(span, memflush, operation, once))
666			fprintf(stderr, "Error in running fill buffer\n");
667	} else {
668		/* Execute specified benchmark */
669		ret = execvp(benchmark_cmd[0], benchmark_cmd);
670		if (ret)
671			perror("wrong\n");
672	}
673
674	fclose(stdout);
675	PARENT_EXIT("Unable to run specified benchmark");
676}
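/*
 * Illustrative argument vector for the default benchmark (values are an
 * example only; the real defaults come from the test harness):
 *
 *	{ "fill_buf", "268435456", "1", "0", "false", NULL }
 *
 * i.e. a 256 MiB span, memflush = 1, operation = 0 (read accesses in
 * fill_buf) and once = false, so the buffer is walked until the child is
 * killed.
 */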
677
678/*
679 * resctrl_val:	execute benchmark and measure memory bandwidth on
680 *			the benchmark
681 * @benchmark_cmd:	benchmark command and its arguments
682 * @param:		parameters passed to resctrl_val()
683 *
684 * Return:		0 on success. non-zero on failure.
685 */
686int resctrl_val(const char * const *benchmark_cmd, struct resctrl_val_param *param)
687{
688	char *resctrl_val = param->resctrl_val;
689	unsigned long bw_resc_start = 0;
690	struct sigaction sigact;
691	int ret = 0, pipefd[2];
692	char pipe_message = 0;
693	union sigval value;
694
695	if (strcmp(param->filename, "") == 0)
696		sprintf(param->filename, "stdio");
697
698	if (!strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR)) ||
699	    !strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR))) {
700		ret = validate_bw_report_request(param->bw_report);
701		if (ret)
702			return ret;
703	}
704
705	/*
706	 * If benchmark wasn't successfully started by child, then child should
707	 * kill parent, so save parent's pid
708	 */
709	ppid = getpid();
710
711	if (pipe(pipefd)) {
712		perror("# Unable to create pipe");
713
714		return -1;
715	}
716
717	/*
718	 * Fork to start benchmark, save child's pid so that it can be killed
719	 * when needed
720	 */
721	fflush(stdout);
722	bm_pid = fork();
723	if (bm_pid == -1) {
724		perror("# Unable to fork");
725
726		return -1;
727	}
728
729	if (bm_pid == 0) {
730		/*
731		 * Mask all signals except SIGUSR1, parent uses SIGUSR1 to
732		 * start benchmark
733		 */
734		sigfillset(&sigact.sa_mask);
735		sigdelset(&sigact.sa_mask, SIGUSR1);
736
737		sigact.sa_sigaction = run_benchmark;
738		sigact.sa_flags = SA_SIGINFO;
739
740		/* Register for "SIGUSR1" signal from parent */
741		if (sigaction(SIGUSR1, &sigact, NULL))
742			PARENT_EXIT("Can't register child for signal");
743
744		/* Tell parent that child is ready */
745		close(pipefd[0]);
746		pipe_message = 1;
747		if (write(pipefd[1], &pipe_message, sizeof(pipe_message)) <
748		    sizeof(pipe_message)) {
749			perror("# failed signaling parent process");
750			close(pipefd[1]);
751			return -1;
752		}
753		close(pipefd[1]);
754
755		/* Suspend child until delivery of "SIGUSR1" from parent */
756		sigsuspend(&sigact.sa_mask);
757
758		PARENT_EXIT("Child is done");
759	}
760
761	ksft_print_msg("Benchmark PID: %d\n", bm_pid);
762
763	/*
764	 * The cast removes constness but nothing mutates benchmark_cmd within
765	 * the context of this process. At the receiving process, it becomes
766	 * argv, which is mutable, on exec() but that's after fork() so it
767	 * doesn't matter for the process running the tests.
768	 */
769	value.sival_ptr = (void *)benchmark_cmd;
770
771	/* Taskset benchmark to specified cpu */
772	ret = taskset_benchmark(bm_pid, param->cpu_no);
773	if (ret)
774		goto out;
775
776	/* Write benchmark to specified control&monitoring grp in resctrl FS */
777	ret = write_bm_pid_to_resctrl(bm_pid, param->ctrlgrp, param->mongrp,
778				      resctrl_val);
779	if (ret)
780		goto out;
781
782	if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
783	    !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
784		ret = initialize_mem_bw_imc();
785		if (ret)
786			goto out;
787
788		initialize_mem_bw_resctrl(param->ctrlgrp, param->mongrp,
789					  param->cpu_no, resctrl_val);
790	} else if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR)))
791		initialize_llc_occu_resctrl(param->ctrlgrp, param->mongrp,
792					    param->cpu_no, resctrl_val);
793
794	/* Parent waits for child to be ready. */
795	close(pipefd[1]);
796	while (pipe_message != 1) {
797		if (read(pipefd[0], &pipe_message, sizeof(pipe_message)) <
798		    sizeof(pipe_message)) {
799			perror("# failed reading message from child process");
800			close(pipefd[0]);
801			goto out;
802		}
803	}
804	close(pipefd[0]);
805
806	/* Signal child to start benchmark */
807	if (sigqueue(bm_pid, SIGUSR1, value) == -1) {
808		perror("# sigqueue SIGUSR1 to child");
809		ret = errno;
810		goto out;
811	}
812
813	/* Give benchmark enough time to fully run */
814	sleep(1);
815
816	/* Test runs until the callback setup() tells the test to stop. */
817	while (1) {
818		ret = param->setup(param);
819		if (ret == END_OF_TESTS) {
820			ret = 0;
821			break;
822		}
823		if (ret < 0)
824			break;
825
826		if (!strncmp(resctrl_val, MBM_STR, sizeof(MBM_STR)) ||
827		    !strncmp(resctrl_val, MBA_STR, sizeof(MBA_STR))) {
828			ret = measure_vals(param, &bw_resc_start);
829			if (ret)
830				break;
831		} else if (!strncmp(resctrl_val, CMT_STR, sizeof(CMT_STR))) {
832			sleep(1);
833			ret = measure_cache_vals(param, bm_pid);
834			if (ret)
835				break;
836		}
837	}
838
839out:
840	kill(bm_pid, SIGKILL);
841
842	return ret;
843}