1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * intel_tpebs.c: Intel TPEBS support
4 */
5
6
7#include <sys/param.h>
8#include <subcmd/run-command.h>
9#include <thread.h>
10#include "intel-tpebs.h"
11#include <linux/list.h>
12#include <linux/zalloc.h>
13#include <linux/err.h>
14#include "sample.h"
15#include "debug.h"
16#include "evlist.h"
17#include "evsel.h"
18#include "session.h"
19#include "tool.h"
20#include "cpumap.h"
21#include "metricgroup.h"
22#include <sys/stat.h>
23#include <sys/file.h>
24#include <poll.h>
25#include <math.h>
26
/* perf record writes its output to a pipe; "-" selects stdout as the data "file". */
#define PERF_DATA "-"

/* True when TPEBS recording is requested (set outside this file, see intel-tpebs.h). */
bool tpebs_recording;
/* PID of the forked "perf record" child, or -1 when no child is running. */
static pid_t tpebs_pid = -1;
/* Number of entries currently on the tpebs_results list. */
static size_t tpebs_event_size;
/* List of struct tpebs_retire_lat, one per retire_latency event being sampled. */
static LIST_HEAD(tpebs_results);
/* Thread that consumes sample data from the forked perf record's pipe. */
static pthread_t tpebs_reader_thread;
/* Handle for the forked "perf record" child process. */
static struct child_process *tpebs_cmd;
35
/*
 * Per-event retire-latency accumulator, linked on the file-static
 * tpebs_results list and updated from sampled data.
 */
struct tpebs_retire_lat {
	struct list_head nd;
	/* Event name (owned by this struct; freed in tpebs_retire_lat__delete) */
	const char *name;
	/* Event name with the TPEBS modifier R (borrowed from the evsel; not freed here) */
	const char *tpebs_name;
	/* Count of retire_latency values found in sample data */
	size_t count;
	/* Sum of all the retire_latency values in sample data */
	/* NOTE(review): int could overflow on very long runs - consider a 64-bit type. */
	int sum;
	/* Average of retire_latency, val = sum / count */
	double val;
};
49
50static int get_perf_record_args(const char **record_argv, char buf[],
51 const char *cpumap_buf)
52{
53 struct tpebs_retire_lat *e;
54 int i = 0;
55
56 pr_debug("tpebs: Prepare perf record for retire_latency\n");
57
58 record_argv[i++] = "perf";
59 record_argv[i++] = "record";
60 record_argv[i++] = "-W";
61 record_argv[i++] = "--synth=no";
62 record_argv[i++] = buf;
63
64 if (!cpumap_buf) {
65 pr_err("tpebs: Require cpumap list to run sampling\n");
66 return -ECANCELED;
67 }
68 /* Use -C when cpumap_buf is not "-1" */
69 if (strcmp(cpumap_buf, "-1")) {
70 record_argv[i++] = "-C";
71 record_argv[i++] = cpumap_buf;
72 }
73
74 list_for_each_entry(e, &tpebs_results, nd) {
75 record_argv[i++] = "-e";
76 record_argv[i++] = e->name;
77 }
78
79 record_argv[i++] = "-o";
80 record_argv[i++] = PERF_DATA;
81
82 return 0;
83}
84
85static int prepare_run_command(const char **argv)
86{
87 tpebs_cmd = zalloc(sizeof(struct child_process));
88 if (!tpebs_cmd)
89 return -ENOMEM;
90 tpebs_cmd->argv = argv;
91 tpebs_cmd->out = -1;
92 return 0;
93}
94
95static int start_perf_record(int control_fd[], int ack_fd[],
96 const char *cpumap_buf)
97{
98 const char **record_argv;
99 int ret;
100 char buf[32];
101
102 scnprintf(buf, sizeof(buf), "--control=fd:%d,%d", control_fd[0], ack_fd[1]);
103
104 record_argv = calloc(12 + 2 * tpebs_event_size, sizeof(char *));
105 if (!record_argv)
106 return -ENOMEM;
107
108 ret = get_perf_record_args(record_argv, buf, cpumap_buf);
109 if (ret)
110 goto out;
111
112 ret = prepare_run_command(record_argv);
113 if (ret)
114 goto out;
115 ret = start_command(tpebs_cmd);
116out:
117 free(record_argv);
118 return ret;
119}
120
121static int process_sample_event(const struct perf_tool *tool __maybe_unused,
122 union perf_event *event __maybe_unused,
123 struct perf_sample *sample,
124 struct evsel *evsel,
125 struct machine *machine __maybe_unused)
126{
127 int ret = 0;
128 const char *evname;
129 struct tpebs_retire_lat *t;
130
131 evname = evsel__name(evsel);
132
133 /*
134 * Need to handle per core results? We are assuming average retire
135 * latency value will be used. Save the number of samples and the sum of
136 * retire latency value for each event.
137 */
138 list_for_each_entry(t, &tpebs_results, nd) {
139 if (!strcmp(evname, t->name)) {
140 t->count += 1;
141 t->sum += sample->retire_lat;
142 t->val = (double) t->sum / t->count;
143 break;
144 }
145 }
146
147 return ret;
148}
149
150static int process_feature_event(struct perf_session *session,
151 union perf_event *event)
152{
153 if (event->feat.feat_id < HEADER_LAST_FEATURE)
154 return perf_event__process_feature(session, event);
155 return 0;
156}
157
158static void *__sample_reader(void *arg)
159{
160 struct child_process *child = arg;
161 struct perf_session *session;
162 struct perf_data data = {
163 .mode = PERF_DATA_MODE_READ,
164 .path = PERF_DATA,
165 .file.fd = child->out,
166 };
167 struct perf_tool tool;
168
169 perf_tool__init(&tool, /*ordered_events=*/false);
170 tool.sample = process_sample_event;
171 tool.feature = process_feature_event;
172 tool.attr = perf_event__process_attr;
173
174 session = perf_session__new(&data, &tool);
175 if (IS_ERR(session))
176 return NULL;
177 perf_session__process_events(session);
178 perf_session__delete(session);
179
180 return NULL;
181}
182
183/*
184 * tpebs_stop - stop the sample data read thread and the perf record process.
185 */
186static int tpebs_stop(void)
187{
188 int ret = 0;
189
190 /* Like tpebs_start, we should only run tpebs_end once. */
191 if (tpebs_pid != -1) {
192 kill(tpebs_cmd->pid, SIGTERM);
193 tpebs_pid = -1;
194 pthread_join(tpebs_reader_thread, NULL);
195 close(tpebs_cmd->out);
196 ret = finish_command(tpebs_cmd);
197 if (ret == -ERR_RUN_COMMAND_WAITPID_SIGNAL)
198 ret = 0;
199 }
200 return ret;
201}
202
203/*
204 * tpebs_start - start tpebs execution.
205 * @evsel_list: retire_latency evsels in this list will be selected and sampled
206 * to get the average retire_latency value.
207 *
208 * This function will be called from evlist level later when evlist__open() is
209 * called consistently.
210 */
211int tpebs_start(struct evlist *evsel_list)
212{
213 int ret = 0;
214 struct evsel *evsel;
215 char cpumap_buf[50];
216
217 /*
218 * We should only run tpebs_start when tpebs_recording is enabled.
219 * And we should only run it once with all the required events.
220 */
221 if (tpebs_pid != -1 || !tpebs_recording)
222 return 0;
223
224 cpu_map__snprint(evsel_list->core.user_requested_cpus, cpumap_buf, sizeof(cpumap_buf));
225 /*
226 * Prepare perf record for sampling event retire_latency before fork and
227 * prepare workload
228 */
229 evlist__for_each_entry(evsel_list, evsel) {
230 int i;
231 char *name;
232 struct tpebs_retire_lat *new;
233
234 if (!evsel->retire_lat)
235 continue;
236
237 pr_debug("tpebs: Retire_latency of event %s is required\n", evsel->name);
238 for (i = strlen(evsel->name) - 1; i > 0; i--) {
239 if (evsel->name[i] == 'R')
240 break;
241 }
242 if (i <= 0 || evsel->name[i] != 'R') {
243 ret = -1;
244 goto err;
245 }
246
247 name = strdup(evsel->name);
248 if (!name) {
249 ret = -ENOMEM;
250 goto err;
251 }
252 name[i] = 'p';
253
254 new = zalloc(sizeof(*new));
255 if (!new) {
256 ret = -1;
257 zfree(name);
258 goto err;
259 }
260 new->name = name;
261 new->tpebs_name = evsel->name;
262 list_add_tail(&new->nd, &tpebs_results);
263 tpebs_event_size += 1;
264 }
265
266 if (tpebs_event_size > 0) {
267 struct pollfd pollfd = { .events = POLLIN, };
268 int control_fd[2], ack_fd[2], len;
269 char ack_buf[8];
270
271 /*Create control and ack fd for --control*/
272 if (pipe(control_fd) < 0) {
273 pr_err("tpebs: Failed to create control fifo");
274 ret = -1;
275 goto out;
276 }
277 if (pipe(ack_fd) < 0) {
278 pr_err("tpebs: Failed to create control fifo");
279 ret = -1;
280 goto out;
281 }
282
283 ret = start_perf_record(control_fd, ack_fd, cpumap_buf);
284 if (ret)
285 goto out;
286 tpebs_pid = tpebs_cmd->pid;
287 if (pthread_create(&tpebs_reader_thread, NULL, __sample_reader, tpebs_cmd)) {
288 kill(tpebs_cmd->pid, SIGTERM);
289 close(tpebs_cmd->out);
290 pr_err("Could not create thread to process sample data.\n");
291 ret = -1;
292 goto out;
293 }
294 /* Wait for perf record initialization.*/
295 len = strlen(EVLIST_CTL_CMD_ENABLE_TAG);
296 ret = write(control_fd[1], EVLIST_CTL_CMD_ENABLE_TAG, len);
297 if (ret != len) {
298 pr_err("perf record control write control message failed\n");
299 goto out;
300 }
301
302 /* wait for an ack */
303 pollfd.fd = ack_fd[0];
304
305 /*
306 * We need this poll to ensure the ack_fd PIPE will not hang
307 * when perf record failed for any reason. The timeout value
308 * 3000ms is an empirical selection.
309 */
310 if (!poll(&pollfd, 1, 3000)) {
311 pr_err("tpebs failed: perf record ack timeout\n");
312 ret = -1;
313 goto out;
314 }
315
316 if (!(pollfd.revents & POLLIN)) {
317 pr_err("tpebs failed: did not received an ack\n");
318 ret = -1;
319 goto out;
320 }
321
322 ret = read(ack_fd[0], ack_buf, sizeof(ack_buf));
323 if (ret > 0)
324 ret = strcmp(ack_buf, EVLIST_CTL_CMD_ACK_TAG);
325 else {
326 pr_err("tpebs: perf record control ack failed\n");
327 goto out;
328 }
329out:
330 close(control_fd[0]);
331 close(control_fd[1]);
332 close(ack_fd[0]);
333 close(ack_fd[1]);
334 }
335err:
336 if (ret)
337 tpebs_delete();
338 return ret;
339}
340
341
342int tpebs_set_evsel(struct evsel *evsel, int cpu_map_idx, int thread)
343{
344 __u64 val;
345 bool found = false;
346 struct tpebs_retire_lat *t;
347 struct perf_counts_values *count;
348
349 /* Non reitre_latency evsel should never enter this function. */
350 if (!evsel__is_retire_lat(evsel))
351 return -1;
352
353 /*
354 * Need to stop the forked record to ensure get sampled data from the
355 * PIPE to process and get non-zero retire_lat value for hybrid.
356 */
357 tpebs_stop();
358 count = perf_counts(evsel->counts, cpu_map_idx, thread);
359
360 list_for_each_entry(t, &tpebs_results, nd) {
361 if (t->tpebs_name == evsel->name ||
362 (evsel->metric_id && !strcmp(t->tpebs_name, evsel->metric_id))) {
363 found = true;
364 break;
365 }
366 }
367
368 /* Set ena and run to non-zero */
369 count->ena = count->run = 1;
370 count->lost = 0;
371
372 if (!found) {
373 /*
374 * Set default value or 0 when retire_latency for this event is
375 * not found from sampling data (record_tpebs not set or 0
376 * sample recorded).
377 */
378 count->val = 0;
379 return 0;
380 }
381
382 /*
383 * Only set retire_latency value to the first CPU and thread.
384 */
385 if (cpu_map_idx == 0 && thread == 0)
386 val = rint(t->val);
387 else
388 val = 0;
389
390 count->val = val;
391 return 0;
392}
393
/* Free one results entry. r->tpebs_name is borrowed from the evsel and is not freed here. */
static void tpebs_retire_lat__delete(struct tpebs_retire_lat *r)
{
	zfree(&r->name);
	free(r);
}
399
400
401/*
402 * tpebs_delete - delete tpebs related data and stop the created thread and
403 * process by calling tpebs_stop().
404 *
405 * This function is called from evlist_delete() and also from builtin-stat
406 * stat_handle_error(). If tpebs_start() is called from places other then perf
407 * stat, need to ensure tpebs_delete() is also called to safely free mem and
408 * close the data read thread and the forked perf record process.
409 *
410 * This function is also called in evsel__close() to be symmetric with
411 * tpebs_start() being called in evsel__open(). We will update this call site
412 * when move tpebs_start() to evlist level.
413 */
414void tpebs_delete(void)
415{
416 struct tpebs_retire_lat *r, *rtmp;
417
418 if (tpebs_pid == -1)
419 return;
420
421 tpebs_stop();
422
423 list_for_each_entry_safe(r, rtmp, &tpebs_results, nd) {
424 list_del_init(&r->nd);
425 tpebs_retire_lat__delete(r);
426 }
427
428 if (tpebs_cmd) {
429 free(tpebs_cmd);
430 tpebs_cmd = NULL;
431 }
432}