// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-inject.c
 *
 * Builtin inject command: Examine the live mode (stdin) event stream
 * and repipe it to stdout while optionally injecting additional
 * events into it.
 */
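/*
 * Example usage (illustrative, using the options handled below):
 *
 *   # pipe a live record stream through inject, adding build-id events
 *   perf record -o - -- <workload> | perf inject -b -o perf.data
 *
 *   # rewrite an existing file, injecting build-ids for all DSOs
 *   perf inject --buildid-all -i perf.data -o perf.data.new
 */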
#include "builtin.h"

#include "util/color.h"
#include "util/dso.h"
#include "util/vdso.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/map.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/debug.h"
#include "util/build-id.h"
#include "util/data.h"
#include "util/auxtrace.h"
#include "util/jit.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/thread.h"
#include "util/namespaces.h"

#include <linux/err.h>
#include <subcmd/parse-options.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */

#include <linux/list.h>
#include <linux/string.h>
#include <errno.h>
#include <signal.h>

struct perf_inject {
	struct perf_tool tool;
	struct perf_session *session;
	bool build_ids;
	bool build_id_all;
	bool sched_stat;
	bool have_auxtrace;
	bool strip;
	bool jit_mode;
	bool in_place_update;
	bool in_place_update_dry_run;
	const char *input_name;
	struct perf_data output;
	u64 bytes_written;
	u64 aux_id;
	struct list_head samples;
	struct itrace_synth_opts itrace_synth_opts;
	char event_copy[PERF_SAMPLE_MAX_SIZE];
};

struct event_entry {
	struct list_head node;
	u32 tid;
	union perf_event event[];
};

static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
				struct machine *machine, u8 cpumode, u32 flags);

static int output_bytes(struct perf_inject *inject, void *buf, size_t sz)
{
	ssize_t size;

	size = perf_data__write(&inject->output, buf, sz);
	if (size < 0)
		return -errno;

	inject->bytes_written += size;
	return 0;
}

static int perf_event__repipe_synth(struct perf_tool *tool,
				    union perf_event *event)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);

	return output_bytes(inject, event, event->header.size);
}

static int perf_event__repipe_oe_synth(struct perf_tool *tool,
				       union perf_event *event,
				       struct ordered_events *oe __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

#ifdef HAVE_JITDUMP
static int perf_event__drop_oe(struct perf_tool *tool __maybe_unused,
			       union perf_event *event __maybe_unused,
			       struct ordered_events *oe __maybe_unused)
{
	return 0;
}
#endif

static int perf_event__repipe_op2_synth(struct perf_session *session,
					union perf_event *event)
{
	return perf_event__repipe_synth(session->tool, event);
}

static int perf_event__repipe_op4_synth(struct perf_session *session,
					union perf_event *event,
					u64 data __maybe_unused)
{
	return perf_event__repipe_synth(session->tool, event);
}

static int perf_event__repipe_attr(struct perf_tool *tool,
				   union perf_event *event,
				   struct evlist **pevlist)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);
	int ret;

	ret = perf_event__process_attr(tool, event, pevlist);
	if (ret)
		return ret;

	if (!inject->output.is_pipe)
		return 0;

	return perf_event__repipe_synth(tool, event);
}

static int perf_event__repipe_event_update(struct perf_tool *tool,
					   union perf_event *event,
					   struct evlist **pevlist __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

#ifdef HAVE_AUXTRACE_SUPPORT

static int copy_bytes(struct perf_inject *inject, int fd, off_t size)
{
	char buf[4096];
	ssize_t ssz;
	int ret;

	while (size > 0) {
		ssz = read(fd, buf, min(size, (off_t)sizeof(buf)));
		if (ssz < 0)
			return -errno;
		ret = output_bytes(inject, buf, ssz);
		if (ret)
			return ret;
		size -= ssz;
	}

	return 0;
}

static s64 perf_event__repipe_auxtrace(struct perf_session *session,
				       union perf_event *event)
{
	struct perf_tool *tool = session->tool;
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);
	int ret;

	inject->have_auxtrace = true;

	if (!inject->output.is_pipe) {
		off_t offset;

		offset = lseek(inject->output.file.fd, 0, SEEK_CUR);
		if (offset == -1)
			return -errno;
		ret = auxtrace_index__auxtrace_event(&session->auxtrace_index,
						     event, offset);
		if (ret < 0)
			return ret;
	}

	if (perf_data__is_pipe(session->data) || !session->one_mmap) {
		ret = output_bytes(inject, event, event->header.size);
		if (ret < 0)
			return ret;
		ret = copy_bytes(inject, perf_data__fd(session->data),
				 event->auxtrace.size);
	} else {
		ret = output_bytes(inject, event,
				   event->header.size + event->auxtrace.size);
	}
	if (ret < 0)
		return ret;

	return event->auxtrace.size;
}

#else

static s64
perf_event__repipe_auxtrace(struct perf_session *session __maybe_unused,
			    union perf_event *event __maybe_unused)
{
	pr_err("AUX area tracing not supported\n");
	return -EINVAL;
}

#endif

static int perf_event__repipe(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	return perf_event__repipe_synth(tool, event);
}

static int perf_event__drop(struct perf_tool *tool __maybe_unused,
			    union perf_event *event __maybe_unused,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine __maybe_unused)
{
	return 0;
}

static int perf_event__drop_aux(struct perf_tool *tool,
				union perf_event *event __maybe_unused,
				struct perf_sample *sample,
				struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);

	if (!inject->aux_id)
		inject->aux_id = sample->id;

	return 0;
}

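/*
 * AUX area sample data was recorded with PERF_SAMPLE_AUX.  Once itrace has
 * synthesized events from it, cut the raw AUX payload out of the sample
 * before repiping: copy the event into inject->event_copy without the
 * aux_sample data and with the aux_sample size field zeroed.
 */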
static union perf_event *
perf_inject__cut_auxtrace_sample(struct perf_inject *inject,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	size_t sz1 = sample->aux_sample.data - (void *)event;
	size_t sz2 = event->header.size - sample->aux_sample.size - sz1;
	union perf_event *ev = (union perf_event *)inject->event_copy;

	if (sz1 > event->header.size || sz2 > event->header.size ||
	    sz1 + sz2 > event->header.size ||
	    sz1 < sizeof(struct perf_event_header) + sizeof(u64))
		return event;

	memcpy(ev, event, sz1);
	memcpy((void *)ev + sz1, (void *)event + event->header.size - sz2, sz2);
	ev->header.size = sz1 + sz2;
	((u64 *)((void *)ev + sz1))[-1] = 0;

	return ev;
}

typedef int (*inject_handler)(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample,
			      struct evsel *evsel,
			      struct machine *machine);

static int perf_event__repipe_sample(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct evsel *evsel,
				     struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject,
						  tool);

	if (evsel && evsel->handler) {
		inject_handler f = evsel->handler;
		return f(tool, event, sample, evsel, machine);
	}

	build_id__mark_dso_hit(tool, event, sample, evsel, machine);

	if (inject->itrace_synth_opts.set && sample->aux_sample.size)
		event = perf_inject__cut_auxtrace_sample(inject, event, sample);

	return perf_event__repipe_synth(tool, event);
}

static int perf_event__repipe_mmap(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_mmap(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

#ifdef HAVE_JITDUMP
static int perf_event__jit_repipe_mmap(struct perf_tool *tool,
				       union perf_event *event,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u64 n = 0;
	int ret;

	/*
	 * if jit marker, then inject jit mmaps and generate ELF images
	 */
	ret = jit_process(inject->session, &inject->output, machine,
			  event->mmap.filename, event->mmap.pid, event->mmap.tid, &n);
	if (ret < 0)
		return ret;
	if (ret) {
		inject->bytes_written += n;
		return 0;
	}
	return perf_event__repipe_mmap(tool, event, sample, machine);
}
#endif

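/*
 * Look up (or create) the thread for pid/tid and resolve the DSO for
 * filename within that thread's mount namespace.  vdso maps always live on
 * the host rather than in the container, so a copy of the nsinfo with
 * need_setns cleared is used for them.  Returns a DSO with its nsinfo set,
 * or NULL.
 */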
static struct dso *findnew_dso(int pid, int tid, const char *filename,
			       struct dso_id *id, struct machine *machine)
{
	struct thread *thread;
	struct nsinfo *nsi = NULL;
	struct nsinfo *nnsi;
	struct dso *dso;
	bool vdso;

	thread = machine__findnew_thread(machine, pid, tid);
	if (thread == NULL) {
		pr_err("cannot find or create a task %d/%d.\n", tid, pid);
		return NULL;
	}

	vdso = is_vdso_map(filename);
	nsi = nsinfo__get(thread->nsinfo);

	if (vdso) {
		/* The vdso maps are always on the host and not the
		 * container.  Ensure that we don't use setns to look
		 * them up.
		 */
		nnsi = nsinfo__copy(nsi);
		if (nnsi) {
			nsinfo__put(nsi);
			nnsi->need_setns = false;
			nsi = nnsi;
		}
		dso = machine__findnew_vdso(machine, thread);
	} else {
		dso = machine__findnew_dso_id(machine, filename, id);
	}

	if (dso) {
		nsinfo__put(dso->nsinfo);
		dso->nsinfo = nsi;
	} else
		nsinfo__put(nsi);

	thread__put(thread);
	return dso;
}

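/*
 * --buildid-all: inject a build-id event for the DSO named in each MMAP
 * record, without looking at samples.  The hit flag guards against emitting
 * the same DSO more than once.
 */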
static int perf_event__repipe_buildid_mmap(struct perf_tool *tool,
					   union perf_event *event,
					   struct perf_sample *sample,
					   struct machine *machine)
{
	struct dso *dso;

	dso = findnew_dso(event->mmap.pid, event->mmap.tid,
			  event->mmap.filename, NULL, machine);

	if (dso && !dso->hit) {
		dso->hit = 1;
		dso__inject_build_id(dso, tool, machine, sample->cpumode, 0);
	}
	dso__put(dso);

	return perf_event__repipe(tool, event, sample, machine);
}

static int perf_event__repipe_mmap2(struct perf_tool *tool,
				    union perf_event *event,
				    struct perf_sample *sample,
				    struct machine *machine)
{
	int err;

	err = perf_event__process_mmap2(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
		struct dso *dso;

		dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
				  event->mmap2.filename, NULL, machine);
		if (dso) {
			/* mark it not to inject build-id */
			dso->hit = 1;
		}
		dso__put(dso);
	}

	return err;
}

#ifdef HAVE_JITDUMP
static int perf_event__jit_repipe_mmap2(struct perf_tool *tool,
					union perf_event *event,
					struct perf_sample *sample,
					struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u64 n = 0;
	int ret;

	/*
	 * if jit marker, then inject jit mmaps and generate ELF images
	 */
	ret = jit_process(inject->session, &inject->output, machine,
			  event->mmap2.filename, event->mmap2.pid, event->mmap2.tid, &n);
	if (ret < 0)
		return ret;
	if (ret) {
		inject->bytes_written += n;
		return 0;
	}
	return perf_event__repipe_mmap2(tool, event, sample, machine);
}
#endif

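/*
 * --buildid-all for MMAP2 records: if the event already carries a build ID
 * (PERF_RECORD_MISC_MMAP_BUILD_ID), only mark the DSO as hit so no build-id
 * event is injected for it; otherwise resolve the DSO via its dso_id,
 * inject a build-id event and repipe the MMAP2 record.
 */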
static int perf_event__repipe_buildid_mmap2(struct perf_tool *tool,
					    union perf_event *event,
					    struct perf_sample *sample,
					    struct machine *machine)
{
	struct dso_id dso_id = {
		.maj = event->mmap2.maj,
		.min = event->mmap2.min,
		.ino = event->mmap2.ino,
		.ino_generation = event->mmap2.ino_generation,
	};
	struct dso *dso;

	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
		/* cannot use dso_id since it'd have invalid info */
		dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
				  event->mmap2.filename, NULL, machine);
		if (dso) {
			/* mark it not to inject build-id */
			dso->hit = 1;
		}
		dso__put(dso);
		return 0;
	}

	dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
			  event->mmap2.filename, &dso_id, machine);

	if (dso && !dso->hit) {
		dso->hit = 1;
		dso__inject_build_id(dso, tool, machine, sample->cpumode,
				     event->mmap2.flags);
	}
	dso__put(dso);

	perf_event__repipe(tool, event, sample, machine);

	return 0;
}

static int perf_event__repipe_fork(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_fork(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_comm(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_comm(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_namespaces(struct perf_tool *tool,
					 union perf_event *event,
					 struct perf_sample *sample,
					 struct machine *machine)
{
	int err = perf_event__process_namespaces(tool, event, sample, machine);

	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_exit(struct perf_tool *tool,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	int err;

	err = perf_event__process_exit(tool, event, sample, machine);
	perf_event__repipe(tool, event, sample, machine);

	return err;
}

static int perf_event__repipe_tracing_data(struct perf_session *session,
					   union perf_event *event)
{
	int err;

	perf_event__repipe_synth(session->tool, event);
	err = perf_event__process_tracing_data(session, event);

	return err;
}

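/*
 * Read the build ID from the DSO's file on disk, entering the DSO's mount
 * namespace first so that paths recorded inside containers resolve
 * correctly.
 */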
static int dso__read_build_id(struct dso *dso)
{
	struct nscookie nsc;

	if (dso->has_build_id)
		return 0;

	nsinfo__mountns_enter(dso->nsinfo, &nsc);
	if (filename__read_build_id(dso->long_name, &dso->bid) > 0)
		dso->has_build_id = true;
	nsinfo__mountns_exit(&nsc);

	return dso->has_build_id ? 0 : -1;
}

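/*
 * Synthesize a build-id event for the DSO and repipe it into the output.
 * Anonymous, no-DSO and hugetlbfs mappings have no backing file to read a
 * build ID from, so they are skipped.
 */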
static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
				struct machine *machine, u8 cpumode, u32 flags)
{
	int err;

	if (is_anon_memory(dso->long_name) || flags & MAP_HUGETLB)
		return 0;
	if (is_no_dso_memory(dso->long_name))
		return 0;

	if (dso__read_build_id(dso) < 0) {
		pr_debug("no build_id found for %s\n", dso->long_name);
		return -1;
	}

	err = perf_event__synthesize_build_id(tool, dso, cpumode,
					      perf_event__repipe, machine);
	if (err) {
		pr_err("Can't synthesize build_id event for %s\n", dso->long_name);
		return -1;
	}

	return 0;
}

static int perf_event__inject_buildid(struct perf_tool *tool, union perf_event *event,
				      struct perf_sample *sample,
				      struct evsel *evsel __maybe_unused,
				      struct machine *machine)
{
	struct addr_location al;
	struct thread *thread;

	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
	if (thread == NULL) {
		pr_err("problem processing %d event, skipping it.\n",
		       event->header.type);
		goto repipe;
	}

	if (thread__find_map(thread, sample->cpumode, sample->ip, &al)) {
		if (!al.map->dso->hit) {
			al.map->dso->hit = 1;
			dso__inject_build_id(al.map->dso, tool, machine,
					     sample->cpumode, al.map->flags);
		}
	}

	thread__put(thread);
repipe:
	perf_event__repipe(tool, event, sample, machine);
	return 0;
}

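/*
 * --sched-stat support: sched_switch samples are cached on inject->samples,
 * keyed by tid.  A later sched_stat_* sample for that tid replays the cached
 * switch event with the stat sample's time and period.  The cached entry is
 * replaced by the next sched_switch for that tid and dropped when the task
 * exits.
 */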
static int perf_inject__sched_process_exit(struct perf_tool *tool,
					   union perf_event *event __maybe_unused,
					   struct perf_sample *sample,
					   struct evsel *evsel __maybe_unused,
					   struct machine *machine __maybe_unused)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	struct event_entry *ent;

	list_for_each_entry(ent, &inject->samples, node) {
		if (sample->tid == ent->tid) {
			list_del_init(&ent->node);
			free(ent);
			break;
		}
	}

	return 0;
}

static int perf_inject__sched_switch(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct evsel *evsel,
				     struct machine *machine)
{
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	struct event_entry *ent;

	perf_inject__sched_process_exit(tool, event, sample, evsel, machine);

	ent = malloc(event->header.size + sizeof(struct event_entry));
	if (ent == NULL) {
		color_fprintf(stderr, PERF_COLOR_RED,
			      "Not enough memory to process sched switch event!");
		return -1;
	}

	ent->tid = sample->tid;
	memcpy(&ent->event, event, event->header.size);
	list_add(&ent->node, &inject->samples);
	return 0;
}

static int perf_inject__sched_stat(struct perf_tool *tool,
				   union perf_event *event __maybe_unused,
				   struct perf_sample *sample,
				   struct evsel *evsel,
				   struct machine *machine)
{
	struct event_entry *ent;
	union perf_event *event_sw;
	struct perf_sample sample_sw;
	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
	u32 pid = evsel__intval(evsel, sample, "pid");

	list_for_each_entry(ent, &inject->samples, node) {
		if (pid == ent->tid)
			goto found;
	}

	return 0;
found:
	event_sw = &ent->event[0];
	evsel__parse_sample(evsel, event_sw, &sample_sw);

	sample_sw.period = sample->period;
	sample_sw.time = sample->time;
	perf_event__synthesize_sample(event_sw, evsel->core.attr.sample_type,
				      evsel->core.attr.read_format, &sample_sw);
	build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
	return perf_event__repipe(tool, event_sw, &sample_sw, machine);
}

static void sig_handler(int sig __maybe_unused)
{
	session_done = 1;
}

static int evsel__check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	const char *name = evsel__name(evsel);

	if (!(attr->sample_type & sample_type)) {
		pr_err("Samples for %s event do not have %s attribute set.",
		       name, sample_msg);
		return -EINVAL;
	}

	return 0;
}

static int drop_sample(struct perf_tool *tool __maybe_unused,
		       union perf_event *event __maybe_unused,
		       struct perf_sample *sample __maybe_unused,
		       struct evsel *evsel __maybe_unused,
		       struct machine *machine __maybe_unused)
{
	return 0;
}

static void strip_init(struct perf_inject *inject)
{
	struct evlist *evlist = inject->session->evlist;
	struct evsel *evsel;

	inject->tool.context_switch = perf_event__drop;

	evlist__for_each_entry(evlist, evsel)
		evsel->handler = drop_sample;
}

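/*
 * Parse the --vm-time-correlation argument.  An optional leading "dry-run"
 * keyword only reports what would change; anything after it is passed
 * through to the AUX area decoder as vm_tm_corr_args.  This mode rewrites
 * the input file in place.
 */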
static int parse_vm_time_correlation(const struct option *opt, const char *str, int unset)
{
	struct perf_inject *inject = opt->value;
	const char *args;
	char *dry_run;

	if (unset)
		return 0;

	inject->itrace_synth_opts.set = true;
	inject->itrace_synth_opts.vm_time_correlation = true;
	inject->in_place_update = true;

	if (!str)
		return 0;

	dry_run = skip_spaces(str);
	if (!strncmp(dry_run, "dry-run", strlen("dry-run"))) {
		inject->itrace_synth_opts.vm_tm_corr_dry_run = true;
		inject->in_place_update_dry_run = true;
		args = dry_run + strlen("dry-run");
	} else {
		args = str;
	}

	inject->itrace_synth_opts.vm_tm_corr_args = strdup(args);

	return inject->itrace_synth_opts.vm_tm_corr_args ? 0 : -ENOMEM;
}

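/*
 * Main driver: pick the tool callbacks that match the requested mode
 * (--buildid-all, -b, --sched-stat, --vm-time-correlation or --itrace),
 * process all events from the input, then rewrite the file header with the
 * new data offset and size when writing to a regular file.
 */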
static int __cmd_inject(struct perf_inject *inject)
{
	int ret = -EINVAL;
	struct perf_session *session = inject->session;
	struct perf_data *data_out = &inject->output;
	int fd = inject->in_place_update ? -1 : perf_data__fd(data_out);
	u64 output_data_offset;

	signal(SIGINT, sig_handler);

	if (inject->build_ids || inject->sched_stat ||
	    inject->itrace_synth_opts.set || inject->build_id_all) {
		inject->tool.mmap = perf_event__repipe_mmap;
		inject->tool.mmap2 = perf_event__repipe_mmap2;
		inject->tool.fork = perf_event__repipe_fork;
		inject->tool.tracing_data = perf_event__repipe_tracing_data;
	}

	output_data_offset = session->header.data_offset;

	if (inject->build_id_all) {
		inject->tool.mmap = perf_event__repipe_buildid_mmap;
		inject->tool.mmap2 = perf_event__repipe_buildid_mmap2;
	} else if (inject->build_ids) {
		inject->tool.sample = perf_event__inject_buildid;
	} else if (inject->sched_stat) {
		struct evsel *evsel;

		evlist__for_each_entry(session->evlist, evsel) {
			const char *name = evsel__name(evsel);

			if (!strcmp(name, "sched:sched_switch")) {
				if (evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID"))
					return -EINVAL;

				evsel->handler = perf_inject__sched_switch;
			} else if (!strcmp(name, "sched:sched_process_exit"))
				evsel->handler = perf_inject__sched_process_exit;
			else if (!strncmp(name, "sched:sched_stat_", 17))
				evsel->handler = perf_inject__sched_stat;
		}
	} else if (inject->itrace_synth_opts.vm_time_correlation) {
		session->itrace_synth_opts = &inject->itrace_synth_opts;
		memset(&inject->tool, 0, sizeof(inject->tool));
		inject->tool.id_index = perf_event__process_id_index;
		inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
		inject->tool.auxtrace = perf_event__process_auxtrace;
		inject->tool.auxtrace_error = perf_event__process_auxtrace_error;
		inject->tool.ordered_events = true;
		inject->tool.ordering_requires_timestamps = true;
	} else if (inject->itrace_synth_opts.set) {
		session->itrace_synth_opts = &inject->itrace_synth_opts;
		inject->itrace_synth_opts.inject = true;
		inject->tool.comm = perf_event__repipe_comm;
		inject->tool.namespaces = perf_event__repipe_namespaces;
		inject->tool.exit = perf_event__repipe_exit;
		inject->tool.id_index = perf_event__process_id_index;
		inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
		inject->tool.auxtrace = perf_event__process_auxtrace;
		inject->tool.aux = perf_event__drop_aux;
		inject->tool.itrace_start = perf_event__drop_aux;
		inject->tool.ordered_events = true;
		inject->tool.ordering_requires_timestamps = true;
		/* Allow space in the header for new attributes */
		output_data_offset = 4096;
		if (inject->strip)
			strip_init(inject);
	}

	if (!inject->itrace_synth_opts.set)
		auxtrace_index__free(&session->auxtrace_index);

	if (!data_out->is_pipe && !inject->in_place_update)
		lseek(fd, output_data_offset, SEEK_SET);

	ret = perf_session__process_events(session);
	if (ret)
		return ret;

	if (!data_out->is_pipe && !inject->in_place_update) {
		if (inject->build_ids)
			perf_header__set_feat(&session->header,
					      HEADER_BUILD_ID);
		/*
		 * Keep all buildids when there is unprocessed AUX data because
		 * it is not known which ones the AUX trace hits.
		 */
		if (perf_header__has_feat(&session->header, HEADER_BUILD_ID) &&
		    inject->have_auxtrace && !inject->itrace_synth_opts.set)
			dsos__hit_all(session);
		/*
		 * The AUX areas have been removed and replaced with
		 * synthesized hardware events, so clear the feature flag.
		 */
		if (inject->itrace_synth_opts.set) {
			perf_header__clear_feat(&session->header,
						HEADER_AUXTRACE);
			if (inject->itrace_synth_opts.last_branch ||
			    inject->itrace_synth_opts.add_last_branch)
				perf_header__set_feat(&session->header,
						      HEADER_BRANCH_STACK);
		}
		session->header.data_offset = output_data_offset;
		session->header.data_size = inject->bytes_written;
		perf_session__write_header(session, session->evlist, fd, true);
	}

	return ret;
}

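/*
 * Command entry point: every event type defaults to being repiped verbatim;
 * the options below switch individual callbacks over to the injecting
 * variants before the session is processed.
 */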
int cmd_inject(int argc, const char **argv)
{
	struct perf_inject inject = {
		.tool = {
			.sample = perf_event__repipe_sample,
			.read = perf_event__repipe_sample,
			.mmap = perf_event__repipe,
			.mmap2 = perf_event__repipe,
			.comm = perf_event__repipe,
			.namespaces = perf_event__repipe,
			.cgroup = perf_event__repipe,
			.fork = perf_event__repipe,
			.exit = perf_event__repipe,
			.lost = perf_event__repipe,
			.lost_samples = perf_event__repipe,
			.aux = perf_event__repipe,
			.itrace_start = perf_event__repipe,
			.context_switch = perf_event__repipe,
			.throttle = perf_event__repipe,
			.unthrottle = perf_event__repipe,
			.ksymbol = perf_event__repipe,
			.bpf = perf_event__repipe,
			.text_poke = perf_event__repipe,
			.attr = perf_event__repipe_attr,
			.event_update = perf_event__repipe_event_update,
			.tracing_data = perf_event__repipe_op2_synth,
			.finished_round = perf_event__repipe_oe_synth,
			.build_id = perf_event__repipe_op2_synth,
			.id_index = perf_event__repipe_op2_synth,
			.auxtrace_info = perf_event__repipe_op2_synth,
			.auxtrace_error = perf_event__repipe_op2_synth,
			.time_conv = perf_event__repipe_op2_synth,
			.thread_map = perf_event__repipe_op2_synth,
			.cpu_map = perf_event__repipe_op2_synth,
			.stat_config = perf_event__repipe_op2_synth,
			.stat = perf_event__repipe_op2_synth,
			.stat_round = perf_event__repipe_op2_synth,
			.feature = perf_event__repipe_op2_synth,
			.compressed = perf_event__repipe_op4_synth,
			.auxtrace = perf_event__repipe_auxtrace,
		},
		.input_name = "-",
		.samples = LIST_HEAD_INIT(inject.samples),
		.output = {
			.path = "-",
			.mode = PERF_DATA_MODE_WRITE,
			.use_stdio = true,
		},
	};
	struct perf_data data = {
		.mode = PERF_DATA_MODE_READ,
		.use_stdio = true,
	};
	int ret;

	struct option options[] = {
		OPT_BOOLEAN('b', "build-ids", &inject.build_ids,
			    "Inject build-ids into the output stream"),
		OPT_BOOLEAN(0, "buildid-all", &inject.build_id_all,
			    "Inject build-ids of all DSOs into the output stream"),
		OPT_STRING('i', "input", &inject.input_name, "file",
			   "input file name"),
		OPT_STRING('o', "output", &inject.output.path, "file",
			   "output file name"),
		OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat,
			    "Merge sched-stat and sched-switch for getting events "
			    "where and how long tasks slept"),
#ifdef HAVE_JITDUMP
		OPT_BOOLEAN('j', "jit", &inject.jit_mode, "merge jitdump files into perf.data file"),
#endif
		OPT_INCR('v', "verbose", &verbose,
			 "be more verbose (show build ids, etc)"),
		OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file",
			   "kallsyms pathname"),
		OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
		OPT_CALLBACK_OPTARG(0, "itrace", &inject.itrace_synth_opts,
				    NULL, "opts", "Instruction Tracing options\n"
				    ITRACE_HELP,
				    itrace_parse_synth_opts),
		OPT_BOOLEAN(0, "strip", &inject.strip,
			    "strip non-synthesized events (use with --itrace)"),
		OPT_CALLBACK_OPTARG(0, "vm-time-correlation", &inject, NULL, "opts",
				    "correlate time between VM guests and the host",
				    parse_vm_time_correlation),
		OPT_END()
	};
	const char * const inject_usage[] = {
		"perf inject [<options>]",
		NULL
	};
#ifndef HAVE_JITDUMP
	set_option_nobuild(options, 'j', "jit", "NO_LIBELF=1", true);
#endif
	argc = parse_options(argc, argv, options, inject_usage, 0);

	/*
	 * Any (unrecognized) arguments left?
	 */
	if (argc)
		usage_with_options(inject_usage, options);

	if (inject.strip && !inject.itrace_synth_opts.set) {
		pr_err("--strip option requires --itrace option\n");
		return -1;
	}

	if (inject.in_place_update) {
		if (!strcmp(inject.input_name, "-")) {
			pr_err("Input file name required for in-place updating\n");
			return -1;
		}
		if (strcmp(inject.output.path, "-")) {
			pr_err("Output file name must not be specified for in-place updating\n");
			return -1;
		}
		if (!data.force && !inject.in_place_update_dry_run) {
			pr_err("The input file would be updated in place, "
			       "the --force option is required.\n");
			return -1;
		}
		if (!inject.in_place_update_dry_run)
			data.in_place_update = true;
	} else if (perf_data__open(&inject.output)) {
		perror("failed to create output file");
		return -1;
	}

	data.path = inject.input_name;
	inject.session = perf_session__new(&data, inject.output.is_pipe, &inject.tool);
	if (IS_ERR(inject.session)) {
		ret = PTR_ERR(inject.session);
		goto out_close_output;
	}

	if (zstd_init(&(inject.session->zstd_data), 0) < 0)
		pr_warning("Decompression initialization failed.\n");

	if (inject.build_ids && !inject.build_id_all) {
		/*
		 * Use ordered events to make sure the mmap records are
		 * processed in the correct order, especially mmaps for
		 * jitted code.  We cannot generate the buildid hit list
		 * and inject the jit mmaps at the same time for now.
		 */
		inject.tool.ordered_events = true;
		inject.tool.ordering_requires_timestamps = true;
	}

	if (inject.sched_stat) {
		inject.tool.ordered_events = true;
	}

#ifdef HAVE_JITDUMP
	if (inject.jit_mode) {
		inject.tool.mmap2 = perf_event__jit_repipe_mmap2;
		inject.tool.mmap = perf_event__jit_repipe_mmap;
		inject.tool.ordered_events = true;
		inject.tool.ordering_requires_timestamps = true;
		/*
		 * JIT MMAP injection injects all MMAP events in one go, so it
		 * does not obey finished_round semantics.
		 */
		inject.tool.finished_round = perf_event__drop_oe;
	}
#endif
	ret = symbol__init(&inject.session->header.env);
	if (ret < 0)
		goto out_delete;

	ret = __cmd_inject(&inject);

out_delete:
	zstd_fini(&(inject.session->zstd_data));
	perf_session__delete(inject.session);
out_close_output:
	perf_data__close(&inject.output);
	free(inject.itrace_synth_opts.vm_tm_corr_args);
	return ret;
}