   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * builtin-inject.c
   4 *
   5 * Builtin inject command: Examine the live mode (stdin) event stream
   6 * and repipe it to stdout while optionally injecting additional
   7 * events into it.
   8 */
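/*
 * Example (illustrative, not from the original header): inject build-id
 * events into a piped perf.data stream, e.g.
 *
 *   perf record -o - -- ./workload | perf inject -b -i - -o perf.data
 */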
   9#include "builtin.h"
  10
  11#include "util/color.h"
  12#include "util/dso.h"
  13#include "util/vdso.h"
  14#include "util/evlist.h"
  15#include "util/evsel.h"
  16#include "util/map.h"
  17#include "util/session.h"
  18#include "util/tool.h"
  19#include "util/debug.h"
  20#include "util/build-id.h"
  21#include "util/data.h"
  22#include "util/auxtrace.h"
  23#include "util/jit.h"
  24#include "util/string2.h"
  25#include "util/symbol.h"
  26#include "util/synthetic-events.h"
  27#include "util/thread.h"
  28#include "util/namespaces.h"
  29#include "util/util.h"
  30#include "util/tsc.h"
  31
  32#include <internal/lib.h>
  33
  34#include <linux/err.h>
  35#include <subcmd/parse-options.h>
  36#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
  37
  38#include <linux/list.h>
  39#include <linux/string.h>
  40#include <linux/zalloc.h>
  41#include <linux/hash.h>
  42#include <ctype.h>
  43#include <errno.h>
  44#include <signal.h>
  45#include <inttypes.h>
  46
  47struct guest_event {
  48	struct perf_sample		sample;
  49	union perf_event		*event;
  50	char				event_buf[PERF_SAMPLE_MAX_SIZE];
  51};
  52
  53struct guest_id {
  54	/* hlist_node must be first, see free_hlist() */
  55	struct hlist_node		node;
  56	u64				id;
  57	u64				host_id;
  58	u32				vcpu;
  59};
  60
  61struct guest_tid {
  62	/* hlist_node must be first, see free_hlist() */
  63	struct hlist_node		node;
  64	/* Thread ID of QEMU thread */
  65	u32				tid;
  66	u32				vcpu;
  67};
  68
  69struct guest_vcpu {
  70	/* Current host CPU */
  71	u32				cpu;
  72	/* Thread ID of QEMU thread */
  73	u32				tid;
  74};
  75
  76struct guest_session {
  77	char				*perf_data_file;
  78	u32				machine_pid;
  79	u64				time_offset;
  80	double				time_scale;
  81	struct perf_tool		tool;
  82	struct perf_data		data;
  83	struct perf_session		*session;
  84	char				*tmp_file_name;
  85	int				tmp_fd;
  86	struct perf_tsc_conversion	host_tc;
  87	struct perf_tsc_conversion	guest_tc;
  88	bool				copy_kcore_dir;
  89	bool				have_tc;
  90	bool				fetched;
  91	bool				ready;
  92	u16				dflt_id_hdr_size;
  93	u64				dflt_id;
  94	u64				highest_id;
  95	/* Array of guest_vcpu */
  96	struct guest_vcpu		*vcpu;
  97	size_t				vcpu_cnt;
  98	/* Hash table for guest_id */
  99	struct hlist_head		heads[PERF_EVLIST__HLIST_SIZE];
 100	/* Hash table for guest_tid */
 101	struct hlist_head		tids[PERF_EVLIST__HLIST_SIZE];
 102	/* Place to stash next guest event */
 103	struct guest_event		ev;
 104};
 105
 106struct perf_inject {
 107	struct perf_tool	tool;
 108	struct perf_session	*session;
 109	bool			build_ids;
 110	bool			build_id_all;
 111	bool			sched_stat;
 112	bool			have_auxtrace;
 113	bool			strip;
 114	bool			jit_mode;
 115	bool			in_place_update;
 116	bool			in_place_update_dry_run;
 117	bool			is_pipe;
 118	bool			copy_kcore_dir;
 119	const char		*input_name;
 120	struct perf_data	output;
 121	u64			bytes_written;
 122	u64			aux_id;
 123	struct list_head	samples;
 124	struct itrace_synth_opts itrace_synth_opts;
 125	char			event_copy[PERF_SAMPLE_MAX_SIZE];
 126	struct perf_file_section secs[HEADER_FEAT_BITS];
 127	struct guest_session	guest_session;
 128	struct strlist		*known_build_ids;
 129};
 130
 131struct event_entry {
 132	struct list_head node;
 133	u32		 tid;
 134	union perf_event event[];
 135};
 136
 137static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
 138				struct machine *machine, u8 cpumode, u32 flags);
 139
 140static int output_bytes(struct perf_inject *inject, void *buf, size_t sz)
 141{
 142	ssize_t size;
 143
 144	size = perf_data__write(&inject->output, buf, sz);
 145	if (size < 0)
 146		return -errno;
 147
 148	inject->bytes_written += size;
 149	return 0;
 150}
 151
 152static int perf_event__repipe_synth(struct perf_tool *tool,
 153				    union perf_event *event)
 154{
 155	struct perf_inject *inject = container_of(tool, struct perf_inject,
 156						  tool);
 157
 158	return output_bytes(inject, event, event->header.size);
 159}
 160
 161static int perf_event__repipe_oe_synth(struct perf_tool *tool,
 162				       union perf_event *event,
 163				       struct ordered_events *oe __maybe_unused)
 164{
 165	return perf_event__repipe_synth(tool, event);
 166}
 167
 168#ifdef HAVE_JITDUMP
 169static int perf_event__drop_oe(struct perf_tool *tool __maybe_unused,
 170			       union perf_event *event __maybe_unused,
 171			       struct ordered_events *oe __maybe_unused)
 172{
 173	return 0;
 174}
 175#endif
 176
 177static int perf_event__repipe_op2_synth(struct perf_session *session,
 178					union perf_event *event)
 179{
 180	return perf_event__repipe_synth(session->tool, event);
 181}
 182
 183static int perf_event__repipe_op4_synth(struct perf_session *session,
 184					union perf_event *event,
 185					u64 data __maybe_unused,
 186					const char *str __maybe_unused)
 187{
 188	return perf_event__repipe_synth(session->tool, event);
 189}
 190
 191static int perf_event__repipe_attr(struct perf_tool *tool,
 192				   union perf_event *event,
 193				   struct evlist **pevlist)
 194{
 195	struct perf_inject *inject = container_of(tool, struct perf_inject,
 196						  tool);
 197	int ret;
 198
 199	ret = perf_event__process_attr(tool, event, pevlist);
 200	if (ret)
 201		return ret;
 202
 203	if (!inject->is_pipe)
 204		return 0;
 205
 206	return perf_event__repipe_synth(tool, event);
 207}
 208
 209static int perf_event__repipe_event_update(struct perf_tool *tool,
 210					   union perf_event *event,
 211					   struct evlist **pevlist __maybe_unused)
 212{
 213	return perf_event__repipe_synth(tool, event);
 214}
 215
 216#ifdef HAVE_AUXTRACE_SUPPORT
 217
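/* Copy 'size' bytes from 'fd' to the inject output in 4 KiB chunks. */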
 218static int copy_bytes(struct perf_inject *inject, int fd, off_t size)
 219{
 220	char buf[4096];
 221	ssize_t ssz;
 222	int ret;
 223
 224	while (size > 0) {
 225		ssz = read(fd, buf, min(size, (off_t)sizeof(buf)));
 226		if (ssz < 0)
 227			return -errno;
 228		ret = output_bytes(inject, buf, ssz);
 229		if (ret)
 230			return ret;
 231		size -= ssz;
 232	}
 233
 234	return 0;
 235}
 236
 237static s64 perf_event__repipe_auxtrace(struct perf_session *session,
 238				       union perf_event *event)
 239{
 240	struct perf_tool *tool = session->tool;
 241	struct perf_inject *inject = container_of(tool, struct perf_inject,
 242						  tool);
 243	int ret;
 244
 245	inject->have_auxtrace = true;
 246
 247	if (!inject->output.is_pipe) {
 248		off_t offset;
 249
 250		offset = lseek(inject->output.file.fd, 0, SEEK_CUR);
 251		if (offset == -1)
 252			return -errno;
 253		ret = auxtrace_index__auxtrace_event(&session->auxtrace_index,
 254						     event, offset);
 255		if (ret < 0)
 256			return ret;
 257	}
 258
 259	if (perf_data__is_pipe(session->data) || !session->one_mmap) {
 260		ret = output_bytes(inject, event, event->header.size);
 261		if (ret < 0)
 262			return ret;
 263		ret = copy_bytes(inject, perf_data__fd(session->data),
 264				 event->auxtrace.size);
 265	} else {
 266		ret = output_bytes(inject, event,
 267				   event->header.size + event->auxtrace.size);
 268	}
 269	if (ret < 0)
 270		return ret;
 271
 272	return event->auxtrace.size;
 273}
 274
 275#else
 276
 277static s64
 278perf_event__repipe_auxtrace(struct perf_session *session __maybe_unused,
 279			    union perf_event *event __maybe_unused)
 280{
 281	pr_err("AUX area tracing not supported\n");
 282	return -EINVAL;
 283}
 284
 285#endif
 286
 287static int perf_event__repipe(struct perf_tool *tool,
 288			      union perf_event *event,
 289			      struct perf_sample *sample __maybe_unused,
 290			      struct machine *machine __maybe_unused)
 291{
 292	return perf_event__repipe_synth(tool, event);
 293}
 294
 295static int perf_event__drop(struct perf_tool *tool __maybe_unused,
 296			    union perf_event *event __maybe_unused,
 297			    struct perf_sample *sample __maybe_unused,
 298			    struct machine *machine __maybe_unused)
 299{
 300	return 0;
 301}
 302
 303static int perf_event__drop_aux(struct perf_tool *tool,
 304				union perf_event *event __maybe_unused,
 305				struct perf_sample *sample,
 306				struct machine *machine __maybe_unused)
 307{
 308	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
 309
 310	if (!inject->aux_id)
 311		inject->aux_id = sample->id;
 312
 313	return 0;
 314}
 315
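/*
 * Make a copy of the sample event with the AUX area data cut out: keep the
 * bytes before and after the aux_sample data, shrink the header size and
 * zero the embedded aux_sample size field.
 */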
 316static union perf_event *
 317perf_inject__cut_auxtrace_sample(struct perf_inject *inject,
 318				 union perf_event *event,
 319				 struct perf_sample *sample)
 320{
 321	size_t sz1 = sample->aux_sample.data - (void *)event;
 322	size_t sz2 = event->header.size - sample->aux_sample.size - sz1;
 323	union perf_event *ev = (union perf_event *)inject->event_copy;
 324
 325	if (sz1 > event->header.size || sz2 > event->header.size ||
 326	    sz1 + sz2 > event->header.size ||
 327	    sz1 < sizeof(struct perf_event_header) + sizeof(u64))
 328		return event;
 329
 330	memcpy(ev, event, sz1);
 331	memcpy((void *)ev + sz1, (void *)event + event->header.size - sz2, sz2);
 332	ev->header.size = sz1 + sz2;
 333	((u64 *)((void *)ev + sz1))[-1] = 0;
 334
 335	return ev;
 336}
 337
 338typedef int (*inject_handler)(struct perf_tool *tool,
 339			      union perf_event *event,
 340			      struct perf_sample *sample,
 341			      struct evsel *evsel,
 342			      struct machine *machine);
 343
 344static int perf_event__repipe_sample(struct perf_tool *tool,
 345				     union perf_event *event,
 346				     struct perf_sample *sample,
 347				     struct evsel *evsel,
 348				     struct machine *machine)
 349{
 350	struct perf_inject *inject = container_of(tool, struct perf_inject,
 351						  tool);
 352
 353	if (evsel && evsel->handler) {
 354		inject_handler f = evsel->handler;
 355		return f(tool, event, sample, evsel, machine);
 356	}
 357
 358	build_id__mark_dso_hit(tool, event, sample, evsel, machine);
 359
 360	if (inject->itrace_synth_opts.set && sample->aux_sample.size)
 361		event = perf_inject__cut_auxtrace_sample(inject, event, sample);
 362
 363	return perf_event__repipe_synth(tool, event);
 364}
 365
 366static int perf_event__repipe_mmap(struct perf_tool *tool,
 367				   union perf_event *event,
 368				   struct perf_sample *sample,
 369				   struct machine *machine)
 370{
 371	int err;
 372
 373	err = perf_event__process_mmap(tool, event, sample, machine);
 374	perf_event__repipe(tool, event, sample, machine);
 375
 376	return err;
 377}
 378
 379#ifdef HAVE_JITDUMP
 380static int perf_event__jit_repipe_mmap(struct perf_tool *tool,
 381				       union perf_event *event,
 382				       struct perf_sample *sample,
 383				       struct machine *machine)
 384{
 385	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
 386	u64 n = 0;
 387	int ret;
 388
 389	/*
 390	 * if jit marker, then inject jit mmaps and generate ELF images
 391	 */
 392	ret = jit_process(inject->session, &inject->output, machine,
 393			  event->mmap.filename, event->mmap.pid, event->mmap.tid, &n);
 394	if (ret < 0)
 395		return ret;
 396	if (ret) {
 397		inject->bytes_written += n;
 398		return 0;
 399	}
 400	return perf_event__repipe_mmap(tool, event, sample, machine);
 401}
 402#endif
 403
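/*
 * Find or create the DSO for a mapping and attach the thread's namespace
 * info to it. vdso maps are always looked up on the host, never via setns.
 */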
 404static struct dso *findnew_dso(int pid, int tid, const char *filename,
 405			       struct dso_id *id, struct machine *machine)
 406{
 407	struct thread *thread;
 408	struct nsinfo *nsi = NULL;
 409	struct nsinfo *nnsi;
 410	struct dso *dso;
 411	bool vdso;
 412
 413	thread = machine__findnew_thread(machine, pid, tid);
 414	if (thread == NULL) {
 415		pr_err("cannot find or create a task %d/%d.\n", tid, pid);
 416		return NULL;
 417	}
 418
 419	vdso = is_vdso_map(filename);
 420	nsi = nsinfo__get(thread->nsinfo);
 421
 422	if (vdso) {
 423		/* The vdso maps are always on the host and not the
 424		 * container.  Ensure that we don't use setns to look
 425		 * them up.
 426		 */
 427		nnsi = nsinfo__copy(nsi);
 428		if (nnsi) {
 429			nsinfo__put(nsi);
 430			nsinfo__clear_need_setns(nnsi);
 431			nsi = nnsi;
 432		}
 433		dso = machine__findnew_vdso(machine, thread);
 434	} else {
 435		dso = machine__findnew_dso_id(machine, filename, id);
 436	}
 437
 438	if (dso) {
 439		mutex_lock(&dso->lock);
 440		nsinfo__put(dso->nsinfo);
 441		dso->nsinfo = nsi;
 442		mutex_unlock(&dso->lock);
 443	} else
 444		nsinfo__put(nsi);
 445
 446	thread__put(thread);
 447	return dso;
 448}
 449
 450static int perf_event__repipe_buildid_mmap(struct perf_tool *tool,
 451					   union perf_event *event,
 452					   struct perf_sample *sample,
 453					   struct machine *machine)
 454{
 455	struct dso *dso;
 456
 457	dso = findnew_dso(event->mmap.pid, event->mmap.tid,
 458			  event->mmap.filename, NULL, machine);
 459
 460	if (dso && !dso->hit) {
 461		dso->hit = 1;
 462		dso__inject_build_id(dso, tool, machine, sample->cpumode, 0);
 463	}
 464	dso__put(dso);
 465
 466	return perf_event__repipe(tool, event, sample, machine);
 467}
 468
 469static int perf_event__repipe_mmap2(struct perf_tool *tool,
 470				   union perf_event *event,
 471				   struct perf_sample *sample,
 472				   struct machine *machine)
 473{
 474	int err;
 475
 476	err = perf_event__process_mmap2(tool, event, sample, machine);
 477	perf_event__repipe(tool, event, sample, machine);
 478
 479	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
 480		struct dso *dso;
 481
 482		dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
 483				  event->mmap2.filename, NULL, machine);
 484		if (dso) {
 485			/* mark it not to inject build-id */
 486			dso->hit = 1;
 487		}
 488		dso__put(dso);
 489	}
 490
 491	return err;
 492}
 493
 494#ifdef HAVE_JITDUMP
 495static int perf_event__jit_repipe_mmap2(struct perf_tool *tool,
 496					union perf_event *event,
 497					struct perf_sample *sample,
 498					struct machine *machine)
 499{
 500	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
 501	u64 n = 0;
 502	int ret;
 503
 504	/*
 505	 * if jit marker, then inject jit mmaps and generate ELF images
 506	 */
 507	ret = jit_process(inject->session, &inject->output, machine,
 508			  event->mmap2.filename, event->mmap2.pid, event->mmap2.tid, &n);
 509	if (ret < 0)
 510		return ret;
 511	if (ret) {
 512		inject->bytes_written += n;
 513		return 0;
 514	}
 515	return perf_event__repipe_mmap2(tool, event, sample, machine);
 516}
 517#endif
 518
 519static int perf_event__repipe_buildid_mmap2(struct perf_tool *tool,
 520					    union perf_event *event,
 521					    struct perf_sample *sample,
 522					    struct machine *machine)
 523{
 524	struct dso_id dso_id = {
 525		.maj = event->mmap2.maj,
 526		.min = event->mmap2.min,
 527		.ino = event->mmap2.ino,
 528		.ino_generation = event->mmap2.ino_generation,
 529	};
 530	struct dso *dso;
 531
 532	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
 533		/* cannot use dso_id since it'd have invalid info */
 534		dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
 535				  event->mmap2.filename, NULL, machine);
 536		if (dso) {
 537			/* mark it not to inject build-id */
 538			dso->hit = 1;
 539		}
 540		dso__put(dso);
 541		return 0;
 542	}
 543
 544	dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
 545			  event->mmap2.filename, &dso_id, machine);
 546
 547	if (dso && !dso->hit) {
 548		dso->hit = 1;
 549		dso__inject_build_id(dso, tool, machine, sample->cpumode,
 550				     event->mmap2.flags);
 551	}
 552	dso__put(dso);
 553
 554	perf_event__repipe(tool, event, sample, machine);
 555
 556	return 0;
 557}
 558
 559static int perf_event__repipe_fork(struct perf_tool *tool,
 560				   union perf_event *event,
 561				   struct perf_sample *sample,
 562				   struct machine *machine)
 563{
 564	int err;
 565
 566	err = perf_event__process_fork(tool, event, sample, machine);
 567	perf_event__repipe(tool, event, sample, machine);
 568
 569	return err;
 570}
 571
 572static int perf_event__repipe_comm(struct perf_tool *tool,
 573				   union perf_event *event,
 574				   struct perf_sample *sample,
 575				   struct machine *machine)
 576{
 577	int err;
 578
 579	err = perf_event__process_comm(tool, event, sample, machine);
 580	perf_event__repipe(tool, event, sample, machine);
 581
 582	return err;
 583}
 584
 585static int perf_event__repipe_namespaces(struct perf_tool *tool,
 586					 union perf_event *event,
 587					 struct perf_sample *sample,
 588					 struct machine *machine)
 589{
 590	int err = perf_event__process_namespaces(tool, event, sample, machine);
 591
 592	perf_event__repipe(tool, event, sample, machine);
 593
 594	return err;
 595}
 596
 597static int perf_event__repipe_exit(struct perf_tool *tool,
 598				   union perf_event *event,
 599				   struct perf_sample *sample,
 600				   struct machine *machine)
 601{
 602	int err;
 603
 604	err = perf_event__process_exit(tool, event, sample, machine);
 605	perf_event__repipe(tool, event, sample, machine);
 606
 607	return err;
 608}
 609
 610#ifdef HAVE_LIBTRACEEVENT
 611static int perf_event__repipe_tracing_data(struct perf_session *session,
 612					   union perf_event *event)
 613{
 614	perf_event__repipe_synth(session->tool, event);
 615
 616	return perf_event__process_tracing_data(session, event);
 617}
 618#endif
 619
 620static int dso__read_build_id(struct dso *dso)
 621{
 622	struct nscookie nsc;
 623
 624	if (dso->has_build_id)
 625		return 0;
 626
 627	mutex_lock(&dso->lock);
 628	nsinfo__mountns_enter(dso->nsinfo, &nsc);
 629	if (filename__read_build_id(dso->long_name, &dso->bid) > 0)
 630		dso->has_build_id = true;
 631	else if (dso->nsinfo) {
 632		char *new_name;
 633
 634		new_name = filename_with_chroot(dso->nsinfo->pid,
 635						dso->long_name);
 636		if (new_name && filename__read_build_id(new_name, &dso->bid) > 0)
 637			dso->has_build_id = true;
 638		free(new_name);
 639	}
 640	nsinfo__mountns_exit(&nsc);
 641	mutex_unlock(&dso->lock);
 642
 643	return dso->has_build_id ? 0 : -1;
 644}
 645
 646static struct strlist *perf_inject__parse_known_build_ids(
 647	const char *known_build_ids_string)
 648{
 649	struct str_node *pos, *tmp;
 650	struct strlist *known_build_ids;
 651	int bid_len;
 652
 653	known_build_ids = strlist__new(known_build_ids_string, NULL);
 654	if (known_build_ids == NULL)
 655		return NULL;
 656	strlist__for_each_entry_safe(pos, tmp, known_build_ids) {
 657		const char *build_id, *dso_name;
 658
 659		build_id = skip_spaces(pos->s);
 660		dso_name = strchr(build_id, ' ');
 661		if (dso_name == NULL) {
 662			strlist__remove(known_build_ids, pos);
 663			continue;
 664		}
 665		bid_len = dso_name - pos->s;
 666		dso_name = skip_spaces(dso_name);
 667		if (bid_len % 2 != 0 || bid_len >= SBUILD_ID_SIZE) {
 668			strlist__remove(known_build_ids, pos);
 669			continue;
 670		}
 671		for (int ix = 0; 2 * ix + 1 < bid_len; ++ix) {
 672			if (!isxdigit(build_id[2 * ix]) ||
 673			    !isxdigit(build_id[2 * ix + 1])) {
 674				strlist__remove(known_build_ids, pos);
 675				break;
 676			}
 677		}
 678	}
 679	return known_build_ids;
 680}
 681
 682static bool perf_inject__lookup_known_build_id(struct perf_inject *inject,
 683					       struct dso *dso)
 684{
 685	struct str_node *pos;
 686	int bid_len;
 687
 688	strlist__for_each_entry(pos, inject->known_build_ids) {
 689		const char *build_id, *dso_name;
 690
 691		build_id = skip_spaces(pos->s);
 692		dso_name = strchr(build_id, ' ');
 693		bid_len = dso_name - pos->s;
 694		dso_name = skip_spaces(dso_name);
 695		if (strcmp(dso->long_name, dso_name))
 696			continue;
 697		for (int ix = 0; 2 * ix + 1 < bid_len; ++ix) {
 698			dso->bid.data[ix] = (hex(build_id[2 * ix]) << 4 |
 699					     hex(build_id[2 * ix + 1]));
 700		}
 701		dso->bid.size = bid_len / 2;
 702		dso->has_build_id = 1;
 703		return true;
 704	}
 705	return false;
 706}
 707
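/*
 * Inject a build-id event for the DSO via the repipe path, skipping
 * anonymous and huge-page mappings. The build ID comes either from the
 * user-supplied known-build-ids list or is read from the DSO file.
 */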
 708static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
 709				struct machine *machine, u8 cpumode, u32 flags)
 710{
 711	struct perf_inject *inject = container_of(tool, struct perf_inject,
 712						  tool);
 713	int err;
 714
 715	if (is_anon_memory(dso->long_name) || flags & MAP_HUGETLB)
 716		return 0;
 717	if (is_no_dso_memory(dso->long_name))
 718		return 0;
 719
 720	if (inject->known_build_ids != NULL &&
 721	    perf_inject__lookup_known_build_id(inject, dso))
 722		return 1;
 723
 724	if (dso__read_build_id(dso) < 0) {
 725		pr_debug("no build_id found for %s\n", dso->long_name);
 726		return -1;
 727	}
 728
 729	err = perf_event__synthesize_build_id(tool, dso, cpumode,
 730					      perf_event__repipe, machine);
 731	if (err) {
 732		pr_err("Can't synthesize build_id event for %s\n", dso->long_name);
 733		return -1;
 734	}
 735
 736	return 0;
 737}
 738
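/*
 * For each sample, resolve the map for the sample IP and inject a build-id
 * event the first time a DSO is hit, then repipe the original sample.
 */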
 739int perf_event__inject_buildid(struct perf_tool *tool, union perf_event *event,
 740			       struct perf_sample *sample,
 741			       struct evsel *evsel __maybe_unused,
 742			       struct machine *machine)
 743{
 744	struct addr_location al;
 745	struct thread *thread;
 746
 747	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
 748	if (thread == NULL) {
 749		pr_err("problem processing %d event, skipping it.\n",
 750		       event->header.type);
 751		goto repipe;
 752	}
 753
 754	if (thread__find_map(thread, sample->cpumode, sample->ip, &al)) {
 755		if (!al.map->dso->hit) {
 756			al.map->dso->hit = 1;
 757			dso__inject_build_id(al.map->dso, tool, machine,
 758					     sample->cpumode, al.map->flags);
 759		}
 760	}
 761
 762	thread__put(thread);
 763repipe:
 764	perf_event__repipe(tool, event, sample, machine);
 765	return 0;
 766}
 767
 768static int perf_inject__sched_process_exit(struct perf_tool *tool,
 769					   union perf_event *event __maybe_unused,
 770					   struct perf_sample *sample,
 771					   struct evsel *evsel __maybe_unused,
 772					   struct machine *machine __maybe_unused)
 773{
 774	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
 775	struct event_entry *ent;
 776
 777	list_for_each_entry(ent, &inject->samples, node) {
 778		if (sample->tid == ent->tid) {
 779			list_del_init(&ent->node);
 780			free(ent);
 781			break;
 782		}
 783	}
 784
 785	return 0;
 786}
 787
 788static int perf_inject__sched_switch(struct perf_tool *tool,
 789				     union perf_event *event,
 790				     struct perf_sample *sample,
 791				     struct evsel *evsel,
 792				     struct machine *machine)
 793{
 794	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
 795	struct event_entry *ent;
 796
 797	perf_inject__sched_process_exit(tool, event, sample, evsel, machine);
 798
 799	ent = malloc(event->header.size + sizeof(struct event_entry));
 800	if (ent == NULL) {
 801		color_fprintf(stderr, PERF_COLOR_RED,
 802			     "Not enough memory to process sched switch event!");
 803		return -1;
 804	}
 805
 806	ent->tid = sample->tid;
 807	memcpy(&ent->event, event, event->header.size);
 808	list_add(&ent->node, &inject->samples);
 809	return 0;
 810}
 811
 812#ifdef HAVE_LIBTRACEEVENT
 813static int perf_inject__sched_stat(struct perf_tool *tool,
 814				   union perf_event *event __maybe_unused,
 815				   struct perf_sample *sample,
 816				   struct evsel *evsel,
 817				   struct machine *machine)
 818{
 819	struct event_entry *ent;
 820	union perf_event *event_sw;
 821	struct perf_sample sample_sw;
 822	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
 823	u32 pid = evsel__intval(evsel, sample, "pid");
 824
 825	list_for_each_entry(ent, &inject->samples, node) {
 826		if (pid == ent->tid)
 827			goto found;
 828	}
 829
 830	return 0;
 831found:
 832	event_sw = &ent->event[0];
 833	evsel__parse_sample(evsel, event_sw, &sample_sw);
 834
 835	sample_sw.period = sample->period;
 836	sample_sw.time	 = sample->time;
 837	perf_event__synthesize_sample(event_sw, evsel->core.attr.sample_type,
 838				      evsel->core.attr.read_format, &sample_sw);
 839	build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
 840	return perf_event__repipe(tool, event_sw, &sample_sw, machine);
 841}
 842#endif
 843
 844static struct guest_vcpu *guest_session__vcpu(struct guest_session *gs, u32 vcpu)
 845{
 846	if (realloc_array_as_needed(gs->vcpu, gs->vcpu_cnt, vcpu, NULL))
 847		return NULL;
 848	return &gs->vcpu[vcpu];
 849}
 850
 851static int guest_session__output_bytes(struct guest_session *gs, void *buf, size_t sz)
 852{
 853	ssize_t ret = writen(gs->tmp_fd, buf, sz);
 854
 855	return ret < 0 ? ret : 0;
 856}
 857
 858static int guest_session__repipe(struct perf_tool *tool,
 859				 union perf_event *event,
 860				 struct perf_sample *sample __maybe_unused,
 861				 struct machine *machine __maybe_unused)
 862{
 863	struct guest_session *gs = container_of(tool, struct guest_session, tool);
 864
 865	return guest_session__output_bytes(gs, event, event->header.size);
 866}
 867
 868static int guest_session__map_tid(struct guest_session *gs, u32 tid, u32 vcpu)
 869{
 870	struct guest_tid *guest_tid = zalloc(sizeof(*guest_tid));
 871	int hash;
 872
 873	if (!guest_tid)
 874		return -ENOMEM;
 875
 876	guest_tid->tid = tid;
 877	guest_tid->vcpu = vcpu;
 878	hash = hash_32(guest_tid->tid, PERF_EVLIST__HLIST_BITS);
 879	hlist_add_head(&guest_tid->node, &gs->tids[hash]);
 880
 881	return 0;
 882}
 883
 884static int host_peek_vm_comms_cb(struct perf_session *session __maybe_unused,
 885				 union perf_event *event,
 886				 u64 offset __maybe_unused, void *data)
 887{
 888	struct guest_session *gs = data;
 889	unsigned int vcpu;
 890	struct guest_vcpu *guest_vcpu;
 891	int ret;
 892
 893	if (event->header.type != PERF_RECORD_COMM ||
 894	    event->comm.pid != gs->machine_pid)
 895		return 0;
 896
 897	/*
  898	 * The QEMU option -name debug-threads=on causes thread names to be
  899	 * formatted as below, although it is not an ABI. libvirt also seems to use
  900	 * this by default. Here we rely on it to tell us which thread is which VCPU.
 901	 */
 902	ret = sscanf(event->comm.comm, "CPU %u/KVM", &vcpu);
 903	if (ret <= 0)
 904		return ret;
 905	pr_debug("Found VCPU: tid %u comm %s vcpu %u\n",
 906		 event->comm.tid, event->comm.comm, vcpu);
 907	if (vcpu > INT_MAX) {
 908		pr_err("Invalid VCPU %u\n", vcpu);
 909		return -EINVAL;
 910	}
 911	guest_vcpu = guest_session__vcpu(gs, vcpu);
 912	if (!guest_vcpu)
 913		return -ENOMEM;
 914	if (guest_vcpu->tid && guest_vcpu->tid != event->comm.tid) {
 915		pr_err("Fatal error: Two threads found with the same VCPU\n");
 916		return -EINVAL;
 917	}
 918	guest_vcpu->tid = event->comm.tid;
 919
 920	return guest_session__map_tid(gs, event->comm.tid, vcpu);
 921}
 922
 923static int host_peek_vm_comms(struct perf_session *session, struct guest_session *gs)
 924{
 925	return perf_session__peek_events(session, session->header.data_offset,
 926					 session->header.data_size,
 927					 host_peek_vm_comms_cb, gs);
 928}
 929
 930static bool evlist__is_id_used(struct evlist *evlist, u64 id)
 931{
 932	return evlist__id2sid(evlist, id);
 933}
 934
 935static u64 guest_session__allocate_new_id(struct guest_session *gs, struct evlist *host_evlist)
 936{
 937	do {
 938		gs->highest_id += 1;
 939	} while (!gs->highest_id || evlist__is_id_used(host_evlist, gs->highest_id));
 940
 941	return gs->highest_id;
 942}
 943
 944static int guest_session__map_id(struct guest_session *gs, u64 id, u64 host_id, u32 vcpu)
 945{
 946	struct guest_id *guest_id = zalloc(sizeof(*guest_id));
 947	int hash;
 948
 949	if (!guest_id)
 950		return -ENOMEM;
 951
 952	guest_id->id = id;
 953	guest_id->host_id = host_id;
 954	guest_id->vcpu = vcpu;
 955	hash = hash_64(guest_id->id, PERF_EVLIST__HLIST_BITS);
 956	hlist_add_head(&guest_id->node, &gs->heads[hash]);
 957
 958	return 0;
 959}
 960
 961static u64 evlist__find_highest_id(struct evlist *evlist)
 962{
 963	struct evsel *evsel;
 964	u64 highest_id = 1;
 965
 966	evlist__for_each_entry(evlist, evsel) {
 967		u32 j;
 968
 969		for (j = 0; j < evsel->core.ids; j++) {
 970			u64 id = evsel->core.id[j];
 971
 972			if (id > highest_id)
 973				highest_id = id;
 974		}
 975	}
 976
 977	return highest_id;
 978}
 979
 980static int guest_session__map_ids(struct guest_session *gs, struct evlist *host_evlist)
 981{
 982	struct evlist *evlist = gs->session->evlist;
 983	struct evsel *evsel;
 984	int ret;
 985
 986	evlist__for_each_entry(evlist, evsel) {
 987		u32 j;
 988
 989		for (j = 0; j < evsel->core.ids; j++) {
 990			struct perf_sample_id *sid;
 991			u64 host_id;
 992			u64 id;
 993
 994			id = evsel->core.id[j];
 995			sid = evlist__id2sid(evlist, id);
 996			if (!sid || sid->cpu.cpu == -1)
 997				continue;
 998			host_id = guest_session__allocate_new_id(gs, host_evlist);
 999			ret = guest_session__map_id(gs, id, host_id, sid->cpu.cpu);
1000			if (ret)
1001				return ret;
1002		}
1003	}
1004
1005	return 0;
1006}
1007
1008static struct guest_id *guest_session__lookup_id(struct guest_session *gs, u64 id)
1009{
1010	struct hlist_head *head;
1011	struct guest_id *guest_id;
1012	int hash;
1013
1014	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
1015	head = &gs->heads[hash];
1016
1017	hlist_for_each_entry(guest_id, head, node)
1018		if (guest_id->id == id)
1019			return guest_id;
1020
1021	return NULL;
1022}
1023
1024static int process_attr(struct perf_tool *tool, union perf_event *event,
1025			struct perf_sample *sample __maybe_unused,
1026			struct machine *machine __maybe_unused)
1027{
1028	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
1029
1030	return perf_event__process_attr(tool, event, &inject->session->evlist);
1031}
1032
1033static int guest_session__add_attr(struct guest_session *gs, struct evsel *evsel)
1034{
1035	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
1036	struct perf_event_attr attr = evsel->core.attr;
1037	u64 *id_array;
1038	u32 *vcpu_array;
1039	int ret = -ENOMEM;
1040	u32 i;
1041
1042	id_array = calloc(evsel->core.ids, sizeof(*id_array));
1043	if (!id_array)
1044		return -ENOMEM;
1045
1046	vcpu_array = calloc(evsel->core.ids, sizeof(*vcpu_array));
1047	if (!vcpu_array)
1048		goto out;
1049
1050	for (i = 0; i < evsel->core.ids; i++) {
1051		u64 id = evsel->core.id[i];
1052		struct guest_id *guest_id = guest_session__lookup_id(gs, id);
1053
1054		if (!guest_id) {
1055			pr_err("Failed to find guest id %"PRIu64"\n", id);
1056			ret = -EINVAL;
1057			goto out;
1058		}
1059		id_array[i] = guest_id->host_id;
1060		vcpu_array[i] = guest_id->vcpu;
1061	}
1062
1063	attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
1064	attr.exclude_host = 1;
1065	attr.exclude_guest = 0;
1066
1067	ret = perf_event__synthesize_attr(&inject->tool, &attr, evsel->core.ids,
1068					  id_array, process_attr);
1069	if (ret)
1070		pr_err("Failed to add guest attr.\n");
1071
1072	for (i = 0; i < evsel->core.ids; i++) {
1073		struct perf_sample_id *sid;
1074		u32 vcpu = vcpu_array[i];
1075
1076		sid = evlist__id2sid(inject->session->evlist, id_array[i]);
1077		/* Guest event is per-thread from the host point of view */
1078		sid->cpu.cpu = -1;
1079		sid->tid = gs->vcpu[vcpu].tid;
1080		sid->machine_pid = gs->machine_pid;
1081		sid->vcpu.cpu = vcpu;
1082	}
1083out:
1084	free(vcpu_array);
1085	free(id_array);
1086	return ret;
1087}
1088
1089static int guest_session__add_attrs(struct guest_session *gs)
1090{
1091	struct evlist *evlist = gs->session->evlist;
1092	struct evsel *evsel;
1093	int ret;
1094
1095	evlist__for_each_entry(evlist, evsel) {
1096		ret = guest_session__add_attr(gs, evsel);
1097		if (ret)
1098			return ret;
1099	}
1100
1101	return 0;
1102}
1103
1104static int synthesize_id_index(struct perf_inject *inject, size_t new_cnt)
1105{
1106	struct perf_session *session = inject->session;
1107	struct evlist *evlist = session->evlist;
1108	struct machine *machine = &session->machines.host;
1109	size_t from = evlist->core.nr_entries - new_cnt;
1110
1111	return __perf_event__synthesize_id_index(&inject->tool, perf_event__repipe,
1112						 evlist, machine, from);
1113}
1114
1115static struct guest_tid *guest_session__lookup_tid(struct guest_session *gs, u32 tid)
1116{
1117	struct hlist_head *head;
1118	struct guest_tid *guest_tid;
1119	int hash;
1120
1121	hash = hash_32(tid, PERF_EVLIST__HLIST_BITS);
1122	head = &gs->tids[hash];
1123
1124	hlist_for_each_entry(guest_tid, head, node)
1125		if (guest_tid->tid == tid)
1126			return guest_tid;
1127
1128	return NULL;
1129}
1130
1131static bool dso__is_in_kernel_space(struct dso *dso)
1132{
1133	if (dso__is_vdso(dso))
1134		return false;
1135
1136	return dso__is_kcore(dso) ||
1137	       dso->kernel ||
1138	       is_kernel_module(dso->long_name, PERF_RECORD_MISC_CPUMODE_UNKNOWN);
1139}
1140
1141static u64 evlist__first_id(struct evlist *evlist)
1142{
1143	struct evsel *evsel;
1144
1145	evlist__for_each_entry(evlist, evsel) {
1146		if (evsel->core.ids)
1147			return evsel->core.id[0];
1148	}
1149	return 0;
1150}
1151
1152static int process_build_id(struct perf_tool *tool,
1153			    union perf_event *event,
1154			    struct perf_sample *sample __maybe_unused,
1155			    struct machine *machine __maybe_unused)
1156{
1157	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
1158
1159	return perf_event__process_build_id(inject->session, event);
1160}
1161
1162static int synthesize_build_id(struct perf_inject *inject, struct dso *dso, pid_t machine_pid)
1163{
1164	struct machine *machine = perf_session__findnew_machine(inject->session, machine_pid);
1165	u8 cpumode = dso__is_in_kernel_space(dso) ?
1166			PERF_RECORD_MISC_GUEST_KERNEL :
1167			PERF_RECORD_MISC_GUEST_USER;
1168
1169	if (!machine)
1170		return -ENOMEM;
1171
1172	dso->hit = 1;
1173
1174	return perf_event__synthesize_build_id(&inject->tool, dso, cpumode,
1175					       process_build_id, machine);
1176}
1177
1178static int guest_session__add_build_ids(struct guest_session *gs)
1179{
1180	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
1181	struct machine *machine = &gs->session->machines.host;
1182	struct dso *dso;
1183	int ret;
1184
1185	/* Build IDs will be put in the Build ID feature section */
1186	perf_header__set_feat(&inject->session->header, HEADER_BUILD_ID);
1187
1188	dsos__for_each_with_build_id(dso, &machine->dsos.head) {
1189		ret = synthesize_build_id(inject, dso, gs->machine_pid);
1190		if (ret)
1191			return ret;
1192	}
1193
1194	return 0;
1195}
1196
1197static int guest_session__ksymbol_event(struct perf_tool *tool,
1198					union perf_event *event,
1199					struct perf_sample *sample __maybe_unused,
1200					struct machine *machine __maybe_unused)
1201{
1202	struct guest_session *gs = container_of(tool, struct guest_session, tool);
1203
1204	/* Only support out-of-line i.e. no BPF support */
1205	if (event->ksymbol.ksym_type != PERF_RECORD_KSYMBOL_TYPE_OOL)
1206		return 0;
1207
1208	return guest_session__output_bytes(gs, event, event->header.size);
1209}
1210
1211static int guest_session__start(struct guest_session *gs, const char *name, bool force)
1212{
1213	char tmp_file_name[] = "/tmp/perf-inject-guest_session-XXXXXX";
1214	struct perf_session *session;
1215	int ret;
1216
1217	/* Only these events will be injected */
1218	gs->tool.mmap		= guest_session__repipe;
1219	gs->tool.mmap2		= guest_session__repipe;
1220	gs->tool.comm		= guest_session__repipe;
1221	gs->tool.fork		= guest_session__repipe;
1222	gs->tool.exit		= guest_session__repipe;
1223	gs->tool.lost		= guest_session__repipe;
1224	gs->tool.context_switch	= guest_session__repipe;
1225	gs->tool.ksymbol	= guest_session__ksymbol_event;
1226	gs->tool.text_poke	= guest_session__repipe;
1227	/*
1228	 * Processing a build ID creates a struct dso with that build ID. Later,
1229	 * all guest dsos are iterated and the build IDs processed into the host
1230	 * session where they will be output to the Build ID feature section
1231	 * when the perf.data file header is written.
1232	 */
1233	gs->tool.build_id	= perf_event__process_build_id;
1234	/* Process the id index to know what VCPU an ID belongs to */
1235	gs->tool.id_index	= perf_event__process_id_index;
1236
1237	gs->tool.ordered_events	= true;
1238	gs->tool.ordering_requires_timestamps = true;
1239
1240	gs->data.path	= name;
1241	gs->data.force	= force;
1242	gs->data.mode	= PERF_DATA_MODE_READ;
1243
1244	session = perf_session__new(&gs->data, &gs->tool);
1245	if (IS_ERR(session))
1246		return PTR_ERR(session);
1247	gs->session = session;
1248
1249	/*
1250	 * Initial events have zero'd ID samples. Get default ID sample size
1251	 * used for removing them.
1252	 */
1253	gs->dflt_id_hdr_size = session->machines.host.id_hdr_size;
1254	/* And default ID for adding back a host-compatible ID sample */
1255	gs->dflt_id = evlist__first_id(session->evlist);
1256	if (!gs->dflt_id) {
1257		pr_err("Guest data has no sample IDs");
1258		return -EINVAL;
1259	}
1260
1261	/* Temporary file for guest events */
1262	gs->tmp_file_name = strdup(tmp_file_name);
1263	if (!gs->tmp_file_name)
1264		return -ENOMEM;
1265	gs->tmp_fd = mkstemp(gs->tmp_file_name);
1266	if (gs->tmp_fd < 0)
1267		return -errno;
1268
1269	if (zstd_init(&gs->session->zstd_data, 0) < 0)
1270		pr_warning("Guest session decompression initialization failed.\n");
1271
1272	/*
1273	 * perf does not support processing 2 sessions simultaneously, so output
1274	 * guest events to a temporary file.
1275	 */
1276	ret = perf_session__process_events(gs->session);
1277	if (ret)
1278		return ret;
1279
1280	if (lseek(gs->tmp_fd, 0, SEEK_SET))
1281		return -errno;
1282
1283	return 0;
1284}
1285
1286/* Free hlist nodes assuming hlist_node is the first member of hlist entries */
1287static void free_hlist(struct hlist_head *heads, size_t hlist_sz)
1288{
1289	struct hlist_node *pos, *n;
1290	size_t i;
1291
1292	for (i = 0; i < hlist_sz; ++i) {
1293		hlist_for_each_safe(pos, n, &heads[i]) {
1294			hlist_del(pos);
1295			free(pos);
1296		}
1297	}
1298}
1299
1300static void guest_session__exit(struct guest_session *gs)
1301{
1302	if (gs->session) {
1303		perf_session__delete(gs->session);
1304		free_hlist(gs->heads, PERF_EVLIST__HLIST_SIZE);
1305		free_hlist(gs->tids, PERF_EVLIST__HLIST_SIZE);
1306	}
1307	if (gs->tmp_file_name) {
1308		if (gs->tmp_fd >= 0)
1309			close(gs->tmp_fd);
1310		unlink(gs->tmp_file_name);
1311		free(gs->tmp_file_name);
1312	}
1313	free(gs->vcpu);
1314	free(gs->perf_data_file);
1315}
1316
1317static void get_tsc_conv(struct perf_tsc_conversion *tc, struct perf_record_time_conv *time_conv)
1318{
1319	tc->time_shift		= time_conv->time_shift;
1320	tc->time_mult		= time_conv->time_mult;
1321	tc->time_zero		= time_conv->time_zero;
1322	tc->time_cycles		= time_conv->time_cycles;
1323	tc->time_mask		= time_conv->time_mask;
1324	tc->cap_user_time_zero	= time_conv->cap_user_time_zero;
1325	tc->cap_user_time_short	= time_conv->cap_user_time_short;
1326}
1327
1328static void guest_session__get_tc(struct guest_session *gs)
1329{
1330	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
1331
1332	get_tsc_conv(&gs->host_tc, &inject->session->time_conv);
1333	get_tsc_conv(&gs->guest_tc, &gs->session->time_conv);
1334}
1335
1336static void guest_session__convert_time(struct guest_session *gs, u64 guest_time, u64 *host_time)
1337{
1338	u64 tsc;
1339
1340	if (!guest_time) {
1341		*host_time = 0;
1342		return;
1343	}
1344
1345	if (gs->guest_tc.cap_user_time_zero)
1346		tsc = perf_time_to_tsc(guest_time, &gs->guest_tc);
1347	else
1348		tsc = guest_time;
1349
1350	/*
1351	 * This is the correct order of operations for x86 if the TSC Offset and
1352	 * Multiplier values are used.
1353	 */
1354	tsc -= gs->time_offset;
1355	tsc /= gs->time_scale;
1356
1357	if (gs->host_tc.cap_user_time_zero)
1358		*host_time = tsc_to_perf_time(tsc, &gs->host_tc);
1359	else
1360		*host_time = tsc;
1361}
1362
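/*
 * Read the next event from the guest temporary file into the stash and
 * convert its timestamp from guest time to host time.
 */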
1363static int guest_session__fetch(struct guest_session *gs)
1364{
1365	void *buf = gs->ev.event_buf;
1366	struct perf_event_header *hdr = buf;
1367	size_t hdr_sz = sizeof(*hdr);
1368	ssize_t ret;
1369
1370	ret = readn(gs->tmp_fd, buf, hdr_sz);
1371	if (ret < 0)
1372		return ret;
1373
1374	if (!ret) {
1375		/* Zero size means EOF */
1376		hdr->size = 0;
1377		return 0;
1378	}
1379
1380	buf += hdr_sz;
1381
1382	ret = readn(gs->tmp_fd, buf, hdr->size - hdr_sz);
1383	if (ret < 0)
1384		return ret;
1385
1386	gs->ev.event = (union perf_event *)gs->ev.event_buf;
1387	gs->ev.sample.time = 0;
1388
1389	if (hdr->type >= PERF_RECORD_USER_TYPE_START) {
1390		pr_err("Unexpected type fetching guest event");
1391		return 0;
1392	}
1393
1394	ret = evlist__parse_sample(gs->session->evlist, gs->ev.event, &gs->ev.sample);
1395	if (ret) {
1396		pr_err("Parse failed fetching guest event");
1397		return ret;
1398	}
1399
1400	if (!gs->have_tc) {
1401		guest_session__get_tc(gs);
1402		gs->have_tc = true;
1403	}
1404
1405	guest_session__convert_time(gs, gs->ev.sample.time, &gs->ev.sample.time);
1406
1407	return 0;
1408}
1409
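/* Append a synthesized ID sample for 'sample' to the end of the event. */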
1410static int evlist__append_id_sample(struct evlist *evlist, union perf_event *ev,
1411				    const struct perf_sample *sample)
1412{
1413	struct evsel *evsel;
1414	void *array;
1415	int ret;
1416
1417	evsel = evlist__id2evsel(evlist, sample->id);
1418	array = ev;
1419
1420	if (!evsel) {
1421		pr_err("No evsel for id %"PRIu64"\n", sample->id);
1422		return -EINVAL;
1423	}
1424
1425	array += ev->header.size;
1426	ret = perf_event__synthesize_id_sample(array, evsel->core.attr.sample_type, sample);
1427	if (ret < 0)
1428		return ret;
1429
1430	if (ret & 7) {
1431		pr_err("Bad id sample size %d\n", ret);
1432		return -EINVAL;
1433	}
1434
1435	ev->header.size += ret;
1436
1437	return 0;
1438}
1439
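/*
 * Inject stashed guest events with timestamps up to and including
 * 'timestamp': switch the cpumode to guest, replace the guest ID sample
 * with a host-compatible one, and map guest VCPU numbers to host CPUs.
 */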
1440static int guest_session__inject_events(struct guest_session *gs, u64 timestamp)
1441{
1442	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
1443	int ret;
1444
1445	if (!gs->ready)
1446		return 0;
1447
1448	while (1) {
1449		struct perf_sample *sample;
1450		struct guest_id *guest_id;
1451		union perf_event *ev;
1452		u16 id_hdr_size;
1453		u8 cpumode;
1454		u64 id;
1455
1456		if (!gs->fetched) {
1457			ret = guest_session__fetch(gs);
1458			if (ret)
1459				return ret;
1460			gs->fetched = true;
1461		}
1462
1463		ev = gs->ev.event;
1464		sample = &gs->ev.sample;
1465
1466		if (!ev->header.size)
1467			return 0; /* EOF */
1468
1469		if (sample->time > timestamp)
1470			return 0;
1471
1472		/* Change cpumode to guest */
1473		cpumode = ev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1474		if (cpumode & PERF_RECORD_MISC_USER)
1475			cpumode = PERF_RECORD_MISC_GUEST_USER;
1476		else
1477			cpumode = PERF_RECORD_MISC_GUEST_KERNEL;
1478		ev->header.misc &= ~PERF_RECORD_MISC_CPUMODE_MASK;
1479		ev->header.misc |= cpumode;
1480
1481		id = sample->id;
1482		if (!id) {
1483			id = gs->dflt_id;
1484			id_hdr_size = gs->dflt_id_hdr_size;
1485		} else {
1486			struct evsel *evsel = evlist__id2evsel(gs->session->evlist, id);
1487
1488			id_hdr_size = evsel__id_hdr_size(evsel);
1489		}
1490
1491		if (id_hdr_size & 7) {
1492			pr_err("Bad id_hdr_size %u\n", id_hdr_size);
1493			return -EINVAL;
1494		}
1495
1496		if (ev->header.size & 7) {
1497			pr_err("Bad event size %u\n", ev->header.size);
1498			return -EINVAL;
1499		}
1500
1501		/* Remove guest id sample */
1502		ev->header.size -= id_hdr_size;
1503
1504		if (ev->header.size & 7) {
1505			pr_err("Bad raw event size %u\n", ev->header.size);
1506			return -EINVAL;
1507		}
1508
1509		guest_id = guest_session__lookup_id(gs, id);
1510		if (!guest_id) {
1511			pr_err("Guest event with unknown id %llu\n",
1512			       (unsigned long long)id);
1513			return -EINVAL;
1514		}
1515
1516		/* Change to host ID to avoid conflicting ID values */
1517		sample->id = guest_id->host_id;
1518		sample->stream_id = guest_id->host_id;
1519
1520		if (sample->cpu != (u32)-1) {
1521			if (sample->cpu >= gs->vcpu_cnt) {
1522				pr_err("Guest event with unknown VCPU %u\n",
1523				       sample->cpu);
1524				return -EINVAL;
1525			}
1526			/* Change to host CPU instead of guest VCPU */
1527			sample->cpu = gs->vcpu[sample->cpu].cpu;
1528		}
1529
1530		/* New id sample with new ID and CPU */
1531		ret = evlist__append_id_sample(inject->session->evlist, ev, sample);
1532		if (ret)
1533			return ret;
1534
1535		if (ev->header.size & 7) {
1536			pr_err("Bad new event size %u\n", ev->header.size);
1537			return -EINVAL;
1538		}
1539
1540		gs->fetched = false;
1541
1542		ret = output_bytes(inject, ev, ev->header.size);
1543		if (ret)
1544			return ret;
1545	}
1546}
1547
1548static int guest_session__flush_events(struct guest_session *gs)
1549{
1550	return guest_session__inject_events(gs, -1);
1551}
1552
1553static int host__repipe(struct perf_tool *tool,
1554			union perf_event *event,
1555			struct perf_sample *sample,
1556			struct machine *machine)
1557{
1558	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
1559	int ret;
1560
1561	ret = guest_session__inject_events(&inject->guest_session, sample->time);
1562	if (ret)
1563		return ret;
1564
1565	return perf_event__repipe(tool, event, sample, machine);
1566}
1567
1568static int host__finished_init(struct perf_session *session, union perf_event *event)
1569{
1570	struct perf_inject *inject = container_of(session->tool, struct perf_inject, tool);
1571	struct guest_session *gs = &inject->guest_session;
1572	int ret;
1573
1574	/*
1575	 * Peek through host COMM events to find QEMU threads and the VCPU they
1576	 * are running.
1577	 */
1578	ret = host_peek_vm_comms(session, gs);
1579	if (ret)
1580		return ret;
1581
1582	if (!gs->vcpu_cnt) {
1583		pr_err("No VCPU threads found for pid %u\n", gs->machine_pid);
1584		return -EINVAL;
1585	}
1586
1587	/*
1588	 * Allocate new (unused) host sample IDs and map them to the guest IDs.
1589	 */
1590	gs->highest_id = evlist__find_highest_id(session->evlist);
1591	ret = guest_session__map_ids(gs, session->evlist);
1592	if (ret)
1593		return ret;
1594
1595	ret = guest_session__add_attrs(gs);
1596	if (ret)
1597		return ret;
1598
1599	ret = synthesize_id_index(inject, gs->session->evlist->core.nr_entries);
1600	if (ret) {
1601		pr_err("Failed to synthesize id_index\n");
1602		return ret;
1603	}
1604
1605	ret = guest_session__add_build_ids(gs);
1606	if (ret) {
1607		pr_err("Failed to add guest build IDs\n");
1608		return ret;
1609	}
1610
1611	gs->ready = true;
1612
1613	ret = guest_session__inject_events(gs, 0);
1614	if (ret)
1615		return ret;
1616
1617	return perf_event__repipe_op2_synth(session, event);
1618}
1619
1620/*
1621 * Obey finished-round ordering. The FINISHED_ROUND event is first processed
1622 * which flushes host events to file up until the last flush time. Then inject
1623 * guest events up to the same time. Finally write out the FINISHED_ROUND event
1624 * itself.
1625 */
1626static int host__finished_round(struct perf_tool *tool,
1627				union perf_event *event,
1628				struct ordered_events *oe)
1629{
1630	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
1631	int ret = perf_event__process_finished_round(tool, event, oe);
1632	u64 timestamp = ordered_events__last_flush_time(oe);
1633
1634	if (ret)
1635		return ret;
1636
1637	ret = guest_session__inject_events(&inject->guest_session, timestamp);
1638	if (ret)
1639		return ret;
1640
1641	return perf_event__repipe_oe_synth(tool, event, oe);
1642}
1643
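/*
 * On a switch-in to a QEMU VCPU thread, record which host CPU that VCPU is
 * now running on, then repipe the event.
 */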
1644static int host__context_switch(struct perf_tool *tool,
1645				union perf_event *event,
1646				struct perf_sample *sample,
1647				struct machine *machine)
1648{
1649	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
1650	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
1651	struct guest_session *gs = &inject->guest_session;
1652	u32 pid = event->context_switch.next_prev_pid;
1653	u32 tid = event->context_switch.next_prev_tid;
1654	struct guest_tid *guest_tid;
1655	u32 vcpu;
1656
1657	if (out || pid != gs->machine_pid)
1658		goto out;
1659
1660	guest_tid = guest_session__lookup_tid(gs, tid);
1661	if (!guest_tid)
1662		goto out;
1663
1664	if (sample->cpu == (u32)-1) {
1665		pr_err("Switch event does not have CPU\n");
1666		return -EINVAL;
1667	}
1668
1669	vcpu = guest_tid->vcpu;
1670	if (vcpu >= gs->vcpu_cnt)
1671		return -EINVAL;
1672
1673	/* Guest is switching in, record which CPU the VCPU is now running on */
1674	gs->vcpu[vcpu].cpu = sample->cpu;
1675out:
1676	return host__repipe(tool, event, sample, machine);
1677}
1678
1679static void sig_handler(int sig __maybe_unused)
1680{
1681	session_done = 1;
1682}
1683
1684static int evsel__check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg)
1685{
1686	struct perf_event_attr *attr = &evsel->core.attr;
1687	const char *name = evsel__name(evsel);
1688
1689	if (!(attr->sample_type & sample_type)) {
1690		pr_err("Samples for %s event do not have %s attribute set.",
1691			name, sample_msg);
1692		return -EINVAL;
1693	}
1694
1695	return 0;
1696}
1697
1698static int drop_sample(struct perf_tool *tool __maybe_unused,
1699		       union perf_event *event __maybe_unused,
1700		       struct perf_sample *sample __maybe_unused,
1701		       struct evsel *evsel __maybe_unused,
1702		       struct machine *machine __maybe_unused)
1703{
1704	return 0;
1705}
1706
1707static void strip_init(struct perf_inject *inject)
1708{
1709	struct evlist *evlist = inject->session->evlist;
1710	struct evsel *evsel;
1711
1712	inject->tool.context_switch = perf_event__drop;
1713
1714	evlist__for_each_entry(evlist, evsel)
1715		evsel->handler = drop_sample;
1716}
1717
1718static int parse_vm_time_correlation(const struct option *opt, const char *str, int unset)
1719{
1720	struct perf_inject *inject = opt->value;
1721	const char *args;
1722	char *dry_run;
1723
1724	if (unset)
1725		return 0;
1726
1727	inject->itrace_synth_opts.set = true;
1728	inject->itrace_synth_opts.vm_time_correlation = true;
1729	inject->in_place_update = true;
1730
1731	if (!str)
1732		return 0;
1733
1734	dry_run = skip_spaces(str);
1735	if (!strncmp(dry_run, "dry-run", strlen("dry-run"))) {
1736		inject->itrace_synth_opts.vm_tm_corr_dry_run = true;
1737		inject->in_place_update_dry_run = true;
1738		args = dry_run + strlen("dry-run");
1739	} else {
1740		args = str;
1741	}
1742
1743	inject->itrace_synth_opts.vm_tm_corr_args = strdup(args);
1744
1745	return inject->itrace_synth_opts.vm_tm_corr_args ? 0 : -ENOMEM;
1746}
1747
1748static int parse_guest_data(const struct option *opt, const char *str, int unset)
1749{
1750	struct perf_inject *inject = opt->value;
1751	struct guest_session *gs = &inject->guest_session;
1752	char *tok;
1753	char *s;
1754
1755	if (unset)
1756		return 0;
1757
1758	if (!str)
1759		goto bad_args;
1760
1761	s = strdup(str);
1762	if (!s)
1763		return -ENOMEM;
1764
1765	gs->perf_data_file = strsep(&s, ",");
1766	if (!gs->perf_data_file)
1767		goto bad_args;
1768
1769	gs->copy_kcore_dir = has_kcore_dir(gs->perf_data_file);
1770	if (gs->copy_kcore_dir)
1771		inject->output.is_dir = true;
1772
1773	tok = strsep(&s, ",");
1774	if (!tok)
1775		goto bad_args;
1776	gs->machine_pid = strtoul(tok, NULL, 0);
1777	if (!inject->guest_session.machine_pid)
1778		goto bad_args;
1779
1780	gs->time_scale = 1;
1781
1782	tok = strsep(&s, ",");
1783	if (!tok)
1784		goto out;
1785	gs->time_offset = strtoull(tok, NULL, 0);
1786
1787	tok = strsep(&s, ",");
1788	if (!tok)
1789		goto out;
1790	gs->time_scale = strtod(tok, NULL);
1791	if (!gs->time_scale)
1792		goto bad_args;
1793out:
1794	return 0;
1795
1796bad_args:
1797	pr_err("--guest-data option requires guest perf.data file name, "
1798	       "guest machine PID, and optionally guest timestamp offset, "
1799	       "and guest timestamp scale factor, separated by commas.\n");
1800	return -1;
1801}
1802
1803static int save_section_info_cb(struct perf_file_section *section,
1804				struct perf_header *ph __maybe_unused,
1805				int feat, int fd __maybe_unused, void *data)
1806{
1807	struct perf_inject *inject = data;
1808
1809	inject->secs[feat] = *section;
1810	return 0;
1811}
1812
1813static int save_section_info(struct perf_inject *inject)
1814{
1815	struct perf_header *header = &inject->session->header;
1816	int fd = perf_data__fd(inject->session->data);
1817
1818	return perf_header__process_sections(header, fd, inject, save_section_info_cb);
1819}
1820
1821static bool keep_feat(int feat)
1822{
1823	switch (feat) {
1824	/* Keep original information that describes the machine or software */
1825	case HEADER_TRACING_DATA:
1826	case HEADER_HOSTNAME:
1827	case HEADER_OSRELEASE:
1828	case HEADER_VERSION:
1829	case HEADER_ARCH:
1830	case HEADER_NRCPUS:
1831	case HEADER_CPUDESC:
1832	case HEADER_CPUID:
1833	case HEADER_TOTAL_MEM:
1834	case HEADER_CPU_TOPOLOGY:
1835	case HEADER_NUMA_TOPOLOGY:
1836	case HEADER_PMU_MAPPINGS:
1837	case HEADER_CACHE:
1838	case HEADER_MEM_TOPOLOGY:
1839	case HEADER_CLOCKID:
1840	case HEADER_BPF_PROG_INFO:
1841	case HEADER_BPF_BTF:
1842	case HEADER_CPU_PMU_CAPS:
1843	case HEADER_CLOCK_DATA:
1844	case HEADER_HYBRID_TOPOLOGY:
1845	case HEADER_PMU_CAPS:
1846		return true;
1847	/* Information that can be updated */
1848	case HEADER_BUILD_ID:
1849	case HEADER_CMDLINE:
1850	case HEADER_EVENT_DESC:
1851	case HEADER_BRANCH_STACK:
1852	case HEADER_GROUP_DESC:
1853	case HEADER_AUXTRACE:
1854	case HEADER_STAT:
1855	case HEADER_SAMPLE_TIME:
1856	case HEADER_DIR_FORMAT:
1857	case HEADER_COMPRESSED:
1858	default:
1859		return false;
1860	};
1861}
1862
1863static int read_file(int fd, u64 offs, void *buf, size_t sz)
1864{
1865	ssize_t ret = preadn(fd, buf, sz, offs);
1866
1867	if (ret < 0)
1868		return -errno;
1869	if ((size_t)ret != sz)
1870		return -EINVAL;
1871	return 0;
1872}
1873
1874static int feat_copy(struct perf_inject *inject, int feat, struct feat_writer *fw)
1875{
1876	int fd = perf_data__fd(inject->session->data);
1877	u64 offs = inject->secs[feat].offset;
1878	size_t sz = inject->secs[feat].size;
1879	void *buf = malloc(sz);
1880	int ret;
1881
1882	if (!buf)
1883		return -ENOMEM;
1884
1885	ret = read_file(fd, offs, buf, sz);
1886	if (ret)
1887		goto out_free;
1888
1889	ret = fw->write(fw, buf, sz);
1890out_free:
1891	free(buf);
1892	return ret;
1893}
1894
1895struct inject_fc {
1896	struct feat_copier fc;
1897	struct perf_inject *inject;
1898};
1899
1900static int feat_copy_cb(struct feat_copier *fc, int feat, struct feat_writer *fw)
1901{
1902	struct inject_fc *inj_fc = container_of(fc, struct inject_fc, fc);
1903	struct perf_inject *inject = inj_fc->inject;
1904	int ret;
1905
1906	if (!inject->secs[feat].offset ||
1907	    !keep_feat(feat))
1908		return 0;
1909
1910	ret = feat_copy(inject, feat, fw);
1911	if (ret < 0)
1912		return ret;
1913
1914	return 1; /* Feature section copied */
1915}
1916
1917static int copy_kcore_dir(struct perf_inject *inject)
1918{
1919	char *cmd;
1920	int ret;
1921
1922	ret = asprintf(&cmd, "cp -r -n %s/kcore_dir* %s >/dev/null 2>&1",
1923		       inject->input_name, inject->output.path);
1924	if (ret < 0)
1925		return ret;
1926	pr_debug("%s\n", cmd);
1927	ret = system(cmd);
1928	free(cmd);
1929	return ret;
1930}
1931
1932static int guest_session__copy_kcore_dir(struct guest_session *gs)
1933{
1934	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
1935	char *cmd;
1936	int ret;
1937
1938	ret = asprintf(&cmd, "cp -r -n %s/kcore_dir %s/kcore_dir__%u >/dev/null 2>&1",
1939		       gs->perf_data_file, inject->output.path, gs->machine_pid);
1940	if (ret < 0)
1941		return ret;
1942	pr_debug("%s\n", cmd);
1943	ret = system(cmd);
1944	free(cmd);
1945	return ret;
1946}
1947
1948static int output_fd(struct perf_inject *inject)
1949{
1950	return inject->in_place_update ? -1 : perf_data__fd(&inject->output);
1951}
1952
1953static int __cmd_inject(struct perf_inject *inject)
1954{
1955	int ret = -EINVAL;
1956	struct guest_session *gs = &inject->guest_session;
1957	struct perf_session *session = inject->session;
1958	int fd = output_fd(inject);
1959	u64 output_data_offset;
1960
1961	signal(SIGINT, sig_handler);
1962
1963	if (inject->build_ids || inject->sched_stat ||
1964	    inject->itrace_synth_opts.set || inject->build_id_all) {
1965		inject->tool.mmap	  = perf_event__repipe_mmap;
1966		inject->tool.mmap2	  = perf_event__repipe_mmap2;
1967		inject->tool.fork	  = perf_event__repipe_fork;
1968#ifdef HAVE_LIBTRACEEVENT
1969		inject->tool.tracing_data = perf_event__repipe_tracing_data;
1970#endif
1971	}
1972
1973	output_data_offset = perf_session__data_offset(session->evlist);
1974
1975	if (inject->build_id_all) {
1976		inject->tool.mmap	  = perf_event__repipe_buildid_mmap;
1977		inject->tool.mmap2	  = perf_event__repipe_buildid_mmap2;
1978	} else if (inject->build_ids) {
1979		inject->tool.sample = perf_event__inject_buildid;
1980	} else if (inject->sched_stat) {
1981		struct evsel *evsel;
1982
1983		evlist__for_each_entry(session->evlist, evsel) {
1984			const char *name = evsel__name(evsel);
1985
1986			if (!strcmp(name, "sched:sched_switch")) {
1987				if (evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID"))
1988					return -EINVAL;
1989
1990				evsel->handler = perf_inject__sched_switch;
1991			} else if (!strcmp(name, "sched:sched_process_exit"))
1992				evsel->handler = perf_inject__sched_process_exit;
1993#ifdef HAVE_LIBTRACEEVENT
1994			else if (!strncmp(name, "sched:sched_stat_", 17))
1995				evsel->handler = perf_inject__sched_stat;
1996#endif
1997		}
1998	} else if (inject->itrace_synth_opts.vm_time_correlation) {
1999		session->itrace_synth_opts = &inject->itrace_synth_opts;
2000		memset(&inject->tool, 0, sizeof(inject->tool));
2001		inject->tool.id_index	    = perf_event__process_id_index;
2002		inject->tool.auxtrace_info  = perf_event__process_auxtrace_info;
2003		inject->tool.auxtrace	    = perf_event__process_auxtrace;
2004		inject->tool.auxtrace_error = perf_event__process_auxtrace_error;
2005		inject->tool.ordered_events = true;
2006		inject->tool.ordering_requires_timestamps = true;
2007	} else if (inject->itrace_synth_opts.set) {
2008		session->itrace_synth_opts = &inject->itrace_synth_opts;
2009		inject->itrace_synth_opts.inject = true;
2010		inject->tool.comm	    = perf_event__repipe_comm;
2011		inject->tool.namespaces	    = perf_event__repipe_namespaces;
2012		inject->tool.exit	    = perf_event__repipe_exit;
2013		inject->tool.id_index	    = perf_event__process_id_index;
2014		inject->tool.auxtrace_info  = perf_event__process_auxtrace_info;
2015		inject->tool.auxtrace	    = perf_event__process_auxtrace;
2016		inject->tool.aux	    = perf_event__drop_aux;
2017		inject->tool.itrace_start   = perf_event__drop_aux;
2018		inject->tool.aux_output_hw_id = perf_event__drop_aux;
2019		inject->tool.ordered_events = true;
2020		inject->tool.ordering_requires_timestamps = true;
2021		/* Allow space in the header for new attributes */
2022		output_data_offset = roundup(8192 + session->header.data_offset, 4096);
2023		if (inject->strip)
2024			strip_init(inject);
2025	} else if (gs->perf_data_file) {
2026		char *name = gs->perf_data_file;
2027
2028		/*
2029		 * Not strictly necessary, but keep these events in order wrt
2030		 * guest events.
2031		 */
2032		inject->tool.mmap		= host__repipe;
2033		inject->tool.mmap2		= host__repipe;
2034		inject->tool.comm		= host__repipe;
2035		inject->tool.fork		= host__repipe;
2036		inject->tool.exit		= host__repipe;
2037		inject->tool.lost		= host__repipe;
2038		inject->tool.context_switch	= host__repipe;
2039		inject->tool.ksymbol		= host__repipe;
2040		inject->tool.text_poke		= host__repipe;
2041		/*
2042		 * Once the host session has initialized, set up sample ID
2043		 * mapping and feed in guest attrs, build IDs and initial
2044		 * events.
2045		 */
2046		inject->tool.finished_init	= host__finished_init;
2047		/* Obey finished round ordering */
2048		inject->tool.finished_round	= host__finished_round;
2049		/* Keep track of which CPU a VCPU is running on */
2050		inject->tool.context_switch	= host__context_switch;
2051		/*
2052		 * Must order events to be able to obey finished round
2053		 * ordering.
2054		 */
2055		inject->tool.ordered_events	= true;
2056		inject->tool.ordering_requires_timestamps = true;
2057		/* Set up a separate session to process guest perf.data file */
2058		ret = guest_session__start(gs, name, session->data->force);
2059		if (ret) {
2060			pr_err("Failed to process %s, error %d\n", name, ret);
2061			return ret;
2062		}
2063		/* Allow space in the header for guest attributes */
2064		output_data_offset += gs->session->header.data_offset;
2065		output_data_offset = roundup(output_data_offset, 4096);
2066	}
2067
2068	if (!inject->itrace_synth_opts.set)
2069		auxtrace_index__free(&session->auxtrace_index);
2070
2071	if (!inject->is_pipe && !inject->in_place_update)
2072		lseek(fd, output_data_offset, SEEK_SET);
2073
2074	ret = perf_session__process_events(session);
2075	if (ret)
2076		return ret;
2077
2078	if (gs->session) {
2079		/*
2080		 * Remaining guest events have later timestamps. Flush them
2081		 * out to file.
2082		 */
2083		ret = guest_session__flush_events(gs);
2084		if (ret) {
2085			pr_err("Failed to flush guest events\n");
2086			return ret;
2087		}
2088	}
2089
2090	if (!inject->is_pipe && !inject->in_place_update) {
2091		struct inject_fc inj_fc = {
2092			.fc.copy = feat_copy_cb,
2093			.inject = inject,
2094		};
2095
2096		if (inject->build_ids)
2097			perf_header__set_feat(&session->header,
2098					      HEADER_BUILD_ID);
2099		/*
2100		 * Keep all buildids when there is unprocessed AUX data because
2101		 * it is not known which ones the AUX trace hits.
2102		 */
2103		if (perf_header__has_feat(&session->header, HEADER_BUILD_ID) &&
2104		    inject->have_auxtrace && !inject->itrace_synth_opts.set)
2105			dsos__hit_all(session);
2106		/*
2107		 * The AUX areas have been removed and replaced with
2108		 * synthesized hardware events, so clear the feature flag.
2109		 */
2110		if (inject->itrace_synth_opts.set) {
2111			perf_header__clear_feat(&session->header,
2112						HEADER_AUXTRACE);
2113			if (inject->itrace_synth_opts.last_branch ||
2114			    inject->itrace_synth_opts.add_last_branch)
2115				perf_header__set_feat(&session->header,
2116						      HEADER_BRANCH_STACK);
2117		}
2118		session->header.data_offset = output_data_offset;
2119		session->header.data_size = inject->bytes_written;
2120		ret = perf_session__inject_header(session, session->evlist, fd, &inj_fc.fc);
		if (ret)
			return ret;
2121
2122		if (inject->copy_kcore_dir) {
2123			ret = copy_kcore_dir(inject);
2124			if (ret) {
2125				pr_err("Failed to copy kcore\n");
2126				return ret;
2127			}
2128		}
2129		if (gs->copy_kcore_dir) {
2130			ret = guest_session__copy_kcore_dir(gs);
2131			if (ret) {
2132				pr_err("Failed to copy guest kcore\n");
2133				return ret;
2134			}
2135		}
2136	}
2137
2138	return ret;
2139}
2140
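/*
 * Typical invocations (illustrative examples in the spirit of
 * perf-inject(1); exact options depend on the use case):
 *
 *	perf record -o - -- workload | perf inject -b | perf report -i -
 *	perf inject -i perf.data --itrace -o perf.data.new
 *
 * The first form repipes a live event stream while injecting build-id
 * events for sampled DSOs; the second rewrites a file, replacing AUX
 * area trace data with synthesized events.
 */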
2141int cmd_inject(int argc, const char **argv)
2142{
2143	struct perf_inject inject = {
2144		.tool = {
2145			.sample		= perf_event__repipe_sample,
2146			.read		= perf_event__repipe_sample,
2147			.mmap		= perf_event__repipe,
2148			.mmap2		= perf_event__repipe,
2149			.comm		= perf_event__repipe,
2150			.namespaces	= perf_event__repipe,
2151			.cgroup		= perf_event__repipe,
2152			.fork		= perf_event__repipe,
2153			.exit		= perf_event__repipe,
2154			.lost		= perf_event__repipe,
2155			.lost_samples	= perf_event__repipe,
2156			.aux		= perf_event__repipe,
2157			.itrace_start	= perf_event__repipe,
2158			.aux_output_hw_id = perf_event__repipe,
2159			.context_switch	= perf_event__repipe,
2160			.throttle	= perf_event__repipe,
2161			.unthrottle	= perf_event__repipe,
2162			.ksymbol	= perf_event__repipe,
2163			.bpf		= perf_event__repipe,
2164			.text_poke	= perf_event__repipe,
2165			.attr		= perf_event__repipe_attr,
2166			.event_update	= perf_event__repipe_event_update,
2167			.tracing_data	= perf_event__repipe_op2_synth,
2168			.finished_round	= perf_event__repipe_oe_synth,
2169			.build_id	= perf_event__repipe_op2_synth,
2170			.id_index	= perf_event__repipe_op2_synth,
2171			.auxtrace_info	= perf_event__repipe_op2_synth,
2172			.auxtrace_error	= perf_event__repipe_op2_synth,
2173			.time_conv	= perf_event__repipe_op2_synth,
2174			.thread_map	= perf_event__repipe_op2_synth,
2175			.cpu_map	= perf_event__repipe_op2_synth,
2176			.stat_config	= perf_event__repipe_op2_synth,
2177			.stat		= perf_event__repipe_op2_synth,
2178			.stat_round	= perf_event__repipe_op2_synth,
2179			.feature	= perf_event__repipe_op2_synth,
2180			.finished_init	= perf_event__repipe_op2_synth,
2181			.compressed	= perf_event__repipe_op4_synth,
2182			.auxtrace	= perf_event__repipe_auxtrace,
2183		},
2184		.input_name  = "-",
2185		.samples = LIST_HEAD_INIT(inject.samples),
2186		.output = {
2187			.path = "-",
2188			.mode = PERF_DATA_MODE_WRITE,
2189			.use_stdio = true,
2190		},
2191	};
2192	struct perf_data data = {
2193		.mode = PERF_DATA_MODE_READ,
2194		.use_stdio = true,
2195	};
2196	int ret;
2197	bool repipe = true;
2198	const char *known_build_ids = NULL;
2199
2200	struct option options[] = {
2201		OPT_BOOLEAN('b', "build-ids", &inject.build_ids,
2202			    "Inject build-ids into the output stream"),
2203		OPT_BOOLEAN(0, "buildid-all", &inject.build_id_all,
2204			    "Inject build-ids of all DSOs into the output stream"),
2205		OPT_STRING(0, "known-build-ids", &known_build_ids,
2206			   "buildid path [,buildid path...]",
2207			   "build-ids to use for given paths"),
2208		OPT_STRING('i', "input", &inject.input_name, "file",
2209			   "input file name"),
2210		OPT_STRING('o', "output", &inject.output.path, "file",
2211			   "output file name"),
2212		OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat,
2213			    "Merge sched-stat and sched-switch for getting events "
2214			    "where and how long tasks slept"),
2215#ifdef HAVE_JITDUMP
2216		OPT_BOOLEAN('j', "jit", &inject.jit_mode, "merge jitdump files into perf.data file"),
2217#endif
2218		OPT_INCR('v', "verbose", &verbose,
2219			 "be more verbose (show build ids, etc)"),
2220		OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
2221			   "file", "vmlinux pathname"),
2222		OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
2223			    "don't load vmlinux even if found"),
2224		OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file",
2225			   "kallsyms pathname"),
2226		OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
2227		OPT_CALLBACK_OPTARG(0, "itrace", &inject.itrace_synth_opts,
2228				    NULL, "opts", "Instruction Tracing options\n"
2229				    ITRACE_HELP,
2230				    itrace_parse_synth_opts),
2231		OPT_BOOLEAN(0, "strip", &inject.strip,
2232			    "strip non-synthesized events (use with --itrace)"),
2233		OPT_CALLBACK_OPTARG(0, "vm-time-correlation", &inject, NULL, "opts",
2234				    "correlate time between VM guests and the host",
2235				    parse_vm_time_correlation),
2236		OPT_CALLBACK_OPTARG(0, "guest-data", &inject, NULL, "opts",
2237				    "inject events from a guest perf.data file",
2238				    parse_guest_data),
2239		OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory",
2240			   "guest mount directory under which every guest os"
2241			   " instance has a subdir"),
2242		OPT_END()
2243	};
2244	const char * const inject_usage[] = {
2245		"perf inject [<options>]",
2246		NULL
2247	};
2248#ifndef HAVE_JITDUMP
2249	set_option_nobuild(options, 'j', "jit", "NO_LIBELF=1", true);
2250#endif
2251	argc = parse_options(argc, argv, options, inject_usage, 0);
2252
2253	/*
2254	 * Any (unrecognized) arguments left?
2255	 */
2256	if (argc)
2257		usage_with_options(inject_usage, options);
2258
2259	if (inject.strip && !inject.itrace_synth_opts.set) {
2260		pr_err("--strip option requires --itrace option\n");
2261		return -1;
2262	}
2263
2264	if (symbol__validate_sym_arguments())
2265		return -1;
2266
2267	if (inject.in_place_update) {
2268		if (!strcmp(inject.input_name, "-")) {
2269			pr_err("Input file name required for in-place updating\n");
2270			return -1;
2271		}
2272		if (strcmp(inject.output.path, "-")) {
2273			pr_err("Output file name must not be specified for in-place updating\n");
2274			return -1;
2275		}
2276		if (!data.force && !inject.in_place_update_dry_run) {
2277			pr_err("The input file would be updated in place, "
2278				"the --force option is required.\n");
2279			return -1;
2280		}
2281		if (!inject.in_place_update_dry_run)
2282			data.in_place_update = true;
2283	} else {
2284		if (strcmp(inject.output.path, "-") && !inject.strip &&
2285		    has_kcore_dir(inject.input_name)) {
2286			inject.output.is_dir = true;
2287			inject.copy_kcore_dir = true;
2288		}
2289		if (perf_data__open(&inject.output)) {
2290			perror("failed to create output file");
2291			return -1;
2292		}
2293	}
2294
2295	data.path = inject.input_name;
2296	if (!strcmp(inject.input_name, "-") || inject.output.is_pipe) {
2297		inject.is_pipe = true;
2298		/*
2299		 * Do not repipe header when input is a regular file
2300		 * since it can either rewrite the header at the end
2301		 * or write a new pipe header.
2302		 */
2303		if (strcmp(inject.input_name, "-"))
2304			repipe = false;
2305	}
2306
2307	inject.session = __perf_session__new(&data, repipe,
2308					     output_fd(&inject),
2309					     &inject.tool);
2310	if (IS_ERR(inject.session)) {
2311		ret = PTR_ERR(inject.session);
2312		goto out_close_output;
2313	}
2314
2315	if (zstd_init(&(inject.session->zstd_data), 0) < 0)
2316		pr_warning("Decompression initialization failed.\n");
2317
2318	/* Save original section info before feature bits change */
2319	ret = save_section_info(&inject);
2320	if (ret)
2321		goto out_delete;
2322
2323	if (!data.is_pipe && inject.output.is_pipe) {
2324		ret = perf_header__write_pipe(perf_data__fd(&inject.output));
2325		if (ret < 0) {
2326			pr_err("Couldn't write a new pipe header.\n");
2327			goto out_delete;
2328		}
2329
2330		ret = perf_event__synthesize_for_pipe(&inject.tool,
2331						      inject.session,
2332						      &inject.output,
2333						      perf_event__repipe);
2334		if (ret < 0)
2335			goto out_delete;
2336	}
2337
2338	if (inject.build_ids && !inject.build_id_all) {
2339		/*
2340		 * Use ordered events to make sure the mmap records are ordered
2341		 * correctly, especially in the presence of jitted code mmaps.
2342		 * We cannot generate the buildid hit list and inject the jit
2343		 * mmaps at the same time for now.
2344		 */
2345		inject.tool.ordered_events = true;
2346		inject.tool.ordering_requires_timestamps = true;
2347		if (known_build_ids != NULL) {
2348			inject.known_build_ids =
2349				perf_inject__parse_known_build_ids(known_build_ids);
2350
2351			if (inject.known_build_ids == NULL) {
2352				pr_err("Couldn't parse known build ids.\n");
				ret = -EINVAL;
2353				goto out_delete;
2354			}
2355		}
2356	}
2357
2358	if (inject.sched_stat) {
2359		inject.tool.ordered_events = true;
2360	}
2361
2362#ifdef HAVE_JITDUMP
2363	if (inject.jit_mode) {
2364		inject.tool.mmap2	   = perf_event__jit_repipe_mmap2;
2365		inject.tool.mmap	   = perf_event__jit_repipe_mmap;
2366		inject.tool.ordered_events = true;
2367		inject.tool.ordering_requires_timestamps = true;
2368		/*
2369		 * JIT MMAP injection injects all MMAP events in one go, so it
2370		 * does not obey finished_round semantics.
2371		 */
2372		inject.tool.finished_round = perf_event__drop_oe;
2373	}
2374#endif
2375	ret = symbol__init(&inject.session->header.env);
2376	if (ret < 0)
2377		goto out_delete;
2378
2379	ret = __cmd_inject(&inject);
2380
2381	guest_session__exit(&inject.guest_session);
2382
2383out_delete:
2384	strlist__delete(inject.known_build_ids);
2385	zstd_fini(&(inject.session->zstd_data));
2386	perf_session__delete(inject.session);
2387out_close_output:
2388	if (!inject.in_place_update)
2389		perf_data__close(&inject.output);
2390	free(inject.itrace_synth_opts.vm_tm_corr_args);
2391	return ret;
2392}
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * builtin-inject.c
   4 *
   5 * Builtin inject command: Examine the live mode (stdin) event stream
   6 * and repipe it to stdout while optionally injecting additional
   7 * events into it.
   8 */
   9#include "builtin.h"
  10
  11#include "util/color.h"
  12#include "util/dso.h"
  13#include "util/vdso.h"
  14#include "util/evlist.h"
  15#include "util/evsel.h"
  16#include "util/map.h"
  17#include "util/session.h"
  18#include "util/tool.h"
  19#include "util/debug.h"
  20#include "util/build-id.h"
  21#include "util/data.h"
  22#include "util/auxtrace.h"
  23#include "util/jit.h"
  24#include "util/string2.h"
  25#include "util/symbol.h"
  26#include "util/synthetic-events.h"
  27#include "util/thread.h"
  28#include "util/namespaces.h"
  29#include "util/util.h"
  30#include "util/tsc.h"
  31
  32#include <internal/lib.h>
  33
  34#include <linux/err.h>
  35#include <subcmd/parse-options.h>
  36#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
  37
  38#include <linux/list.h>
  39#include <linux/string.h>
  40#include <linux/zalloc.h>
  41#include <linux/hash.h>
  42#include <ctype.h>
  43#include <errno.h>
  44#include <signal.h>
  45#include <inttypes.h>
  46
  47struct guest_event {
  48	struct perf_sample		sample;
  49	union perf_event		*event;
  50	char				*event_buf;
  51};
  52
  53struct guest_id {
  54	/* hlist_node must be first, see free_hlist() */
  55	struct hlist_node		node;
  56	u64				id;
  57	u64				host_id;
  58	u32				vcpu;
  59};
  60
  61struct guest_tid {
  62	/* hlist_node must be first, see free_hlist() */
  63	struct hlist_node		node;
  64	/* Thread ID of QEMU thread */
  65	u32				tid;
  66	u32				vcpu;
  67};
  68
  69struct guest_vcpu {
  70	/* Current host CPU */
  71	u32				cpu;
  72	/* Thread ID of QEMU thread */
  73	u32				tid;
  74};
  75
  76struct guest_session {
  77	char				*perf_data_file;
  78	u32				machine_pid;
  79	u64				time_offset;
  80	double				time_scale;
  81	struct perf_tool		tool;
  82	struct perf_data		data;
  83	struct perf_session		*session;
  84	char				*tmp_file_name;
  85	int				tmp_fd;
  86	struct perf_tsc_conversion	host_tc;
  87	struct perf_tsc_conversion	guest_tc;
  88	bool				copy_kcore_dir;
  89	bool				have_tc;
  90	bool				fetched;
  91	bool				ready;
  92	u16				dflt_id_hdr_size;
  93	u64				dflt_id;
  94	u64				highest_id;
  95	/* Array of guest_vcpu */
  96	struct guest_vcpu		*vcpu;
  97	size_t				vcpu_cnt;
  98	/* Hash table for guest_id */
  99	struct hlist_head		heads[PERF_EVLIST__HLIST_SIZE];
 100	/* Hash table for guest_tid */
 101	struct hlist_head		tids[PERF_EVLIST__HLIST_SIZE];
 102	/* Place to stash next guest event */
 103	struct guest_event		ev;
 104};
 105
 106enum build_id_rewrite_style {
 107	BID_RWS__NONE = 0,
 108	BID_RWS__INJECT_HEADER_LAZY,
 109	BID_RWS__INJECT_HEADER_ALL,
 110	BID_RWS__MMAP2_BUILDID_ALL,
 111	BID_RWS__MMAP2_BUILDID_LAZY,
 112};
 113
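/*
 * Editor's note (summary inferred from the code below, not in the
 * original source): the _LAZY styles emit a build ID only the first
 * time a sample hits a DSO or map, while the _ALL styles emit one for
 * every DSO.  INJECT_HEADER_* styles record build IDs in the
 * HEADER_BUILD_ID feature section; MMAP2_BUILDID_* styles synthesize
 * mmap2 events that carry the build ID inline, flagged with
 * PERF_RECORD_MISC_MMAP_BUILD_ID.
 */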
 114struct perf_inject {
 115	struct perf_tool	tool;
 116	struct perf_session	*session;
 117	enum build_id_rewrite_style build_id_style;
 118	bool			sched_stat;
 119	bool			have_auxtrace;
 120	bool			strip;
 121	bool			jit_mode;
 122	bool			in_place_update;
 123	bool			in_place_update_dry_run;
 124	bool			copy_kcore_dir;
 125	const char		*input_name;
 126	struct perf_data	output;
 127	u64			bytes_written;
 128	u64			aux_id;
 129	struct list_head	samples;
 130	struct itrace_synth_opts itrace_synth_opts;
 131	char			*event_copy;
 132	struct perf_file_section secs[HEADER_FEAT_BITS];
 133	struct guest_session	guest_session;
 134	struct strlist		*known_build_ids;
 135	const struct evsel	*mmap_evsel;
 136};
 137
 138struct event_entry {
 139	struct list_head node;
 140	u32		 tid;
 141	union perf_event event[];
 142};
 143
 144static int tool__inject_build_id(const struct perf_tool *tool,
 145				 struct perf_sample *sample,
 146				 struct machine *machine,
 147				 const struct evsel *evsel,
 148				 __u16 misc,
 149				 const char *filename,
 150				 struct dso *dso, u32 flags);
 151static int tool__inject_mmap2_build_id(const struct perf_tool *tool,
 152				      struct perf_sample *sample,
 153				      struct machine *machine,
 154				      const struct evsel *evsel,
 155				      __u16 misc,
 156				      __u32 pid, __u32 tid,
 157				      __u64 start, __u64 len, __u64 pgoff,
 158				      struct dso *dso,
 159				      __u32 prot, __u32 flags,
 160				      const char *filename);
 161
 162static int output_bytes(struct perf_inject *inject, void *buf, size_t sz)
 163{
 164	ssize_t size;
 165
 166	size = perf_data__write(&inject->output, buf, sz);
 167	if (size < 0)
 168		return -errno;
 169
 170	inject->bytes_written += size;
 171	return 0;
 172}
 173
 174static int perf_event__repipe_synth(const struct perf_tool *tool,
 175				    union perf_event *event)
 176
 177{
 178	struct perf_inject *inject = container_of(tool, struct perf_inject,
 179						  tool);
 180
 181	return output_bytes(inject, event, event->header.size);
 182}
 183
 184static int perf_event__repipe_oe_synth(const struct perf_tool *tool,
 185				       union perf_event *event,
 186				       struct ordered_events *oe __maybe_unused)
 187{
 188	return perf_event__repipe_synth(tool, event);
 189}
 190
 191#ifdef HAVE_JITDUMP
 192static int perf_event__drop_oe(const struct perf_tool *tool __maybe_unused,
 193			       union perf_event *event __maybe_unused,
 194			       struct ordered_events *oe __maybe_unused)
 195{
 196	return 0;
 197}
 198#endif
 199
 200static int perf_event__repipe_op2_synth(struct perf_session *session,
 201					union perf_event *event)
 202{
 203	return perf_event__repipe_synth(session->tool, event);
 204}
 205
 206static int perf_event__repipe_op4_synth(struct perf_session *session,
 207					union perf_event *event,
 208					u64 data __maybe_unused,
 209					const char *str __maybe_unused)
 210{
 211	return perf_event__repipe_synth(session->tool, event);
 212}
 213
 214static int perf_event__repipe_attr(const struct perf_tool *tool,
 215				   union perf_event *event,
 216				   struct evlist **pevlist)
 217{
 218	struct perf_inject *inject = container_of(tool, struct perf_inject,
 219						  tool);
 220	int ret;
 221
 222	ret = perf_event__process_attr(tool, event, pevlist);
 223	if (ret)
 224		return ret;
 225
 226	/* If the output isn't a pipe then the attributes will be written as part of the header. */
 227	if (!inject->output.is_pipe)
 228		return 0;
 229
 230	return perf_event__repipe_synth(tool, event);
 231}
 232
 233static int perf_event__repipe_event_update(const struct perf_tool *tool,
 234					   union perf_event *event,
 235					   struct evlist **pevlist __maybe_unused)
 236{
 237	return perf_event__repipe_synth(tool, event);
 238}
 239
 240#ifdef HAVE_AUXTRACE_SUPPORT
 241
 242static int copy_bytes(struct perf_inject *inject, struct perf_data *data, off_t size)
 243{
 244	char buf[4096];
 245	ssize_t ssz;
 246	int ret;
 247
 248	while (size > 0) {
 249		ssz = perf_data__read(data, buf, min(size, (off_t)sizeof(buf)));
 250		if (ssz < 0)
 251			return -errno;
 252		ret = output_bytes(inject, buf, ssz);
 253		if (ret)
 254			return ret;
 255		size -= ssz;
 256	}
 257
 258	return 0;
 259}
 260
 261static s64 perf_event__repipe_auxtrace(struct perf_session *session,
 262				       union perf_event *event)
 263{
 264	const struct perf_tool *tool = session->tool;
 265	struct perf_inject *inject = container_of(tool, struct perf_inject,
 266						  tool);
 267	int ret;
 268
 269	inject->have_auxtrace = true;
 270
 271	if (!inject->output.is_pipe) {
 272		off_t offset;
 273
 274		offset = lseek(inject->output.file.fd, 0, SEEK_CUR);
 275		if (offset == -1)
 276			return -errno;
 277		ret = auxtrace_index__auxtrace_event(&session->auxtrace_index,
 278						     event, offset);
 279		if (ret < 0)
 280			return ret;
 281	}
 282
 283	if (perf_data__is_pipe(session->data) || !session->one_mmap) {
 284		ret = output_bytes(inject, event, event->header.size);
 285		if (ret < 0)
 286			return ret;
 287		ret = copy_bytes(inject, session->data,
 288				 event->auxtrace.size);
 289	} else {
 290		ret = output_bytes(inject, event,
 291				   event->header.size + event->auxtrace.size);
 292	}
 293	if (ret < 0)
 294		return ret;
 295
 296	return event->auxtrace.size;
 297}
 298
 299#else
 300
 301static s64
 302perf_event__repipe_auxtrace(struct perf_session *session __maybe_unused,
 303			    union perf_event *event __maybe_unused)
 304{
 305	pr_err("AUX area tracing not supported\n");
 306	return -EINVAL;
 307}
 308
 309#endif
 310
 311static int perf_event__repipe(const struct perf_tool *tool,
 312			      union perf_event *event,
 313			      struct perf_sample *sample __maybe_unused,
 314			      struct machine *machine __maybe_unused)
 315{
 316	return perf_event__repipe_synth(tool, event);
 317}
 318
 319static int perf_event__drop(const struct perf_tool *tool __maybe_unused,
 320			    union perf_event *event __maybe_unused,
 321			    struct perf_sample *sample __maybe_unused,
 322			    struct machine *machine __maybe_unused)
 323{
 324	return 0;
 325}
 326
 327static int perf_event__drop_aux(const struct perf_tool *tool,
 328				union perf_event *event __maybe_unused,
 329				struct perf_sample *sample,
 330				struct machine *machine __maybe_unused)
 331{
 332	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
 333
 334	if (!inject->aux_id)
 335		inject->aux_id = sample->id;
 336
 337	return 0;
 338}
 339
 340static union perf_event *
 341perf_inject__cut_auxtrace_sample(struct perf_inject *inject,
 342				 union perf_event *event,
 343				 struct perf_sample *sample)
 344{
 345	size_t sz1 = sample->aux_sample.data - (void *)event;
 346	size_t sz2 = event->header.size - sample->aux_sample.size - sz1;
 347	union perf_event *ev;
 348
 349	if (inject->event_copy == NULL) {
 350		inject->event_copy = malloc(PERF_SAMPLE_MAX_SIZE);
 351		if (!inject->event_copy)
 352			return ERR_PTR(-ENOMEM);
 353	}
 354	ev = (union perf_event *)inject->event_copy;
 355	if (sz1 > event->header.size || sz2 > event->header.size ||
 356	    sz1 + sz2 > event->header.size ||
 357	    sz1 < sizeof(struct perf_event_header) + sizeof(u64))
 358		return event;
 359
 360	memcpy(ev, event, sz1);
 361	memcpy((void *)ev + sz1, (void *)event + event->header.size - sz2, sz2);
 362	ev->header.size = sz1 + sz2;
 363	((u64 *)((void *)ev + sz1))[-1] = 0;
 364
 365	return ev;
 366}
 367
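/*
 * Layout sketch (illustrative): an event carrying aux sample data is
 *
 *	| header + leading fields | size (u64) | aux data | trailing fields |
 *	|<---------- sz1 -------------------->|<-- cut -->|<----- sz2 ----->|
 *
 * The copy above keeps the sz1 and sz2 parts, drops the aux payload,
 * and zeroes the size word just before where the payload was, i.e.
 * ((u64 *)((void *)ev + sz1))[-1] = 0.
 */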
 368typedef int (*inject_handler)(const struct perf_tool *tool,
 369			      union perf_event *event,
 370			      struct perf_sample *sample,
 371			      struct evsel *evsel,
 372			      struct machine *machine);
 373
 374static int perf_event__repipe_sample(const struct perf_tool *tool,
 375				     union perf_event *event,
 376				     struct perf_sample *sample,
 377				     struct evsel *evsel,
 378				     struct machine *machine)
 379{
 380	struct perf_inject *inject = container_of(tool, struct perf_inject,
 381						  tool);
 382
 383	if (evsel && evsel->handler) {
 384		inject_handler f = evsel->handler;
 385		return f(tool, event, sample, evsel, machine);
 386	}
 387
 388	build_id__mark_dso_hit(tool, event, sample, evsel, machine);
 389
 390	if (inject->itrace_synth_opts.set && sample->aux_sample.size) {
 391		event = perf_inject__cut_auxtrace_sample(inject, event, sample);
 392		if (IS_ERR(event))
 393			return PTR_ERR(event);
 394	}
 395
 396	return perf_event__repipe_synth(tool, event);
 397}
 398
 399static struct dso *findnew_dso(int pid, int tid, const char *filename,
 400			       const struct dso_id *id, struct machine *machine)
 401{
 402	struct thread *thread;
 403	struct nsinfo *nsi = NULL;
 404	struct nsinfo *nnsi;
 405	struct dso *dso;
 406	bool vdso;
 407
 408	thread = machine__findnew_thread(machine, pid, tid);
 409	if (thread == NULL) {
 410		pr_err("cannot find or create a task %d/%d.\n", tid, pid);
 411		return NULL;
 412	}
 413
 414	vdso = is_vdso_map(filename);
 415	nsi = nsinfo__get(thread__nsinfo(thread));
 416
 417	if (vdso) {
 418		/* The vdso maps are always on the host and not the
 419		 * container.  Ensure that we don't use setns to look
 420		 * them up.
 421		 */
 422		nnsi = nsinfo__copy(nsi);
 423		if (nnsi) {
 424			nsinfo__put(nsi);
 425			nsinfo__clear_need_setns(nnsi);
 426			nsi = nnsi;
 427		}
 428		dso = machine__findnew_vdso(machine, thread);
 429	} else {
 430		dso = machine__findnew_dso_id(machine, filename, id);
 431	}
 432
 433	if (dso) {
 434		mutex_lock(dso__lock(dso));
 435		dso__set_nsinfo(dso, nsi);
 436		mutex_unlock(dso__lock(dso));
 437	} else
 438		nsinfo__put(nsi);
 439
 440	thread__put(thread);
 441	return dso;
 442}
 443
 444/*
 445 * The evsel used for the sample ID for mmap events. Typically stashed when
 446 * processing mmap events. If not stashed, search the evlist for the first mmap
 447 * gathering event.
 448 */
 449static const struct evsel *inject__mmap_evsel(struct perf_inject *inject)
 450{
 451	struct evsel *pos;
 452
 453	if (inject->mmap_evsel)
 454		return inject->mmap_evsel;
 455
 456	evlist__for_each_entry(inject->session->evlist, pos) {
 457		if (pos->core.attr.mmap) {
 458			inject->mmap_evsel = pos;
 459			return pos;
 460		}
 461	}
 462	pr_err("No mmap events found\n");
 463	return NULL;
 464}
 465
 466static int perf_event__repipe_common_mmap(const struct perf_tool *tool,
 467					  union perf_event *event,
 468					  struct perf_sample *sample,
 469					  struct machine *machine,
 470					  __u32 pid, __u32 tid,
 471					  __u64 start, __u64 len, __u64 pgoff,
 472					  __u32 flags, __u32 prot,
 473					  const char *filename,
 474					  const struct dso_id *dso_id,
 475					  int (*perf_event_process)(const struct perf_tool *tool,
 476								    union perf_event *event,
 477								    struct perf_sample *sample,
 478								    struct machine *machine))
 479{
 480	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
 481	struct dso *dso = NULL;
 482	bool dso_sought = false;
 483
 484#ifdef HAVE_JITDUMP
 485	if (inject->jit_mode) {
 486		u64 n = 0;
 487		int ret;
 488
 489		/* If jit marker, then inject jit mmaps and generate ELF images. */
 490		ret = jit_process(inject->session, &inject->output, machine,
 491				  filename, pid, tid, &n);
 492		if (ret < 0)
 493			return ret;
 494		if (ret) {
 495			inject->bytes_written += n;
 496			return 0;
 497		}
 498	}
 499#endif
 500	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
 501		dso = findnew_dso(pid, tid, filename, dso_id, machine);
 502		dso_sought = true;
 503		if (dso) {
 504			/* mark it not to inject build-id */
 505			dso__set_hit(dso);
 506		}
 507	}
 508	if (inject->build_id_style == BID_RWS__INJECT_HEADER_ALL) {
 509		if (!dso_sought) {
 510			dso = findnew_dso(pid, tid, filename, dso_id, machine);
 511			dso_sought = true;
 512		}
 513
 514		if (dso && !dso__hit(dso)) {
 515			struct evsel *evsel = evlist__event2evsel(inject->session->evlist, event);
 516
 517			if (evsel) {
 518				dso__set_hit(dso);
 519				tool__inject_build_id(tool, sample, machine, evsel,
 520						      /*misc=*/sample->cpumode,
 521						      filename, dso, flags);
 522			}
 523		}
 524	} else {
 525		int err;
 526
 527		/*
 528		 * Remember the evsel for lazy build id generation. It is used
 529		 * for the sample id header type.
 530		 */
 531		if ((inject->build_id_style == BID_RWS__INJECT_HEADER_LAZY ||
 532		     inject->build_id_style == BID_RWS__MMAP2_BUILDID_LAZY) &&
 533		    !inject->mmap_evsel)
 534			inject->mmap_evsel = evlist__event2evsel(inject->session->evlist, event);
 535
 536		/* Create the thread, map, etc. Not done for the unordered inject all case. */
 537		err = perf_event_process(tool, event, sample, machine);
 538
 539		if (err) {
 540			dso__put(dso);
 541			return err;
 542		}
 543	}
 544	if ((inject->build_id_style == BID_RWS__MMAP2_BUILDID_ALL) &&
 545	    !(event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID)) {
 546		struct evsel *evsel = evlist__event2evsel(inject->session->evlist, event);
 547
 548		if (evsel && !dso_sought) {
 549			dso = findnew_dso(pid, tid, filename, dso_id, machine);
 550			dso_sought = true;
 551		}
 552		if (evsel && dso &&
 553		    !tool__inject_mmap2_build_id(tool, sample, machine, evsel,
 554						 sample->cpumode | PERF_RECORD_MISC_MMAP_BUILD_ID,
 555						 pid, tid, start, len, pgoff,
 556						 dso,
 557						 prot, flags,
 558						 filename)) {
 559			/* Injected mmap2 so no need to repipe. */
 560			dso__put(dso);
 561			return 0;
 562		}
 563	}
 564	dso__put(dso);
 565	if (inject->build_id_style == BID_RWS__MMAP2_BUILDID_LAZY)
 566		return 0;
 567
 568	return perf_event__repipe(tool, event, sample, machine);
 569}
 570
 571static int perf_event__repipe_mmap(const struct perf_tool *tool,
 572				union perf_event *event,
 573				struct perf_sample *sample,
 574				struct machine *machine)
 575{
 576	return perf_event__repipe_common_mmap(
 577		tool, event, sample, machine,
 578		event->mmap.pid, event->mmap.tid,
 579		event->mmap.start, event->mmap.len, event->mmap.pgoff,
 580		/*flags=*/0, PROT_EXEC,
 581		event->mmap.filename, /*dso_id=*/NULL,
 582		perf_event__process_mmap);
 583}
 584
 585static int perf_event__repipe_mmap2(const struct perf_tool *tool,
 586				union perf_event *event,
 587				struct perf_sample *sample,
 588				struct machine *machine)
 589{
 590	struct dso_id id;
 591	struct dso_id *dso_id = NULL;
 592
 593	if (!(event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID)) {
 594		id.maj = event->mmap2.maj;
 595		id.min = event->mmap2.min;
 596		id.ino = event->mmap2.ino;
 597		id.ino_generation = event->mmap2.ino_generation;
 598		dso_id = &id;
 599	}
 600
 601	return perf_event__repipe_common_mmap(
 602		tool, event, sample, machine,
 603		event->mmap2.pid, event->mmap2.tid,
 604		event->mmap2.start, event->mmap2.len, event->mmap2.pgoff,
 605		event->mmap2.flags, event->mmap2.prot,
 606		event->mmap2.filename, dso_id,
 607		perf_event__process_mmap2);
 608}
 609
 610static int perf_event__repipe_fork(const struct perf_tool *tool,
 611				   union perf_event *event,
 612				   struct perf_sample *sample,
 613				   struct machine *machine)
 614{
 615	int err;
 616
 617	err = perf_event__process_fork(tool, event, sample, machine);
 618	perf_event__repipe(tool, event, sample, machine);
 619
 620	return err;
 621}
 622
 623static int perf_event__repipe_comm(const struct perf_tool *tool,
 624				   union perf_event *event,
 625				   struct perf_sample *sample,
 626				   struct machine *machine)
 627{
 628	int err;
 629
 630	err = perf_event__process_comm(tool, event, sample, machine);
 631	perf_event__repipe(tool, event, sample, machine);
 632
 633	return err;
 634}
 635
 636static int perf_event__repipe_namespaces(const struct perf_tool *tool,
 637					 union perf_event *event,
 638					 struct perf_sample *sample,
 639					 struct machine *machine)
 640{
 641	int err = perf_event__process_namespaces(tool, event, sample, machine);
 642
 643	perf_event__repipe(tool, event, sample, machine);
 644
 645	return err;
 646}
 647
 648static int perf_event__repipe_exit(const struct perf_tool *tool,
 649				   union perf_event *event,
 650				   struct perf_sample *sample,
 651				   struct machine *machine)
 652{
 653	int err;
 654
 655	err = perf_event__process_exit(tool, event, sample, machine);
 656	perf_event__repipe(tool, event, sample, machine);
 657
 658	return err;
 659}
 660
 661#ifdef HAVE_LIBTRACEEVENT
 662static int perf_event__repipe_tracing_data(struct perf_session *session,
 663					   union perf_event *event)
 664{
 665	perf_event__repipe_synth(session->tool, event);
 666
 667	return perf_event__process_tracing_data(session, event);
 668}
 669#endif
 670
 671static int dso__read_build_id(struct dso *dso)
 672{
 673	struct nscookie nsc;
 674
 675	if (dso__has_build_id(dso))
 676		return 0;
 677
 678	mutex_lock(dso__lock(dso));
 679	nsinfo__mountns_enter(dso__nsinfo(dso), &nsc);
 680	if (filename__read_build_id(dso__long_name(dso), dso__bid(dso)) > 0)
 681		dso__set_has_build_id(dso);
 682	else if (dso__nsinfo(dso)) {
 683		char *new_name = dso__filename_with_chroot(dso, dso__long_name(dso));
 684
 685		if (new_name && filename__read_build_id(new_name, dso__bid(dso)) > 0)
 686			dso__set_has_build_id(dso);
 687		free(new_name);
 688	}
 689	nsinfo__mountns_exit(&nsc);
 690	mutex_unlock(dso__lock(dso));
 691
 692	return dso__has_build_id(dso) ? 0 : -1;
 693}
 694
 695static struct strlist *perf_inject__parse_known_build_ids(
 696	const char *known_build_ids_string)
 697{
 698	struct str_node *pos, *tmp;
 699	struct strlist *known_build_ids;
 700	int bid_len;
 701
 702	known_build_ids = strlist__new(known_build_ids_string, NULL);
 703	if (known_build_ids == NULL)
 704		return NULL;
 705	strlist__for_each_entry_safe(pos, tmp, known_build_ids) {
 706		const char *build_id, *dso_name;
 707
 708		build_id = skip_spaces(pos->s);
 709		dso_name = strchr(build_id, ' ');
 710		if (dso_name == NULL) {
 711			strlist__remove(known_build_ids, pos);
 712			continue;
 713		}
 714		bid_len = dso_name - pos->s;
 715		dso_name = skip_spaces(dso_name);
 716		if (bid_len % 2 != 0 || bid_len >= SBUILD_ID_SIZE) {
 717			strlist__remove(known_build_ids, pos);
 718			continue;
 719		}
 720		for (int ix = 0; 2 * ix + 1 < bid_len; ++ix) {
 721			if (!isxdigit(build_id[2 * ix]) ||
 722			    !isxdigit(build_id[2 * ix + 1])) {
 723				strlist__remove(known_build_ids, pos);
 724				break;
 725			}
 726		}
 727	}
 728	return known_build_ids;
 729}
 730
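/*
 * Example input (illustrative, hypothetical build IDs) matching the
 * "buildid path [,buildid path...]" format of --known-build-ids:
 *
 *	"a1b2c3d4e5f6a7b8c9d0 /usr/lib/libc.so.6,0011223344 /usr/bin/ls"
 *
 * Entries are dropped unless the build ID is an even-length hex string
 * shorter than SBUILD_ID_SIZE and is followed by a path.
 */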
 731static bool perf_inject__lookup_known_build_id(struct perf_inject *inject,
 732					       struct dso *dso)
 733{
 734	struct str_node *pos;
 735	int bid_len;
 736
 737	strlist__for_each_entry(pos, inject->known_build_ids) {
 738		const char *build_id, *dso_name;
 739
 740		build_id = skip_spaces(pos->s);
 741		dso_name = strchr(build_id, ' ');
 742		bid_len = dso_name - pos->s;
 743		dso_name = skip_spaces(dso_name);
 744		if (strcmp(dso__long_name(dso), dso_name))
 745			continue;
 746		for (int ix = 0; 2 * ix + 1 < bid_len; ++ix) {
 747			dso__bid(dso)->data[ix] = (hex(build_id[2 * ix]) << 4 |
 748						  hex(build_id[2 * ix + 1]));
 749		}
 750		dso__bid(dso)->size = bid_len / 2;
 751		dso__set_has_build_id(dso);
 752		return true;
 753	}
 754	return false;
 755}
 756
 757static int tool__inject_build_id(const struct perf_tool *tool,
 758				 struct perf_sample *sample,
 759				 struct machine *machine,
 760				 const struct evsel *evsel,
 761				 __u16 misc,
 762				 const char *filename,
 763				 struct dso *dso, u32 flags)
 764{
 765	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
 766	int err;
 767
 768	if (is_anon_memory(filename) || flags & MAP_HUGETLB)
 769		return 0;
 770	if (is_no_dso_memory(filename))
 771		return 0;
 772
 773	if (inject->known_build_ids != NULL &&
 774	    perf_inject__lookup_known_build_id(inject, dso))
 775		return 1;
 776
 777	if (dso__read_build_id(dso) < 0) {
 778		pr_debug("no build_id found for %s\n", filename);
 779		return -1;
 780	}
 781
 782	err = perf_event__synthesize_build_id(tool, sample, machine,
 783					      perf_event__repipe,
 784					      evsel, misc, dso__bid(dso),
 785					      filename);
 786	if (err) {
 787		pr_err("Can't synthesize build_id event for %s\n", filename);
 788		return -1;
 789	}
 790
 791	return 0;
 792}
 793
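/*
 * Return convention (editor's summary of the function above): 1 when a
 * --known-build-ids entry supplied the build ID, 0 when a build_id
 * event was synthesized (or the map is anonymous/hugetlb and skipped),
 * and -1 when no build ID could be read or synthesis failed.
 */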
 794static int tool__inject_mmap2_build_id(const struct perf_tool *tool,
 795				       struct perf_sample *sample,
 796				       struct machine *machine,
 797				       const struct evsel *evsel,
 798				       __u16 misc,
 799				       __u32 pid, __u32 tid,
 800				       __u64 start, __u64 len, __u64 pgoff,
 801				       struct dso *dso,
 802				       __u32 prot, __u32 flags,
 803				       const char *filename)
 804{
 805	int err;
 806
 807	/* Return to repipe anonymous maps. */
 808	if (is_anon_memory(filename) || flags & MAP_HUGETLB)
 809		return 1;
 810	if (is_no_dso_memory(filename))
 811		return 1;
 812
 813	if (dso__read_build_id(dso)) {
 814		pr_debug("no build_id found for %s\n", filename);
 815		return -1;
 816	}
 817
 818	err = perf_event__synthesize_mmap2_build_id(tool, sample, machine,
 819						    perf_event__repipe,
 820						    evsel,
 821						    misc, pid, tid,
 822						    start, len, pgoff,
 823						    dso__bid(dso),
 824						    prot, flags,
 825						    filename);
 826	if (err) {
 827		pr_err("Can't synthesize build_id event for %s\n", filename);
 828		return -1;
 829	}
 830	return 0;
 831}
 832
 833static int mark_dso_hit(const struct perf_inject *inject,
 834			const struct perf_tool *tool,
 835			struct perf_sample *sample,
 836			struct machine *machine,
 837			const struct evsel *mmap_evsel,
 838			struct map *map, bool sample_in_dso)
 839{
 840	struct dso *dso;
 841	u16 misc = sample->cpumode;
 842
 843	if (!map)
 844		return 0;
 845
 846	if (!sample_in_dso) {
 847		u16 guest_mask = PERF_RECORD_MISC_GUEST_KERNEL |
 848			PERF_RECORD_MISC_GUEST_USER;
 849
 850		if ((misc & guest_mask) != 0) {
 851			misc &= PERF_RECORD_MISC_HYPERVISOR;
 852			misc |= __map__is_kernel(map)
 853				? PERF_RECORD_MISC_GUEST_KERNEL
 854				: PERF_RECORD_MISC_GUEST_USER;
 855		} else {
 856			misc &= PERF_RECORD_MISC_HYPERVISOR;
 857			misc |= __map__is_kernel(map)
 858				? PERF_RECORD_MISC_KERNEL
 859				: PERF_RECORD_MISC_USER;
 860		}
 861	}
 862	dso = map__dso(map);
 863	if (inject->build_id_style == BID_RWS__INJECT_HEADER_LAZY) {
 864		if (dso && !dso__hit(dso)) {
 865			dso__set_hit(dso);
 866			tool__inject_build_id(tool, sample, machine,
 867					     mmap_evsel, misc, dso__long_name(dso), dso,
 868					     map__flags(map));
 869		}
 870	} else if (inject->build_id_style == BID_RWS__MMAP2_BUILDID_LAZY) {
 871		if (!map__hit(map)) {
 872			const struct build_id null_bid = { .size = 0 };
 873			const struct build_id *bid = dso ? dso__bid(dso) : &null_bid;
 874			const char *filename = dso ? dso__long_name(dso) : "";
 875
 876			map__set_hit(map);
 877			perf_event__synthesize_mmap2_build_id(tool, sample, machine,
 878								perf_event__repipe,
 879								mmap_evsel,
 880								misc,
 881								sample->pid, sample->tid,
 882								map__start(map),
 883								map__end(map) - map__start(map),
 884								map__pgoff(map),
 885								bid,
 886								map__prot(map),
 887								map__flags(map),
 888								filename);
 889		}
 890	}
 891	return 0;
 892}
 893
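/*
 * Editor's note (illustrative): for BID_RWS__INJECT_HEADER_LAZY the
 * first sample to hit a DSO triggers synthesis of a build_id event;
 * for BID_RWS__MMAP2_BUILDID_LAZY the first sample to hit a map
 * triggers synthesis of an mmap2 event carrying the build ID, which is
 * why the original mmap events are not repiped for that style.
 */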
 894struct mark_dso_hit_args {
 895	const struct perf_inject *inject;
 896	const struct perf_tool *tool;
 897	struct perf_sample *sample;
 898	struct machine *machine;
 899	const struct evsel *mmap_evsel;
 900};
 901
 902static int mark_dso_hit_callback(struct callchain_cursor_node *node, void *data)
 903{
 904	struct mark_dso_hit_args *args = data;
 905	struct map *map = node->ms.map;
 906
 907	return mark_dso_hit(args->inject, args->tool, args->sample, args->machine,
 908			    args->mmap_evsel, map, /*sample_in_dso=*/false);
 909}
 910
 911int perf_event__inject_buildid(const struct perf_tool *tool, union perf_event *event,
 912			       struct perf_sample *sample,
 913			       struct evsel *evsel __maybe_unused,
 914			       struct machine *machine)
 915{
 916	struct addr_location al;
 917	struct thread *thread;
 918	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
 919	struct mark_dso_hit_args args = {
 920		.inject = inject,
 921		.tool = tool,
 922		/*
 923		 * Use the parsed sample data of the sample event, which will
 924		 * have a later timestamp than the mmap event.
 925		 */
 926		.sample = sample,
 927		.machine = machine,
 928		.mmap_evsel = inject__mmap_evsel(inject),
 929	};
 930
 931	addr_location__init(&al);
 932	thread = machine__findnew_thread(machine, sample->pid, sample->tid);
 933	if (thread == NULL) {
 934		pr_err("problem processing %d event, skipping it.\n",
 935		       event->header.type);
 936		goto repipe;
 937	}
 938
 939	if (thread__find_map(thread, sample->cpumode, sample->ip, &al)) {
 940		mark_dso_hit(inject, tool, sample, machine, args.mmap_evsel, al.map,
 941			     /*sample_in_dso=*/true);
 942	}
 943
 944	sample__for_each_callchain_node(thread, evsel, sample, PERF_MAX_STACK_DEPTH,
 945					/*symbols=*/false, mark_dso_hit_callback, &args);
 946
 947	thread__put(thread);
 948repipe:
 949	perf_event__repipe(tool, event, sample, machine);
 950	addr_location__exit(&al);
 951	return 0;
 952}
 953
 954static int perf_inject__sched_process_exit(const struct perf_tool *tool,
 955					   union perf_event *event __maybe_unused,
 956					   struct perf_sample *sample,
 957					   struct evsel *evsel __maybe_unused,
 958					   struct machine *machine __maybe_unused)
 959{
 960	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
 961	struct event_entry *ent;
 962
 963	list_for_each_entry(ent, &inject->samples, node) {
 964		if (sample->tid == ent->tid) {
 965			list_del_init(&ent->node);
 966			free(ent);
 967			break;
 968		}
 969	}
 970
 971	return 0;
 972}
 973
 974static int perf_inject__sched_switch(const struct perf_tool *tool,
 975				     union perf_event *event,
 976				     struct perf_sample *sample,
 977				     struct evsel *evsel,
 978				     struct machine *machine)
 979{
 980	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
 981	struct event_entry *ent;
 982
 983	perf_inject__sched_process_exit(tool, event, sample, evsel, machine);
 984
 985	ent = malloc(event->header.size + sizeof(struct event_entry));
 986	if (ent == NULL) {
 987		color_fprintf(stderr, PERF_COLOR_RED,
 988			     "Not enough memory to process sched switch event!\n");
 989		return -1;
 990	}
 991
 992	ent->tid = sample->tid;
 993	memcpy(&ent->event, event, event->header.size);
 994	list_add(&ent->node, &inject->samples);
 995	return 0;
 996}
 997
 998#ifdef HAVE_LIBTRACEEVENT
 999static int perf_inject__sched_stat(const struct perf_tool *tool,
1000				   union perf_event *event __maybe_unused,
1001				   struct perf_sample *sample,
1002				   struct evsel *evsel,
1003				   struct machine *machine)
1004{
1005	struct event_entry *ent;
1006	union perf_event *event_sw;
1007	struct perf_sample sample_sw;
1008	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
1009	u32 pid = evsel__intval(evsel, sample, "pid");
1010
1011	list_for_each_entry(ent, &inject->samples, node) {
1012		if (pid == ent->tid)
1013			goto found;
1014	}
1015
1016	return 0;
1017found:
1018	event_sw = &ent->event[0];
1019	evsel__parse_sample(evsel, event_sw, &sample_sw);
1020
1021	sample_sw.period = sample->period;
1022	sample_sw.time	 = sample->time;
1023	perf_event__synthesize_sample(event_sw, evsel->core.attr.sample_type,
1024				      evsel->core.attr.read_format, &sample_sw);
1025	build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
1026	return perf_event__repipe(tool, event_sw, &sample_sw, machine);
1027}
1028#endif
1029
1030static struct guest_vcpu *guest_session__vcpu(struct guest_session *gs, u32 vcpu)
1031{
1032	if (realloc_array_as_needed(gs->vcpu, gs->vcpu_cnt, vcpu, NULL))
1033		return NULL;
1034	return &gs->vcpu[vcpu];
1035}
1036
1037static int guest_session__output_bytes(struct guest_session *gs, void *buf, size_t sz)
1038{
1039	ssize_t ret = writen(gs->tmp_fd, buf, sz);
1040
1041	return ret < 0 ? ret : 0;
1042}
1043
1044static int guest_session__repipe(const struct perf_tool *tool,
1045				 union perf_event *event,
1046				 struct perf_sample *sample __maybe_unused,
1047				 struct machine *machine __maybe_unused)
1048{
1049	struct guest_session *gs = container_of(tool, struct guest_session, tool);
1050
1051	return guest_session__output_bytes(gs, event, event->header.size);
1052}
1053
1054static int guest_session__map_tid(struct guest_session *gs, u32 tid, u32 vcpu)
1055{
1056	struct guest_tid *guest_tid = zalloc(sizeof(*guest_tid));
1057	int hash;
1058
1059	if (!guest_tid)
1060		return -ENOMEM;
1061
1062	guest_tid->tid = tid;
1063	guest_tid->vcpu = vcpu;
1064	hash = hash_32(guest_tid->tid, PERF_EVLIST__HLIST_BITS);
1065	hlist_add_head(&guest_tid->node, &gs->tids[hash]);
1066
1067	return 0;
1068}
1069
1070static int host_peek_vm_comms_cb(struct perf_session *session __maybe_unused,
1071				 union perf_event *event,
1072				 u64 offset __maybe_unused, void *data)
1073{
1074	struct guest_session *gs = data;
1075	unsigned int vcpu;
1076	struct guest_vcpu *guest_vcpu;
1077	int ret;
1078
1079	if (event->header.type != PERF_RECORD_COMM ||
1080	    event->comm.pid != gs->machine_pid)
1081		return 0;
1082
1083	/*
1084	 * The QEMU option -name debug-threads=on causes thread names to be
1085	 * formatted as below, although this is not an ABI. libvirt also seems to
1086	 * use this by default. Here we rely on it to tell us which thread is which VCPU.
1087	 */
1088	ret = sscanf(event->comm.comm, "CPU %u/KVM", &vcpu);
1089	if (ret <= 0)
1090		return ret;
1091	pr_debug("Found VCPU: tid %u comm %s vcpu %u\n",
1092		 event->comm.tid, event->comm.comm, vcpu);
1093	if (vcpu > INT_MAX) {
1094		pr_err("Invalid VCPU %u\n", vcpu);
1095		return -EINVAL;
1096	}
1097	guest_vcpu = guest_session__vcpu(gs, vcpu);
1098	if (!guest_vcpu)
1099		return -ENOMEM;
1100	if (guest_vcpu->tid && guest_vcpu->tid != event->comm.tid) {
1101		pr_err("Fatal error: Two threads found with the same VCPU\n");
1102		return -EINVAL;
1103	}
1104	guest_vcpu->tid = event->comm.tid;
1105
1106	return guest_session__map_tid(gs, event->comm.tid, vcpu);
1107}
1108
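/*
 * Example (illustrative): with "-name debug-threads=on", QEMU names its
 * vcpu threads "CPU 0/KVM", "CPU 1/KVM", ...  A comm event with comm
 * "CPU 3/KVM" therefore maps that host tid to vcpu 3 above.
 */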
1109static int host_peek_vm_comms(struct perf_session *session, struct guest_session *gs)
1110{
1111	return perf_session__peek_events(session, session->header.data_offset,
1112					 session->header.data_size,
1113					 host_peek_vm_comms_cb, gs);
1114}
1115
1116static bool evlist__is_id_used(struct evlist *evlist, u64 id)
1117{
1118	return evlist__id2sid(evlist, id);
1119}
1120
1121static u64 guest_session__allocate_new_id(struct guest_session *gs, struct evlist *host_evlist)
1122{
1123	do {
1124		gs->highest_id += 1;
1125	} while (!gs->highest_id || evlist__is_id_used(host_evlist, gs->highest_id));
1126
1127	return gs->highest_id;
1128}
1129
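/*
 * Example (illustrative): if the host evlist already uses sample IDs
 * {1, 2, 3} and gs->highest_id is 3, successive calls return 4, 5, 6,
 * ..., skipping 0 and any ID that evlist__id2sid() can still resolve.
 */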
1130static int guest_session__map_id(struct guest_session *gs, u64 id, u64 host_id, u32 vcpu)
1131{
1132	struct guest_id *guest_id = zalloc(sizeof(*guest_id));
1133	int hash;
1134
1135	if (!guest_id)
1136		return -ENOMEM;
1137
1138	guest_id->id = id;
1139	guest_id->host_id = host_id;
1140	guest_id->vcpu = vcpu;
1141	hash = hash_64(guest_id->id, PERF_EVLIST__HLIST_BITS);
1142	hlist_add_head(&guest_id->node, &gs->heads[hash]);
1143
1144	return 0;
1145}
1146
1147static u64 evlist__find_highest_id(struct evlist *evlist)
1148{
1149	struct evsel *evsel;
1150	u64 highest_id = 1;
1151
1152	evlist__for_each_entry(evlist, evsel) {
1153		u32 j;
1154
1155		for (j = 0; j < evsel->core.ids; j++) {
1156			u64 id = evsel->core.id[j];
1157
1158			if (id > highest_id)
1159				highest_id = id;
1160		}
1161	}
1162
1163	return highest_id;
1164}
1165
1166static int guest_session__map_ids(struct guest_session *gs, struct evlist *host_evlist)
1167{
1168	struct evlist *evlist = gs->session->evlist;
1169	struct evsel *evsel;
1170	int ret;
1171
1172	evlist__for_each_entry(evlist, evsel) {
1173		u32 j;
1174
1175		for (j = 0; j < evsel->core.ids; j++) {
1176			struct perf_sample_id *sid;
1177			u64 host_id;
1178			u64 id;
1179
1180			id = evsel->core.id[j];
1181			sid = evlist__id2sid(evlist, id);
1182			if (!sid || sid->cpu.cpu == -1)
1183				continue;
1184			host_id = guest_session__allocate_new_id(gs, host_evlist);
1185			ret = guest_session__map_id(gs, id, host_id, sid->cpu.cpu);
1186			if (ret)
1187				return ret;
1188		}
1189	}
1190
1191	return 0;
1192}
1193
1194static struct guest_id *guest_session__lookup_id(struct guest_session *gs, u64 id)
1195{
1196	struct hlist_head *head;
1197	struct guest_id *guest_id;
1198	int hash;
1199
1200	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
1201	head = &gs->heads[hash];
1202
1203	hlist_for_each_entry(guest_id, head, node)
1204		if (guest_id->id == id)
1205			return guest_id;
1206
1207	return NULL;
1208}
1209
1210static int process_attr(const struct perf_tool *tool, union perf_event *event,
1211			struct perf_sample *sample __maybe_unused,
1212			struct machine *machine __maybe_unused)
1213{
1214	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
1215
1216	return perf_event__process_attr(tool, event, &inject->session->evlist);
1217}
1218
1219static int guest_session__add_attr(struct guest_session *gs, struct evsel *evsel)
1220{
1221	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
1222	struct perf_event_attr attr = evsel->core.attr;
1223	u64 *id_array;
1224	u32 *vcpu_array;
1225	int ret = -ENOMEM;
1226	u32 i;
1227
1228	id_array = calloc(evsel->core.ids, sizeof(*id_array));
1229	if (!id_array)
1230		return -ENOMEM;
1231
1232	vcpu_array = calloc(evsel->core.ids, sizeof(*vcpu_array));
1233	if (!vcpu_array)
1234		goto out;
1235
1236	for (i = 0; i < evsel->core.ids; i++) {
1237		u64 id = evsel->core.id[i];
1238		struct guest_id *guest_id = guest_session__lookup_id(gs, id);
1239
1240		if (!guest_id) {
1241			pr_err("Failed to find guest id %"PRIu64"\n", id);
1242			ret = -EINVAL;
1243			goto out;
1244		}
1245		id_array[i] = guest_id->host_id;
1246		vcpu_array[i] = guest_id->vcpu;
1247	}
1248
1249	attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
1250	attr.exclude_host = 1;
1251	attr.exclude_guest = 0;
1252
1253	ret = perf_event__synthesize_attr(&inject->tool, &attr, evsel->core.ids,
1254					  id_array, process_attr);
1255	if (ret)
1256		pr_err("Failed to add guest attr.\n");
1257
1258	for (i = 0; i < evsel->core.ids; i++) {
1259		struct perf_sample_id *sid;
1260		u32 vcpu = vcpu_array[i];
1261
1262		sid = evlist__id2sid(inject->session->evlist, id_array[i]);
1263		/* Guest event is per-thread from the host point of view */
1264		sid->cpu.cpu = -1;
1265		sid->tid = gs->vcpu[vcpu].tid;
1266		sid->machine_pid = gs->machine_pid;
1267		sid->vcpu.cpu = vcpu;
1268	}
1269out:
1270	free(vcpu_array);
1271	free(id_array);
1272	return ret;
1273}
1274
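/*
 * Editor's summary (not in the original source): each guest attr is
 * re-synthesized into the host session with PERF_SAMPLE_IDENTIFIER set
 * and exclude_host/exclude_guest flipped, and its sample IDs are
 * remapped to freshly allocated host IDs whose perf_sample_id entries
 * point at the corresponding QEMU vcpu thread (sid->tid) and carry the
 * guest machine_pid and vcpu.
 */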
1275static int guest_session__add_attrs(struct guest_session *gs)
1276{
1277	struct evlist *evlist = gs->session->evlist;
1278	struct evsel *evsel;
1279	int ret;
1280
1281	evlist__for_each_entry(evlist, evsel) {
1282		ret = guest_session__add_attr(gs, evsel);
1283		if (ret)
1284			return ret;
1285	}
1286
1287	return 0;
1288}
1289
1290static int synthesize_id_index(struct perf_inject *inject, size_t new_cnt)
1291{
1292	struct perf_session *session = inject->session;
1293	struct evlist *evlist = session->evlist;
1294	struct machine *machine = &session->machines.host;
1295	size_t from = evlist->core.nr_entries - new_cnt;
1296
1297	return __perf_event__synthesize_id_index(&inject->tool, perf_event__repipe,
1298						 evlist, machine, from);
1299}
1300
1301static struct guest_tid *guest_session__lookup_tid(struct guest_session *gs, u32 tid)
1302{
1303	struct hlist_head *head;
1304	struct guest_tid *guest_tid;
1305	int hash;
1306
1307	hash = hash_32(tid, PERF_EVLIST__HLIST_BITS);
1308	head = &gs->tids[hash];
1309
1310	hlist_for_each_entry(guest_tid, head, node)
1311		if (guest_tid->tid == tid)
1312			return guest_tid;
1313
1314	return NULL;
1315}
1316
1317static bool dso__is_in_kernel_space(struct dso *dso)
1318{
1319	if (dso__is_vdso(dso))
1320		return false;
1321
1322	return dso__is_kcore(dso) ||
1323	       dso__kernel(dso) ||
1324	       is_kernel_module(dso__long_name(dso), PERF_RECORD_MISC_CPUMODE_UNKNOWN);
1325}
1326
1327static u64 evlist__first_id(struct evlist *evlist)
1328{
1329	struct evsel *evsel;
1330
1331	evlist__for_each_entry(evlist, evsel) {
1332		if (evsel->core.ids)
1333			return evsel->core.id[0];
1334	}
1335	return 0;
1336}
1337
1338static int process_build_id(const struct perf_tool *tool,
1339			    union perf_event *event,
1340			    struct perf_sample *sample __maybe_unused,
1341			    struct machine *machine __maybe_unused)
1342{
1343	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
1344
1345	return perf_event__process_build_id(inject->session, event);
1346}
1347
1348static int synthesize_build_id(struct perf_inject *inject, struct dso *dso, pid_t machine_pid)
1349{
1350	struct machine *machine = perf_session__findnew_machine(inject->session, machine_pid);
1351	struct perf_sample synth_sample = {
1352		.pid	   = -1,
1353		.tid	   = -1,
1354		.time	   = -1,
1355		.stream_id = -1,
1356		.cpu	   = -1,
1357		.period	   = 1,
1358		.cpumode   = dso__is_in_kernel_space(dso)
1359		? PERF_RECORD_MISC_GUEST_KERNEL
1360		: PERF_RECORD_MISC_GUEST_USER,
1361	};
1362
1363	if (!machine)
1364		return -ENOMEM;
1365
1366	dso__set_hit(dso);
1367
1368	return perf_event__synthesize_build_id(&inject->tool, &synth_sample, machine,
1369					       process_build_id, inject__mmap_evsel(inject),
1370					       /*misc=*/synth_sample.cpumode,
1371					       dso__bid(dso), dso__long_name(dso));
1372}
1373
1374static int guest_session__add_build_ids_cb(struct dso *dso, void *data)
1375{
1376	struct guest_session *gs = data;
1377	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
1378
1379	if (!dso__has_build_id(dso))
1380		return 0;
1381
1382	return synthesize_build_id(inject, dso, gs->machine_pid);
1383
1384}
1385
1386static int guest_session__add_build_ids(struct guest_session *gs)
1387{
1388	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
1389
1390	/* Build IDs will be put in the Build ID feature section */
1391	perf_header__set_feat(&inject->session->header, HEADER_BUILD_ID);
1392
1393	return dsos__for_each_dso(&gs->session->machines.host.dsos,
1394				  guest_session__add_build_ids_cb,
1395				  gs);
1396}
1397
1398static int guest_session__ksymbol_event(const struct perf_tool *tool,
1399					union perf_event *event,
1400					struct perf_sample *sample __maybe_unused,
1401					struct machine *machine __maybe_unused)
1402{
1403	struct guest_session *gs = container_of(tool, struct guest_session, tool);
1404
1405	/* Only support out-of-line i.e. no BPF support */
1406	if (event->ksymbol.ksym_type != PERF_RECORD_KSYMBOL_TYPE_OOL)
1407		return 0;
1408
1409	return guest_session__output_bytes(gs, event, event->header.size);
1410}
1411
1412static int guest_session__start(struct guest_session *gs, const char *name, bool force)
1413{
1414	char tmp_file_name[] = "/tmp/perf-inject-guest_session-XXXXXX";
1415	struct perf_session *session;
1416	int ret;
1417
1418	/* Only these events will be injected */
1419	gs->tool.mmap		= guest_session__repipe;
1420	gs->tool.mmap2		= guest_session__repipe;
1421	gs->tool.comm		= guest_session__repipe;
1422	gs->tool.fork		= guest_session__repipe;
1423	gs->tool.exit		= guest_session__repipe;
1424	gs->tool.lost		= guest_session__repipe;
1425	gs->tool.context_switch	= guest_session__repipe;
1426	gs->tool.ksymbol	= guest_session__ksymbol_event;
1427	gs->tool.text_poke	= guest_session__repipe;
1428	/*
1429	 * Processing a build ID creates a struct dso with that build ID. Later,
1430	 * all guest dsos are iterated and the build IDs processed into the host
1431	 * session where they will be output to the Build ID feature section
1432	 * when the perf.data file header is written.
1433	 */
1434	gs->tool.build_id	= perf_event__process_build_id;
1435	/* Process the id index to know what VCPU an ID belongs to */
1436	gs->tool.id_index	= perf_event__process_id_index;
1437
1438	gs->tool.ordered_events	= true;
1439	gs->tool.ordering_requires_timestamps = true;
1440
1441	gs->data.path	= name;
1442	gs->data.force	= force;
1443	gs->data.mode	= PERF_DATA_MODE_READ;
1444
1445	session = perf_session__new(&gs->data, &gs->tool);
1446	if (IS_ERR(session))
1447		return PTR_ERR(session);
1448	gs->session = session;
1449
1450	/*
1451	 * Initial events have zero'd ID samples. Get default ID sample size
1452	 * used for removing them.
1453	 */
1454	gs->dflt_id_hdr_size = session->machines.host.id_hdr_size;
1455	/* And default ID for adding back a host-compatible ID sample */
1456	gs->dflt_id = evlist__first_id(session->evlist);
1457	if (!gs->dflt_id) {
1458		pr_err("Guest data has no sample IDs\n");
1459		return -EINVAL;
1460	}
1461
1462	/* Temporary file for guest events */
1463	gs->tmp_file_name = strdup(tmp_file_name);
1464	if (!gs->tmp_file_name)
1465		return -ENOMEM;
1466	gs->tmp_fd = mkstemp(gs->tmp_file_name);
1467	if (gs->tmp_fd < 0)
1468		return -errno;
1469
1470	if (zstd_init(&gs->session->zstd_data, 0) < 0)
1471		pr_warning("Guest session decompression initialization failed.\n");
1472
1473	/*
1474	 * perf does not support processing 2 sessions simultaneously, so output
1475	 * guest events to a temporary file.
1476	 */
1477	ret = perf_session__process_events(gs->session);
1478	if (ret)
1479		return ret;
1480
1481	if (lseek(gs->tmp_fd, 0, SEEK_SET))
1482		return -errno;
1483
1484	return 0;
1485}
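/*
 * Roughly, guest_session__start() therefore: repipes guest events into a
 * temporary file, records the default sample ID and ID header size needed to
 * rewrite ID samples later, processes the whole guest perf.data file, and
 * then rewinds the temporary file so the events can be injected afterwards.
 */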
1486
1487/* Free hlist nodes assuming hlist_node is the first member of hlist entries */
1488static void free_hlist(struct hlist_head *heads, size_t hlist_sz)
1489{
1490	struct hlist_node *pos, *n;
1491	size_t i;
1492
1493	for (i = 0; i < hlist_sz; ++i) {
1494		hlist_for_each_safe(pos, n, &heads[i]) {
1495			hlist_del(pos);
1496			free(pos);
1497		}
1498	}
1499}
1500
1501static void guest_session__exit(struct guest_session *gs)
1502{
1503	if (gs->session) {
1504		perf_session__delete(gs->session);
1505		free_hlist(gs->heads, PERF_EVLIST__HLIST_SIZE);
1506		free_hlist(gs->tids, PERF_EVLIST__HLIST_SIZE);
1507	}
1508	if (gs->tmp_file_name) {
1509		if (gs->tmp_fd >= 0)
1510			close(gs->tmp_fd);
1511		unlink(gs->tmp_file_name);
1512		zfree(&gs->tmp_file_name);
1513	}
1514	zfree(&gs->vcpu);
1515	zfree(&gs->perf_data_file);
1516}
1517
1518static void get_tsc_conv(struct perf_tsc_conversion *tc, struct perf_record_time_conv *time_conv)
1519{
1520	tc->time_shift		= time_conv->time_shift;
1521	tc->time_mult		= time_conv->time_mult;
1522	tc->time_zero		= time_conv->time_zero;
1523	tc->time_cycles		= time_conv->time_cycles;
1524	tc->time_mask		= time_conv->time_mask;
1525	tc->cap_user_time_zero	= time_conv->cap_user_time_zero;
1526	tc->cap_user_time_short	= time_conv->cap_user_time_short;
1527}
1528
1529static void guest_session__get_tc(struct guest_session *gs)
1530{
1531	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
1532
1533	get_tsc_conv(&gs->host_tc, &inject->session->time_conv);
1534	get_tsc_conv(&gs->guest_tc, &gs->session->time_conv);
1535}
1536
1537static void guest_session__convert_time(struct guest_session *gs, u64 guest_time, u64 *host_time)
1538{
1539	u64 tsc;
1540
1541	if (!guest_time) {
1542		*host_time = 0;
1543		return;
1544	}
1545
1546	if (gs->guest_tc.cap_user_time_zero)
1547		tsc = perf_time_to_tsc(guest_time, &gs->guest_tc);
1548	else
1549		tsc = guest_time;
1550
1551	/*
1552	 * This is the correct order of operations for x86 if the TSC Offset and
1553	 * Multiplier values are used.
1554	 */
1555	tsc -= gs->time_offset;
1556	tsc /= gs->time_scale;
1557
1558	if (gs->host_tc.cap_user_time_zero)
1559		*host_time = tsc_to_perf_time(tsc, &gs->host_tc);
1560	else
1561		*host_time = tsc;
1562}
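/*
 * Worked example with hypothetical values: for time_offset = 1000000 and
 * time_scale = 1.25, a guest TSC value of 5000000 becomes
 * (5000000 - 1000000) / 1.25 = 3200000 host TSC ticks, which is then
 * converted to host perf time via host_tc when cap_user_time_zero is set.
 */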
1563
1564static int guest_session__fetch(struct guest_session *gs)
1565{
1566	void *buf;
1567	struct perf_event_header *hdr;
1568	size_t hdr_sz = sizeof(*hdr);
1569	ssize_t ret;
1570
1571	buf = gs->ev.event_buf;
1572	if (!buf) {
1573		buf = malloc(PERF_SAMPLE_MAX_SIZE);
1574		if (!buf)
1575			return -ENOMEM;
1576		gs->ev.event_buf = buf;
1577	}
1578	hdr = buf;
1579	ret = readn(gs->tmp_fd, buf, hdr_sz);
1580	if (ret < 0)
1581		return ret;
1582
1583	if (!ret) {
1584		/* Zero size means EOF */
1585		hdr->size = 0;
1586		return 0;
1587	}
1588
1589	buf += hdr_sz;
1590
1591	ret = readn(gs->tmp_fd, buf, hdr->size - hdr_sz);
1592	if (ret < 0)
1593		return ret;
1594
1595	gs->ev.event = (union perf_event *)gs->ev.event_buf;
1596	gs->ev.sample.time = 0;
1597
1598	if (hdr->type >= PERF_RECORD_USER_TYPE_START) {
1599		pr_err("Unexpected type fetching guest event\n");
1600		return 0;
1601	}
1602
1603	ret = evlist__parse_sample(gs->session->evlist, gs->ev.event, &gs->ev.sample);
1604	if (ret) {
1605		pr_err("Parse failed fetching guest event\n");
1606		return ret;
1607	}
1608
1609	if (!gs->have_tc) {
1610		guest_session__get_tc(gs);
1611		gs->have_tc = true;
1612	}
1613
1614	guest_session__convert_time(gs, gs->ev.sample.time, &gs->ev.sample.time);
1615
1616	return 0;
1617}
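/*
 * Each record in the temporary file is a raw perf event: a
 * struct perf_event_header followed by (header.size - sizeof(header)) bytes
 * of payload. A zero-length read of the header is treated as EOF.
 */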
1618
1619static int evlist__append_id_sample(struct evlist *evlist, union perf_event *ev,
1620				    const struct perf_sample *sample)
1621{
1622	struct evsel *evsel;
1623	void *array;
1624	int ret;
1625
1626	evsel = evlist__id2evsel(evlist, sample->id);
1627	array = ev;
1628
1629	if (!evsel) {
1630		pr_err("No evsel for id %"PRIu64"\n", sample->id);
1631		return -EINVAL;
1632	}
1633
1634	array += ev->header.size;
1635	ret = perf_event__synthesize_id_sample(array, evsel->core.attr.sample_type, sample);
1636	if (ret < 0)
1637		return ret;
1638
1639	if (ret & 7) {
1640		pr_err("Bad id sample size %d\n", ret);
1641		return -EINVAL;
1642	}
1643
1644	ev->header.size += ret;
1645
1646	return 0;
1647}
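/*
 * The ID sample is synthesized directly after the existing event payload and
 * the event's header.size grows by the synthesized size, which must stay
 * 8-byte aligned (hence the "ret & 7" check above).
 */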
1648
1649static int guest_session__inject_events(struct guest_session *gs, u64 timestamp)
1650{
1651	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
1652	int ret;
1653
1654	if (!gs->ready)
1655		return 0;
1656
1657	while (1) {
1658		struct perf_sample *sample;
1659		struct guest_id *guest_id;
1660		union perf_event *ev;
1661		u16 id_hdr_size;
1662		u8 cpumode;
1663		u64 id;
1664
1665		if (!gs->fetched) {
1666			ret = guest_session__fetch(gs);
1667			if (ret)
1668				return ret;
1669			gs->fetched = true;
1670		}
1671
1672		ev = gs->ev.event;
1673		sample = &gs->ev.sample;
1674
1675		if (!ev->header.size)
1676			return 0; /* EOF */
1677
1678		if (sample->time > timestamp)
1679			return 0;
1680
1681		/* Change cpumode to guest */
1682		cpumode = ev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1683		if (cpumode & PERF_RECORD_MISC_USER)
1684			cpumode = PERF_RECORD_MISC_GUEST_USER;
1685		else
1686			cpumode = PERF_RECORD_MISC_GUEST_KERNEL;
1687		ev->header.misc &= ~PERF_RECORD_MISC_CPUMODE_MASK;
1688		ev->header.misc |= cpumode;
1689
1690		id = sample->id;
1691		if (!id) {
1692			id = gs->dflt_id;
1693			id_hdr_size = gs->dflt_id_hdr_size;
1694		} else {
1695			struct evsel *evsel = evlist__id2evsel(gs->session->evlist, id);
1696
1697			id_hdr_size = evsel__id_hdr_size(evsel);
1698		}
1699
1700		if (id_hdr_size & 7) {
1701			pr_err("Bad id_hdr_size %u\n", id_hdr_size);
1702			return -EINVAL;
1703		}
1704
1705		if (ev->header.size & 7) {
1706			pr_err("Bad event size %u\n", ev->header.size);
1707			return -EINVAL;
1708		}
1709
1710		/* Remove guest id sample */
1711		ev->header.size -= id_hdr_size;
1712
1713		if (ev->header.size & 7) {
1714			pr_err("Bad raw event size %u\n", ev->header.size);
1715			return -EINVAL;
1716		}
1717
1718		guest_id = guest_session__lookup_id(gs, id);
1719		if (!guest_id) {
1720			pr_err("Guest event with unknown id %llu\n",
1721			       (unsigned long long)id);
1722			return -EINVAL;
1723		}
1724
1725		/* Change to host ID to avoid conflicting ID values */
1726		sample->id = guest_id->host_id;
1727		sample->stream_id = guest_id->host_id;
1728
1729		if (sample->cpu != (u32)-1) {
1730			if (sample->cpu >= gs->vcpu_cnt) {
1731				pr_err("Guest event with unknown VCPU %u\n",
1732				       sample->cpu);
1733				return -EINVAL;
1734			}
1735			/* Change to host CPU instead of guest VCPU */
1736			sample->cpu = gs->vcpu[sample->cpu].cpu;
1737		}
1738
1739		/* New id sample with new ID and CPU */
1740		ret = evlist__append_id_sample(inject->session->evlist, ev, sample);
1741		if (ret)
1742			return ret;
1743
1744		if (ev->header.size & 7) {
1745			pr_err("Bad new event size %u\n", ev->header.size);
1746			return -EINVAL;
1747		}
1748
1749		gs->fetched = false;
1750
1751		ret = output_bytes(inject, ev, ev->header.size);
1752		if (ret)
1753			return ret;
1754	}
1755}
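/*
 * For each guest event with a timestamp up to 'timestamp', the loop above:
 *   - rewrites the cpumode to its guest equivalent,
 *   - strips the guest ID sample,
 *   - replaces the guest sample/stream ID with the mapped host ID,
 *   - replaces the guest VCPU number with the host CPU it last ran on,
 *   - appends a new host-compatible ID sample,
 *   - and writes the rewritten event to the output.
 */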
1756
1757static int guest_session__flush_events(struct guest_session *gs)
1758{
1759	return guest_session__inject_events(gs, -1);
1760}
1761
1762static int host__repipe(const struct perf_tool *tool,
1763			union perf_event *event,
1764			struct perf_sample *sample,
1765			struct machine *machine)
1766{
1767	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
1768	int ret;
1769
1770	ret = guest_session__inject_events(&inject->guest_session, sample->time);
1771	if (ret)
1772		return ret;
1773
1774	return perf_event__repipe(tool, event, sample, machine);
1775}
1776
1777static int host__finished_init(struct perf_session *session, union perf_event *event)
1778{
1779	struct perf_inject *inject = container_of(session->tool, struct perf_inject, tool);
1780	struct guest_session *gs = &inject->guest_session;
1781	int ret;
1782
1783	/*
1784	 * Peek through host COMM events to find QEMU threads and the VCPU they
1785	 * are running.
1786	 */
1787	ret = host_peek_vm_comms(session, gs);
1788	if (ret)
1789		return ret;
1790
1791	if (!gs->vcpu_cnt) {
1792		pr_err("No VCPU threads found for pid %u\n", gs->machine_pid);
1793		return -EINVAL;
1794	}
1795
1796	/*
1797	 * Allocate new (unused) host sample IDs and map them to the guest IDs.
1798	 */
1799	gs->highest_id = evlist__find_highest_id(session->evlist);
1800	ret = guest_session__map_ids(gs, session->evlist);
1801	if (ret)
1802		return ret;
1803
1804	ret = guest_session__add_attrs(gs);
1805	if (ret)
1806		return ret;
1807
1808	ret = synthesize_id_index(inject, gs->session->evlist->core.nr_entries);
1809	if (ret) {
1810		pr_err("Failed to synthesize id_index\n");
1811		return ret;
1812	}
1813
1814	ret = guest_session__add_build_ids(gs);
1815	if (ret) {
1816		pr_err("Failed to add guest build IDs\n");
1817		return ret;
1818	}
1819
1820	gs->ready = true;
1821
1822	ret = guest_session__inject_events(gs, 0);
1823	if (ret)
1824		return ret;
1825
1826	return perf_event__repipe_op2_synth(session, event);
1827}
1828
1829/*
1830 * Obey finished-round ordering. The FINISHED_ROUND event is first processed
1831 * which flushes host events to file up until the last flush time. Then inject
1832 * guest events up to the same time. Finally write out the FINISHED_ROUND event
1833 * itself.
1834 */
1835static int host__finished_round(const struct perf_tool *tool,
1836				union perf_event *event,
1837				struct ordered_events *oe)
1838{
1839	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
1840	int ret = perf_event__process_finished_round(tool, event, oe);
1841	u64 timestamp = ordered_events__last_flush_time(oe);
1842
1843	if (ret)
1844		return ret;
1845
1846	ret = guest_session__inject_events(&inject->guest_session, timestamp);
1847	if (ret)
1848		return ret;
1849
1850	return perf_event__repipe_oe_synth(tool, event, oe);
1851}
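/*
 * For example, if a finished round flushed host events up to time T, guest
 * events with timestamps <= T are injected next, and only then is the
 * FINISHED_ROUND record itself written out, preserving overall ordering.
 */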
1852
1853static int host__context_switch(const struct perf_tool *tool,
1854				union perf_event *event,
1855				struct perf_sample *sample,
1856				struct machine *machine)
1857{
1858	struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
1859	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
1860	struct guest_session *gs = &inject->guest_session;
1861	u32 pid = event->context_switch.next_prev_pid;
1862	u32 tid = event->context_switch.next_prev_tid;
1863	struct guest_tid *guest_tid;
1864	u32 vcpu;
1865
1866	if (out || pid != gs->machine_pid)
1867		goto out;
1868
1869	guest_tid = guest_session__lookup_tid(gs, tid);
1870	if (!guest_tid)
1871		goto out;
1872
1873	if (sample->cpu == (u32)-1) {
1874		pr_err("Switch event does not have CPU\n");
1875		return -EINVAL;
1876	}
1877
1878	vcpu = guest_tid->vcpu;
1879	if (vcpu >= gs->vcpu_cnt)
1880		return -EINVAL;
1881
1882	/* Guest is switching in, record which CPU the VCPU is now running on */
1883	gs->vcpu[vcpu].cpu = sample->cpu;
1884out:
1885	return host__repipe(tool, event, sample, machine);
1886}
1887
1888static void sig_handler(int sig __maybe_unused)
1889{
1890	session_done = 1;
1891}
1892
1893static int evsel__check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg)
1894{
1895	struct perf_event_attr *attr = &evsel->core.attr;
1896	const char *name = evsel__name(evsel);
1897
1898	if (!(attr->sample_type & sample_type)) {
1899		pr_err("Samples for %s event do not have %s attribute set.\n",
1900			name, sample_msg);
1901		return -EINVAL;
1902	}
1903
1904	return 0;
1905}
1906
1907static int drop_sample(const struct perf_tool *tool __maybe_unused,
1908		       union perf_event *event __maybe_unused,
1909		       struct perf_sample *sample __maybe_unused,
1910		       struct evsel *evsel __maybe_unused,
1911		       struct machine *machine __maybe_unused)
1912{
1913	return 0;
1914}
1915
1916static void strip_init(struct perf_inject *inject)
1917{
1918	struct evlist *evlist = inject->session->evlist;
1919	struct evsel *evsel;
1920
1921	inject->tool.context_switch = perf_event__drop;
1922
1923	evlist__for_each_entry(evlist, evsel)
1924		evsel->handler = drop_sample;
1925}
1926
1927static int parse_vm_time_correlation(const struct option *opt, const char *str, int unset)
1928{
1929	struct perf_inject *inject = opt->value;
1930	const char *args;
1931	char *dry_run;
1932
1933	if (unset)
1934		return 0;
1935
1936	inject->itrace_synth_opts.set = true;
1937	inject->itrace_synth_opts.vm_time_correlation = true;
1938	inject->in_place_update = true;
1939
1940	if (!str)
1941		return 0;
1942
1943	dry_run = skip_spaces(str);
1944	if (!strncmp(dry_run, "dry-run", strlen("dry-run"))) {
1945		inject->itrace_synth_opts.vm_tm_corr_dry_run = true;
1946		inject->in_place_update_dry_run = true;
1947		args = dry_run + strlen("dry-run");
1948	} else {
1949		args = str;
1950	}
1951
1952	inject->itrace_synth_opts.vm_tm_corr_args = strdup(args);
1953
1954	return inject->itrace_synth_opts.vm_tm_corr_args ? 0 : -ENOMEM;
1955}
1956
1957static int parse_guest_data(const struct option *opt, const char *str, int unset)
1958{
1959	struct perf_inject *inject = opt->value;
1960	struct guest_session *gs = &inject->guest_session;
1961	char *tok;
1962	char *s;
1963
1964	if (unset)
1965		return 0;
1966
1967	if (!str)
1968		goto bad_args;
1969
1970	s = strdup(str);
1971	if (!s)
1972		return -ENOMEM;
1973
1974	gs->perf_data_file = strsep(&s, ",");
1975	if (!gs->perf_data_file)
1976		goto bad_args;
1977
1978	gs->copy_kcore_dir = has_kcore_dir(gs->perf_data_file);
1979	if (gs->copy_kcore_dir)
1980		inject->output.is_dir = true;
1981
1982	tok = strsep(&s, ",");
1983	if (!tok)
1984		goto bad_args;
1985	gs->machine_pid = strtoul(tok, NULL, 0);
1986	if (!inject->guest_session.machine_pid)
1987		goto bad_args;
1988
1989	gs->time_scale = 1;
1990
1991	tok = strsep(&s, ",");
1992	if (!tok)
1993		goto out;
1994	gs->time_offset = strtoull(tok, NULL, 0);
1995
1996	tok = strsep(&s, ",");
1997	if (!tok)
1998		goto out;
1999	gs->time_scale = strtod(tok, NULL);
2000	if (!gs->time_scale)
2001		goto bad_args;
2002out:
2003	return 0;
2004
2005bad_args:
2006	pr_err("--guest-data option requires guest perf.data file name, "
2007	       "guest machine PID, and optionally guest timestamp offset, "
2008	       "and guest timestamp scale factor, separated by commas.\n");
2009	return -1;
2010}
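/*
 * Usage sketch with hypothetical file names and values:
 *
 *   perf inject -i host.perf.data -o injected.perf.data \
 *               --guest-data=guest.perf.data,12345,0x1000,1.5
 *
 * injects events from guest.perf.data recorded inside the VM whose QEMU
 * process has host PID 12345, applying a guest TSC offset of 0x1000 and a
 * TSC scale factor of 1.5 when converting guest timestamps to host time.
 */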
2011
2012static int save_section_info_cb(struct perf_file_section *section,
2013				struct perf_header *ph __maybe_unused,
2014				int feat, int fd __maybe_unused, void *data)
2015{
2016	struct perf_inject *inject = data;
2017
2018	inject->secs[feat] = *section;
2019	return 0;
2020}
2021
2022static int save_section_info(struct perf_inject *inject)
2023{
2024	struct perf_header *header = &inject->session->header;
2025	int fd = perf_data__fd(inject->session->data);
2026
2027	return perf_header__process_sections(header, fd, inject, save_section_info_cb);
2028}
2029
2030static bool keep_feat(int feat)
2031{
2032	switch (feat) {
2033	/* Keep original information that describes the machine or software */
2034	case HEADER_TRACING_DATA:
2035	case HEADER_HOSTNAME:
2036	case HEADER_OSRELEASE:
2037	case HEADER_VERSION:
2038	case HEADER_ARCH:
2039	case HEADER_NRCPUS:
2040	case HEADER_CPUDESC:
2041	case HEADER_CPUID:
2042	case HEADER_TOTAL_MEM:
2043	case HEADER_CPU_TOPOLOGY:
2044	case HEADER_NUMA_TOPOLOGY:
2045	case HEADER_PMU_MAPPINGS:
2046	case HEADER_CACHE:
2047	case HEADER_MEM_TOPOLOGY:
2048	case HEADER_CLOCKID:
2049	case HEADER_BPF_PROG_INFO:
2050	case HEADER_BPF_BTF:
2051	case HEADER_CPU_PMU_CAPS:
2052	case HEADER_CLOCK_DATA:
2053	case HEADER_HYBRID_TOPOLOGY:
2054	case HEADER_PMU_CAPS:
2055		return true;
2056	/* Information that can be updated */
2057	case HEADER_BUILD_ID:
2058	case HEADER_CMDLINE:
2059	case HEADER_EVENT_DESC:
2060	case HEADER_BRANCH_STACK:
2061	case HEADER_GROUP_DESC:
2062	case HEADER_AUXTRACE:
2063	case HEADER_STAT:
2064	case HEADER_SAMPLE_TIME:
2065	case HEADER_DIR_FORMAT:
2066	case HEADER_COMPRESSED:
2067	default:
2068		return false;
2069	}
2070}
2071
2072static int read_file(int fd, u64 offs, void *buf, size_t sz)
2073{
2074	ssize_t ret = preadn(fd, buf, sz, offs);
2075
2076	if (ret < 0)
2077		return -errno;
2078	if ((size_t)ret != sz)
2079		return -EINVAL;
2080	return 0;
2081}
2082
2083static int feat_copy(struct perf_inject *inject, int feat, struct feat_writer *fw)
2084{
2085	int fd = perf_data__fd(inject->session->data);
2086	u64 offs = inject->secs[feat].offset;
2087	size_t sz = inject->secs[feat].size;
2088	void *buf = malloc(sz);
2089	int ret;
2090
2091	if (!buf)
2092		return -ENOMEM;
2093
2094	ret = read_file(fd, offs, buf, sz);
2095	if (ret)
2096		goto out_free;
2097
2098	ret = fw->write(fw, buf, sz);
2099out_free:
2100	free(buf);
2101	return ret;
2102}
2103
2104struct inject_fc {
2105	struct feat_copier fc;
2106	struct perf_inject *inject;
2107};
2108
2109static int feat_copy_cb(struct feat_copier *fc, int feat, struct feat_writer *fw)
2110{
2111	struct inject_fc *inj_fc = container_of(fc, struct inject_fc, fc);
2112	struct perf_inject *inject = inj_fc->inject;
2113	int ret;
2114
2115	if (!inject->secs[feat].offset ||
2116	    !keep_feat(feat))
2117		return 0;
2118
2119	ret = feat_copy(inject, feat, fw);
2120	if (ret < 0)
2121		return ret;
2122
2123	return 1; /* Feature section copied */
2124}
2125
2126static int copy_kcore_dir(struct perf_inject *inject)
2127{
2128	char *cmd;
2129	int ret;
2130
2131	ret = asprintf(&cmd, "cp -r -n %s/kcore_dir* %s >/dev/null 2>&1",
2132		       inject->input_name, inject->output.path);
2133	if (ret < 0)
2134		return ret;
2135	pr_debug("%s\n", cmd);
2136	ret = system(cmd);
2137	free(cmd);
2138	return ret;
2139}
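/*
 * With hypothetical paths, the generated command looks like:
 *
 *   cp -r -n perf.data/kcore_dir* perf.data.new >/dev/null 2>&1
 *
 * i.e. any kcore_dir subdirectories captured alongside the input data
 * directory are copied, without overwriting, into the output directory.
 */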
2140
2141static int guest_session__copy_kcore_dir(struct guest_session *gs)
2142{
2143	struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
2144	char *cmd;
2145	int ret;
2146
2147	ret = asprintf(&cmd, "cp -r -n %s/kcore_dir %s/kcore_dir__%u >/dev/null 2>&1",
2148		       gs->perf_data_file, inject->output.path, gs->machine_pid);
2149	if (ret < 0)
2150		return ret;
2151	pr_debug("%s\n", cmd);
2152	ret = system(cmd);
2153	free(cmd);
2154	return ret;
2155}
2156
2157static int output_fd(struct perf_inject *inject)
2158{
2159	return inject->in_place_update ? -1 : perf_data__fd(&inject->output);
2160}
2161
2162static int __cmd_inject(struct perf_inject *inject)
2163{
2164	int ret = -EINVAL;
2165	struct guest_session *gs = &inject->guest_session;
2166	struct perf_session *session = inject->session;
2167	int fd = output_fd(inject);
2168	u64 output_data_offset = perf_session__data_offset(session->evlist);
2169	/*
2170	 * Pipe input hasn't loaded the attributes and will handle them as
2171	 * events. So that the attributes don't overlap the data, write the
2172	 * attributes after the data.
2173	 */
2174	bool write_attrs_after_data = !inject->output.is_pipe && inject->session->data->is_pipe;
2175
2176	signal(SIGINT, sig_handler);
2177
2178	if (inject->build_id_style != BID_RWS__NONE || inject->sched_stat ||
2179	    inject->itrace_synth_opts.set) {
2180		inject->tool.mmap	  = perf_event__repipe_mmap;
2181		inject->tool.mmap2	  = perf_event__repipe_mmap2;
2182		inject->tool.fork	  = perf_event__repipe_fork;
2183#ifdef HAVE_LIBTRACEEVENT
2184		inject->tool.tracing_data = perf_event__repipe_tracing_data;
2185#endif
2186	}
2187
2188	if (inject->build_id_style == BID_RWS__INJECT_HEADER_LAZY ||
2189	    inject->build_id_style == BID_RWS__MMAP2_BUILDID_LAZY) {
2190		inject->tool.sample = perf_event__inject_buildid;
2191	} else if (inject->sched_stat) {
2192		struct evsel *evsel;
2193
2194		evlist__for_each_entry(session->evlist, evsel) {
2195			const char *name = evsel__name(evsel);
2196
2197			if (!strcmp(name, "sched:sched_switch")) {
2198				if (evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID"))
2199					return -EINVAL;
2200
2201				evsel->handler = perf_inject__sched_switch;
2202			} else if (!strcmp(name, "sched:sched_process_exit"))
2203				evsel->handler = perf_inject__sched_process_exit;
2204#ifdef HAVE_LIBTRACEEVENT
2205			else if (!strncmp(name, "sched:sched_stat_", 17))
2206				evsel->handler = perf_inject__sched_stat;
2207#endif
2208		}
2209	} else if (inject->itrace_synth_opts.vm_time_correlation) {
2210		session->itrace_synth_opts = &inject->itrace_synth_opts;
2211		memset(&inject->tool, 0, sizeof(inject->tool));
2212		inject->tool.id_index	    = perf_event__process_id_index;
2213		inject->tool.auxtrace_info  = perf_event__process_auxtrace_info;
2214		inject->tool.auxtrace	    = perf_event__process_auxtrace;
2215		inject->tool.auxtrace_error = perf_event__process_auxtrace_error;
2216		inject->tool.ordered_events = true;
2217		inject->tool.ordering_requires_timestamps = true;
2218	} else if (inject->itrace_synth_opts.set) {
2219		session->itrace_synth_opts = &inject->itrace_synth_opts;
2220		inject->itrace_synth_opts.inject = true;
2221		inject->tool.comm	    = perf_event__repipe_comm;
2222		inject->tool.namespaces	    = perf_event__repipe_namespaces;
2223		inject->tool.exit	    = perf_event__repipe_exit;
2224		inject->tool.id_index	    = perf_event__process_id_index;
2225		inject->tool.auxtrace_info  = perf_event__process_auxtrace_info;
2226		inject->tool.auxtrace	    = perf_event__process_auxtrace;
2227		inject->tool.aux	    = perf_event__drop_aux;
2228		inject->tool.itrace_start   = perf_event__drop_aux;
2229		inject->tool.aux_output_hw_id = perf_event__drop_aux;
2230		inject->tool.ordered_events = true;
2231		inject->tool.ordering_requires_timestamps = true;
2232		/* Allow space in the header for new attributes */
2233		output_data_offset = roundup(8192 + session->header.data_offset, 4096);
2234		if (inject->strip)
2235			strip_init(inject);
2236	} else if (gs->perf_data_file) {
2237		char *name = gs->perf_data_file;
2238
2239		/*
2240		 * Not strictly necessary, but keep these events in order wrt
2241		 * guest events.
2242		 */
2243		inject->tool.mmap		= host__repipe;
2244		inject->tool.mmap2		= host__repipe;
2245		inject->tool.comm		= host__repipe;
2246		inject->tool.fork		= host__repipe;
2247		inject->tool.exit		= host__repipe;
2248		inject->tool.lost		= host__repipe;
2249		inject->tool.context_switch	= host__repipe;
2250		inject->tool.ksymbol		= host__repipe;
2251		inject->tool.text_poke		= host__repipe;
2252		/*
2253		 * Once the host session has initialized, set up sample ID
2254		 * mapping and feed in guest attrs, build IDs and initial
2255		 * events.
2256		 */
2257		inject->tool.finished_init	= host__finished_init;
2258		/* Obey finished round ordering */
2259		inject->tool.finished_round	= host__finished_round;
2260		/* Keep track of which CPU a VCPU is running on */
2261		inject->tool.context_switch	= host__context_switch;
2262		/*
2263		 * Must order events to be able to obey finished round
2264		 * ordering.
2265		 */
2266		inject->tool.ordered_events	= true;
2267		inject->tool.ordering_requires_timestamps = true;
2268		/* Set up a separate session to process guest perf.data file */
2269		ret = guest_session__start(gs, name, session->data->force);
2270		if (ret) {
2271			pr_err("Failed to process %s, error %d\n", name, ret);
2272			return ret;
2273		}
2274		/* Allow space in the header for guest attributes */
2275		output_data_offset += gs->session->header.data_offset;
2276		output_data_offset = roundup(output_data_offset, 4096);
2277	}
2278
2279	if (!inject->itrace_synth_opts.set)
2280		auxtrace_index__free(&session->auxtrace_index);
2281
2282	if (!inject->output.is_pipe && !inject->in_place_update)
2283		lseek(fd, output_data_offset, SEEK_SET);
2284
2285	ret = perf_session__process_events(session);
2286	if (ret)
2287		return ret;
2288
2289	if (gs->session) {
2290		/*
2291		 * Remaining guest events have later timestamps. Flush them
2292		 * out to file.
2293		 */
2294		ret = guest_session__flush_events(gs);
2295		if (ret) {
2296			pr_err("Failed to flush guest events\n");
2297			return ret;
2298		}
2299	}
2300
2301	if (!inject->output.is_pipe && !inject->in_place_update) {
2302		struct inject_fc inj_fc = {
2303			.fc.copy = feat_copy_cb,
2304			.inject = inject,
2305		};
2306
2307		if (inject->build_id_style == BID_RWS__INJECT_HEADER_LAZY ||
2308		    inject->build_id_style == BID_RWS__INJECT_HEADER_ALL)
2309			perf_header__set_feat(&session->header, HEADER_BUILD_ID);
2310		/*
2311		 * Keep all buildids when there is unprocessed AUX data because
2312		 * it is not known which ones the AUX trace hits.
2313		 */
2314		if (perf_header__has_feat(&session->header, HEADER_BUILD_ID) &&
2315		    inject->have_auxtrace && !inject->itrace_synth_opts.set)
2316			perf_session__dsos_hit_all(session);
2317		/*
2318		 * The AUX areas have been removed and replaced with
2319		 * synthesized hardware events, so clear the feature flag.
2320		 */
2321		if (inject->itrace_synth_opts.set) {
2322			perf_header__clear_feat(&session->header,
2323						HEADER_AUXTRACE);
2324			if (inject->itrace_synth_opts.last_branch ||
2325			    inject->itrace_synth_opts.add_last_branch)
2326				perf_header__set_feat(&session->header,
2327						      HEADER_BRANCH_STACK);
2328		}
2329		session->header.data_offset = output_data_offset;
2330		session->header.data_size = inject->bytes_written;
2331		perf_session__inject_header(session, session->evlist, fd, &inj_fc.fc,
2332					    write_attrs_after_data);
2333
2334		if (inject->copy_kcore_dir) {
2335			ret = copy_kcore_dir(inject);
2336			if (ret) {
2337				pr_err("Failed to copy kcore\n");
2338				return ret;
2339			}
2340		}
2341		if (gs->copy_kcore_dir) {
2342			ret = guest_session__copy_kcore_dir(gs);
2343			if (ret) {
2344				pr_err("Failed to copy guest kcore\n");
2345				return ret;
2346			}
2347		}
2348	}
2349
2350	return ret;
2351}
2352
2353int cmd_inject(int argc, const char **argv)
2354{
2355	struct perf_inject inject = {
2356		.input_name  = "-",
2357		.samples = LIST_HEAD_INIT(inject.samples),
2358		.output = {
2359			.path = "-",
2360			.mode = PERF_DATA_MODE_WRITE,
2361			.use_stdio = true,
2362		},
2363	};
2364	struct perf_data data = {
2365		.mode = PERF_DATA_MODE_READ,
2366		.use_stdio = true,
2367	};
2368	int ret;
2369	const char *known_build_ids = NULL;
2370	bool build_ids = false;
2371	bool build_id_all = false;
2372	bool mmap2_build_ids = false;
2373	bool mmap2_build_id_all = false;
2374
2375	struct option options[] = {
2376		OPT_BOOLEAN('b', "build-ids", &build_ids,
2377			    "Inject build-ids into the output stream"),
2378		OPT_BOOLEAN(0, "buildid-all", &build_id_all,
2379			    "Inject build-ids of all DSOs into the output stream"),
2380		OPT_BOOLEAN('B', "mmap2-buildids", &mmap2_build_ids,
2381			    "Drop unused mmap events, make others mmap2 with build IDs"),
2382		OPT_BOOLEAN(0, "mmap2-buildid-all", &mmap2_build_id_all,
2383			    "Rewrite all mmap events as mmap2 events with build IDs"),
2384		OPT_STRING(0, "known-build-ids", &known_build_ids,
2385			   "buildid path [,buildid path...]",
2386			   "build-ids to use for given paths"),
2387		OPT_STRING('i', "input", &inject.input_name, "file",
2388			   "input file name"),
2389		OPT_STRING('o', "output", &inject.output.path, "file",
2390			   "output file name"),
2391		OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat,
2392			    "Merge sched-stat and sched-switch for getting events "
2393			    "where and how long tasks slept"),
2394#ifdef HAVE_JITDUMP
2395		OPT_BOOLEAN('j', "jit", &inject.jit_mode, "merge jitdump files into perf.data file"),
2396#endif
2397		OPT_INCR('v', "verbose", &verbose,
2398			 "be more verbose (show build ids, etc)"),
2399		OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
2400			   "file", "vmlinux pathname"),
2401		OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
2402			    "don't load vmlinux even if found"),
2403		OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file",
2404			   "kallsyms pathname"),
2405		OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
2406		OPT_CALLBACK_OPTARG(0, "itrace", &inject.itrace_synth_opts,
2407				    NULL, "opts", "Instruction Tracing options\n"
2408				    ITRACE_HELP,
2409				    itrace_parse_synth_opts),
2410		OPT_BOOLEAN(0, "strip", &inject.strip,
2411			    "strip non-synthesized events (use with --itrace)"),
2412		OPT_CALLBACK_OPTARG(0, "vm-time-correlation", &inject, NULL, "opts",
2413				    "correlate time between VM guests and the host",
2414				    parse_vm_time_correlation),
2415		OPT_CALLBACK_OPTARG(0, "guest-data", &inject, NULL, "opts",
2416				    "inject events from a guest perf.data file",
2417				    parse_guest_data),
2418		OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory",
2419			   "guest mount directory under which every guest os"
2420			   " instance has a subdir"),
2421		OPT_END()
2422	};
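	/*
	 * Typical invocations (hypothetical file names), based on the options
	 * above:
	 *
	 *   perf inject -b -i perf.data -o perf.data.bid       # add build IDs
	 *   perf inject -j -i perf.data -o perf.data.jit       # merge jitdump
	 *   perf inject --itrace -i perf.data -o perf.data.it  # decode AUX trace
	 */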
2423	const char * const inject_usage[] = {
2424		"perf inject [<options>]",
2425		NULL
2426	};
2427	bool ordered_events;
2428
2429	if (!inject.itrace_synth_opts.set) {
2430		/* Disable eager loading of kernel symbols that adds overhead to perf inject. */
2431		symbol_conf.lazy_load_kernel_maps = true;
2432	}
2433
2434#ifndef HAVE_JITDUMP
2435	set_option_nobuild(options, 'j', "jit", "NO_LIBELF=1", true);
2436#endif
2437	argc = parse_options(argc, argv, options, inject_usage, 0);
2438
2439	/*
2440	 * Any (unrecognized) arguments left?
2441	 */
2442	if (argc)
2443		usage_with_options(inject_usage, options);
2444
2445	if (inject.strip && !inject.itrace_synth_opts.set) {
2446		pr_err("--strip option requires --itrace option\n");
2447		return -1;
2448	}
2449
2450	if (symbol__validate_sym_arguments())
2451		return -1;
2452
2453	if (inject.in_place_update) {
2454		if (!strcmp(inject.input_name, "-")) {
2455			pr_err("Input file name required for in-place updating\n");
2456			return -1;
2457		}
2458		if (strcmp(inject.output.path, "-")) {
2459			pr_err("Output file name must not be specified for in-place updating\n");
2460			return -1;
2461		}
2462		if (!data.force && !inject.in_place_update_dry_run) {
2463			pr_err("The input file would be updated in place, "
2464				"the --force option is required.\n");
2465			return -1;
2466		}
2467		if (!inject.in_place_update_dry_run)
2468			data.in_place_update = true;
2469	} else {
2470		if (strcmp(inject.output.path, "-") && !inject.strip &&
2471		    has_kcore_dir(inject.input_name)) {
2472			inject.output.is_dir = true;
2473			inject.copy_kcore_dir = true;
2474		}
2475		if (perf_data__open(&inject.output)) {
2476			perror("failed to create output file");
2477			return -1;
2478		}
2479	}
2480	if (mmap2_build_ids)
2481		inject.build_id_style = BID_RWS__MMAP2_BUILDID_LAZY;
2482	if (mmap2_build_id_all)
2483		inject.build_id_style = BID_RWS__MMAP2_BUILDID_ALL;
2484	if (build_ids)
2485		inject.build_id_style = BID_RWS__INJECT_HEADER_LAZY;
2486	if (build_id_all)
2487		inject.build_id_style = BID_RWS__INJECT_HEADER_ALL;
2488
2489	data.path = inject.input_name;
2490
2491	ordered_events = inject.jit_mode || inject.sched_stat ||
2492		inject.build_id_style == BID_RWS__INJECT_HEADER_LAZY ||
2493		inject.build_id_style == BID_RWS__MMAP2_BUILDID_LAZY;
2494	perf_tool__init(&inject.tool, ordered_events);
2495	inject.tool.sample		= perf_event__repipe_sample;
2496	inject.tool.read		= perf_event__repipe_sample;
2497	inject.tool.mmap		= perf_event__repipe;
2498	inject.tool.mmap2		= perf_event__repipe;
2499	inject.tool.comm		= perf_event__repipe;
2500	inject.tool.namespaces		= perf_event__repipe;
2501	inject.tool.cgroup		= perf_event__repipe;
2502	inject.tool.fork		= perf_event__repipe;
2503	inject.tool.exit		= perf_event__repipe;
2504	inject.tool.lost		= perf_event__repipe;
2505	inject.tool.lost_samples	= perf_event__repipe;
2506	inject.tool.aux			= perf_event__repipe;
2507	inject.tool.itrace_start	= perf_event__repipe;
2508	inject.tool.aux_output_hw_id	= perf_event__repipe;
2509	inject.tool.context_switch	= perf_event__repipe;
2510	inject.tool.throttle		= perf_event__repipe;
2511	inject.tool.unthrottle		= perf_event__repipe;
2512	inject.tool.ksymbol		= perf_event__repipe;
2513	inject.tool.bpf			= perf_event__repipe;
2514	inject.tool.text_poke		= perf_event__repipe;
2515	inject.tool.attr		= perf_event__repipe_attr;
2516	inject.tool.event_update	= perf_event__repipe_event_update;
2517	inject.tool.tracing_data	= perf_event__repipe_op2_synth;
2518	inject.tool.finished_round	= perf_event__repipe_oe_synth;
2519	inject.tool.build_id		= perf_event__repipe_op2_synth;
2520	inject.tool.id_index		= perf_event__repipe_op2_synth;
2521	inject.tool.auxtrace_info	= perf_event__repipe_op2_synth;
2522	inject.tool.auxtrace_error	= perf_event__repipe_op2_synth;
2523	inject.tool.time_conv		= perf_event__repipe_op2_synth;
2524	inject.tool.thread_map		= perf_event__repipe_op2_synth;
2525	inject.tool.cpu_map		= perf_event__repipe_op2_synth;
2526	inject.tool.stat_config		= perf_event__repipe_op2_synth;
2527	inject.tool.stat		= perf_event__repipe_op2_synth;
2528	inject.tool.stat_round		= perf_event__repipe_op2_synth;
2529	inject.tool.feature		= perf_event__repipe_op2_synth;
2530	inject.tool.finished_init	= perf_event__repipe_op2_synth;
2531	inject.tool.compressed		= perf_event__repipe_op4_synth;
2532	inject.tool.auxtrace		= perf_event__repipe_auxtrace;
2533	inject.tool.dont_split_sample_group = true;
2534	inject.session = __perf_session__new(&data, &inject.tool,
2535					     /*trace_event_repipe=*/inject.output.is_pipe);
2536
2537	if (IS_ERR(inject.session)) {
2538		ret = PTR_ERR(inject.session);
2539		goto out_close_output;
2540	}
2541
2542	if (zstd_init(&(inject.session->zstd_data), 0) < 0)
2543		pr_warning("Decompression initialization failed.\n");
2544
2545	/* Save original section info before feature bits change */
2546	ret = save_section_info(&inject);
2547	if (ret)
2548		goto out_delete;
2549
2550	if (inject.output.is_pipe) {
2551		ret = perf_header__write_pipe(perf_data__fd(&inject.output));
2552		if (ret < 0) {
2553			pr_err("Couldn't write a new pipe header.\n");
2554			goto out_delete;
2555		}
2556
2557		/*
2558		 * If the input is already a pipe then the features and
2559		 * attributes don't need synthesizing, they will be present in
2560		 * the input.
2561		 */
2562		if (!data.is_pipe) {
2563			ret = perf_event__synthesize_for_pipe(&inject.tool,
2564							      inject.session,
2565							      &inject.output,
2566							      perf_event__repipe);
2567			if (ret < 0)
2568				goto out_delete;
2569		}
2570	}
2571
2572	if (inject.build_id_style == BID_RWS__INJECT_HEADER_LAZY ||
2573	    inject.build_id_style == BID_RWS__MMAP2_BUILDID_LAZY) {
2574		/*
2575		 * Require timestamp ordering to make sure the mmap records are
2576		 * ordered correctly, especially due to jitted code mmaps. We
2577		 * cannot generate the buildid hit list and inject the jit
2578		 * mmaps at the same time for now.
2579		 */
2580		inject.tool.ordering_requires_timestamps = true;
2581	}
2582	if (inject.build_id_style != BID_RWS__NONE && known_build_ids != NULL) {
2583		inject.known_build_ids =
2584			perf_inject__parse_known_build_ids(known_build_ids);
2585
2586		if (inject.known_build_ids == NULL) {
2587			pr_err("Couldn't parse known build ids.\n");
2588			goto out_delete;
2589		}
2590	}
2591
2592#ifdef HAVE_JITDUMP
2593	if (inject.jit_mode) {
2594		inject.tool.mmap2	   = perf_event__repipe_mmap2;
2595		inject.tool.mmap	   = perf_event__repipe_mmap;
2596		inject.tool.ordering_requires_timestamps = true;
2597		/*
2598		 * JIT MMAP injection injects all MMAP events in one go, so it
2599		 * does not obey finished_round semantics.
2600		 */
2601		inject.tool.finished_round = perf_event__drop_oe;
2602	}
2603#endif
2604	ret = symbol__init(&inject.session->header.env);
2605	if (ret < 0)
2606		goto out_delete;
2607
2608	ret = __cmd_inject(&inject);
2609
2610	guest_session__exit(&inject.guest_session);
2611
2612out_delete:
2613	strlist__delete(inject.known_build_ids);
2614	zstd_fini(&(inject.session->zstd_data));
2615	perf_session__delete(inject.session);
2616out_close_output:
2617	if (!inject.in_place_update)
2618		perf_data__close(&inject.output);
2619	free(inject.itrace_synth_opts.vm_tm_corr_args);
2620	free(inject.event_copy);
2621	free(inject.guest_session.ev.event_buf);
2622	return ret;
2623}