// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#include <inttypes.h>
#include <linux/perf_event.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/zalloc.h>

#include "session.h"
#include "machine.h"
#include "memswap.h"
#include "sort.h"
#include "tool.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
#include "map.h"
#include "color.h"
#include "thread.h"
#include "thread-stack.h"
#include "symbol.h"
#include "callchain.h"
#include "dso.h"
#include "debug.h"
#include "auxtrace.h"
#include "tsc.h"
#include "intel-pt.h"
#include "config.h"
#include "util/perf_api_probe.h"
#include "util/synthetic-events.h"
#include "time-utils.h"

#include "../arch/x86/include/uapi/asm/perf_regs.h"

#include "intel-pt-decoder/intel-pt-log.h"
#include "intel-pt-decoder/intel-pt-decoder.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-pt-decoder/intel-pt-pkt-decoder.h"

#define MAX_TIMESTAMP (~0ULL)

#define INTEL_PT_CFG_PASS_THRU	BIT_ULL(0)
#define INTEL_PT_CFG_PWR_EVT_EN	BIT_ULL(4)
#define INTEL_PT_CFG_BRANCH_EN	BIT_ULL(13)
#define INTEL_PT_CFG_EVT_EN	BIT_ULL(31)
#define INTEL_PT_CFG_TNT_DIS	BIT_ULL(55)

struct range {
	u64 start;
	u64 end;
};

struct intel_pt {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	u32 auxtrace_type;
	struct perf_session *session;
	struct machine *machine;
	struct evsel *switch_evsel;
	struct thread *unknown_thread;
	bool timeless_decoding;
	bool sampling_mode;
	bool snapshot_mode;
	bool per_cpu_mmaps;
	bool have_tsc;
	bool data_queued;
	bool est_tsc;
	bool sync_switch;
	bool sync_switch_not_supported;
	bool mispred_all;
	bool use_thread_stack;
	bool callstack;
	bool cap_event_trace;
	bool have_guest_sideband;
	unsigned int br_stack_sz;
	unsigned int br_stack_sz_plus;
	int have_sched_switch;
	u32 pmu_type;
	u64 kernel_start;
	u64 switch_ip;
	u64 ptss_ip;
	u64 first_timestamp;

	struct perf_tsc_conversion tc;
	bool cap_user_time_zero;

	struct itrace_synth_opts synth_opts;

	bool sample_instructions;
	u64 instructions_sample_type;
	u64 instructions_id;

	bool sample_cycles;
	u64 cycles_sample_type;
	u64 cycles_id;

	bool sample_branches;
	u32 branches_filter;
	u64 branches_sample_type;
	u64 branches_id;

	bool sample_transactions;
	u64 transactions_sample_type;
	u64 transactions_id;

	bool sample_ptwrites;
	u64 ptwrites_sample_type;
	u64 ptwrites_id;

	bool sample_pwr_events;
	u64 pwr_events_sample_type;
	u64 mwait_id;
	u64 pwre_id;
	u64 exstop_id;
	u64 pwrx_id;
	u64 cbr_id;
	u64 psb_id;

	bool single_pebs;
	bool sample_pebs;
	struct evsel *pebs_evsel;

	u64 evt_sample_type;
	u64 evt_id;

	u64 iflag_chg_sample_type;
	u64 iflag_chg_id;

	u64 tsc_bit;
	u64 mtc_bit;
	u64 mtc_freq_bits;
	u32 tsc_ctc_ratio_n;
	u32 tsc_ctc_ratio_d;
	u64 cyc_bit;
	u64 noretcomp_bit;
	unsigned max_non_turbo_ratio;
	unsigned cbr2khz;
	int max_loops;

	unsigned long num_events;

	char *filter;
	struct addr_filters filts;

	struct range *time_ranges;
	unsigned int range_cnt;

	struct ip_callchain *chain;
	struct branch_stack *br_stack;

	u64 dflt_tsc_offset;
	struct rb_root vmcs_info;
};

enum switch_state {
	INTEL_PT_SS_NOT_TRACING,
	INTEL_PT_SS_UNKNOWN,
	INTEL_PT_SS_TRACING,
	INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
	INTEL_PT_SS_EXPECTING_SWITCH_IP,
};

/* applicable_counters is 64-bits */
#define INTEL_PT_MAX_PEBS 64

struct intel_pt_pebs_event {
	struct evsel *evsel;
	u64 id;
};

struct intel_pt_queue {
	struct intel_pt *pt;
	unsigned int queue_nr;
	struct auxtrace_buffer *buffer;
	struct auxtrace_buffer *old_buffer;
	void *decoder;
	const struct intel_pt_state *state;
	struct ip_callchain *chain;
	struct branch_stack *last_branch;
	union perf_event *event_buf;
	bool on_heap;
	bool stop;
	bool step_through_buffers;
	bool use_buffer_pid_tid;
	bool sync_switch;
	bool sample_ipc;
	pid_t pid, tid;
	int cpu;
	int switch_state;
	pid_t next_tid;
	struct thread *thread;
	struct machine *guest_machine;
	struct thread *guest_thread;
	struct thread *unknown_guest_thread;
	pid_t guest_machine_pid;
	pid_t guest_pid;
	pid_t guest_tid;
	int vcpu;
	bool exclude_kernel;
	bool have_sample;
	u64 time;
	u64 timestamp;
	u64 sel_timestamp;
	bool sel_start;
	unsigned int sel_idx;
	u32 flags;
	u16 insn_len;
	u64 last_insn_cnt;
	u64 ipc_insn_cnt;
	u64 ipc_cyc_cnt;
	u64 last_in_insn_cnt;
	u64 last_in_cyc_cnt;
	u64 last_cy_insn_cnt;
	u64 last_cy_cyc_cnt;
	u64 last_br_insn_cnt;
	u64 last_br_cyc_cnt;
	unsigned int cbr_seen;
	char insn[INTEL_PT_INSN_BUF_SZ];
	struct intel_pt_pebs_event pebs[INTEL_PT_MAX_PEBS];
};

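/*
 * Hex dump the raw Intel PT data one packet at a time, printing a description
 * of each decoded packet. Bytes that do not decode are dumped one at a time
 * as "Bad packet!".
 */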
static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
			  unsigned char *buf, size_t len)
{
	struct intel_pt_pkt packet;
	size_t pos = 0;
	int ret, pkt_len, i;
	char desc[INTEL_PT_PKT_DESC_MAX];
	const char *color = PERF_COLOR_BLUE;
	enum intel_pt_pkt_ctx ctx = INTEL_PT_NO_CTX;

	color_fprintf(stdout, color,
		      ". ... Intel Processor Trace data: size %zu bytes\n",
		      len);

	while (len) {
		ret = intel_pt_get_packet(buf, len, &packet, &ctx);
		if (ret > 0)
			pkt_len = ret;
		else
			pkt_len = 1;
		printf(".");
		color_fprintf(stdout, color, "  %08x: ", pos);
		for (i = 0; i < pkt_len; i++)
			color_fprintf(stdout, color, " %02x", buf[i]);
		for (; i < 16; i++)
			color_fprintf(stdout, color, "   ");
		if (ret > 0) {
			ret = intel_pt_pkt_desc(&packet, desc,
						INTEL_PT_PKT_DESC_MAX);
			if (ret > 0)
				color_fprintf(stdout, color, " %s\n", desc);
		} else {
			color_fprintf(stdout, color, " Bad packet!\n");
		}
		pos += pkt_len;
		buf += pkt_len;
		len -= pkt_len;
	}
}

static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
				size_t len)
{
	printf(".\n");
	intel_pt_dump(pt, buf, len);
}

static void intel_pt_log_event(union perf_event *event)
{
	FILE *f = intel_pt_log_fp();

	if (!intel_pt_enable_logging || !f)
		return;

	perf_event__fprintf(event, NULL, f);
}

static void intel_pt_dump_sample(struct perf_session *session,
				 struct perf_sample *sample)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	printf("\n");
	intel_pt_dump(pt, sample->aux_sample.data, sample->aux_sample.size);
}

static bool intel_pt_log_events(struct intel_pt *pt, u64 tm)
{
	struct perf_time_interval *range = pt->synth_opts.ptime_range;
	int n = pt->synth_opts.range_num;

	if (pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS)
		return true;

	if (pt->synth_opts.log_minus_flags & AUXTRACE_LOG_FLG_ALL_PERF_EVTS)
		return false;

	/* perf_time__ranges_skip_sample does not work if time is zero */
	if (!tm)
		tm = 1;

	return !n || !perf_time__ranges_skip_sample(range, n, tm);
}

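/*
 * Find the rb-tree entry for a given VMCS pointer, creating one on a miss.
 * A new entry inherits the default TSC offset, and a non-zero default also
 * marks the entry's TSC offset as reliable.
 */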
static struct intel_pt_vmcs_info *intel_pt_findnew_vmcs(struct rb_root *rb_root,
							u64 vmcs,
							u64 dflt_tsc_offset)
{
	struct rb_node **p = &rb_root->rb_node;
	struct rb_node *parent = NULL;
	struct intel_pt_vmcs_info *v;

	while (*p) {
		parent = *p;
		v = rb_entry(parent, struct intel_pt_vmcs_info, rb_node);

		if (v->vmcs == vmcs)
			return v;

		if (vmcs < v->vmcs)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	v = zalloc(sizeof(*v));
	if (v) {
		v->vmcs = vmcs;
		v->tsc_offset = dflt_tsc_offset;
		v->reliable = dflt_tsc_offset;

		rb_link_node(&v->rb_node, parent, p);
		rb_insert_color(&v->rb_node, rb_root);
	}

	return v;
}

static struct intel_pt_vmcs_info *intel_pt_findnew_vmcs_info(void *data, uint64_t vmcs)
{
	struct intel_pt_queue *ptq = data;
	struct intel_pt *pt = ptq->pt;

	if (!vmcs && !pt->dflt_tsc_offset)
		return NULL;

	return intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, pt->dflt_tsc_offset);
}

static void intel_pt_free_vmcs_info(struct intel_pt *pt)
{
	struct intel_pt_vmcs_info *v;
	struct rb_node *n;

	n = rb_first(&pt->vmcs_info);
	while (n) {
		v = rb_entry(n, struct intel_pt_vmcs_info, rb_node);
		n = rb_next(n);
		rb_erase(&v->rb_node, &pt->vmcs_info);
		free(v);
	}
}

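/*
 * Buffers from snapshot or sampling mode can overlap. Adjust buffer 'b' so
 * decoding starts at the first byte that does not repeat data already seen
 * at the end of buffer 'a', noting whether the two are then consecutive.
 */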
static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
				   struct auxtrace_buffer *b)
{
	bool consecutive = false;
	void *start;

	start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
				      pt->have_tsc, &consecutive,
				      pt->synth_opts.vm_time_correlation);
	if (!start)
		return -EINVAL;
	/*
	 * In the case of vm_time_correlation, the overlap might contain TSC
	 * packets that will not be fixed, and that will then no longer work for
	 * overlap detection. Avoid that by zeroing out the overlap.
	 */
	if (pt->synth_opts.vm_time_correlation)
		memset(b->data, 0, start - b->data);
	b->use_size = b->data + b->size - start;
	b->use_data = start;
	if (b->use_size && consecutive)
		b->consecutive = true;
	return 0;
}

static int intel_pt_get_buffer(struct intel_pt_queue *ptq,
			       struct auxtrace_buffer *buffer,
			       struct auxtrace_buffer *old_buffer,
			       struct intel_pt_buffer *b)
{
	bool might_overlap;

	if (!buffer->data) {
		int fd = perf_data__fd(ptq->pt->session->data);

		buffer->data = auxtrace_buffer__get_data(buffer, fd);
		if (!buffer->data)
			return -ENOMEM;
	}

	might_overlap = ptq->pt->snapshot_mode || ptq->pt->sampling_mode;
	if (might_overlap && !buffer->consecutive && old_buffer &&
	    intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
		return -ENOMEM;

	if (buffer->use_data) {
		b->len = buffer->use_size;
		b->buf = buffer->use_data;
	} else {
		b->len = buffer->size;
		b->buf = buffer->data;
	}
	b->ref_timestamp = buffer->reference;

	if (!old_buffer || (might_overlap && !buffer->consecutive)) {
		b->consecutive = false;
		b->trace_nr = buffer->buffer_nr + 1;
	} else {
		b->consecutive = true;
	}

	return 0;
}

/* Do not drop buffers with references - refer intel_pt_get_trace() */
static void intel_pt_lookahead_drop_buffer(struct intel_pt_queue *ptq,
					   struct auxtrace_buffer *buffer)
{
	if (!buffer || buffer == ptq->buffer || buffer == ptq->old_buffer)
		return;

	auxtrace_buffer__drop_data(buffer);
}

/* Must be serialized with respect to intel_pt_get_trace() */
static int intel_pt_lookahead(void *data, intel_pt_lookahead_cb_t cb,
			      void *cb_data)
{
	struct intel_pt_queue *ptq = data;
	struct auxtrace_buffer *buffer = ptq->buffer;
	struct auxtrace_buffer *old_buffer = ptq->old_buffer;
	struct auxtrace_queue *queue;
	int err = 0;

	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];

	while (1) {
		struct intel_pt_buffer b = { .len = 0 };

		buffer = auxtrace_buffer__next(queue, buffer);
		if (!buffer)
			break;

		err = intel_pt_get_buffer(ptq, buffer, old_buffer, &b);
		if (err)
			break;

		if (b.len) {
			intel_pt_lookahead_drop_buffer(ptq, old_buffer);
			old_buffer = buffer;
		} else {
			intel_pt_lookahead_drop_buffer(ptq, buffer);
			continue;
		}

		err = cb(&b, cb_data);
		if (err)
			break;
	}

	if (buffer != old_buffer)
		intel_pt_lookahead_drop_buffer(ptq, buffer);
	intel_pt_lookahead_drop_buffer(ptq, old_buffer);

	return err;
}

/*
 * This function assumes data is processed sequentially only.
 * Must be serialized with respect to intel_pt_lookahead()
 */
static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct auxtrace_buffer *buffer = ptq->buffer;
	struct auxtrace_buffer *old_buffer = ptq->old_buffer;
	struct auxtrace_queue *queue;
	int err;

	if (ptq->stop) {
		b->len = 0;
		return 0;
	}

	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];

	buffer = auxtrace_buffer__next(queue, buffer);
	if (!buffer) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		b->len = 0;
		return 0;
	}

	ptq->buffer = buffer;

	err = intel_pt_get_buffer(ptq, buffer, old_buffer, b);
	if (err)
		return err;

	if (ptq->step_through_buffers)
		ptq->stop = true;

	if (b->len) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		ptq->old_buffer = buffer;
	} else {
		auxtrace_buffer__drop_data(buffer);
		return intel_pt_get_trace(b, data);
	}

	return 0;
}

struct intel_pt_cache_entry {
	struct auxtrace_cache_entry	entry;
	u64				insn_cnt;
	u64				byte_cnt;
	enum intel_pt_insn_op		op;
	enum intel_pt_insn_branch	branch;
	bool				emulated_ptwrite;
	int				length;
	int32_t				rel;
	char				insn[INTEL_PT_INSN_BUF_SZ];
};

static int intel_pt_config_div(const char *var, const char *value, void *data)
{
	int *d = data;
	long val;

	if (!strcmp(var, "intel-pt.cache-divisor")) {
		val = strtol(value, NULL, 0);
		if (val > 0 && val <= INT_MAX)
			*d = val;
	}

	return 0;
}

static int intel_pt_cache_divisor(void)
{
	static int d;

	if (d)
		return d;

	perf_config(intel_pt_config_div, &d);

	if (!d)
		d = 64;

	return d;
}

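/*
 * Size, in bits of hash table index, for the per-dso instruction cache:
 * dso size / divisor, rounded to a power of 2 and clamped to the range
 * 10 to 21 bits (1K to 2M entries).
 */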
static unsigned int intel_pt_cache_size(struct dso *dso,
					struct machine *machine)
{
	off_t size;

	size = dso__data_size(dso, machine);
	size /= intel_pt_cache_divisor();
	if (size < 1000)
		return 10;
	if (size > (1 << 21))
		return 21;
	return 32 - __builtin_clz(size);
}

static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
					     struct machine *machine)
{
	struct auxtrace_cache *c;
	unsigned int bits;

	if (dso->auxtrace_cache)
		return dso->auxtrace_cache;

	bits = intel_pt_cache_size(dso, machine);

	/* Ignoring cache creation failure */
	c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);

	dso->auxtrace_cache = c;

	return c;
}

static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
			      u64 offset, u64 insn_cnt, u64 byte_cnt,
			      struct intel_pt_insn *intel_pt_insn)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);
	struct intel_pt_cache_entry *e;
	int err;

	if (!c)
		return -ENOMEM;

	e = auxtrace_cache__alloc_entry(c);
	if (!e)
		return -ENOMEM;

	e->insn_cnt = insn_cnt;
	e->byte_cnt = byte_cnt;
	e->op = intel_pt_insn->op;
	e->branch = intel_pt_insn->branch;
	e->emulated_ptwrite = intel_pt_insn->emulated_ptwrite;
	e->length = intel_pt_insn->length;
	e->rel = intel_pt_insn->rel;
	memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ);

	err = auxtrace_cache__add(c, offset, &e->entry);
	if (err)
		auxtrace_cache__free_entry(c, e);

	return err;
}

static struct intel_pt_cache_entry *
intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);

	if (!c)
		return NULL;

	return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
}

static void intel_pt_cache_invalidate(struct dso *dso, struct machine *machine,
				      u64 offset)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);

	if (!c)
		return;

	auxtrace_cache__remove(dso->auxtrace_cache, offset);
}

static inline bool intel_pt_guest_kernel_ip(uint64_t ip)
{
	/* Assumes 64-bit kernel */
	return ip & (1ULL << 63);
}

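/*
 * Map an ip to a perf cpumode. "nr" means VMX non-root operation, i.e. the
 * ip comes from a guest, in which case kernel vs user is judged by bit 63
 * rather than by comparison with the host kernel start address.
 */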
static inline u8 intel_pt_nr_cpumode(struct intel_pt_queue *ptq, uint64_t ip, bool nr)
{
	if (nr) {
		return intel_pt_guest_kernel_ip(ip) ?
		       PERF_RECORD_MISC_GUEST_KERNEL :
		       PERF_RECORD_MISC_GUEST_USER;
	}

	return ip >= ptq->pt->kernel_start ?
	       PERF_RECORD_MISC_KERNEL :
	       PERF_RECORD_MISC_USER;
}

static inline u8 intel_pt_cpumode(struct intel_pt_queue *ptq, uint64_t from_ip, uint64_t to_ip)
{
	/* No support for non-zero CS base */
	if (from_ip)
		return intel_pt_nr_cpumode(ptq, from_ip, ptq->state->from_nr);
	return intel_pt_nr_cpumode(ptq, to_ip, ptq->state->to_nr);
}

static int intel_pt_get_guest(struct intel_pt_queue *ptq)
{
	struct machines *machines = &ptq->pt->session->machines;
	struct machine *machine;
	pid_t pid = ptq->pid <= 0 ? DEFAULT_GUEST_KERNEL_ID : ptq->pid;

	if (ptq->guest_machine && pid == ptq->guest_machine->pid)
		return 0;

	ptq->guest_machine = NULL;
	thread__zput(ptq->unknown_guest_thread);

	if (symbol_conf.guest_code) {
		thread__zput(ptq->guest_thread);
		ptq->guest_thread = machines__findnew_guest_code(machines, pid);
	}

	machine = machines__find_guest(machines, pid);
	if (!machine)
		return -1;

	ptq->unknown_guest_thread = machine__idle_thread(machine);
	if (!ptq->unknown_guest_thread)
		return -1;

	ptq->guest_machine = machine;

	return 0;
}

static inline bool intel_pt_jmp_16(struct intel_pt_insn *intel_pt_insn)
{
	return intel_pt_insn->rel == 16 && intel_pt_insn->branch == INTEL_PT_BR_UNCONDITIONAL;
}

#define PTWRITE_MAGIC		"\x0f\x0bperf,ptwrite  "
#define PTWRITE_MAGIC_LEN	16

static bool intel_pt_emulated_ptwrite(struct dso *dso, struct machine *machine, u64 offset)
{
	unsigned char buf[PTWRITE_MAGIC_LEN];
	ssize_t len;

	len = dso__data_read_offset(dso, machine, offset, buf, PTWRITE_MAGIC_LEN);
	if (len == PTWRITE_MAGIC_LEN && !memcmp(buf, PTWRITE_MAGIC, PTWRITE_MAGIC_LEN)) {
		intel_pt_log("Emulated ptwrite signature found\n");
		return true;
	}
	intel_pt_log("Emulated ptwrite signature not found\n");
	return false;
}

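/*
 * Walk instructions from *ip until a branch, to_ip, or max_insn_cnt is
 * reached, reading the code from the dso. A completed walk that stays within
 * one map is cached against the dso, keyed by the starting offset, so hot
 * code can be replayed without re-reading and re-decoding it.
 */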
static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
				   uint64_t *insn_cnt_ptr, uint64_t *ip,
				   uint64_t to_ip, uint64_t max_insn_cnt,
				   void *data)
{
	struct intel_pt_queue *ptq = data;
	struct machine *machine = ptq->pt->machine;
	struct thread *thread;
	struct addr_location al;
	unsigned char buf[INTEL_PT_INSN_BUF_SZ];
	ssize_t len;
	int x86_64, ret = 0;
	u8 cpumode;
	u64 offset, start_offset, start_ip;
	u64 insn_cnt = 0;
	bool one_map = true;
	bool nr;


	addr_location__init(&al);
	intel_pt_insn->length = 0;

	if (to_ip && *ip == to_ip)
		goto out_no_cache;

	nr = ptq->state->to_nr;
	cpumode = intel_pt_nr_cpumode(ptq, *ip, nr);

	if (nr) {
		if (ptq->pt->have_guest_sideband) {
			if (!ptq->guest_machine || ptq->guest_machine_pid != ptq->pid) {
				intel_pt_log("ERROR: guest sideband but no guest machine\n");
				ret = -EINVAL;
				goto out_ret;
			}
		} else if ((!symbol_conf.guest_code && cpumode != PERF_RECORD_MISC_GUEST_KERNEL) ||
			   intel_pt_get_guest(ptq)) {
			intel_pt_log("ERROR: no guest machine\n");
			ret = -EINVAL;
			goto out_ret;
		}
		machine = ptq->guest_machine;
		thread = ptq->guest_thread;
		if (!thread) {
			if (cpumode != PERF_RECORD_MISC_GUEST_KERNEL) {
				intel_pt_log("ERROR: no guest thread\n");
				ret = -EINVAL;
				goto out_ret;
			}
			thread = ptq->unknown_guest_thread;
		}
	} else {
		thread = ptq->thread;
		if (!thread) {
			if (cpumode != PERF_RECORD_MISC_KERNEL) {
				intel_pt_log("ERROR: no thread\n");
				ret = -EINVAL;
				goto out_ret;
			}
			thread = ptq->pt->unknown_thread;
		}
	}

	while (1) {
		struct dso *dso;

		if (!thread__find_map(thread, cpumode, *ip, &al) || !map__dso(al.map)) {
			if (al.map)
				intel_pt_log("ERROR: thread has no dso for %#" PRIx64 "\n", *ip);
			else
				intel_pt_log("ERROR: thread has no map for %#" PRIx64 "\n", *ip);
			addr_location__exit(&al);
			ret = -EINVAL;
			goto out_ret;
		}
		dso = map__dso(al.map);

		if (dso->data.status == DSO_DATA_STATUS_ERROR &&
			dso__data_status_seen(dso, DSO_DATA_STATUS_SEEN_ITRACE)) {
			ret = -ENOENT;
			goto out_ret;
		}

		offset = map__map_ip(al.map, *ip);

		if (!to_ip && one_map) {
			struct intel_pt_cache_entry *e;

			e = intel_pt_cache_lookup(dso, machine, offset);
			if (e &&
			    (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) {
				*insn_cnt_ptr = e->insn_cnt;
				*ip += e->byte_cnt;
				intel_pt_insn->op = e->op;
				intel_pt_insn->branch = e->branch;
				intel_pt_insn->emulated_ptwrite = e->emulated_ptwrite;
				intel_pt_insn->length = e->length;
				intel_pt_insn->rel = e->rel;
				memcpy(intel_pt_insn->buf, e->insn, INTEL_PT_INSN_BUF_SZ);
				intel_pt_log_insn_no_data(intel_pt_insn, *ip);
				ret = 0;
				goto out_ret;
			}
		}

		start_offset = offset;
		start_ip = *ip;

		/* Load maps to ensure dso->is_64_bit has been updated */
		map__load(al.map);

		x86_64 = dso->is_64_bit;

		while (1) {
			len = dso__data_read_offset(dso, machine,
						    offset, buf,
						    INTEL_PT_INSN_BUF_SZ);
			if (len <= 0) {
				intel_pt_log("ERROR: failed to read at offset %#" PRIx64 " ",
					     offset);
				if (intel_pt_enable_logging)
					dso__fprintf(dso, intel_pt_log_fp());
				ret = -EINVAL;
				goto out_ret;
			}

			if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn)) {
				ret = -EINVAL;
				goto out_ret;
			}

			intel_pt_log_insn(intel_pt_insn, *ip);

			insn_cnt += 1;

			if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH) {
				bool eptw;
				u64 offs;

				if (!intel_pt_jmp_16(intel_pt_insn))
					goto out;
				/* Check for emulated ptwrite */
				offs = offset + intel_pt_insn->length;
				eptw = intel_pt_emulated_ptwrite(dso, machine, offs);
				intel_pt_insn->emulated_ptwrite = eptw;
				goto out;
			}

			if (max_insn_cnt && insn_cnt >= max_insn_cnt)
				goto out_no_cache;

			*ip += intel_pt_insn->length;

			if (to_ip && *ip == to_ip) {
				intel_pt_insn->length = 0;
				goto out_no_cache;
			}

			if (*ip >= map__end(al.map))
				break;

			offset += intel_pt_insn->length;
		}
		one_map = false;
	}
out:
	*insn_cnt_ptr = insn_cnt;

	if (!one_map)
		goto out_no_cache;

	/*
	 * Didn't lookup in the 'to_ip' case, so do it now to prevent duplicate
	 * entries.
	 */
	if (to_ip) {
		struct intel_pt_cache_entry *e;

		e = intel_pt_cache_lookup(map__dso(al.map), machine, start_offset);
		if (e)
			goto out_ret;
	}

	/* Ignore cache errors */
	intel_pt_cache_add(map__dso(al.map), machine, start_offset, insn_cnt,
			   *ip - start_ip, intel_pt_insn);

out_ret:
	addr_location__exit(&al);
	return ret;

out_no_cache:
	*insn_cnt_ptr = insn_cnt;
	addr_location__exit(&al);
	return 0;
}

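/*
 * Check a TIP.PGD ip against the address filters. Returns true if tracing
 * stopped there, i.e. a stop filter was hit, or start filters exist but
 * none of them matched.
 */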
static bool intel_pt_match_pgd_ip(struct intel_pt *pt, uint64_t ip,
				  uint64_t offset, const char *filename)
{
	struct addr_filter *filt;
	bool have_filter   = false;
	bool hit_tracestop = false;
	bool hit_filter    = false;

	list_for_each_entry(filt, &pt->filts.head, list) {
		if (filt->start)
			have_filter = true;

		if ((filename && !filt->filename) ||
		    (!filename && filt->filename) ||
		    (filename && strcmp(filename, filt->filename)))
			continue;

		if (!(offset >= filt->addr && offset < filt->addr + filt->size))
			continue;

		intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s hit filter: %s offset %#"PRIx64" size %#"PRIx64"\n",
			     ip, offset, filename ? filename : "[kernel]",
			     filt->start ? "filter" : "stop",
			     filt->addr, filt->size);

		if (filt->start)
			hit_filter = true;
		else
			hit_tracestop = true;
	}

	if (!hit_tracestop && !hit_filter)
		intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s is not in a filter region\n",
			     ip, offset, filename ? filename : "[kernel]");

	return hit_tracestop || (have_filter && !hit_filter);
}

static int __intel_pt_pgd_ip(uint64_t ip, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct thread *thread;
	struct addr_location al;
	u8 cpumode;
	u64 offset;
	int res;

	if (ptq->state->to_nr) {
		if (intel_pt_guest_kernel_ip(ip))
			return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);
		/* No support for decoding guest user space */
		return -EINVAL;
	} else if (ip >= ptq->pt->kernel_start) {
		return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);
	}

	cpumode = PERF_RECORD_MISC_USER;

	thread = ptq->thread;
	if (!thread)
		return -EINVAL;

	addr_location__init(&al);
	if (!thread__find_map(thread, cpumode, ip, &al) || !map__dso(al.map))
		return -EINVAL;

	offset = map__map_ip(al.map, ip);

	res = intel_pt_match_pgd_ip(ptq->pt, ip, offset, map__dso(al.map)->long_name);
	addr_location__exit(&al);
	return res;
}

static bool intel_pt_pgd_ip(uint64_t ip, void *data)
{
	return __intel_pt_pgd_ip(ip, data) > 0;
}

static bool intel_pt_get_config(struct intel_pt *pt,
				struct perf_event_attr *attr, u64 *config)
{
	if (attr->type == pt->pmu_type) {
		if (config)
			*config = attr->config;
		return true;
	}

	return false;
}

static bool intel_pt_exclude_kernel(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
		    !evsel->core.attr.exclude_kernel)
			return false;
	}
	return true;
}

static bool intel_pt_return_compression(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	if (!pt->noretcomp_bit)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
		    (config & pt->noretcomp_bit))
			return false;
	}
	return true;
}

static bool intel_pt_branch_enable(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
		    (config & INTEL_PT_CFG_PASS_THRU) &&
		    !(config & INTEL_PT_CFG_BRANCH_EN))
			return false;
	}
	return true;
}

static bool intel_pt_disabled_tnt(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
		    config & INTEL_PT_CFG_TNT_DIS)
			return true;
	}
	return false;
}

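/*
 * Extract the MTC period from the event config: the value of the mtc_period
 * bit-field, located by shifting down to the lowest set bit of
 * mtc_freq_bits.
 */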
static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
{
	struct evsel *evsel;
	unsigned int shift;
	u64 config;

	if (!pt->mtc_freq_bits)
		return 0;

	for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
		config >>= 1;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config))
			return (config & pt->mtc_freq_bits) >> shift;
	}
	return 0;
}

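/*
 * "Timeless" decoding means samples are synthesized without timestamps,
 * which is the case when TSC packets were not enabled, perf time conversion
 * is unavailable, events do not sample PERF_SAMPLE_TIME, or the user asked
 * for it explicitly.
 */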
static bool intel_pt_timeless_decoding(struct intel_pt *pt)
{
	struct evsel *evsel;
	bool timeless_decoding = true;
	u64 config;

	if (!pt->tsc_bit || !pt->cap_user_time_zero || pt->synth_opts.timeless_decoding)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
			return true;
		if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
			if (config & pt->tsc_bit)
				timeless_decoding = false;
			else
				return true;
		}
	}
	return timeless_decoding;
}

static bool intel_pt_tracing_kernel(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, NULL) &&
		    !evsel->core.attr.exclude_kernel)
			return true;
	}
	return false;
}

static bool intel_pt_have_tsc(struct intel_pt *pt)
{
	struct evsel *evsel;
	bool have_tsc = false;
	u64 config;

	if (!pt->tsc_bit)
		return false;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config)) {
			if (config & pt->tsc_bit)
				have_tsc = true;
			else
				return false;
		}
	}
	return have_tsc;
}

static bool intel_pt_have_mtc(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config) &&
		    (config & pt->mtc_bit))
			return true;
	}
	return false;
}

static bool intel_pt_sampling_mode(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if ((evsel->core.attr.sample_type & PERF_SAMPLE_AUX) &&
		    evsel->core.attr.aux_sample_size)
			return true;
	}
	return false;
}

static u64 intel_pt_ctl(struct intel_pt *pt)
{
	struct evsel *evsel;
	u64 config;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->core.attr, &config))
			return config;
	}
	return 0;
}

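/*
 * Invert the perf time conversion (time = (ticks * time_mult) >> time_shift)
 * to convert nanoseconds to TSC ticks, i.e. (ns << time_shift) / time_mult.
 * The division is split into quotient and remainder to avoid overflowing the
 * left shift. E.g. with time_mult 250 and time_shift 8, 1000 ns converts to
 * (4 << 8) + (0 << 8) / 250 = 1024 ticks.
 */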
static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
{
	u64 quot, rem;

	quot = ns / pt->tc.time_mult;
	rem  = ns % pt->tc.time_mult;
	return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
		pt->tc.time_mult;
}

static struct ip_callchain *intel_pt_alloc_chain(struct intel_pt *pt)
{
	size_t sz = sizeof(struct ip_callchain);

	/* Add 1 to callchain_sz for callchain context */
	sz += (pt->synth_opts.callchain_sz + 1) * sizeof(u64);
	return zalloc(sz);
}

static int intel_pt_callchain_init(struct intel_pt *pt)
{
	struct evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CALLCHAIN))
			evsel->synth_sample_type |= PERF_SAMPLE_CALLCHAIN;
	}

	pt->chain = intel_pt_alloc_chain(pt);
	if (!pt->chain)
		return -ENOMEM;

	return 0;
}

static void intel_pt_add_callchain(struct intel_pt *pt,
				   struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(pt->machine,
							sample->pid,
							sample->tid);

	thread_stack__sample_late(thread, sample->cpu, pt->chain,
				  pt->synth_opts.callchain_sz + 1, sample->ip,
				  pt->kernel_start);

	sample->callchain = pt->chain;
	/* Drop the reference taken by machine__findnew_thread() */
	thread__put(thread);
1245}
1246
1247static struct branch_stack *intel_pt_alloc_br_stack(unsigned int entry_cnt)
1248{
1249	size_t sz = sizeof(struct branch_stack);
1250
1251	sz += entry_cnt * sizeof(struct branch_entry);
1252	return zalloc(sz);
1253}
1254
1255static int intel_pt_br_stack_init(struct intel_pt *pt)
1256{
1257	struct evsel *evsel;
1258
1259	evlist__for_each_entry(pt->session->evlist, evsel) {
1260		if (!(evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK))
1261			evsel->synth_sample_type |= PERF_SAMPLE_BRANCH_STACK;
1262	}
1263
1264	pt->br_stack = intel_pt_alloc_br_stack(pt->br_stack_sz);
1265	if (!pt->br_stack)
1266		return -ENOMEM;
1267
1268	return 0;
1269}
1270
1271static void intel_pt_add_br_stack(struct intel_pt *pt,
1272				  struct perf_sample *sample)
1273{
1274	struct thread *thread = machine__findnew_thread(pt->machine,
1275							sample->pid,
1276							sample->tid);
1277
1278	thread_stack__br_sample_late(thread, sample->cpu, pt->br_stack,
1279				     pt->br_stack_sz, sample->ip,
1280				     pt->kernel_start);
1281
1282	sample->branch_stack = pt->br_stack;
1283	thread__put(thread);
1284}
1285
1286/* INTEL_PT_LBR_0, INTEL_PT_LBR_1 and INTEL_PT_LBR_2 */
1287#define LBRS_MAX (INTEL_PT_BLK_ITEM_ID_CNT * 3U)
1288
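/*
 * Allocate and set up a per-queue decoder instance, wiring up the trace
 * data and instruction-walking callbacks plus the timing parameters the
 * decoder needs. The cpuid check enables INTEL_PT_FUP_WITH_NLIP for model
 * 92 parts, whose FUP packets carry the next linear ip rather than the
 * current one.
 */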
static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
						   unsigned int queue_nr)
{
	struct intel_pt_params params = { .get_trace = 0, };
	struct perf_env *env = pt->machine->env;
	struct intel_pt_queue *ptq;

	ptq = zalloc(sizeof(struct intel_pt_queue));
	if (!ptq)
		return NULL;

	if (pt->synth_opts.callchain) {
		ptq->chain = intel_pt_alloc_chain(pt);
		if (!ptq->chain)
			goto out_free;
	}

	if (pt->synth_opts.last_branch || pt->synth_opts.other_events) {
		unsigned int entry_cnt = max(LBRS_MAX, pt->br_stack_sz);

		ptq->last_branch = intel_pt_alloc_br_stack(entry_cnt);
		if (!ptq->last_branch)
			goto out_free;
	}

	ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
	if (!ptq->event_buf)
		goto out_free;

	ptq->pt = pt;
	ptq->queue_nr = queue_nr;
	ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
	ptq->pid = -1;
	ptq->tid = -1;
	ptq->cpu = -1;
	ptq->next_tid = -1;

	params.get_trace = intel_pt_get_trace;
	params.walk_insn = intel_pt_walk_next_insn;
	params.lookahead = intel_pt_lookahead;
	params.findnew_vmcs_info = intel_pt_findnew_vmcs_info;
	params.data = ptq;
	params.return_compression = intel_pt_return_compression(pt);
	params.branch_enable = intel_pt_branch_enable(pt);
	params.ctl = intel_pt_ctl(pt);
	params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
	params.mtc_period = intel_pt_mtc_period(pt);
	params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
	params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;
	params.quick = pt->synth_opts.quick;
	params.vm_time_correlation = pt->synth_opts.vm_time_correlation;
	params.vm_tm_corr_dry_run = pt->synth_opts.vm_tm_corr_dry_run;
	params.first_timestamp = pt->first_timestamp;
	params.max_loops = pt->max_loops;

	/* Cannot walk code without TNT, so force 'quick' mode */
	if (params.branch_enable && intel_pt_disabled_tnt(pt) && !params.quick)
		params.quick = 1;

	if (pt->filts.cnt > 0)
		params.pgd_ip = intel_pt_pgd_ip;

	if (pt->synth_opts.instructions || pt->synth_opts.cycles) {
		if (pt->synth_opts.period) {
			switch (pt->synth_opts.period_type) {
			case PERF_ITRACE_PERIOD_INSTRUCTIONS:
				params.period_type =
						INTEL_PT_PERIOD_INSTRUCTIONS;
				params.period = pt->synth_opts.period;
				break;
			case PERF_ITRACE_PERIOD_TICKS:
				params.period_type = INTEL_PT_PERIOD_TICKS;
				params.period = pt->synth_opts.period;
				break;
			case PERF_ITRACE_PERIOD_NANOSECS:
				params.period_type = INTEL_PT_PERIOD_TICKS;
				params.period = intel_pt_ns_to_ticks(pt,
							pt->synth_opts.period);
				break;
			default:
				break;
			}
		}

		if (!params.period) {
			params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
			params.period = 1;
		}
	}

	if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18))
		params.flags |= INTEL_PT_FUP_WITH_NLIP;

	ptq->decoder = intel_pt_decoder_new(&params);
	if (!ptq->decoder)
		goto out_free;

	return ptq;

out_free:
	zfree(&ptq->event_buf);
	zfree(&ptq->last_branch);
	zfree(&ptq->chain);
	free(ptq);
	return NULL;
}

static void intel_pt_free_queue(void *priv)
{
	struct intel_pt_queue *ptq = priv;

	if (!ptq)
		return;
	thread__zput(ptq->thread);
	thread__zput(ptq->guest_thread);
	thread__zput(ptq->unknown_guest_thread);
	intel_pt_decoder_free(ptq->decoder);
	zfree(&ptq->event_buf);
	zfree(&ptq->last_branch);
	zfree(&ptq->chain);
	free(ptq);
}

static void intel_pt_first_timestamp(struct intel_pt *pt, u64 timestamp)
{
	unsigned int i;

	pt->first_timestamp = timestamp;

	for (i = 0; i < pt->queues.nr_queues; i++) {
		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
		struct intel_pt_queue *ptq = queue->priv;

		if (ptq && ptq->decoder)
			intel_pt_set_first_timestamp(ptq->decoder, timestamp);
	}
}

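/*
 * When guest sideband events are available, resolve the guest machine,
 * thread, pid/tid and vcpu that correspond to this queue's host thread.
 */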
static int intel_pt_get_guest_from_sideband(struct intel_pt_queue *ptq)
{
	struct machines *machines = &ptq->pt->session->machines;
	struct machine *machine;
	pid_t machine_pid = ptq->pid;
	pid_t tid;
	int vcpu;

	if (machine_pid <= 0)
		return 0; /* Not a guest machine */

	machine = machines__find(machines, machine_pid);
	if (!machine)
		return 0; /* Not a guest machine */

	if (ptq->guest_machine != machine) {
		ptq->guest_machine = NULL;
		thread__zput(ptq->guest_thread);
		thread__zput(ptq->unknown_guest_thread);

		ptq->unknown_guest_thread = machine__find_thread(machine, 0, 0);
		if (!ptq->unknown_guest_thread)
			return -1;
		ptq->guest_machine = machine;
	}

	vcpu = ptq->thread ? thread__guest_cpu(ptq->thread) : -1;
	if (vcpu < 0)
		return -1;

	tid = machine__get_current_tid(machine, vcpu);

	if (ptq->guest_thread && thread__tid(ptq->guest_thread) != tid)
		thread__zput(ptq->guest_thread);

	if (!ptq->guest_thread) {
		ptq->guest_thread = machine__find_thread(machine, -1, tid);
		if (!ptq->guest_thread)
			return -1;
	}

	ptq->guest_machine_pid = machine_pid;
	ptq->guest_pid = thread__pid(ptq->guest_thread);
	ptq->guest_tid = tid;
	ptq->vcpu = vcpu;

	return 0;
}

static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
				     struct auxtrace_queue *queue)
{
	struct intel_pt_queue *ptq = queue->priv;

	if (queue->tid == -1 || pt->have_sched_switch) {
		ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
		if (ptq->tid == -1)
			ptq->pid = -1;
		thread__zput(ptq->thread);
	}

	if (!ptq->thread && ptq->tid != -1)
		ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);

	if (ptq->thread) {
		ptq->pid = thread__pid(ptq->thread);
		if (queue->cpu == -1)
			ptq->cpu = thread__cpu(ptq->thread);
	}

	if (pt->have_guest_sideband && intel_pt_get_guest_from_sideband(ptq)) {
		ptq->guest_machine_pid = 0;
		ptq->guest_pid = -1;
		ptq->guest_tid = -1;
		ptq->vcpu = -1;
	}
}

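/*
 * Derive PERF_IP_FLAG_* sample flags (branch type, async, transaction,
 * trace begin/end, interrupt state) from the current decoder state.
 */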
static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;

	ptq->insn_len = 0;
	if (ptq->state->flags & INTEL_PT_ABORT_TX) {
		ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
	} else if (ptq->state->flags & INTEL_PT_ASYNC) {
		if (!ptq->state->to_ip)
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_ASYNC |
				     PERF_IP_FLAG_TRACE_END;
		else if (ptq->state->from_nr && !ptq->state->to_nr)
			ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
				     PERF_IP_FLAG_ASYNC |
				     PERF_IP_FLAG_VMEXIT;
		else
			ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
				     PERF_IP_FLAG_ASYNC |
				     PERF_IP_FLAG_INTERRUPT;
	} else {
		if (ptq->state->from_ip)
			ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
		else
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_BEGIN;
		if (ptq->state->flags & INTEL_PT_IN_TX)
			ptq->flags |= PERF_IP_FLAG_IN_TX;
		ptq->insn_len = ptq->state->insn_len;
		memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ);
	}

	if (ptq->state->type & INTEL_PT_TRACE_BEGIN)
		ptq->flags |= PERF_IP_FLAG_TRACE_BEGIN;
	if (ptq->state->type & INTEL_PT_TRACE_END)
		ptq->flags |= PERF_IP_FLAG_TRACE_END;

	if (pt->cap_event_trace) {
		if (ptq->state->type & INTEL_PT_IFLAG_CHG) {
			if (!ptq->state->from_iflag)
				ptq->flags |= PERF_IP_FLAG_INTR_DISABLE;
			if (ptq->state->from_iflag != ptq->state->to_iflag)
				ptq->flags |= PERF_IP_FLAG_INTR_TOGGLE;
		} else if (!ptq->state->to_iflag) {
			ptq->flags |= PERF_IP_FLAG_INTR_DISABLE;
		}
	}
}

static void intel_pt_setup_time_range(struct intel_pt *pt,
				      struct intel_pt_queue *ptq)
{
	if (!pt->range_cnt)
		return;

	ptq->sel_timestamp = pt->time_ranges[0].start;
	ptq->sel_idx = 0;

	if (ptq->sel_timestamp) {
		ptq->sel_start = true;
	} else {
		ptq->sel_timestamp = pt->time_ranges[0].end;
		ptq->sel_start = false;
	}
}

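/*
 * Set up a queue for decoding and, unless decoding is timeless, decode as
 * far as the first timestamp and add the queue to the heap that keeps
 * queues ordered by timestamp.
 */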
static int intel_pt_setup_queue(struct intel_pt *pt,
				struct auxtrace_queue *queue,
				unsigned int queue_nr)
{
	struct intel_pt_queue *ptq = queue->priv;

	if (list_empty(&queue->head))
		return 0;

	if (!ptq) {
		ptq = intel_pt_alloc_queue(pt, queue_nr);
		if (!ptq)
			return -ENOMEM;
		queue->priv = ptq;

		if (queue->cpu != -1)
			ptq->cpu = queue->cpu;
		ptq->tid = queue->tid;

		ptq->cbr_seen = UINT_MAX;

		if (pt->sampling_mode && !pt->snapshot_mode &&
		    pt->timeless_decoding)
			ptq->step_through_buffers = true;

		ptq->sync_switch = pt->sync_switch;

		intel_pt_setup_time_range(pt, ptq);
	}

	if (!ptq->on_heap &&
	    (!ptq->sync_switch ||
	     ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
		const struct intel_pt_state *state;
		int ret;

		if (pt->timeless_decoding)
			return 0;

		intel_pt_log("queue %u getting timestamp\n", queue_nr);
		intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
			     queue_nr, ptq->cpu, ptq->pid, ptq->tid);

		if (ptq->sel_start && ptq->sel_timestamp) {
			ret = intel_pt_fast_forward(ptq->decoder,
						    ptq->sel_timestamp);
			if (ret)
				return ret;
		}

		while (1) {
			state = intel_pt_decode(ptq->decoder);
			if (state->err) {
				if (state->err == INTEL_PT_ERR_NODATA) {
					intel_pt_log("queue %u has no timestamp\n",
						     queue_nr);
					return 0;
				}
				continue;
			}
			if (state->timestamp)
				break;
		}

		ptq->timestamp = state->timestamp;
		intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n",
			     queue_nr, ptq->timestamp);
		ptq->state = state;
		ptq->have_sample = true;
		if (ptq->sel_start && ptq->sel_timestamp &&
		    ptq->timestamp < ptq->sel_timestamp)
			ptq->have_sample = false;
		intel_pt_sample_flags(ptq);
		ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
		if (ret)
			return ret;
		ptq->on_heap = true;
	}

	return 0;
}

static int intel_pt_setup_queues(struct intel_pt *pt)
{
	unsigned int i;
	int ret;

	for (i = 0; i < pt->queues.nr_queues; i++) {
		ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i);
		if (ret)
			return ret;
	}
	return 0;
}

static inline bool intel_pt_skip_event(struct intel_pt *pt)
{
	return pt->synth_opts.initial_skip &&
	       pt->num_events++ < pt->synth_opts.initial_skip;
}

/*
 * Cannot count CBR as skipped because it won't go away until cbr == cbr_seen.
 * Also ensure CBR is first non-skipped event by allowing for 4 more samples
 * from this decoder state.
 */
static inline bool intel_pt_skip_cbr_event(struct intel_pt *pt)
{
	return pt->synth_opts.initial_skip &&
	       pt->num_events + 4 < pt->synth_opts.initial_skip;
}

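/*
 * Sample preparation is layered: the "a" helper fills the fields common to
 * all samples, the "b" helper adds branch from/to addresses and timing, and
 * further helpers below add callchains, branch stacks, or the power/ptwrite
 * specifics.
 */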
1683static void intel_pt_prep_a_sample(struct intel_pt_queue *ptq,
1684				   union perf_event *event,
1685				   struct perf_sample *sample)
1686{
1687	event->sample.header.type = PERF_RECORD_SAMPLE;
1688	event->sample.header.size = sizeof(struct perf_event_header);
1689
1690	sample->pid = ptq->pid;
1691	sample->tid = ptq->tid;
1692
1693	if (ptq->pt->have_guest_sideband) {
1694		if ((ptq->state->from_ip && ptq->state->from_nr) ||
1695		    (ptq->state->to_ip && ptq->state->to_nr)) {
1696			sample->pid = ptq->guest_pid;
1697			sample->tid = ptq->guest_tid;
1698			sample->machine_pid = ptq->guest_machine_pid;
1699			sample->vcpu = ptq->vcpu;
1700		}
1701	}
1702
1703	sample->cpu = ptq->cpu;
1704	sample->insn_len = ptq->insn_len;
1705	memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
1706}
1707
1708static void intel_pt_prep_b_sample(struct intel_pt *pt,
1709				   struct intel_pt_queue *ptq,
1710				   union perf_event *event,
1711				   struct perf_sample *sample)
1712{
1713	intel_pt_prep_a_sample(ptq, event, sample);
1714
1715	if (!pt->timeless_decoding)
1716		sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
1717
1718	sample->ip = ptq->state->from_ip;
1719	sample->addr = ptq->state->to_ip;
1720	sample->cpumode = intel_pt_cpumode(ptq, sample->ip, sample->addr);
1721	sample->period = 1;
1722	sample->flags = ptq->flags;
1723
1724	event->sample.header.misc = sample->cpumode;
1725}
1726
1727static int intel_pt_inject_event(union perf_event *event,
1728				 struct perf_sample *sample, u64 type)
1729{
1730	event->header.size = perf_event__sample_event_size(sample, type, 0);
1731	return perf_event__synthesize_sample(event, type, 0, sample);
1732}
1733
1734static inline int intel_pt_opt_inject(struct intel_pt *pt,
1735				      union perf_event *event,
1736				      struct perf_sample *sample, u64 type)
1737{
1738	if (!pt->synth_opts.inject)
1739		return 0;
1740
1741	return intel_pt_inject_event(event, sample, type);
1742}
1743
1744static int intel_pt_deliver_synth_event(struct intel_pt *pt,
1745					union perf_event *event,
1746					struct perf_sample *sample, u64 type)
1747{
1748	int ret;
1749
1750	ret = intel_pt_opt_inject(pt, event, sample, type);
1751	if (ret)
1752		return ret;
1753
1754	ret = perf_session__deliver_synth_event(pt->session, event, sample);
1755	if (ret)
1756		pr_err("Intel PT: failed to deliver event, error %d\n", ret);
1757
1758	return ret;
1759}
1760
1761static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
1762{
1763	struct intel_pt *pt = ptq->pt;
1764	union perf_event *event = ptq->event_buf;
1765	struct perf_sample sample = { .ip = 0, };
1766	struct dummy_branch_stack {
1767		u64			nr;
1768		u64			hw_idx;
1769		struct branch_entry	entries;
1770	} dummy_bs;
1771
1772	if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
1773		return 0;
1774
1775	if (intel_pt_skip_event(pt))
1776		return 0;
1777
1778	intel_pt_prep_b_sample(pt, ptq, event, &sample);
1779
1780	sample.id = ptq->pt->branches_id;
1781	sample.stream_id = ptq->pt->branches_id;
1782
1783	/*
1784	 * perf report cannot handle events without a branch stack when using
1785	 * SORT_MODE__BRANCH so make a dummy one.
1786	 */
1787	if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) {
1788		dummy_bs = (struct dummy_branch_stack){
1789			.nr = 1,
1790			.hw_idx = -1ULL,
1791			.entries = {
1792				.from = sample.ip,
1793				.to = sample.addr,
1794			},
1795		};
1796		sample.branch_stack = (struct branch_stack *)&dummy_bs;
1797	}
1798
1799	if (ptq->sample_ipc)
1800		sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt;
1801	if (sample.cyc_cnt) {
1802		sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt;
1803		ptq->last_br_insn_cnt = ptq->ipc_insn_cnt;
1804		ptq->last_br_cyc_cnt = ptq->ipc_cyc_cnt;
1805	}
1806
1807	return intel_pt_deliver_synth_event(pt, event, &sample,
1808					    pt->branches_sample_type);
1809}
1810
1811static void intel_pt_prep_sample(struct intel_pt *pt,
1812				 struct intel_pt_queue *ptq,
1813				 union perf_event *event,
1814				 struct perf_sample *sample)
1815{
1816	intel_pt_prep_b_sample(pt, ptq, event, sample);
1817
1818	if (pt->synth_opts.callchain) {
1819		thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
1820				     pt->synth_opts.callchain_sz + 1,
1821				     sample->ip, pt->kernel_start);
1822		sample->callchain = ptq->chain;
1823	}
1824
1825	if (pt->synth_opts.last_branch) {
1826		thread_stack__br_sample(ptq->thread, ptq->cpu, ptq->last_branch,
1827					pt->br_stack_sz);
1828		sample->branch_stack = ptq->last_branch;
1829	}
1830}
1831
1832static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
1833{
1834	struct intel_pt *pt = ptq->pt;
1835	union perf_event *event = ptq->event_buf;
1836	struct perf_sample sample = { .ip = 0, };
1837
1838	if (intel_pt_skip_event(pt))
1839		return 0;
1840
1841	intel_pt_prep_sample(pt, ptq, event, &sample);
1842
1843	sample.id = ptq->pt->instructions_id;
1844	sample.stream_id = ptq->pt->instructions_id;
1845	if (pt->synth_opts.quick)
1846		sample.period = 1;
1847	else
1848		sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
1849
1850	if (ptq->sample_ipc)
1851		sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
1852	if (sample.cyc_cnt) {
1853		sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt;
1854		ptq->last_in_insn_cnt = ptq->ipc_insn_cnt;
1855		ptq->last_in_cyc_cnt = ptq->ipc_cyc_cnt;
1856	}
1857
1858	ptq->last_insn_cnt = ptq->state->tot_insn_cnt;
1859
1860	return intel_pt_deliver_synth_event(pt, event, &sample,
1861					    pt->instructions_sample_type);
1862}
1863
1864static int intel_pt_synth_cycle_sample(struct intel_pt_queue *ptq)
1865{
1866	struct intel_pt *pt = ptq->pt;
1867	union perf_event *event = ptq->event_buf;
1868	struct perf_sample sample = { .ip = 0, };
1869	u64 period = 0;
1870
1871	if (ptq->sample_ipc)
1872		period = ptq->ipc_cyc_cnt - ptq->last_cy_cyc_cnt;
1873
1874	if (!period || intel_pt_skip_event(pt))
1875		return 0;
1876
1877	intel_pt_prep_sample(pt, ptq, event, &sample);
1878
1879	sample.id = ptq->pt->cycles_id;
1880	sample.stream_id = ptq->pt->cycles_id;
1881	sample.period = period;
1882
1883	sample.cyc_cnt = period;
1884	sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_cy_insn_cnt;
1885	ptq->last_cy_insn_cnt = ptq->ipc_insn_cnt;
1886	ptq->last_cy_cyc_cnt = ptq->ipc_cyc_cnt;
1887
1888	return intel_pt_deliver_synth_event(pt, event, &sample, pt->cycles_sample_type);
1889}
1890
1891static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
1892{
1893	struct intel_pt *pt = ptq->pt;
1894	union perf_event *event = ptq->event_buf;
1895	struct perf_sample sample = { .ip = 0, };
1896
1897	if (intel_pt_skip_event(pt))
1898		return 0;
1899
1900	intel_pt_prep_sample(pt, ptq, event, &sample);
1901
1902	sample.id = ptq->pt->transactions_id;
1903	sample.stream_id = ptq->pt->transactions_id;
1904
1905	return intel_pt_deliver_synth_event(pt, event, &sample,
1906					    pt->transactions_sample_type);
1907}
1908
1909static void intel_pt_prep_p_sample(struct intel_pt *pt,
1910				   struct intel_pt_queue *ptq,
1911				   union perf_event *event,
1912				   struct perf_sample *sample)
1913{
1914	intel_pt_prep_sample(pt, ptq, event, sample);
1915
1916	/*
1917	 * Zero IP is used to mean "trace start" but that is not the case for
1918	 * power or PTWRITE events with no IP, so clear the flags.
1919	 */
1920	if (!sample->ip)
1921		sample->flags = 0;
1922}
1923
1924static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq)
1925{
1926	struct intel_pt *pt = ptq->pt;
1927	union perf_event *event = ptq->event_buf;
1928	struct perf_sample sample = { .ip = 0, };
1929	struct perf_synth_intel_ptwrite raw;
1930
1931	if (intel_pt_skip_event(pt))
1932		return 0;
1933
1934	intel_pt_prep_p_sample(pt, ptq, event, &sample);
1935
1936	sample.id = ptq->pt->ptwrites_id;
1937	sample.stream_id = ptq->pt->ptwrites_id;
1938
1939	raw.flags = 0;
1940	raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
1941	raw.payload = cpu_to_le64(ptq->state->ptw_payload);
1942
1943	sample.raw_size = perf_synth__raw_size(raw);
1944	sample.raw_data = perf_synth__raw_data(&raw);
1945
1946	return intel_pt_deliver_synth_event(pt, event, &sample,
1947					    pt->ptwrites_sample_type);
1948}
1949
1950static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq)
1951{
1952	struct intel_pt *pt = ptq->pt;
1953	union perf_event *event = ptq->event_buf;
1954	struct perf_sample sample = { .ip = 0, };
1955	struct perf_synth_intel_cbr raw;
1956	u32 flags;
1957
1958	if (intel_pt_skip_cbr_event(pt))
1959		return 0;
1960
1961	ptq->cbr_seen = ptq->state->cbr;
1962
1963	intel_pt_prep_p_sample(pt, ptq, event, &sample);
1964
1965	sample.id = ptq->pt->cbr_id;
1966	sample.stream_id = ptq->pt->cbr_id;
1967
1968	flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16);
1969	raw.flags = cpu_to_le32(flags);
1970	raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz);
1971	raw.reserved3 = 0;
1972
1973	sample.raw_size = perf_synth__raw_size(raw);
1974	sample.raw_data = perf_synth__raw_data(&raw);
1975
1976	return intel_pt_deliver_synth_event(pt, event, &sample,
1977					    pt->pwr_events_sample_type);
1978}
1979
1980static int intel_pt_synth_psb_sample(struct intel_pt_queue *ptq)
1981{
1982	struct intel_pt *pt = ptq->pt;
1983	union perf_event *event = ptq->event_buf;
1984	struct perf_sample sample = { .ip = 0, };
1985	struct perf_synth_intel_psb raw;
1986
1987	if (intel_pt_skip_event(pt))
1988		return 0;
1989
1990	intel_pt_prep_p_sample(pt, ptq, event, &sample);
1991
1992	sample.id = ptq->pt->psb_id;
1993	sample.stream_id = ptq->pt->psb_id;
1994	sample.flags = 0;
1995
1996	raw.reserved = 0;
1997	raw.offset = ptq->state->psb_offset;
1998
1999	sample.raw_size = perf_synth__raw_size(raw);
2000	sample.raw_data = perf_synth__raw_data(&raw);
2001
2002	return intel_pt_deliver_synth_event(pt, event, &sample,
2003					    pt->pwr_events_sample_type);
2004}
2005
2006static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq)
2007{
2008	struct intel_pt *pt = ptq->pt;
2009	union perf_event *event = ptq->event_buf;
2010	struct perf_sample sample = { .ip = 0, };
2011	struct perf_synth_intel_mwait raw;
2012
2013	if (intel_pt_skip_event(pt))
2014		return 0;
2015
2016	intel_pt_prep_p_sample(pt, ptq, event, &sample);
2017
2018	sample.id = ptq->pt->mwait_id;
2019	sample.stream_id = ptq->pt->mwait_id;
2020
2021	raw.reserved = 0;
2022	raw.payload = cpu_to_le64(ptq->state->mwait_payload);
2023
2024	sample.raw_size = perf_synth__raw_size(raw);
2025	sample.raw_data = perf_synth__raw_data(&raw);
2026
2027	return intel_pt_deliver_synth_event(pt, event, &sample,
2028					    pt->pwr_events_sample_type);
2029}
2030
2031static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq)
2032{
2033	struct intel_pt *pt = ptq->pt;
2034	union perf_event *event = ptq->event_buf;
2035	struct perf_sample sample = { .ip = 0, };
2036	struct perf_synth_intel_pwre raw;
2037
2038	if (intel_pt_skip_event(pt))
2039		return 0;
2040
2041	intel_pt_prep_p_sample(pt, ptq, event, &sample);
2042
2043	sample.id = ptq->pt->pwre_id;
2044	sample.stream_id = ptq->pt->pwre_id;
2045
2046	raw.reserved = 0;
2047	raw.payload = cpu_to_le64(ptq->state->pwre_payload);
2048
2049	sample.raw_size = perf_synth__raw_size(raw);
2050	sample.raw_data = perf_synth__raw_data(&raw);
2051
2052	return intel_pt_deliver_synth_event(pt, event, &sample,
2053					    pt->pwr_events_sample_type);
2054}
2055
2056static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq)
2057{
2058	struct intel_pt *pt = ptq->pt;
2059	union perf_event *event = ptq->event_buf;
2060	struct perf_sample sample = { .ip = 0, };
2061	struct perf_synth_intel_exstop raw;
2062
2063	if (intel_pt_skip_event(pt))
2064		return 0;
2065
2066	intel_pt_prep_p_sample(pt, ptq, event, &sample);
2067
2068	sample.id = ptq->pt->exstop_id;
2069	sample.stream_id = ptq->pt->exstop_id;
2070
2071	raw.flags = 0;
2072	raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
2073
2074	sample.raw_size = perf_synth__raw_size(raw);
2075	sample.raw_data = perf_synth__raw_data(&raw);
2076
2077	return intel_pt_deliver_synth_event(pt, event, &sample,
2078					    pt->pwr_events_sample_type);
2079}
2080
2081static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
2082{
2083	struct intel_pt *pt = ptq->pt;
2084	union perf_event *event = ptq->event_buf;
2085	struct perf_sample sample = { .ip = 0, };
2086	struct perf_synth_intel_pwrx raw;
2087
2088	if (intel_pt_skip_event(pt))
2089		return 0;
2090
2091	intel_pt_prep_p_sample(pt, ptq, event, &sample);
2092
2093	sample.id = ptq->pt->pwrx_id;
2094	sample.stream_id = ptq->pt->pwrx_id;
2095
2096	raw.reserved = 0;
2097	raw.payload = cpu_to_le64(ptq->state->pwrx_payload);
2098
2099	sample.raw_size = perf_synth__raw_size(raw);
2100	sample.raw_data = perf_synth__raw_data(&raw);
2101
2102	return intel_pt_deliver_synth_event(pt, event, &sample,
2103					    pt->pwr_events_sample_type);
2104}
2105
2106/*
2107 * PEBS gp_regs array indexes plus 1 so that 0 means not present. Refer to
2108 * intel_pt_add_gp_regs().
2109 */
2110static const int pebs_gp_regs[] = {
2111	[PERF_REG_X86_FLAGS]	= 1,
2112	[PERF_REG_X86_IP]	= 2,
2113	[PERF_REG_X86_AX]	= 3,
2114	[PERF_REG_X86_CX]	= 4,
2115	[PERF_REG_X86_DX]	= 5,
2116	[PERF_REG_X86_BX]	= 6,
2117	[PERF_REG_X86_SP]	= 7,
2118	[PERF_REG_X86_BP]	= 8,
2119	[PERF_REG_X86_SI]	= 9,
2120	[PERF_REG_X86_DI]	= 10,
2121	[PERF_REG_X86_R8]	= 11,
2122	[PERF_REG_X86_R9]	= 12,
2123	[PERF_REG_X86_R10]	= 13,
2124	[PERF_REG_X86_R11]	= 14,
2125	[PERF_REG_X86_R12]	= 15,
2126	[PERF_REG_X86_R13]	= 16,
2127	[PERF_REG_X86_R14]	= 17,
2128	[PERF_REG_X86_R15]	= 18,
2129};
2130
2131static u64 *intel_pt_add_gp_regs(struct regs_dump *intr_regs, u64 *pos,
2132				 const struct intel_pt_blk_items *items,
2133				 u64 regs_mask)
2134{
2135	const u64 *gp_regs = items->val[INTEL_PT_GP_REGS_POS];
2136	u32 mask = items->mask[INTEL_PT_GP_REGS_POS];
2137	u32 bit;
2138	int i;
2139
2140	for (i = 0, bit = 1; i < PERF_REG_X86_64_MAX; i++, bit <<= 1) {
2141		/* Get the PEBS gp_regs array index */
2142		int n = pebs_gp_regs[i] - 1;
2143
2144		if (n < 0)
2145			continue;
2146		/*
2147		 * Add only registers that were requested (i.e. 'regs_mask') and
2148		 * that were provided (i.e. 'mask'), and update the resulting
2149		 * mask (i.e. 'intr_regs->mask') accordingly.
2150		 */
2151		if (mask & 1 << n && regs_mask & bit) {
2152			intr_regs->mask |= bit;
2153			*pos++ = gp_regs[n];
2154		}
2155	}
2156
2157	return pos;
2158}
2159
2160#ifndef PERF_REG_X86_XMM0
2161#define PERF_REG_X86_XMM0 32
2162#endif
2163
2164static void intel_pt_add_xmm(struct regs_dump *intr_regs, u64 *pos,
2165			     const struct intel_pt_blk_items *items,
2166			     u64 regs_mask)
2167{
2168	u32 mask = items->has_xmm & (regs_mask >> PERF_REG_X86_XMM0);
2169	const u64 *xmm = items->xmm;
2170
2171	/*
2172	 * If there are any XMM registers, then there should be all of them.
2173	 * Nevertheless, follow the logic to add only registers that were
2174	 * requested (i.e. 'regs_mask') and that were provided (i.e. 'mask'),
2175	 * and update the resulting mask (i.e. 'intr_regs->mask') accordingly.
2176	 */
2177	intr_regs->mask |= (u64)mask << PERF_REG_X86_XMM0;
2178
2179	for (; mask; mask >>= 1, xmm++) {
2180		if (mask & 1)
2181			*pos++ = *xmm;
2182	}
2183}
2184
2185#define LBR_INFO_MISPRED	(1ULL << 63)
2186#define LBR_INFO_IN_TX		(1ULL << 62)
2187#define LBR_INFO_ABORT		(1ULL << 61)
2188#define LBR_INFO_CYCLES		0xffff
2189
2190/* Refer to the kernel's intel_pmu_store_pebs_lbrs() */
2191static u64 intel_pt_lbr_flags(u64 info)
2192{
2193	union {
2194		struct branch_flags flags;
2195		u64 result;
2196	} u;
2197
2198	u.result	  = 0;
2199	u.flags.mispred	  = !!(info & LBR_INFO_MISPRED);
2200	u.flags.predicted = !(info & LBR_INFO_MISPRED);
2201	u.flags.in_tx	  = !!(info & LBR_INFO_IN_TX);
2202	u.flags.abort	  = !!(info & LBR_INFO_ABORT);
2203	u.flags.cycles	  = info & LBR_INFO_CYCLES;
2204
2205	return u.result;
2206}
2207
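/*
 * PEBS block items hold LBR entries as (from, to, info) triples spread
 * across three block item positions. Each 3-bit group in 'mask' indicates
 * which of the three values are present; only complete triples
 * ((mask & 7) == 7) are copied to the branch stack, with the info word
 * converted to perf branch flags by intel_pt_lbr_flags().
 */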
2208static void intel_pt_add_lbrs(struct branch_stack *br_stack,
2209			      const struct intel_pt_blk_items *items)
2210{
2211	u64 *to;
2212	int i;
2213
2214	br_stack->nr = 0;
2215
2216	to = &br_stack->entries[0].from;
2217
2218	for (i = INTEL_PT_LBR_0_POS; i <= INTEL_PT_LBR_2_POS; i++) {
2219		u32 mask = items->mask[i];
2220		const u64 *from = items->val[i];
2221
2222		for (; mask; mask >>= 3, from += 3) {
2223			if ((mask & 7) == 7) {
2224				*to++ = from[0];
2225				*to++ = from[1];
2226				*to++ = intel_pt_lbr_flags(from[2]);
2227				br_stack->nr += 1;
2228			}
2229		}
2230	}
2231}
2232
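/*
 * Synthesize one sample from a PEBS-via-PT record for the given evsel. Each
 * constituent part (IP, time, callchain, interrupt registers, branch stack,
 * data address, weight, transaction) is filled in only if the evsel's
 * sample_type requests it and the PEBS block items provide it.
 */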
2233static int intel_pt_do_synth_pebs_sample(struct intel_pt_queue *ptq, struct evsel *evsel, u64 id)
2234{
2235	const struct intel_pt_blk_items *items = &ptq->state->items;
2236	struct perf_sample sample = { .ip = 0, };
2237	union perf_event *event = ptq->event_buf;
2238	struct intel_pt *pt = ptq->pt;
2239	u64 sample_type = evsel->core.attr.sample_type;
2240	u8 cpumode;
2241	u64 regs[8 * sizeof(sample.intr_regs.mask)];
2242
2243	if (intel_pt_skip_event(pt))
2244		return 0;
2245
2246	intel_pt_prep_a_sample(ptq, event, &sample);
2247
2248	sample.id = id;
2249	sample.stream_id = id;
2250
2251	if (!evsel->core.attr.freq)
2252		sample.period = evsel->core.attr.sample_period;
2253
2254	/* No support for non-zero CS base */
2255	if (items->has_ip)
2256		sample.ip = items->ip;
2257	else if (items->has_rip)
2258		sample.ip = items->rip;
2259	else
2260		sample.ip = ptq->state->from_ip;
2261
2262	cpumode = intel_pt_cpumode(ptq, sample.ip, 0);
2263
2264	event->sample.header.misc = cpumode | PERF_RECORD_MISC_EXACT_IP;
2265
2266	sample.cpumode = cpumode;
2267
2268	if (sample_type & PERF_SAMPLE_TIME) {
2269		u64 timestamp = 0;
2270
2271		if (items->has_timestamp)
2272			timestamp = items->timestamp;
2273		else if (!pt->timeless_decoding)
2274			timestamp = ptq->timestamp;
2275		if (timestamp)
2276			sample.time = tsc_to_perf_time(timestamp, &pt->tc);
2277	}
2278
2279	if (sample_type & PERF_SAMPLE_CALLCHAIN &&
2280	    pt->synth_opts.callchain) {
2281		thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
2282				     pt->synth_opts.callchain_sz, sample.ip,
2283				     pt->kernel_start);
2284		sample.callchain = ptq->chain;
2285	}
2286
2287	if (sample_type & PERF_SAMPLE_REGS_INTR &&
2288	    (items->mask[INTEL_PT_GP_REGS_POS] ||
2289	     items->mask[INTEL_PT_XMM_POS])) {
2290		u64 regs_mask = evsel->core.attr.sample_regs_intr;
2291		u64 *pos;
2292
2293		sample.intr_regs.abi = items->is_32_bit ?
2294				       PERF_SAMPLE_REGS_ABI_32 :
2295				       PERF_SAMPLE_REGS_ABI_64;
2296		sample.intr_regs.regs = regs;
2297
2298		pos = intel_pt_add_gp_regs(&sample.intr_regs, regs, items, regs_mask);
2299
2300		intel_pt_add_xmm(&sample.intr_regs, pos, items, regs_mask);
2301	}
2302
2303	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
2304		if (items->mask[INTEL_PT_LBR_0_POS] ||
2305		    items->mask[INTEL_PT_LBR_1_POS] ||
2306		    items->mask[INTEL_PT_LBR_2_POS]) {
2307			intel_pt_add_lbrs(ptq->last_branch, items);
2308		} else if (pt->synth_opts.last_branch) {
2309			thread_stack__br_sample(ptq->thread, ptq->cpu,
2310						ptq->last_branch,
2311						pt->br_stack_sz);
2312		} else {
2313			ptq->last_branch->nr = 0;
2314		}
2315		sample.branch_stack = ptq->last_branch;
2316	}
2317
2318	if (sample_type & PERF_SAMPLE_ADDR && items->has_mem_access_address)
2319		sample.addr = items->mem_access_address;
2320
2321	if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
2322		/*
2323		 * Refer to the kernel's setup_pebs_adaptive_sample_data() and
2324		 * intel_hsw_weight().
2325		 */
2326		if (items->has_mem_access_latency) {
2327			u64 weight = items->mem_access_latency >> 32;
2328
2329			/*
2330			 * Starting from SPR, the mem access latency field
2331			 * contains both cache latency [47:32] and instruction
2332			 * latency [15:0]. The cache latency is the same as the
2333			 * mem access latency on previous platforms.
2334			 *
2335			 * In practice, no memory access could last longer than
2336			 * 4G cycles. Use latency >> 32 to distinguish the two
2337			 * formats of the mem access latency field.
2338			 */
2339			if (weight > 0) {
2340				sample.weight = weight & 0xffff;
2341				sample.ins_lat = items->mem_access_latency & 0xffff;
2342			} else
2343				sample.weight = items->mem_access_latency;
2344		}
2345		if (!sample.weight && items->has_tsx_aux_info) {
2346			/* Cycles last block */
2347			sample.weight = (u32)items->tsx_aux_info;
2348		}
2349	}
2350
2351	if (sample_type & PERF_SAMPLE_TRANSACTION && items->has_tsx_aux_info) {
2352		u64 ax = items->has_rax ? items->rax : 0;
2353		/* Refer to the kernel's intel_hsw_transaction() */
2354		u64 txn = (u8)(items->tsx_aux_info >> 32);
2355
2356		/* For RTM XABORTs also log the abort code from AX */
2357		if (txn & PERF_TXN_TRANSACTION && ax & 1)
2358			txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
2359		sample.transaction = txn;
2360	}
2361
2362	return intel_pt_deliver_synth_event(pt, event, &sample, sample_type);
2363}
2364
2365static int intel_pt_synth_single_pebs_sample(struct intel_pt_queue *ptq)
2366{
2367	struct intel_pt *pt = ptq->pt;
2368	struct evsel *evsel = pt->pebs_evsel;
2369	u64 id = evsel->core.id[0];
2370
2371	return intel_pt_do_synth_pebs_sample(ptq, evsel, id);
2372}
2373
2374static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
2375{
2376	const struct intel_pt_blk_items *items = &ptq->state->items;
2377	struct intel_pt_pebs_event *pe;
2378	struct intel_pt *pt = ptq->pt;
2379	int err = -EINVAL;
2380	int hw_id;
2381
2382	if (!items->has_applicable_counters || !items->applicable_counters) {
2383		if (!pt->single_pebs)
2384			pr_err("PEBS-via-PT record with no applicable_counters\n");
2385		return intel_pt_synth_single_pebs_sample(ptq);
2386	}
2387
2388	for_each_set_bit(hw_id, (unsigned long *)&items->applicable_counters, INTEL_PT_MAX_PEBS) {
2389		pe = &ptq->pebs[hw_id];
2390		if (!pe->evsel) {
2391			if (!pt->single_pebs)
2392				pr_err("PEBS-via-PT record with no matching event, hw_id %d\n",
2393				       hw_id);
2394			return intel_pt_synth_single_pebs_sample(ptq);
2395		}
2396		err = intel_pt_do_synth_pebs_sample(ptq, pe->evsel, pe->id);
2397		if (err)
2398			return err;
2399	}
2400
2401	return err;
2402}
2403
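/*
 * Synthesize an "evt" sample from a Control Flow Event (CFE) packet and its
 * associated Event Data (EVD) packets. The raw data is the CFE record
 * followed by evd_cnt EVD records, so raw_size covers only the EVD records
 * actually present, not the whole evd[] array.
 */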
2404static int intel_pt_synth_events_sample(struct intel_pt_queue *ptq)
2405{
2406	struct intel_pt *pt = ptq->pt;
2407	union perf_event *event = ptq->event_buf;
2408	struct perf_sample sample = { .ip = 0, };
2409	struct {
2410		struct perf_synth_intel_evt cfe;
2411		struct perf_synth_intel_evd evd[INTEL_PT_MAX_EVDS];
2412	} raw;
2413	int i;
2414
2415	if (intel_pt_skip_event(pt))
2416		return 0;
2417
2418	intel_pt_prep_p_sample(pt, ptq, event, &sample);
2419
2420	sample.id        = ptq->pt->evt_id;
2421	sample.stream_id = ptq->pt->evt_id;
2422
2423	raw.cfe.type     = ptq->state->cfe_type;
2424	raw.cfe.reserved = 0;
2425	raw.cfe.ip       = !!(ptq->state->flags & INTEL_PT_FUP_IP);
2426	raw.cfe.vector   = ptq->state->cfe_vector;
2427	raw.cfe.evd_cnt  = ptq->state->evd_cnt;
2428
2429	for (i = 0; i < ptq->state->evd_cnt; i++) {
2430		raw.evd[i].et       = 0;
2431		raw.evd[i].evd_type = ptq->state->evd[i].type;
2432		raw.evd[i].payload  = ptq->state->evd[i].payload;
2433	}
2434
2435	sample.raw_size = perf_synth__raw_size(raw.cfe) +
2436			  ptq->state->evd_cnt * sizeof(struct perf_synth_intel_evd);
2437	sample.raw_data = perf_synth__raw_data(&raw);
2438
2439	return intel_pt_deliver_synth_event(pt, event, &sample,
2440					    pt->evt_sample_type);
2441}
2442
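/*
 * Synthesize an "iflag" sample for an interrupt flag (IF) change, recording
 * the new flag value and, if the change happened at a branch, the branch
 * target IP.
 */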
2443static int intel_pt_synth_iflag_chg_sample(struct intel_pt_queue *ptq)
2444{
2445	struct intel_pt *pt = ptq->pt;
2446	union perf_event *event = ptq->event_buf;
2447	struct perf_sample sample = { .ip = 0, };
2448	struct perf_synth_intel_iflag_chg raw;
2449
2450	if (intel_pt_skip_event(pt))
2451		return 0;
2452
2453	intel_pt_prep_p_sample(pt, ptq, event, &sample);
2454
2455	sample.id = ptq->pt->iflag_chg_id;
2456	sample.stream_id = ptq->pt->iflag_chg_id;
2457
2458	raw.flags = 0;
2459	raw.iflag = ptq->state->to_iflag;
2460
2461	if (ptq->state->type & INTEL_PT_BRANCH) {
2462		raw.via_branch = 1;
2463		raw.branch_ip = ptq->state->to_ip;
2464	} else {
2465		sample.addr = 0;
2466	}
2467	sample.flags = ptq->flags;
2468
2469	sample.raw_size = perf_synth__raw_size(raw);
2470	sample.raw_data = perf_synth__raw_data(&raw);
2471
2472	return intel_pt_deliver_synth_event(pt, event, &sample,
2473					    pt->iflag_chg_sample_type);
2474}
2475
2476static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
2477				pid_t pid, pid_t tid, u64 ip, u64 timestamp,
2478				pid_t machine_pid, int vcpu)
2479{
2480	bool dump_log_on_error = pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ON_ERROR;
2481	bool log_on_stdout = pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_USE_STDOUT;
2482	union perf_event event;
2483	char msg[MAX_AUXTRACE_ERROR_MSG];
2484	int err;
2485
2486	if (pt->synth_opts.error_minus_flags) {
2487		if (code == INTEL_PT_ERR_OVR &&
2488		    pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_OVERFLOW)
2489			return 0;
2490		if (code == INTEL_PT_ERR_LOST &&
2491		    pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_DATA_LOST)
2492			return 0;
2493	}
2494
2495	intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);
2496
2497	auxtrace_synth_guest_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
2498				   code, cpu, pid, tid, ip, msg, timestamp,
2499				   machine_pid, vcpu);
2500
2501	if (intel_pt_enable_logging && !log_on_stdout) {
2502		FILE *fp = intel_pt_log_fp();
2503
2504		if (fp)
2505			perf_event__fprintf_auxtrace_error(&event, fp);
2506	}
2507
2508	if (code != INTEL_PT_ERR_LOST && dump_log_on_error)
2509		intel_pt_log_dump_buf();
2510
2511	err = perf_session__deliver_synth_event(pt->session, &event, NULL);
2512	if (err)
2513		pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
2514		       err);
2515
2516	return err;
2517}
2518
2519static int intel_ptq_synth_error(struct intel_pt_queue *ptq,
2520				 const struct intel_pt_state *state)
2521{
2522	struct intel_pt *pt = ptq->pt;
2523	u64 tm = ptq->timestamp;
2524	pid_t machine_pid = 0;
2525	pid_t pid = ptq->pid;
2526	pid_t tid = ptq->tid;
2527	int vcpu = -1;
2528
2529	tm = pt->timeless_decoding ? 0 : tsc_to_perf_time(tm, &pt->tc);
2530
2531	if (pt->have_guest_sideband && state->from_nr) {
2532		machine_pid = ptq->guest_machine_pid;
2533		vcpu = ptq->vcpu;
2534		pid = ptq->guest_pid;
2535		tid = ptq->guest_tid;
2536	}
2537
2538	return intel_pt_synth_error(pt, state->err, ptq->cpu, pid, tid,
2539				    state->from_ip, tm, machine_pid, vcpu);
2540}
2541
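/*
 * A context switch processed earlier determined the next tid for this
 * queue's CPU. Apply it now: update the machine's current tid for the CPU
 * and refresh the queue's pid/tid and thread.
 */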
2542static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
2543{
2544	struct auxtrace_queue *queue;
2545	pid_t tid = ptq->next_tid;
2546	int err;
2547
2548	if (tid == -1)
2549		return 0;
2550
2551	intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);
2552
2553	err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);
2554
2555	queue = &pt->queues.queue_array[ptq->queue_nr];
2556	intel_pt_set_pid_tid_cpu(pt, queue);
2557
2558	ptq->next_tid = -1;
2559
2560	return err;
2561}
2562
2563static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
2564{
2565	struct intel_pt *pt = ptq->pt;
2566
2567	return ip == pt->switch_ip &&
2568	       (ptq->flags & PERF_IP_FLAG_BRANCH) &&
2569	       !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
2570			       PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
2571}
2572
2573#define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \
2574			  INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT)
2575
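/*
 * Deliver all samples synthesized from the current decoder state, in a fixed
 * order: PEBS first (its timestamp may precede the current one), then event
 * trace and power events, instructions/cycles, transactions and ptwrites,
 * and finally branch handling, including the sync_switch state machine.
 */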
2576static int intel_pt_sample(struct intel_pt_queue *ptq)
2577{
2578	const struct intel_pt_state *state = ptq->state;
2579	struct intel_pt *pt = ptq->pt;
2580	int err;
2581
2582	if (!ptq->have_sample)
2583		return 0;
2584
2585	ptq->have_sample = false;
2586
2587	if (pt->synth_opts.approx_ipc) {
2588		ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
2589		ptq->ipc_cyc_cnt = ptq->state->cycles;
2590		ptq->sample_ipc = true;
2591	} else {
2592		ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
2593		ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt;
2594		ptq->sample_ipc = ptq->state->flags & INTEL_PT_SAMPLE_IPC;
2595	}
2596
2597	/* Ensure guest code maps are set up */
2598	if (symbol_conf.guest_code && (state->from_nr || state->to_nr))
2599		intel_pt_get_guest(ptq);
2600
2601	/*
2602	 * Do PEBS first to allow for the possibility that the PEBS timestamp
2603	 * precedes the current timestamp.
2604	 */
2605	if (pt->sample_pebs && state->type & INTEL_PT_BLK_ITEMS) {
2606		err = intel_pt_synth_pebs_sample(ptq);
2607		if (err)
2608			return err;
2609	}
2610
2611	if (pt->synth_opts.intr_events) {
2612		if (state->type & INTEL_PT_EVT) {
2613			err = intel_pt_synth_events_sample(ptq);
2614			if (err)
2615				return err;
2616		}
2617		if (state->type & INTEL_PT_IFLAG_CHG) {
2618			err = intel_pt_synth_iflag_chg_sample(ptq);
2619			if (err)
2620				return err;
2621		}
2622	}
2623
2624	if (pt->sample_pwr_events) {
2625		if (state->type & INTEL_PT_PSB_EVT) {
2626			err = intel_pt_synth_psb_sample(ptq);
2627			if (err)
2628				return err;
2629		}
2630		if (ptq->state->cbr != ptq->cbr_seen) {
2631			err = intel_pt_synth_cbr_sample(ptq);
2632			if (err)
2633				return err;
2634		}
2635		if (state->type & INTEL_PT_PWR_EVT) {
2636			if (state->type & INTEL_PT_MWAIT_OP) {
2637				err = intel_pt_synth_mwait_sample(ptq);
2638				if (err)
2639					return err;
2640			}
2641			if (state->type & INTEL_PT_PWR_ENTRY) {
2642				err = intel_pt_synth_pwre_sample(ptq);
2643				if (err)
2644					return err;
2645			}
2646			if (state->type & INTEL_PT_EX_STOP) {
2647				err = intel_pt_synth_exstop_sample(ptq);
2648				if (err)
2649					return err;
2650			}
2651			if (state->type & INTEL_PT_PWR_EXIT) {
2652				err = intel_pt_synth_pwrx_sample(ptq);
2653				if (err)
2654					return err;
2655			}
2656		}
2657	}
2658
2659	if (state->type & INTEL_PT_INSTRUCTION) {
2660		if (pt->sample_instructions) {
2661			err = intel_pt_synth_instruction_sample(ptq);
2662			if (err)
2663				return err;
2664		}
2665		if (pt->sample_cycles) {
2666			err = intel_pt_synth_cycle_sample(ptq);
2667			if (err)
2668				return err;
2669		}
2670	}
2671
2672	if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) {
2673		err = intel_pt_synth_transaction_sample(ptq);
2674		if (err)
2675			return err;
2676	}
2677
2678	if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) {
2679		err = intel_pt_synth_ptwrite_sample(ptq);
2680		if (err)
2681			return err;
2682	}
2683
2684	if (!(state->type & INTEL_PT_BRANCH))
2685		return 0;
2686
2687	if (pt->use_thread_stack) {
2688		thread_stack__event(ptq->thread, ptq->cpu, ptq->flags,
2689				    state->from_ip, state->to_ip, ptq->insn_len,
2690				    state->trace_nr, pt->callstack,
2691				    pt->br_stack_sz_plus,
2692				    pt->mispred_all);
2693	} else {
2694		thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr);
2695	}
2696
2697	if (pt->sample_branches) {
2698		if (state->from_nr != state->to_nr &&
2699		    state->from_ip && state->to_ip) {
2700			struct intel_pt_state *st = (struct intel_pt_state *)state;
2701			u64 to_ip = st->to_ip;
2702			u64 from_ip = st->from_ip;
2703
2704			/*
2705			 * perf cannot handle having different machines for ip
2706			 * and addr, so create 2 branches.
2707			 */
2708			st->to_ip = 0;
2709			err = intel_pt_synth_branch_sample(ptq);
2710			if (err)
2711				return err;
2712			st->from_ip = 0;
2713			st->to_ip = to_ip;
2714			err = intel_pt_synth_branch_sample(ptq);
2715			st->from_ip = from_ip;
2716		} else {
2717			err = intel_pt_synth_branch_sample(ptq);
2718		}
2719		if (err)
2720			return err;
2721	}
2722
2723	if (!ptq->sync_switch)
2724		return 0;
2725
2726	if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
2727		switch (ptq->switch_state) {
2728		case INTEL_PT_SS_NOT_TRACING:
2729		case INTEL_PT_SS_UNKNOWN:
2730		case INTEL_PT_SS_EXPECTING_SWITCH_IP:
2731			err = intel_pt_next_tid(pt, ptq);
2732			if (err)
2733				return err;
2734			ptq->switch_state = INTEL_PT_SS_TRACING;
2735			break;
2736		default:
2737			ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
2738			return 1;
2739		}
2740	} else if (!state->to_ip) {
2741		ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
2742	} else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
2743		ptq->switch_state = INTEL_PT_SS_UNKNOWN;
2744	} else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
2745		   state->to_ip == pt->ptss_ip &&
2746		   (ptq->flags & PERF_IP_FLAG_CALL)) {
2747		ptq->switch_state = INTEL_PT_SS_TRACING;
2748	}
2749
2750	return 0;
2751}
2752
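/*
 * Find the address of the kernel's __switch_to symbol, used to recognize
 * context switches in the trace, and optionally (via ptss_ip) the address
 * of the tracepoint or callback that marks the switch, which depends on how
 * context switch events were recorded.
 */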
2753static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
2754{
2755	struct machine *machine = pt->machine;
2756	struct map *map;
2757	struct symbol *sym, *start;
2758	u64 ip, switch_ip = 0;
2759	const char *ptss;
2760
2761	if (ptss_ip)
2762		*ptss_ip = 0;
2763
2764	map = machine__kernel_map(machine);
2765	if (!map)
2766		return 0;
2767
2768	if (map__load(map))
2769		return 0;
2770
2771	start = dso__first_symbol(map__dso(map));
2772
2773	for (sym = start; sym; sym = dso__next_symbol(sym)) {
2774		if (sym->binding == STB_GLOBAL &&
2775		    !strcmp(sym->name, "__switch_to")) {
2776			ip = map__unmap_ip(map, sym->start);
2777			if (ip >= map__start(map) && ip < map__end(map)) {
2778				switch_ip = ip;
2779				break;
2780			}
2781		}
2782	}
2783
2784	if (!switch_ip || !ptss_ip)
2785		return 0;
2786
2787	if (pt->have_sched_switch == 1)
2788		ptss = "perf_trace_sched_switch";
2789	else
2790		ptss = "__perf_event_task_sched_out";
2791
2792	for (sym = start; sym; sym = dso__next_symbol(sym)) {
2793		if (!strcmp(sym->name, ptss)) {
2794			ip = map__unmap_ip(map, sym->start);
2795			if (ip >= map__start(map) && ip < map__end(map)) {
2796				*ptss_ip = ip;
2797				break;
2798			}
2799		}
2800	}
2801
2802	return switch_ip;
2803}
2804
2805static void intel_pt_enable_sync_switch(struct intel_pt *pt)
2806{
2807	unsigned int i;
2808
2809	if (pt->sync_switch_not_supported)
2810		return;
2811
2812	pt->sync_switch = true;
2813
2814	for (i = 0; i < pt->queues.nr_queues; i++) {
2815		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
2816		struct intel_pt_queue *ptq = queue->priv;
2817
2818		if (ptq)
2819			ptq->sync_switch = true;
2820	}
2821}
2822
2823static void intel_pt_disable_sync_switch(struct intel_pt *pt)
2824{
2825	unsigned int i;
2826
2827	pt->sync_switch = false;
2828
2829	for (i = 0; i < pt->queues.nr_queues; i++) {
2830		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
2831		struct intel_pt_queue *ptq = queue->priv;
2832
2833		if (ptq) {
2834			ptq->sync_switch = false;
2835			intel_pt_next_tid(pt, ptq);
2836		}
2837	}
2838}
2839
2840/*
2841 * To filter against time ranges, it is only necessary to look at the next start
2842 * or end time.
2843 */
2844static bool intel_pt_next_time(struct intel_pt_queue *ptq)
2845{
2846	struct intel_pt *pt = ptq->pt;
2847
2848	if (ptq->sel_start) {
2849		/* Next time is an end time */
2850		ptq->sel_start = false;
2851		ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].end;
2852		return true;
2853	} else if (ptq->sel_idx + 1 < pt->range_cnt) {
2854		/* Next time is a start time */
2855		ptq->sel_start = true;
2856		ptq->sel_idx += 1;
2857		ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].start;
2858		return true;
2859	}
2860
2861	/* No next time */
2862	return false;
2863}
2864
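/*
 * Apply the selected time ranges: fast forward the decoder to the next range
 * start while before it, and stop decoding (return 1) once past the last
 * range end. Returns 0 while inside a range, or a negative error code.
 */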
2865static int intel_pt_time_filter(struct intel_pt_queue *ptq, u64 *ff_timestamp)
2866{
2867	int err;
2868
2869	while (1) {
2870		if (ptq->sel_start) {
2871			if (ptq->timestamp >= ptq->sel_timestamp) {
2872				/* After start time, so consider next time */
2873				intel_pt_next_time(ptq);
2874				if (!ptq->sel_timestamp) {
2875					/* No end time */
2876					return 0;
2877				}
2878				/* Check against end time */
2879				continue;
2880			}
2881			/* Before start time, so fast forward */
2882			ptq->have_sample = false;
2883			if (ptq->sel_timestamp > *ff_timestamp) {
2884				if (ptq->sync_switch) {
2885					intel_pt_next_tid(ptq->pt, ptq);
2886					ptq->switch_state = INTEL_PT_SS_UNKNOWN;
2887				}
2888				*ff_timestamp = ptq->sel_timestamp;
2889				err = intel_pt_fast_forward(ptq->decoder,
2890							    ptq->sel_timestamp);
2891				if (err)
2892					return err;
2893			}
2894			return 0;
2895		} else if (ptq->timestamp > ptq->sel_timestamp) {
2896			/* After end time, so consider next time */
2897			if (!intel_pt_next_time(ptq)) {
2898				/* No next time range, so stop decoding */
2899				ptq->have_sample = false;
2900				ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
2901				return 1;
2902			}
2903			/* Check against next start time */
2904			continue;
2905		} else {
2906			/* Before end time */
2907			return 0;
2908		}
2909	}
2910}
2911
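/*
 * Decode one queue, synthesizing samples as decoding proceeds, until the
 * queue's timestamp reaches *timestamp (for non-timeless decoding), an error
 * occurs, or the trace data runs out (returns 1 for INTEL_PT_ERR_NODATA).
 */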
2912static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
2913{
2914	const struct intel_pt_state *state = ptq->state;
2915	struct intel_pt *pt = ptq->pt;
2916	u64 ff_timestamp = 0;
2917	int err;
2918
2919	if (!pt->kernel_start) {
2920		pt->kernel_start = machine__kernel_start(pt->machine);
2921		if (pt->per_cpu_mmaps &&
2922		    (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
2923		    !pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
2924		    !pt->sampling_mode && !pt->synth_opts.vm_time_correlation) {
2925			pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
2926			if (pt->switch_ip) {
2927				intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
2928					     pt->switch_ip, pt->ptss_ip);
2929				intel_pt_enable_sync_switch(pt);
2930			}
2931		}
2932	}
2933
2934	intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
2935		     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
2936	while (1) {
2937		err = intel_pt_sample(ptq);
2938		if (err)
2939			return err;
2940
2941		state = intel_pt_decode(ptq->decoder);
2942		if (state->err) {
2943			if (state->err == INTEL_PT_ERR_NODATA)
2944				return 1;
2945			if (ptq->sync_switch &&
2946			    state->from_ip >= pt->kernel_start) {
2947				ptq->sync_switch = false;
2948				intel_pt_next_tid(pt, ptq);
2949			}
2950			ptq->timestamp = state->est_timestamp;
2951			if (pt->synth_opts.errors) {
2952				err = intel_ptq_synth_error(ptq, state);
2953				if (err)
2954					return err;
2955			}
2956			continue;
2957		}
2958
2959		ptq->state = state;
2960		ptq->have_sample = true;
2961		intel_pt_sample_flags(ptq);
2962
2963		/* Use estimated TSC upon return to user space */
2964		if (pt->est_tsc &&
2965		    (state->from_ip >= pt->kernel_start || !state->from_ip) &&
2966		    state->to_ip && state->to_ip < pt->kernel_start) {
2967			intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
2968				     state->timestamp, state->est_timestamp);
2969			ptq->timestamp = state->est_timestamp;
2970		/* Use estimated TSC in unknown switch state */
2971		} else if (ptq->sync_switch &&
2972			   ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
2973			   intel_pt_is_switch_ip(ptq, state->to_ip) &&
2974			   ptq->next_tid == -1) {
2975			intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
2976				     state->timestamp, state->est_timestamp);
2977			ptq->timestamp = state->est_timestamp;
2978		} else if (state->timestamp > ptq->timestamp) {
2979			ptq->timestamp = state->timestamp;
2980		}
2981
2982		if (ptq->sel_timestamp) {
2983			err = intel_pt_time_filter(ptq, &ff_timestamp);
2984			if (err)
2985				return err;
2986		}
2987
2988		if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
2989			*timestamp = ptq->timestamp;
2990			return 0;
2991		}
2992	}
2993	return 0;
2994}
2995
2996static inline int intel_pt_update_queues(struct intel_pt *pt)
2997{
2998	if (pt->queues.new_data) {
2999		pt->queues.new_data = false;
3000		return intel_pt_setup_queues(pt);
3001	}
3002	return 0;
3003}
3004
3005static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp)
3006{
3007	unsigned int queue_nr;
3008	u64 ts;
3009	int ret;
3010
3011	while (1) {
3012		struct auxtrace_queue *queue;
3013		struct intel_pt_queue *ptq;
3014
3015		if (!pt->heap.heap_cnt)
3016			return 0;
3017
3018		if (pt->heap.heap_array[0].ordinal >= timestamp)
3019			return 0;
3020
3021		queue_nr = pt->heap.heap_array[0].queue_nr;
3022		queue = &pt->queues.queue_array[queue_nr];
3023		ptq = queue->priv;
3024
3025		intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
3026			     queue_nr, pt->heap.heap_array[0].ordinal,
3027			     timestamp);
3028
3029		auxtrace_heap__pop(&pt->heap);
3030
3031		if (pt->heap.heap_cnt) {
3032			ts = pt->heap.heap_array[0].ordinal + 1;
3033			if (ts > timestamp)
3034				ts = timestamp;
3035		} else {
3036			ts = timestamp;
3037		}
3038
3039		intel_pt_set_pid_tid_cpu(pt, queue);
3040
3041		ret = intel_pt_run_decoder(ptq, &ts);
3042
3043		if (ret < 0) {
3044			auxtrace_heap__add(&pt->heap, queue_nr, ts);
3045			return ret;
3046		}
3047
3048		if (!ret) {
3049			ret = auxtrace_heap__add(&pt->heap, queue_nr, ts);
3050			if (ret < 0)
3051				return ret;
3052		} else {
3053			ptq->on_heap = false;
3054		}
3055	}
3056
3057	return 0;
3058}
3059
3060static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
3061					    u64 time_)
3062{
3063	struct auxtrace_queues *queues = &pt->queues;
3064	unsigned int i;
3065	u64 ts = 0;
3066
3067	for (i = 0; i < queues->nr_queues; i++) {
3068		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
3069		struct intel_pt_queue *ptq = queue->priv;
3070
3071		if (ptq && (tid == -1 || ptq->tid == tid)) {
3072			ptq->time = time_;
3073			intel_pt_set_pid_tid_cpu(pt, queue);
3074			intel_pt_run_decoder(ptq, &ts);
3075		}
3076	}
3077	return 0;
3078}
3079
3080static void intel_pt_sample_set_pid_tid_cpu(struct intel_pt_queue *ptq,
3081					    struct auxtrace_queue *queue,
3082					    struct perf_sample *sample)
3083{
3084	struct machine *m = ptq->pt->machine;
3085
3086	ptq->pid = sample->pid;
3087	ptq->tid = sample->tid;
3088	ptq->cpu = queue->cpu;
3089
3090	intel_pt_log("queue %u cpu %d pid %d tid %d\n",
3091		     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
3092
3093	thread__zput(ptq->thread);
3094
3095	if (ptq->tid == -1)
3096		return;
3097
3098	if (ptq->pid == -1) {
3099		ptq->thread = machine__find_thread(m, -1, ptq->tid);
3100		if (ptq->thread)
3101			ptq->pid = thread__pid(ptq->thread);
3102		return;
3103	}
3104
3105	ptq->thread = machine__findnew_thread(m, ptq->pid, ptq->tid);
3106}
3107
3108static int intel_pt_process_timeless_sample(struct intel_pt *pt,
3109					    struct perf_sample *sample)
3110{
3111	struct auxtrace_queue *queue;
3112	struct intel_pt_queue *ptq;
3113	u64 ts = 0;
3114
3115	queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session);
3116	if (!queue)
3117		return -EINVAL;
3118
3119	ptq = queue->priv;
3120	if (!ptq)
3121		return 0;
3122
3123	ptq->stop = false;
3124	ptq->time = sample->time;
3125	intel_pt_sample_set_pid_tid_cpu(ptq, queue, sample);
3126	intel_pt_run_decoder(ptq, &ts);
3127	return 0;
3128}
3129
3130static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
3131{
3132	return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
3133				    sample->pid, sample->tid, 0, sample->time,
3134				    sample->machine_pid, sample->vcpu);
3135}
3136
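/*
 * Map a CPU number to its queue. Queue numbers normally correspond to CPU
 * numbers, so try the matching index first, then search backwards and
 * forwards from there.
 */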
3137static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
3138{
3139	unsigned i, j;
3140
3141	if (cpu < 0 || !pt->queues.nr_queues)
3142		return NULL;
3143
3144	if ((unsigned)cpu >= pt->queues.nr_queues)
3145		i = pt->queues.nr_queues - 1;
3146	else
3147		i = cpu;
3148
3149	if (pt->queues.queue_array[i].cpu == cpu)
3150		return pt->queues.queue_array[i].priv;
3151
3152	for (j = 0; i > 0; j++) {
3153		if (pt->queues.queue_array[--i].cpu == cpu)
3154			return pt->queues.queue_array[i].priv;
3155	}
3156
3157	for (; j < pt->queues.nr_queues; j++) {
3158		if (pt->queues.queue_array[j].cpu == cpu)
3159			return pt->queues.queue_array[j].priv;
3160	}
3161
3162	return NULL;
3163}
3164
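/*
 * Feed a context switch into the per-queue switch state machine. Returns 0
 * if the switch was deferred until the decoder reaches the switch IP, 1 if
 * the caller should update the machine's current tid immediately, or a
 * negative error code.
 */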
3165static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
3166				u64 timestamp)
3167{
3168	struct intel_pt_queue *ptq;
3169	int err;
3170
3171	if (!pt->sync_switch)
3172		return 1;
3173
3174	ptq = intel_pt_cpu_to_ptq(pt, cpu);
3175	if (!ptq || !ptq->sync_switch)
3176		return 1;
3177
3178	switch (ptq->switch_state) {
3179	case INTEL_PT_SS_NOT_TRACING:
3180		break;
3181	case INTEL_PT_SS_UNKNOWN:
3182	case INTEL_PT_SS_TRACING:
3183		ptq->next_tid = tid;
3184		ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
3185		return 0;
3186	case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
3187		if (!ptq->on_heap) {
3188			ptq->timestamp = perf_time_to_tsc(timestamp,
3189							  &pt->tc);
3190			err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
3191						 ptq->timestamp);
3192			if (err)
3193				return err;
3194			ptq->on_heap = true;
3195		}
3196		ptq->switch_state = INTEL_PT_SS_TRACING;
3197		break;
3198	case INTEL_PT_SS_EXPECTING_SWITCH_IP:
3199		intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
3200		break;
3201	default:
3202		break;
3203	}
3204
3205	ptq->next_tid = -1;
3206
3207	return 1;
3208}
3209
3210#ifdef HAVE_LIBTRACEEVENT
3211static int intel_pt_process_switch(struct intel_pt *pt,
3212				   struct perf_sample *sample)
3213{
3214	pid_t tid;
3215	int cpu, ret;
3216	struct evsel *evsel = evlist__id2evsel(pt->session->evlist, sample->id);
3217
3218	if (evsel != pt->switch_evsel)
3219		return 0;
3220
3221	tid = evsel__intval(evsel, sample, "next_pid");
3222	cpu = sample->cpu;
3223
3224	intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
3225		     cpu, tid, sample->time, perf_time_to_tsc(sample->time,
3226		     &pt->tc));
3227
3228	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
3229	if (ret <= 0)
3230		return ret;
3231
3232	return machine__set_current_tid(pt->machine, cpu, -1, tid);
3233}
3234#endif /* HAVE_LIBTRACEEVENT */
3235
3236static int intel_pt_context_switch_in(struct intel_pt *pt,
3237				      struct perf_sample *sample)
3238{
3239	pid_t pid = sample->pid;
3240	pid_t tid = sample->tid;
3241	int cpu = sample->cpu;
3242
3243	if (pt->sync_switch) {
3244		struct intel_pt_queue *ptq;
3245
3246		ptq = intel_pt_cpu_to_ptq(pt, cpu);
3247		if (ptq && ptq->sync_switch) {
3248			ptq->next_tid = -1;
3249			switch (ptq->switch_state) {
3250			case INTEL_PT_SS_NOT_TRACING:
3251			case INTEL_PT_SS_UNKNOWN:
3252			case INTEL_PT_SS_TRACING:
3253				break;
3254			case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
3255			case INTEL_PT_SS_EXPECTING_SWITCH_IP:
3256				ptq->switch_state = INTEL_PT_SS_TRACING;
3257				break;
3258			default:
3259				break;
3260			}
3261		}
3262	}
3263
3264	/*
3265	 * If the current tid has not been updated yet, ensure it is now, given
3266	 * that a "switch in" event has occurred.
3267	 */
3268	if (machine__get_current_tid(pt->machine, cpu) == tid)
3269		return 0;
3270
3271	return machine__set_current_tid(pt->machine, cpu, pid, tid);
3272}
3273
3274static int intel_pt_guest_context_switch(struct intel_pt *pt,
3275					 union perf_event *event,
3276					 struct perf_sample *sample)
3277{
3278	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
3279	struct machines *machines = &pt->session->machines;
3280	struct machine *machine = machines__find(machines, sample->machine_pid);
3281
3282	pt->have_guest_sideband = true;
3283
3284	/*
3285	 * sync_switch cannot handle guest machines at present, so just disable
3286	 * it.
3287	 */
3288	pt->sync_switch_not_supported = true;
3289	if (pt->sync_switch)
3290		intel_pt_disable_sync_switch(pt);
3291
3292	if (out)
3293		return 0;
3294
3295	if (!machine)
3296		return -EINVAL;
3297
3298	return machine__set_current_tid(machine, sample->vcpu, sample->pid, sample->tid);
3299}
3300
3301static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
3302				   struct perf_sample *sample)
3303{
3304	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
3305	pid_t pid, tid;
3306	int cpu, ret;
3307
3308	if (perf_event__is_guest(event))
3309		return intel_pt_guest_context_switch(pt, event, sample);
3310
3311	cpu = sample->cpu;
3312
3313	if (pt->have_sched_switch == 3) {
3314		if (!out)
3315			return intel_pt_context_switch_in(pt, sample);
3316		if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
3317			pr_err("Expecting CPU-wide context switch event\n");
3318			return -EINVAL;
3319		}
3320		pid = event->context_switch.next_prev_pid;
3321		tid = event->context_switch.next_prev_tid;
3322	} else {
3323		if (out)
3324			return 0;
3325		pid = sample->pid;
3326		tid = sample->tid;
3327	}
3328
3329	if (tid == -1)
3330		intel_pt_log("context_switch event has no tid\n");
3331
3332	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
3333	if (ret <= 0)
3334		return ret;
3335
3336	return machine__set_current_tid(pt->machine, cpu, pid, tid);
3337}
3338
3339static int intel_pt_process_itrace_start(struct intel_pt *pt,
3340					 union perf_event *event,
3341					 struct perf_sample *sample)
3342{
3343	if (!pt->per_cpu_mmaps)
3344		return 0;
3345
3346	intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
3347		     sample->cpu, event->itrace_start.pid,
3348		     event->itrace_start.tid, sample->time,
3349		     perf_time_to_tsc(sample->time, &pt->tc));
3350
3351	return machine__set_current_tid(pt->machine, sample->cpu,
3352					event->itrace_start.pid,
3353					event->itrace_start.tid);
3354}
3355
3356static int intel_pt_process_aux_output_hw_id(struct intel_pt *pt,
3357					     union perf_event *event,
3358					     struct perf_sample *sample)
3359{
3360	u64 hw_id = event->aux_output_hw_id.hw_id;
3361	struct auxtrace_queue *queue;
3362	struct intel_pt_queue *ptq;
3363	struct evsel *evsel;
3364
3365	queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session);
3366	evsel = evlist__id2evsel_strict(pt->session->evlist, sample->id);
3367	if (!queue || !queue->priv || !evsel || hw_id >= INTEL_PT_MAX_PEBS) {
3368		pr_err("Bad AUX output hardware ID\n");
3369		return -EINVAL;
3370	}
3371
3372	ptq = queue->priv;
3373
3374	ptq->pebs[hw_id].evsel = evsel;
3375	ptq->pebs[hw_id].id = sample->id;
3376
3377	return 0;
3378}
3379
3380static int intel_pt_find_map(struct thread *thread, u8 cpumode, u64 addr,
3381			     struct addr_location *al)
3382{
3383	if (!al->map || addr < map__start(al->map) || addr >= map__end(al->map)) {
3384		if (!thread__find_map(thread, cpumode, addr, al))
3385			return -1;
3386	}
3387
3388	return 0;
3389}
3390
3391/* Invalidate all instruction cache entries that overlap the text poke */
3392static int intel_pt_text_poke(struct intel_pt *pt, union perf_event *event)
3393{
3394	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
3395	u64 addr = event->text_poke.addr + event->text_poke.new_len - 1;
3396	/* Assume the text poke begins in a basic block of no more than 4096 bytes */
3397	int cnt = 4096 + event->text_poke.new_len;
3398	struct thread *thread = pt->unknown_thread;
3399	struct addr_location al;
3400	struct machine *machine = pt->machine;
3401	struct intel_pt_cache_entry *e;
3402	u64 offset;
3403	int ret = 0;
3404
3405	addr_location__init(&al);
3406	if (!event->text_poke.new_len)
3407		goto out;
3408
3409	for (; cnt; cnt--, addr--) {
3410		struct dso *dso;
3411
3412		if (intel_pt_find_map(thread, cpumode, addr, &al)) {
3413			if (addr < event->text_poke.addr)
3414				goto out;
3415			continue;
3416		}
3417
3418		dso = map__dso(al.map);
3419		if (!dso || !dso->auxtrace_cache)
3420			continue;
3421
3422		offset = map__map_ip(al.map, addr);
3423
3424		e = intel_pt_cache_lookup(dso, machine, offset);
3425		if (!e)
3426			continue;
3427
3428		if (addr + e->byte_cnt + e->length <= event->text_poke.addr) {
3429			/*
3430			 * No overlap. Working backwards, there cannot be another
3431			 * basic block that overlaps the text poke if there is a
3432			 * branch instruction before the text poke address.
3433			 */
3434			if (e->branch != INTEL_PT_BR_NO_BRANCH)
3435				goto out;
3436		} else {
3437			intel_pt_cache_invalidate(dso, machine, offset);
3438			intel_pt_log("Invalidated instruction cache for %s at %#"PRIx64"\n",
3439				     dso->long_name, addr);
3440		}
3441	}
3442out:
3443	addr_location__exit(&al);
3444	return ret;
3445}
3446
3447static int intel_pt_process_event(struct perf_session *session,
3448				  union perf_event *event,
3449				  struct perf_sample *sample,
3450				  struct perf_tool *tool)
3451{
3452	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3453					   auxtrace);
3454	u64 timestamp;
3455	int err = 0;
3456
3457	if (dump_trace)
3458		return 0;
3459
3460	if (!tool->ordered_events) {
3461		pr_err("Intel Processor Trace requires ordered events\n");
3462		return -EINVAL;
3463	}
3464
3465	if (sample->time && sample->time != (u64)-1)
3466		timestamp = perf_time_to_tsc(sample->time, &pt->tc);
3467	else
3468		timestamp = 0;
3469
3470	if (timestamp || pt->timeless_decoding) {
3471		err = intel_pt_update_queues(pt);
3472		if (err)
3473			return err;
3474	}
3475
3476	if (pt->timeless_decoding) {
3477		if (pt->sampling_mode) {
3478			if (sample->aux_sample.size)
3479				err = intel_pt_process_timeless_sample(pt,
3480								       sample);
3481		} else if (event->header.type == PERF_RECORD_EXIT) {
3482			err = intel_pt_process_timeless_queues(pt,
3483							       event->fork.tid,
3484							       sample->time);
3485		}
3486	} else if (timestamp) {
3487		if (!pt->first_timestamp)
3488			intel_pt_first_timestamp(pt, timestamp);
3489		err = intel_pt_process_queues(pt, timestamp);
3490	}
3491	if (err)
3492		return err;
3493
3494	if (event->header.type == PERF_RECORD_SAMPLE) {
3495		if (pt->synth_opts.add_callchain && !sample->callchain)
3496			intel_pt_add_callchain(pt, sample);
3497		if (pt->synth_opts.add_last_branch && !sample->branch_stack)
3498			intel_pt_add_br_stack(pt, sample);
3499	}
3500
3501	if (event->header.type == PERF_RECORD_AUX &&
3502	    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
3503	    pt->synth_opts.errors) {
3504		err = intel_pt_lost(pt, sample);
3505		if (err)
3506			return err;
3507	}
3508
3509#ifdef HAVE_LIBTRACEEVENT
3510	if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE)
3511		err = intel_pt_process_switch(pt, sample);
3512	else
3513#endif
3514	if (event->header.type == PERF_RECORD_ITRACE_START)
3515		err = intel_pt_process_itrace_start(pt, event, sample);
3516	else if (event->header.type == PERF_RECORD_AUX_OUTPUT_HW_ID)
3517		err = intel_pt_process_aux_output_hw_id(pt, event, sample);
3518	else if (event->header.type == PERF_RECORD_SWITCH ||
3519		 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
3520		err = intel_pt_context_switch(pt, event, sample);
3521
3522	if (!err && event->header.type == PERF_RECORD_TEXT_POKE)
3523		err = intel_pt_text_poke(pt, event);
3524
3525	if (intel_pt_enable_logging && intel_pt_log_events(pt, sample->time)) {
3526		intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ",
3527			     event->header.type, sample->cpu, sample->time, timestamp);
3528		intel_pt_log_event(event);
3529	}
3530
3531	return err;
3532}
3533
3534static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
3535{
3536	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3537					   auxtrace);
3538	int ret;
3539
3540	if (dump_trace)
3541		return 0;
3542
3543	if (!tool->ordered_events)
3544		return -EINVAL;
3545
3546	ret = intel_pt_update_queues(pt);
3547	if (ret < 0)
3548		return ret;
3549
3550	if (pt->timeless_decoding)
3551		return intel_pt_process_timeless_queues(pt, -1,
3552							MAX_TIMESTAMP - 1);
3553
3554	return intel_pt_process_queues(pt, MAX_TIMESTAMP);
3555}
3556
3557static void intel_pt_free_events(struct perf_session *session)
3558{
3559	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3560					   auxtrace);
3561	struct auxtrace_queues *queues = &pt->queues;
3562	unsigned int i;
3563
3564	for (i = 0; i < queues->nr_queues; i++) {
3565		intel_pt_free_queue(queues->queue_array[i].priv);
3566		queues->queue_array[i].priv = NULL;
3567	}
3568	intel_pt_log_disable();
3569	auxtrace_queues__free(queues);
3570}
3571
3572static void intel_pt_free(struct perf_session *session)
3573{
3574	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3575					   auxtrace);
3576
3577	auxtrace_heap__free(&pt->heap);
3578	intel_pt_free_events(session);
3579	session->auxtrace = NULL;
3580	intel_pt_free_vmcs_info(pt);
3581	thread__put(pt->unknown_thread);
3582	addr_filters__exit(&pt->filts);
3583	zfree(&pt->chain);
3584	zfree(&pt->filter);
3585	zfree(&pt->time_ranges);
3586	zfree(&pt->br_stack);
3587	free(pt);
3588}
3589
3590static bool intel_pt_evsel_is_auxtrace(struct perf_session *session,
3591				       struct evsel *evsel)
3592{
3593	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3594					   auxtrace);
3595
3596	return evsel->core.attr.type == pt->pmu_type;
3597}
3598
3599static int intel_pt_process_auxtrace_event(struct perf_session *session,
3600					   union perf_event *event,
3601					   struct perf_tool *tool __maybe_unused)
3602{
3603	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3604					   auxtrace);
3605
3606	if (!pt->data_queued) {
3607		struct auxtrace_buffer *buffer;
3608		off_t data_offset;
3609		int fd = perf_data__fd(session->data);
3610		int err;
3611
3612		if (perf_data__is_pipe(session->data)) {
3613			data_offset = 0;
3614		} else {
3615			data_offset = lseek(fd, 0, SEEK_CUR);
3616			if (data_offset == -1)
3617				return -errno;
3618		}
3619
3620		err = auxtrace_queues__add_event(&pt->queues, session, event,
3621						 data_offset, &buffer);
3622		if (err)
3623			return err;
3624
3625		/* Dump here now that we have copied a piped trace out of the pipe */
3626		if (dump_trace) {
3627			if (auxtrace_buffer__get_data(buffer, fd)) {
3628				intel_pt_dump_event(pt, buffer->data,
3629						    buffer->size);
3630				auxtrace_buffer__put_data(buffer);
3631			}
3632		}
3633	}
3634
3635	return 0;
3636}
3637
3638static int intel_pt_queue_data(struct perf_session *session,
3639			       struct perf_sample *sample,
3640			       union perf_event *event, u64 data_offset)
3641{
3642	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3643					   auxtrace);
3644	u64 timestamp;
3645
3646	if (event) {
3647		return auxtrace_queues__add_event(&pt->queues, session, event,
3648						  data_offset, NULL);
3649	}
3650
3651	if (sample->time && sample->time != (u64)-1)
3652		timestamp = perf_time_to_tsc(sample->time, &pt->tc);
3653	else
3654		timestamp = 0;
3655
3656	return auxtrace_queues__add_sample(&pt->queues, session, sample,
3657					   data_offset, timestamp);
3658}
3659
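/*
 * Helpers for synthesizing the attr events that describe the samples
 * intel_pt itself will synthesize: intel_pt_event_synth() simply delivers
 * each synthesized attr event into the session.
 */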
3660struct intel_pt_synth {
3661	struct perf_tool dummy_tool;
3662	struct perf_session *session;
3663};
3664
3665static int intel_pt_event_synth(struct perf_tool *tool,
3666				union perf_event *event,
3667				struct perf_sample *sample __maybe_unused,
3668				struct machine *machine __maybe_unused)
3669{
3670	struct intel_pt_synth *intel_pt_synth =
3671			container_of(tool, struct intel_pt_synth, dummy_tool);
3672
3673	return perf_session__deliver_synth_event(intel_pt_synth->session, event,
3674						 NULL);
3675}
3676
3677static int intel_pt_synth_event(struct perf_session *session, const char *name,
3678				struct perf_event_attr *attr, u64 id)
3679{
3680	struct intel_pt_synth intel_pt_synth;
3681	int err;
3682
3683	pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
3684		 name, id, (u64)attr->sample_type);
3685
3686	memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
3687	intel_pt_synth.session = session;
3688
3689	err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
3690					  &id, intel_pt_event_synth);
3691	if (err)
3692		pr_err("%s: failed to synthesize '%s' event type\n",
3693		       __func__, name);
3694
3695	return err;
3696}
3697
3698static void intel_pt_set_event_name(struct evlist *evlist, u64 id,
3699				    const char *name)
3700{
3701	struct evsel *evsel;
3702
3703	evlist__for_each_entry(evlist, evsel) {
3704		if (evsel->core.id && evsel->core.id[0] == id) {
3705			if (evsel->name)
3706				zfree(&evsel->name);
3707			evsel->name = strdup(name);
3708			break;
3709		}
3710	}
3711}
3712
3713static struct evsel *intel_pt_evsel(struct intel_pt *pt,
3714					 struct evlist *evlist)
3715{
3716	struct evsel *evsel;
3717
3718	evlist__for_each_entry(evlist, evsel) {
3719		if (evsel->core.attr.type == pt->pmu_type && evsel->core.ids)
3720			return evsel;
3721	}
3722
3723	return NULL;
3724}
3725
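/*
 * Build perf event attributes for the samples to be synthesized (branches,
 * instructions, cycles, transactions, ptwrite, power and event trace
 * events), derived from the Intel PT evsel's attributes, and give each one
 * an id starting 1000000000 above the evsel's first id.
 */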
3726static int intel_pt_synth_events(struct intel_pt *pt,
3727				 struct perf_session *session)
3728{
3729	struct evlist *evlist = session->evlist;
3730	struct evsel *evsel = intel_pt_evsel(pt, evlist);
3731	struct perf_event_attr attr;
3732	u64 id;
3733	int err;
3734
3735	if (!evsel) {
3736		pr_debug("There are no selected events with Intel Processor Trace data\n");
3737		return 0;
3738	}
3739
3740	memset(&attr, 0, sizeof(struct perf_event_attr));
3741	attr.size = sizeof(struct perf_event_attr);
3742	attr.type = PERF_TYPE_HARDWARE;
3743	attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
3744	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
3745			    PERF_SAMPLE_PERIOD;
3746	if (pt->timeless_decoding)
3747		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
3748	else
3749		attr.sample_type |= PERF_SAMPLE_TIME;
3750	if (!pt->per_cpu_mmaps)
3751		attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
3752	attr.exclude_user = evsel->core.attr.exclude_user;
3753	attr.exclude_kernel = evsel->core.attr.exclude_kernel;
3754	attr.exclude_hv = evsel->core.attr.exclude_hv;
3755	attr.exclude_host = evsel->core.attr.exclude_host;
3756	attr.exclude_guest = evsel->core.attr.exclude_guest;
3757	attr.sample_id_all = evsel->core.attr.sample_id_all;
3758	attr.read_format = evsel->core.attr.read_format;
3759
3760	id = evsel->core.id[0] + 1000000000;
3761	if (!id)
3762		id = 1;
3763
3764	if (pt->synth_opts.branches) {
3765		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
3766		attr.sample_period = 1;
3767		attr.sample_type |= PERF_SAMPLE_ADDR;
3768		err = intel_pt_synth_event(session, "branches", &attr, id);
3769		if (err)
3770			return err;
3771		pt->sample_branches = true;
3772		pt->branches_sample_type = attr.sample_type;
3773		pt->branches_id = id;
3774		id += 1;
3775		attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
3776	}
3777
3778	if (pt->synth_opts.callchain)
3779		attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
3780	if (pt->synth_opts.last_branch) {
3781		attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
3782		/*
3783		 * We don't use the hardware index, but the sample generation
3784		 * code uses the new format branch_stack with this field,
3785		 * so the event attributes must indicate that it's present.
3786		 */
3787		attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
3788	}
3789
3790	if (pt->synth_opts.instructions) {
3791		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
3792		if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
3793			attr.sample_period =
3794				intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
3795		else
3796			attr.sample_period = pt->synth_opts.period;
3797		err = intel_pt_synth_event(session, "instructions", &attr, id);
3798		if (err)
3799			return err;
3800		pt->sample_instructions = true;
3801		pt->instructions_sample_type = attr.sample_type;
3802		pt->instructions_id = id;
3803		id += 1;
3804	}
3805
3806	if (pt->synth_opts.cycles) {
3807		attr.config = PERF_COUNT_HW_CPU_CYCLES;
3808		if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
3809			attr.sample_period =
3810				intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
3811		else
3812			attr.sample_period = pt->synth_opts.period;
3813		err = intel_pt_synth_event(session, "cycles", &attr, id);
3814		if (err)
3815			return err;
3816		pt->sample_cycles = true;
3817		pt->cycles_sample_type = attr.sample_type;
3818		pt->cycles_id = id;
3819		id += 1;
3820	}
3821
3822	attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD;
3823	attr.sample_period = 1;
3824
3825	if (pt->synth_opts.transactions) {
3826		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
3827		err = intel_pt_synth_event(session, "transactions", &attr, id);
3828		if (err)
3829			return err;
3830		pt->sample_transactions = true;
3831		pt->transactions_sample_type = attr.sample_type;
3832		pt->transactions_id = id;
3833		intel_pt_set_event_name(evlist, id, "transactions");
3834		id += 1;
3835	}
3836
3837	attr.type = PERF_TYPE_SYNTH;
3838	attr.sample_type |= PERF_SAMPLE_RAW;
3839
3840	if (pt->synth_opts.ptwrites) {
3841		attr.config = PERF_SYNTH_INTEL_PTWRITE;
3842		err = intel_pt_synth_event(session, "ptwrite", &attr, id);
3843		if (err)
3844			return err;
3845		pt->sample_ptwrites = true;
3846		pt->ptwrites_sample_type = attr.sample_type;
3847		pt->ptwrites_id = id;
3848		intel_pt_set_event_name(evlist, id, "ptwrite");
3849		id += 1;
3850	}
3851
3852	if (pt->synth_opts.pwr_events) {
3853		pt->sample_pwr_events = true;
3854		pt->pwr_events_sample_type = attr.sample_type;
3855
3856		attr.config = PERF_SYNTH_INTEL_CBR;
3857		err = intel_pt_synth_event(session, "cbr", &attr, id);
3858		if (err)
3859			return err;
3860		pt->cbr_id = id;
3861		intel_pt_set_event_name(evlist, id, "cbr");
3862		id += 1;
3863
3864		attr.config = PERF_SYNTH_INTEL_PSB;
3865		err = intel_pt_synth_event(session, "psb", &attr, id);
3866		if (err)
3867			return err;
3868		pt->psb_id = id;
3869		intel_pt_set_event_name(evlist, id, "psb");
3870		id += 1;
3871	}
3872
3873	if (pt->synth_opts.pwr_events && (evsel->core.attr.config & INTEL_PT_CFG_PWR_EVT_EN)) {
3874		attr.config = PERF_SYNTH_INTEL_MWAIT;
3875		err = intel_pt_synth_event(session, "mwait", &attr, id);
3876		if (err)
3877			return err;
3878		pt->mwait_id = id;
3879		intel_pt_set_event_name(evlist, id, "mwait");
3880		id += 1;
3881
3882		attr.config = PERF_SYNTH_INTEL_PWRE;
3883		err = intel_pt_synth_event(session, "pwre", &attr, id);
3884		if (err)
3885			return err;
3886		pt->pwre_id = id;
3887		intel_pt_set_event_name(evlist, id, "pwre");
3888		id += 1;
3889
3890		attr.config = PERF_SYNTH_INTEL_EXSTOP;
3891		err = intel_pt_synth_event(session, "exstop", &attr, id);
3892		if (err)
3893			return err;
3894		pt->exstop_id = id;
3895		intel_pt_set_event_name(evlist, id, "exstop");
3896		id += 1;
3897
3898		attr.config = PERF_SYNTH_INTEL_PWRX;
3899		err = intel_pt_synth_event(session, "pwrx", &attr, id);
3900		if (err)
3901			return err;
3902		pt->pwrx_id = id;
3903		intel_pt_set_event_name(evlist, id, "pwrx");
3904		id += 1;
3905	}
3906
3907	if (pt->synth_opts.intr_events && (evsel->core.attr.config & INTEL_PT_CFG_EVT_EN)) {
3908		attr.config = PERF_SYNTH_INTEL_EVT;
3909		err = intel_pt_synth_event(session, "evt", &attr, id);
3910		if (err)
3911			return err;
3912		pt->evt_sample_type = attr.sample_type;
3913		pt->evt_id = id;
3914		intel_pt_set_event_name(evlist, id, "evt");
3915		id += 1;
3916	}
3917
3918	if (pt->synth_opts.intr_events && pt->cap_event_trace) {
3919		attr.config = PERF_SYNTH_INTEL_IFLAG_CHG;
3920		err = intel_pt_synth_event(session, "iflag", &attr, id);
3921		if (err)
3922			return err;
3923		pt->iflag_chg_sample_type = attr.sample_type;
3924		pt->iflag_chg_id = id;
3925		intel_pt_set_event_name(evlist, id, "iflag");
3926		id += 1;
3927	}
3928
3929	return 0;
3930}
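/*
 * Note: each id allocated above becomes the sample id of one synthesized
 * event type, which is how consumers later tell e.g. "ptwrite" samples
 * from "cbr" samples.  As a rough usage sketch (assuming the trace
 * contains the relevant packets), decoding with "perf script --itrace=wp"
 * exercises the ptwrite and power-event paths set up here.
 */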
3931
3932static void intel_pt_setup_pebs_events(struct intel_pt *pt)
3933{
3934	struct evsel *evsel;
3935
3936	if (!pt->synth_opts.other_events)
3937		return;
3938
3939	evlist__for_each_entry(pt->session->evlist, evsel) {
3940		if (evsel->core.attr.aux_output && evsel->core.id) {
3941			if (pt->single_pebs) {
3942				pt->single_pebs = false;
3943				return;
3944			}
3945			pt->single_pebs = true;
3946			pt->sample_pebs = true;
3947			pt->pebs_evsel = evsel;
3948		}
3949	}
3950}
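/*
 * Note on the loop above: the first evsel with aux_output set makes
 * single_pebs true; finding a second one flips it back off and stops the
 * search, because with more than one PEBS-via-PT event each PEBS record
 * must be attributed to its evsel individually (via applicable_counters)
 * instead of assuming a single event.
 */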
3951
3952static struct evsel *intel_pt_find_sched_switch(struct evlist *evlist)
3953{
3954	struct evsel *evsel;
3955
3956	evlist__for_each_entry_reverse(evlist, evsel) {
3957		const char *name = evsel__name(evsel);
3958
3959		if (!strcmp(name, "sched:sched_switch"))
3960			return evsel;
3961	}
3962
3963	return NULL;
3964}
3965
3966static bool intel_pt_find_switch(struct evlist *evlist)
3967{
3968	struct evsel *evsel;
3969
3970	evlist__for_each_entry(evlist, evsel) {
3971		if (evsel->core.attr.context_switch)
3972			return true;
3973	}
3974
3975	return false;
3976}
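/*
 * The two helpers above match the two ways context switches may have been
 * recorded: have_sched_switch == 1 means a sched:sched_switch tracepoint
 * evsel is expected in the evlist, whereas have_sched_switch == 2 means
 * switches were captured with the context_switch attribute flag
 * (PERF_RECORD_SWITCH).  intel_pt_process_auxtrace_info() below checks
 * for whichever form applies.
 */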
3977
3978static int intel_pt_perf_config(const char *var, const char *value, void *data)
3979{
3980	struct intel_pt *pt = data;
3981
3982	if (!strcmp(var, "intel-pt.mispred-all"))
3983		pt->mispred_all = perf_config_bool(var, value);
3984
3985	if (!strcmp(var, "intel-pt.max-loops"))
3986		perf_config_int(&pt->max_loops, var, value);
3987
3988	return 0;
3989}
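/*
 * For illustration, the (optional) perf config entries read above would
 * look like this in the usual INI-style ~/.perfconfig (values are
 * examples only):
 *
 *	[intel-pt]
 *		mispred-all = on
 *		max-loops = 100000
 *
 * mispred-all marks all synthesized branches as mispredicted (useful when
 * feeding autofdo), and max-loops bounds how long the decoder may loop
 * over the same code without consuming trace before giving up.
 */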
3990
3991/* Find least TSC which converts to ns or later */
3992static u64 intel_pt_tsc_start(u64 ns, struct intel_pt *pt)
3993{
3994	u64 tsc, tm;
3995
3996	tsc = perf_time_to_tsc(ns, &pt->tc);
3997
3998	while (1) {
3999		tm = tsc_to_perf_time(tsc, &pt->tc);
4000		if (tm < ns)
4001			break;
4002		tsc -= 1;
4003	}
4004
4005	while (tm < ns)
4006		tm = tsc_to_perf_time(++tsc, &pt->tc);
4007
4008	return tsc;
4009}
4010
4011/* Find greatest TSC which converts to ns or earlier */
4012static u64 intel_pt_tsc_end(u64 ns, struct intel_pt *pt)
4013{
4014	u64 tsc, tm;
4015
4016	tsc = perf_time_to_tsc(ns, &pt->tc);
4017
4018	while (1) {
4019		tm = tsc_to_perf_time(tsc, &pt->tc);
4020		if (tm > ns)
4021			break;
4022		tsc += 1;
4023	}
4024
4025	while (tm > ns)
4026		tm = tsc_to_perf_time(--tsc, &pt->tc);
4027
4028	return tsc;
4029}
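/*
 * Illustration of the two searches above: they rely only on
 * tsc_to_perf_time() being monotonic, and they walk in both directions
 * because perf_time_to_tsc() may be off by a tick due to rounding.  With
 * a hypothetical conversion of ns = 3 * tsc:
 *
 *	intel_pt_tsc_start(10) returns 4  (3*4 = 12 >= 10, but 3*3 = 9 < 10)
 *	intel_pt_tsc_end(10)   returns 3  (3*3 = 9 <= 10, but 3*4 = 12 > 10)
 *
 * i.e. the least TSC converting to 10 ns or later, and the greatest TSC
 * converting to 10 ns or earlier, respectively.
 */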
4030
4031static int intel_pt_setup_time_ranges(struct intel_pt *pt,
4032				      struct itrace_synth_opts *opts)
4033{
4034	struct perf_time_interval *p = opts->ptime_range;
4035	int n = opts->range_num;
4036	int i;
4037
4038	if (!n || !p || pt->timeless_decoding)
4039		return 0;
4040
4041	pt->time_ranges = calloc(n, sizeof(struct range));
4042	if (!pt->time_ranges)
4043		return -ENOMEM;
4044
4045	pt->range_cnt = n;
4046
4047	intel_pt_log("%s: %u range(s)\n", __func__, n);
4048
4049	for (i = 0; i < n; i++) {
4050		struct range *r = &pt->time_ranges[i];
4051		u64 ts = p[i].start;
4052		u64 te = p[i].end;
4053
4054		/*
4055		 * Take care to ensure the TSC range matches the perf-time range
4056		 * when converted back to perf-time.
4057		 */
4058		r->start = ts ? intel_pt_tsc_start(ts, pt) : 0;
4059		r->end   = te ? intel_pt_tsc_end(te, pt) : 0;
4060
4061		intel_pt_log("range %d: perf time interval: %"PRIu64" to %"PRIu64"\n",
4062			     i, ts, te);
4063		intel_pt_log("range %d: TSC time interval: %#"PRIx64" to %#"PRIx64"\n",
4064			     i, r->start, r->end);
4065	}
4066
4067	return 0;
4068}
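/*
 * The ranges converted above originate from the user's --time option
 * (e.g. "perf script --time 1234.5,1235.5", or the percentage form
 * "--time 10%/1"), which perf parses into opts->ptime_range before
 * decoding starts.  Timeless traces skip the conversion since there are
 * no timestamps to compare against.
 */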
4069
4070static int intel_pt_parse_vm_tm_corr_arg(struct intel_pt *pt, char **args)
4071{
4072	struct intel_pt_vmcs_info *vmcs_info;
4073	u64 tsc_offset, vmcs;
4074	char *p = *args;
4075
4076	errno = 0;
4077
4078	p = skip_spaces(p);
4079	if (!*p)
4080		return 1;
4081
4082	tsc_offset = strtoull(p, &p, 0);
4083	if (errno)
4084		return -errno;
4085	p = skip_spaces(p);
4086	if (*p != ':') {
4087		pt->dflt_tsc_offset = tsc_offset;
4088		*args = p;
4089		return 0;
4090	}
4091	p += 1;
4092	while (1) {
4093		vmcs = strtoull(p, &p, 0);
4094		if (errno)
4095			return -errno;
4096		if (!vmcs)
4097			return -EINVAL;
4098		vmcs_info = intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, tsc_offset);
4099		if (!vmcs_info)
4100			return -ENOMEM;
4101		p = skip_spaces(p);
4102		if (*p != ',')
4103			break;
4104		p += 1;
4105	}
4106	*args = p;
4107	return 0;
4108}
4109
4110static int intel_pt_parse_vm_tm_corr_args(struct intel_pt *pt)
4111{
4112	char *args = pt->synth_opts.vm_tm_corr_args;
4113	int ret;
4114
4115	if (!args)
4116		return 0;
4117
4118	do {
4119		ret = intel_pt_parse_vm_tm_corr_arg(pt, &args);
4120	} while (!ret);
4121
4122	if (ret < 0) {
4123		pr_err("Failed to parse VM Time Correlation options\n");
4124		return ret;
4125	}
4126
4127	return 0;
4128}
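/*
 * Grammar accepted by the parser above, as implemented (numbers in any
 * strtoull() base, specs separated by whitespace):
 *
 *	args ::= spec ( " " spec )*
 *	spec ::= tsc-offset [ ":" vmcs ( "," vmcs )* ]
 *
 * A spec without VMCS addresses sets the default TSC offset.  For
 * example, a hypothetical "0x1000 0x2000:0xabcd,0xef12" sets a default
 * offset of 0x1000 and an offset of 0x2000 for the two listed VMCS
 * addresses.
 */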
4129
4130static const char * const intel_pt_info_fmts[] = {
4131	[INTEL_PT_PMU_TYPE]		= "  PMU Type            %"PRId64"\n",
4132	[INTEL_PT_TIME_SHIFT]		= "  Time Shift          %"PRIu64"\n",
4133	[INTEL_PT_TIME_MULT]		= "  Time Multiplier     %"PRIu64"\n",
4134	[INTEL_PT_TIME_ZERO]		= "  Time Zero           %"PRIu64"\n",
4135	[INTEL_PT_CAP_USER_TIME_ZERO]	= "  Cap Time Zero       %"PRId64"\n",
4136	[INTEL_PT_TSC_BIT]		= "  TSC bit             %#"PRIx64"\n",
4137	[INTEL_PT_NORETCOMP_BIT]	= "  NoRETComp bit       %#"PRIx64"\n",
4138	[INTEL_PT_HAVE_SCHED_SWITCH]	= "  Have sched_switch   %"PRId64"\n",
4139	[INTEL_PT_SNAPSHOT_MODE]	= "  Snapshot mode       %"PRId64"\n",
4140	[INTEL_PT_PER_CPU_MMAPS]	= "  Per-cpu maps        %"PRId64"\n",
4141	[INTEL_PT_MTC_BIT]		= "  MTC bit             %#"PRIx64"\n",
4142	[INTEL_PT_MTC_FREQ_BITS]	= "  MTC freq bits       %#"PRIx64"\n",
4143	[INTEL_PT_TSC_CTC_N]		= "  TSC:CTC numerator   %"PRIu64"\n",
4144	[INTEL_PT_TSC_CTC_D]		= "  TSC:CTC denominator %"PRIu64"\n",
4145	[INTEL_PT_CYC_BIT]		= "  CYC bit             %#"PRIx64"\n",
4146	[INTEL_PT_MAX_NONTURBO_RATIO]	= "  Max non-turbo ratio %"PRIu64"\n",
4147	[INTEL_PT_FILTER_STR_LEN]	= "  Filter string len.  %"PRIu64"\n",
4148};
4149
4150static void intel_pt_print_info(__u64 *arr, int start, int finish)
4151{
4152	int i;
4153
4154	if (!dump_trace)
4155		return;
4156
4157	for (i = start; i <= finish; i++) {
4158		const char *fmt = intel_pt_info_fmts[i];
4159
4160		if (fmt)
4161			fprintf(stdout, fmt, arr[i]);
4162	}
4163}
4164
4165static void intel_pt_print_info_str(const char *name, const char *str)
4166{
4167	if (!dump_trace)
4168		return;
4169
4170	fprintf(stdout, "  %-20s%s\n", name, str ? str : "");
4171}
4172
4173static bool intel_pt_has(struct perf_record_auxtrace_info *auxtrace_info, int pos)
4174{
4175	return auxtrace_info->header.size >=
4176		sizeof(struct perf_record_auxtrace_info) + (sizeof(u64) * (pos + 1));
4177}
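/*
 * Older perf.data files carry a shorter auxtrace_info record, so every
 * optional priv[] field must be guarded by the size check above rather
 * than read unconditionally; see the intel_pt_has() calls below.
 */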
4178
4179int intel_pt_process_auxtrace_info(union perf_event *event,
4180				   struct perf_session *session)
4181{
4182	struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
4183	size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
4184	struct intel_pt *pt;
4185	void *info_end;
4186	__u64 *info;
4187	int err;
4188
4189	if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) +
4190					min_sz)
4191		return -EINVAL;
4192
4193	pt = zalloc(sizeof(struct intel_pt));
4194	if (!pt)
4195		return -ENOMEM;
4196
4197	pt->vmcs_info = RB_ROOT;
4198
4199	addr_filters__init(&pt->filts);
4200
4201	err = perf_config(intel_pt_perf_config, pt);
4202	if (err)
4203		goto err_free;
4204
4205	err = auxtrace_queues__init(&pt->queues);
4206	if (err)
4207		goto err_free;
4208
4209	if (session->itrace_synth_opts->set) {
4210		pt->synth_opts = *session->itrace_synth_opts;
4211	} else {
4212		struct itrace_synth_opts *opts = session->itrace_synth_opts;
4213
4214		itrace_synth_opts__set_default(&pt->synth_opts, opts->default_no_sample);
4215		if (!opts->default_no_sample && !opts->inject) {
4216			pt->synth_opts.branches = false;
4217			pt->synth_opts.callchain = true;
4218			pt->synth_opts.add_callchain = true;
4219		}
4220		pt->synth_opts.thread_stack = opts->thread_stack;
4221	}
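	/*
	 * Note: when the user gave no --itrace string, the defaults chosen
	 * above deliberately trade branch samples for synthesized
	 * callchains, so a plain "perf report" on an Intel PT data file
	 * shows call graphs instead of a flood of individual branch events.
	 */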
4222
4223	if (!(pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_USE_STDOUT))
4224		intel_pt_log_set_name(INTEL_PT_PMU_NAME);
4225
4226	pt->session = session;
4227	pt->machine = &session->machines.host; /* No kvm support */
4228	pt->auxtrace_type = auxtrace_info->type;
4229	pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
4230	pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
4231	pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
4232	pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
4233	pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
4234	pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
4235	pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
4236	pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
4237	pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
4238	pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
4239	intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
4240			    INTEL_PT_PER_CPU_MMAPS);
4241
4242	if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) {
4243		pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
4244		pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
4245		pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
4246		pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
4247		pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
4248		intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
4249				    INTEL_PT_CYC_BIT);
4250	}
4251
4252	if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) {
4253		pt->max_non_turbo_ratio =
4254			auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO];
4255		intel_pt_print_info(&auxtrace_info->priv[0],
4256				    INTEL_PT_MAX_NONTURBO_RATIO,
4257				    INTEL_PT_MAX_NONTURBO_RATIO);
4258	}
4259
4260	info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;
4261	info_end = (void *)auxtrace_info + auxtrace_info->header.size;
4262
4263	if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) {
4264		size_t len;
4265
4266		len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN];
4267		intel_pt_print_info(&auxtrace_info->priv[0],
4268				    INTEL_PT_FILTER_STR_LEN,
4269				    INTEL_PT_FILTER_STR_LEN);
4270		if (len) {
4271			const char *filter = (const char *)info;
4272
4273			len = roundup(len + 1, 8);
4274			info += len >> 3;
4275			if ((void *)info > info_end) {
4276				pr_err("%s: bad filter string length\n", __func__);
4277				err = -EINVAL;
4278				goto err_free_queues;
4279			}
4280			pt->filter = memdup(filter, len);
4281			if (!pt->filter) {
4282				err = -ENOMEM;
4283				goto err_free_queues;
4284			}
4285			if (session->header.needs_swap)
4286				mem_bswap_64(pt->filter, len);
4287			if (pt->filter[len - 1]) {
4288				pr_err("%s: filter string not null terminated\n", __func__);
4289				err = -EINVAL;
4290				goto err_free_queues;
4291			}
4292			err = addr_filters__parse_bare_filter(&pt->filts,
4293							      filter);
4294			if (err)
4295				goto err_free_queues;
4296		}
4297		intel_pt_print_info_str("Filter string", pt->filter);
4298	}
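	/*
	 * Note: the filter string in the priv area is padded to a multiple
	 * of 8 bytes (hence the roundup above) so that, for a data file
	 * recorded on a host of opposite endianness, it can be byte-swapped
	 * with mem_bswap_64() like the rest of the u64 priv array.
	 */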
4299
4300	if ((void *)info < info_end) {
4301		pt->cap_event_trace = *info++;
4302		if (dump_trace)
4303			fprintf(stdout, "  Cap Event Trace     %d\n",
4304				pt->cap_event_trace);
4305	}
4306
4307	pt->timeless_decoding = intel_pt_timeless_decoding(pt);
4308	if (pt->timeless_decoding && !pt->tc.time_mult)
4309		pt->tc.time_mult = 1;
4310	pt->have_tsc = intel_pt_have_tsc(pt);
4311	pt->sampling_mode = intel_pt_sampling_mode(pt);
4312	pt->est_tsc = !pt->timeless_decoding;
4313
4314	if (pt->synth_opts.vm_time_correlation) {
4315		if (pt->timeless_decoding) {
4316			pr_err("Intel PT has no time information for VM Time Correlation\n");
4317			err = -EINVAL;
4318			goto err_free_queues;
4319		}
4320		if (session->itrace_synth_opts->ptime_range) {
4321			pr_err("Time ranges cannot be specified with VM Time Correlation\n");
4322			err = -EINVAL;
4323			goto err_free_queues;
4324		}
4325		/* Currently TSC Offset is calculated using MTC packets */
4326		if (!intel_pt_have_mtc(pt)) {
4327			pr_err("MTC packets must have been enabled for VM Time Correlation\n");
4328			err = -EINVAL;
4329			goto err_free_queues;
4330		}
4331		err = intel_pt_parse_vm_tm_corr_args(pt);
4332		if (err)
4333			goto err_free_queues;
4334	}
4335
4336	pt->unknown_thread = thread__new(999999999, 999999999);
4337	if (!pt->unknown_thread) {
4338		err = -ENOMEM;
4339		goto err_free_queues;
4340	}
4341
4342	err = thread__set_comm(pt->unknown_thread, "unknown", 0);
4343	if (err)
4344		goto err_delete_thread;
4345	if (thread__init_maps(pt->unknown_thread, pt->machine)) {
4346		err = -ENOMEM;
4347		goto err_delete_thread;
4348	}
4349
4350	pt->auxtrace.process_event = intel_pt_process_event;
4351	pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
4352	pt->auxtrace.queue_data = intel_pt_queue_data;
4353	pt->auxtrace.dump_auxtrace_sample = intel_pt_dump_sample;
4354	pt->auxtrace.flush_events = intel_pt_flush;
4355	pt->auxtrace.free_events = intel_pt_free_events;
4356	pt->auxtrace.free = intel_pt_free;
4357	pt->auxtrace.evsel_is_auxtrace = intel_pt_evsel_is_auxtrace;
4358	session->auxtrace = &pt->auxtrace;
4359
4360	if (dump_trace)
4361		return 0;
4362
4363	if (pt->have_sched_switch == 1) {
4364		pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
4365		if (!pt->switch_evsel) {
4366			pr_err("%s: missing sched_switch event\n", __func__);
4367			err = -EINVAL;
4368			goto err_delete_thread;
4369		}
4370	} else if (pt->have_sched_switch == 2 &&
4371		   !intel_pt_find_switch(session->evlist)) {
4372		pr_err("%s: missing context_switch attribute flag\n", __func__);
4373		err = -EINVAL;
4374		goto err_delete_thread;
4375	}
4376
4377	if (pt->synth_opts.log) {
4378		bool log_on_error = pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ON_ERROR;
4379		unsigned int log_on_error_size = pt->synth_opts.log_on_error_size;
4380
4381		intel_pt_log_enable(log_on_error, log_on_error_size);
4382	}
4383
4384	/* Maximum non-turbo ratio is TSC freq / 100 MHz */
4385	if (pt->tc.time_mult) {
4386		u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);
4387
4388		if (!pt->max_non_turbo_ratio)
4389			pt->max_non_turbo_ratio =
4390					(tsc_freq + 50000000) / 100000000;
4391		intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
4392		intel_pt_log("Maximum non-turbo ratio %u\n",
4393			     pt->max_non_turbo_ratio);
4394		pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000;
4395	}
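	/*
	 * Worked example for the calculation above, assuming a hypothetical
	 * 3.4 GHz TSC: tsc_freq = 3400000000, so max_non_turbo_ratio =
	 * (3400000000 + 50000000) / 100000000 = 34 (the +50000000 rounds to
	 * the nearest 100 MHz), and cbr2khz = 3400000000 / 34 / 1000 =
	 * 100000 kHz, i.e. each core-to-bus ratio unit is worth 100 MHz.
	 */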
4396
4397	err = intel_pt_setup_time_ranges(pt, session->itrace_synth_opts);
4398	if (err)
4399		goto err_delete_thread;
4400
4401	if (pt->synth_opts.calls)
4402		pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
4403				       PERF_IP_FLAG_TRACE_END;
4404	if (pt->synth_opts.returns)
4405		pt->branches_filter |= PERF_IP_FLAG_RETURN |
4406				       PERF_IP_FLAG_TRACE_BEGIN;
4407
4408	if ((pt->synth_opts.callchain || pt->synth_opts.add_callchain) &&
4409	    !symbol_conf.use_callchain) {
4410		symbol_conf.use_callchain = true;
4411		if (callchain_register_param(&callchain_param) < 0) {
4412			symbol_conf.use_callchain = false;
4413			pt->synth_opts.callchain = false;
4414			pt->synth_opts.add_callchain = false;
4415		}
4416	}
4417
4418	if (pt->synth_opts.add_callchain) {
4419		err = intel_pt_callchain_init(pt);
4420		if (err)
4421			goto err_delete_thread;
4422	}
4423
4424	if (pt->synth_opts.last_branch || pt->synth_opts.add_last_branch) {
4425		pt->br_stack_sz = pt->synth_opts.last_branch_sz;
4426		pt->br_stack_sz_plus = pt->br_stack_sz;
4427	}
4428
4429	if (pt->synth_opts.add_last_branch) {
4430		err = intel_pt_br_stack_init(pt);
4431		if (err)
4432			goto err_delete_thread;
4433		/*
4434		 * Additional branch stack size to cater for tracing from the
4435		 * actual sample ip to where the sample time is recorded.
4436		 * Measured at about 200 branches, but generously set to 1024.
4437		 * If kernel space is not being traced, then add just 1 for the
4438		 * branch to kernel space.
4439		 */
4440		if (intel_pt_tracing_kernel(pt))
4441			pt->br_stack_sz_plus += 1024;
4442		else
4443			pt->br_stack_sz_plus += 1;
4444	}
4445
4446	pt->use_thread_stack = pt->synth_opts.callchain ||
4447			       pt->synth_opts.add_callchain ||
4448			       pt->synth_opts.thread_stack ||
4449			       pt->synth_opts.last_branch ||
4450			       pt->synth_opts.add_last_branch;
4451
4452	pt->callstack = pt->synth_opts.callchain ||
4453			pt->synth_opts.add_callchain ||
4454			pt->synth_opts.thread_stack;
4455
4456	err = intel_pt_synth_events(pt, session);
4457	if (err)
4458		goto err_delete_thread;
4459
4460	intel_pt_setup_pebs_events(pt);
4461
4462	if (perf_data__is_pipe(session->data)) {
4463		pr_warning("WARNING: Intel PT with pipe mode is not recommended.\n"
4464			   "         The output cannot be relied upon.  In particular,\n"
4465			   "         timestamps and the order of events may be incorrect.\n");
4466	}
4467
4468	if (pt->sampling_mode || list_empty(&session->auxtrace_index))
4469		err = auxtrace_queue_data(session, true, true);
4470	else
4471		err = auxtrace_queues__process_index(&pt->queues, session);
4472	if (err)
4473		goto err_delete_thread;
4474
4475	if (pt->queues.populated)
4476		pt->data_queued = true;
4477
4478	if (pt->timeless_decoding)
4479		pr_debug2("Intel PT decoding without timestamps\n");
4480
4481	return 0;
4482
4483err_delete_thread:
4484	zfree(&pt->chain);
4485	thread__zput(pt->unknown_thread);
4486err_free_queues:
4487	intel_pt_log_disable();
4488	auxtrace_queues__free(&pt->queues);
4489	session->auxtrace = NULL;
4490err_free:
4491	addr_filters__exit(&pt->filts);
4492	zfree(&pt->filter);
4493	zfree(&pt->time_ranges);
4494	free(pt);
4495	return err;
4496}
1548			if (ptq->state->from_iflag != ptq->state->to_iflag)
1549				ptq->flags |= PERF_IP_FLAG_INTR_TOGGLE;
1550		} else if (!ptq->state->to_iflag) {
1551			ptq->flags |= PERF_IP_FLAG_INTR_DISABLE;
1552		}
1553	}
1554}
1555
1556static void intel_pt_setup_time_range(struct intel_pt *pt,
1557				      struct intel_pt_queue *ptq)
1558{
1559	if (!pt->range_cnt)
1560		return;
1561
1562	ptq->sel_timestamp = pt->time_ranges[0].start;
1563	ptq->sel_idx = 0;
1564
1565	if (ptq->sel_timestamp) {
1566		ptq->sel_start = true;
1567	} else {
1568		ptq->sel_timestamp = pt->time_ranges[0].end;
1569		ptq->sel_start = false;
1570	}
1571}
1572
1573static int intel_pt_setup_queue(struct intel_pt *pt,
1574				struct auxtrace_queue *queue,
1575				unsigned int queue_nr)
1576{
1577	struct intel_pt_queue *ptq = queue->priv;
1578
1579	if (list_empty(&queue->head))
1580		return 0;
1581
1582	if (!ptq) {
1583		ptq = intel_pt_alloc_queue(pt, queue_nr);
1584		if (!ptq)
1585			return -ENOMEM;
1586		queue->priv = ptq;
1587
1588		if (queue->cpu != -1)
1589			ptq->cpu = queue->cpu;
1590		ptq->tid = queue->tid;
1591
1592		ptq->cbr_seen = UINT_MAX;
1593
1594		if (pt->sampling_mode && !pt->snapshot_mode &&
1595		    pt->timeless_decoding)
1596			ptq->step_through_buffers = true;
1597
1598		ptq->sync_switch = pt->sync_switch;
1599
1600		intel_pt_setup_time_range(pt, ptq);
1601	}
1602
1603	if (!ptq->on_heap &&
1604	    (!ptq->sync_switch ||
1605	     ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
1606		const struct intel_pt_state *state;
1607		int ret;
1608
1609		if (pt->timeless_decoding)
1610			return 0;
1611
1612		intel_pt_log("queue %u getting timestamp\n", queue_nr);
1613		intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
1614			     queue_nr, ptq->cpu, ptq->pid, ptq->tid);
1615
1616		if (ptq->sel_start && ptq->sel_timestamp) {
1617			ret = intel_pt_fast_forward(ptq->decoder,
1618						    ptq->sel_timestamp);
1619			if (ret)
1620				return ret;
1621		}
1622
1623		while (1) {
1624			state = intel_pt_decode(ptq->decoder);
1625			if (state->err) {
1626				if (state->err == INTEL_PT_ERR_NODATA) {
1627					intel_pt_log("queue %u has no timestamp\n",
1628						     queue_nr);
1629					return 0;
1630				}
1631				continue;
1632			}
1633			if (state->timestamp)
1634				break;
1635		}
1636
1637		ptq->timestamp = state->timestamp;
1638		intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n",
1639			     queue_nr, ptq->timestamp);
1640		ptq->state = state;
1641		ptq->have_sample = true;
1642		if (ptq->sel_start && ptq->sel_timestamp &&
1643		    ptq->timestamp < ptq->sel_timestamp)
1644			ptq->have_sample = false;
1645		intel_pt_sample_flags(ptq);
1646		ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
1647		if (ret)
1648			return ret;
1649		ptq->on_heap = true;
1650	}
1651
1652	return 0;
1653}
1654
1655static int intel_pt_setup_queues(struct intel_pt *pt)
1656{
1657	unsigned int i;
1658	int ret;
1659
1660	for (i = 0; i < pt->queues.nr_queues; i++) {
1661		ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i);
1662		if (ret)
1663			return ret;
1664	}
1665	return 0;
1666}
1667
1668static inline bool intel_pt_skip_event(struct intel_pt *pt)
1669{
1670	return pt->synth_opts.initial_skip &&
1671	       pt->num_events++ < pt->synth_opts.initial_skip;
1672}
1673
1674/*
1675 * Cannot count CBR as skipped because it won't go away until cbr == cbr_seen.
1676 * Also ensure CBR is the first non-skipped event by allowing for 4 more samples
1677 * from this decoder state.
1678 */
1679static inline bool intel_pt_skip_cbr_event(struct intel_pt *pt)
1680{
1681	return pt->synth_opts.initial_skip &&
1682	       pt->num_events + 4 < pt->synth_opts.initial_skip;
1683}
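
/*
 * Illustrative example: with --itrace=s10 (skip the first 10 events), CBR
 * samples are suppressed only while num_events + 4 < 10, i.e. while fewer
 * than 6 events have been counted. Unlike intel_pt_skip_event(), this does
 * not increment num_events, so a deferred CBR sample is retried until
 * cbr == cbr_seen.
 */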
1684
1685static void intel_pt_prep_a_sample(struct intel_pt_queue *ptq,
1686				   union perf_event *event,
1687				   struct perf_sample *sample)
1688{
1689	event->sample.header.type = PERF_RECORD_SAMPLE;
1690	event->sample.header.size = sizeof(struct perf_event_header);
1691
1692	sample->pid = ptq->pid;
1693	sample->tid = ptq->tid;
1694
1695	if (ptq->pt->have_guest_sideband) {
1696		if ((ptq->state->from_ip && ptq->state->from_nr) ||
1697		    (ptq->state->to_ip && ptq->state->to_nr)) {
1698			sample->pid = ptq->guest_pid;
1699			sample->tid = ptq->guest_tid;
1700			sample->machine_pid = ptq->guest_machine_pid;
1701			sample->vcpu = ptq->vcpu;
1702		}
1703	}
1704
1705	sample->cpu = ptq->cpu;
1706	sample->insn_len = ptq->insn_len;
1707	memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
1708}
1709
1710static void intel_pt_prep_b_sample(struct intel_pt *pt,
1711				   struct intel_pt_queue *ptq,
1712				   union perf_event *event,
1713				   struct perf_sample *sample)
1714{
1715	intel_pt_prep_a_sample(ptq, event, sample);
1716
1717	if (!pt->timeless_decoding)
1718		sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
1719
1720	sample->ip = ptq->state->from_ip;
1721	sample->addr = ptq->state->to_ip;
1722	sample->cpumode = intel_pt_cpumode(ptq, sample->ip, sample->addr);
1723	sample->period = 1;
1724	sample->flags = ptq->flags;
1725
1726	event->sample.header.misc = sample->cpumode;
1727}
1728
1729static int intel_pt_inject_event(union perf_event *event,
1730				 struct perf_sample *sample, u64 type)
1731{
1732	event->header.size = perf_event__sample_event_size(sample, type, 0);
1733	return perf_event__synthesize_sample(event, type, 0, sample);
1734}
1735
1736static inline int intel_pt_opt_inject(struct intel_pt *pt,
1737				      union perf_event *event,
1738				      struct perf_sample *sample, u64 type)
1739{
1740	if (!pt->synth_opts.inject)
1741		return 0;
1742
1743	return intel_pt_inject_event(event, sample, type);
1744}
1745
1746static int intel_pt_deliver_synth_event(struct intel_pt *pt,
1747					union perf_event *event,
1748					struct perf_sample *sample, u64 type)
1749{
1750	int ret;
1751
1752	ret = intel_pt_opt_inject(pt, event, sample, type);
1753	if (ret)
1754		return ret;
1755
1756	ret = perf_session__deliver_synth_event(pt->session, event, sample);
1757	if (ret)
1758		pr_err("Intel PT: failed to deliver event, error %d\n", ret);
1759
1760	return ret;
1761}
1762
1763static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
1764{
1765	struct intel_pt *pt = ptq->pt;
1766	union perf_event *event = ptq->event_buf;
1767	struct perf_sample sample = { .ip = 0, };
1768	struct dummy_branch_stack {
1769		u64			nr;
1770		u64			hw_idx;
1771		struct branch_entry	entries;
1772	} dummy_bs;
1773
1774	if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
1775		return 0;
1776
1777	if (intel_pt_skip_event(pt))
1778		return 0;
1779
1780	intel_pt_prep_b_sample(pt, ptq, event, &sample);
1781
1782	sample.id = ptq->pt->branches_id;
1783	sample.stream_id = ptq->pt->branches_id;
1784
1785	/*
1786	 * perf report cannot handle events without a branch stack when using
1787	 * SORT_MODE__BRANCH, so make a dummy one.
1788	 */
1789	if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) {
1790		dummy_bs = (struct dummy_branch_stack){
1791			.nr = 1,
1792			.hw_idx = -1ULL,
1793			.entries = {
1794				.from = sample.ip,
1795				.to = sample.addr,
1796			},
1797		};
1798		sample.branch_stack = (struct branch_stack *)&dummy_bs;
1799	}
1800
1801	if (ptq->sample_ipc)
1802		sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt;
1803	if (sample.cyc_cnt) {
1804		sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt;
1805		ptq->last_br_insn_cnt = ptq->ipc_insn_cnt;
1806		ptq->last_br_cyc_cnt = ptq->ipc_cyc_cnt;
1807	}
1808
1809	return intel_pt_deliver_synth_event(pt, event, &sample,
1810					    pt->branches_sample_type);
1811}
1812
1813static void intel_pt_prep_sample(struct intel_pt *pt,
1814				 struct intel_pt_queue *ptq,
1815				 union perf_event *event,
1816				 struct perf_sample *sample)
1817{
1818	intel_pt_prep_b_sample(pt, ptq, event, sample);
1819
1820	if (pt->synth_opts.callchain) {
1821		thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
1822				     pt->synth_opts.callchain_sz + 1,
1823				     sample->ip, pt->kernel_start);
1824		sample->callchain = ptq->chain;
1825	}
1826
1827	if (pt->synth_opts.last_branch) {
1828		thread_stack__br_sample(ptq->thread, ptq->cpu, ptq->last_branch,
1829					pt->br_stack_sz);
1830		sample->branch_stack = ptq->last_branch;
1831	}
1832}
1833
1834static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
1835{
1836	struct intel_pt *pt = ptq->pt;
1837	union perf_event *event = ptq->event_buf;
1838	struct perf_sample sample = { .ip = 0, };
1839
1840	if (intel_pt_skip_event(pt))
1841		return 0;
1842
1843	intel_pt_prep_sample(pt, ptq, event, &sample);
1844
1845	sample.id = ptq->pt->instructions_id;
1846	sample.stream_id = ptq->pt->instructions_id;
1847	if (pt->synth_opts.quick)
1848		sample.period = 1;
1849	else
1850		sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
1851
1852	if (ptq->sample_ipc)
1853		sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
1854	if (sample.cyc_cnt) {
1855		sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt;
1856		ptq->last_in_insn_cnt = ptq->ipc_insn_cnt;
1857		ptq->last_in_cyc_cnt = ptq->ipc_cyc_cnt;
1858	}
1859
1860	ptq->last_insn_cnt = ptq->state->tot_insn_cnt;
1861
1862	return intel_pt_deliver_synth_event(pt, event, &sample,
1863					    pt->instructions_sample_type);
1864}
1865
1866static int intel_pt_synth_cycle_sample(struct intel_pt_queue *ptq)
1867{
1868	struct intel_pt *pt = ptq->pt;
1869	union perf_event *event = ptq->event_buf;
1870	struct perf_sample sample = { .ip = 0, };
1871	u64 period = 0;
1872
1873	if (ptq->sample_ipc)
1874		period = ptq->ipc_cyc_cnt - ptq->last_cy_cyc_cnt;
1875
1876	if (!period || intel_pt_skip_event(pt))
1877		return 0;
1878
1879	intel_pt_prep_sample(pt, ptq, event, &sample);
1880
1881	sample.id = ptq->pt->cycles_id;
1882	sample.stream_id = ptq->pt->cycles_id;
1883	sample.period = period;
1884
1885	sample.cyc_cnt = period;
1886	sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_cy_insn_cnt;
1887	ptq->last_cy_insn_cnt = ptq->ipc_insn_cnt;
1888	ptq->last_cy_cyc_cnt = ptq->ipc_cyc_cnt;
1889
1890	return intel_pt_deliver_synth_event(pt, event, &sample, pt->cycles_sample_type);
1891}
1892
1893static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
1894{
1895	struct intel_pt *pt = ptq->pt;
1896	union perf_event *event = ptq->event_buf;
1897	struct perf_sample sample = { .ip = 0, };
1898
1899	if (intel_pt_skip_event(pt))
1900		return 0;
1901
1902	intel_pt_prep_sample(pt, ptq, event, &sample);
1903
1904	sample.id = ptq->pt->transactions_id;
1905	sample.stream_id = ptq->pt->transactions_id;
1906
1907	return intel_pt_deliver_synth_event(pt, event, &sample,
1908					    pt->transactions_sample_type);
1909}
1910
1911static void intel_pt_prep_p_sample(struct intel_pt *pt,
1912				   struct intel_pt_queue *ptq,
1913				   union perf_event *event,
1914				   struct perf_sample *sample)
1915{
1916	intel_pt_prep_sample(pt, ptq, event, sample);
1917
1918	/*
1919	 * Zero IP is used to mean "trace start" but that is not the case for
1920	 * power or PTWRITE events with no IP, so clear the flags.
1921	 */
1922	if (!sample->ip)
1923		sample->flags = 0;
1924}
1925
1926static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq)
1927{
1928	struct intel_pt *pt = ptq->pt;
1929	union perf_event *event = ptq->event_buf;
1930	struct perf_sample sample = { .ip = 0, };
1931	struct perf_synth_intel_ptwrite raw;
1932
1933	if (intel_pt_skip_event(pt))
1934		return 0;
1935
1936	intel_pt_prep_p_sample(pt, ptq, event, &sample);
1937
1938	sample.id = ptq->pt->ptwrites_id;
1939	sample.stream_id = ptq->pt->ptwrites_id;
1940
1941	raw.flags = 0;
1942	raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
1943	raw.payload = cpu_to_le64(ptq->state->ptw_payload);
1944
1945	sample.raw_size = perf_synth__raw_size(raw);
1946	sample.raw_data = perf_synth__raw_data(&raw);
1947
1948	return intel_pt_deliver_synth_event(pt, event, &sample,
1949					    pt->ptwrites_sample_type);
1950}
1951
1952static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq)
1953{
1954	struct intel_pt *pt = ptq->pt;
1955	union perf_event *event = ptq->event_buf;
1956	struct perf_sample sample = { .ip = 0, };
1957	struct perf_synth_intel_cbr raw;
1958	u32 flags;
1959
1960	if (intel_pt_skip_cbr_event(pt))
1961		return 0;
1962
1963	ptq->cbr_seen = ptq->state->cbr;
1964
1965	intel_pt_prep_p_sample(pt, ptq, event, &sample);
1966
1967	sample.id = ptq->pt->cbr_id;
1968	sample.stream_id = ptq->pt->cbr_id;
1969
1970	flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16);
1971	raw.flags = cpu_to_le32(flags);
1972	raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz);
1973	raw.reserved3 = 0;
1974
1975	sample.raw_size = perf_synth__raw_size(raw);
1976	sample.raw_data = perf_synth__raw_data(&raw);
1977
1978	return intel_pt_deliver_synth_event(pt, event, &sample,
1979					    pt->pwr_events_sample_type);
1980}
1981
1982static int intel_pt_synth_psb_sample(struct intel_pt_queue *ptq)
1983{
1984	struct intel_pt *pt = ptq->pt;
1985	union perf_event *event = ptq->event_buf;
1986	struct perf_sample sample = { .ip = 0, };
1987	struct perf_synth_intel_psb raw;
1988
1989	if (intel_pt_skip_event(pt))
1990		return 0;
1991
1992	intel_pt_prep_p_sample(pt, ptq, event, &sample);
1993
1994	sample.id = ptq->pt->psb_id;
1995	sample.stream_id = ptq->pt->psb_id;
1996	sample.flags = 0;
1997
1998	raw.reserved = 0;
1999	raw.offset = ptq->state->psb_offset;
2000
2001	sample.raw_size = perf_synth__raw_size(raw);
2002	sample.raw_data = perf_synth__raw_data(&raw);
2003
2004	return intel_pt_deliver_synth_event(pt, event, &sample,
2005					    pt->pwr_events_sample_type);
2006}
2007
2008static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq)
2009{
2010	struct intel_pt *pt = ptq->pt;
2011	union perf_event *event = ptq->event_buf;
2012	struct perf_sample sample = { .ip = 0, };
2013	struct perf_synth_intel_mwait raw;
2014
2015	if (intel_pt_skip_event(pt))
2016		return 0;
2017
2018	intel_pt_prep_p_sample(pt, ptq, event, &sample);
2019
2020	sample.id = ptq->pt->mwait_id;
2021	sample.stream_id = ptq->pt->mwait_id;
2022
2023	raw.reserved = 0;
2024	raw.payload = cpu_to_le64(ptq->state->mwait_payload);
2025
2026	sample.raw_size = perf_synth__raw_size(raw);
2027	sample.raw_data = perf_synth__raw_data(&raw);
2028
2029	return intel_pt_deliver_synth_event(pt, event, &sample,
2030					    pt->pwr_events_sample_type);
2031}
2032
2033static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq)
2034{
2035	struct intel_pt *pt = ptq->pt;
2036	union perf_event *event = ptq->event_buf;
2037	struct perf_sample sample = { .ip = 0, };
2038	struct perf_synth_intel_pwre raw;
2039
2040	if (intel_pt_skip_event(pt))
2041		return 0;
2042
2043	intel_pt_prep_p_sample(pt, ptq, event, &sample);
2044
2045	sample.id = ptq->pt->pwre_id;
2046	sample.stream_id = ptq->pt->pwre_id;
2047
2048	raw.reserved = 0;
2049	raw.payload = cpu_to_le64(ptq->state->pwre_payload);
2050
2051	sample.raw_size = perf_synth__raw_size(raw);
2052	sample.raw_data = perf_synth__raw_data(&raw);
2053
2054	return intel_pt_deliver_synth_event(pt, event, &sample,
2055					    pt->pwr_events_sample_type);
2056}
2057
2058static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq)
2059{
2060	struct intel_pt *pt = ptq->pt;
2061	union perf_event *event = ptq->event_buf;
2062	struct perf_sample sample = { .ip = 0, };
2063	struct perf_synth_intel_exstop raw;
2064
2065	if (intel_pt_skip_event(pt))
2066		return 0;
2067
2068	intel_pt_prep_p_sample(pt, ptq, event, &sample);
2069
2070	sample.id = ptq->pt->exstop_id;
2071	sample.stream_id = ptq->pt->exstop_id;
2072
2073	raw.flags = 0;
2074	raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
2075
2076	sample.raw_size = perf_synth__raw_size(raw);
2077	sample.raw_data = perf_synth__raw_data(&raw);
2078
2079	return intel_pt_deliver_synth_event(pt, event, &sample,
2080					    pt->pwr_events_sample_type);
2081}
2082
2083static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
2084{
2085	struct intel_pt *pt = ptq->pt;
2086	union perf_event *event = ptq->event_buf;
2087	struct perf_sample sample = { .ip = 0, };
2088	struct perf_synth_intel_pwrx raw;
2089
2090	if (intel_pt_skip_event(pt))
2091		return 0;
2092
2093	intel_pt_prep_p_sample(pt, ptq, event, &sample);
2094
2095	sample.id = ptq->pt->pwrx_id;
2096	sample.stream_id = ptq->pt->pwrx_id;
2097
2098	raw.reserved = 0;
2099	raw.payload = cpu_to_le64(ptq->state->pwrx_payload);
2100
2101	sample.raw_size = perf_synth__raw_size(raw);
2102	sample.raw_data = perf_synth__raw_data(&raw);
2103
2104	return intel_pt_deliver_synth_event(pt, event, &sample,
2105					    pt->pwr_events_sample_type);
2106}
2107
2108/*
2109 * PEBS gp_regs array indexes, plus 1 so that 0 means not present. Refer to
2110 * intel_pt_add_gp_regs().
2111 */
2112static const int pebs_gp_regs[] = {
2113	[PERF_REG_X86_FLAGS]	= 1,
2114	[PERF_REG_X86_IP]	= 2,
2115	[PERF_REG_X86_AX]	= 3,
2116	[PERF_REG_X86_CX]	= 4,
2117	[PERF_REG_X86_DX]	= 5,
2118	[PERF_REG_X86_BX]	= 6,
2119	[PERF_REG_X86_SP]	= 7,
2120	[PERF_REG_X86_BP]	= 8,
2121	[PERF_REG_X86_SI]	= 9,
2122	[PERF_REG_X86_DI]	= 10,
2123	[PERF_REG_X86_R8]	= 11,
2124	[PERF_REG_X86_R9]	= 12,
2125	[PERF_REG_X86_R10]	= 13,
2126	[PERF_REG_X86_R11]	= 14,
2127	[PERF_REG_X86_R12]	= 15,
2128	[PERF_REG_X86_R13]	= 16,
2129	[PERF_REG_X86_R14]	= 17,
2130	[PERF_REG_X86_R15]	= 18,
2131};
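
/*
 * Worked example: pebs_gp_regs[PERF_REG_X86_IP] is 2, so if bit (2 - 1) of
 * items->mask[INTEL_PT_GP_REGS_POS] is set, the IP value is found at
 * items->val[INTEL_PT_GP_REGS_POS][2 - 1].
 */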
2132
2133static u64 *intel_pt_add_gp_regs(struct regs_dump *intr_regs, u64 *pos,
2134				 const struct intel_pt_blk_items *items,
2135				 u64 regs_mask)
2136{
2137	const u64 *gp_regs = items->val[INTEL_PT_GP_REGS_POS];
2138	u32 mask = items->mask[INTEL_PT_GP_REGS_POS];
2139	u32 bit;
2140	int i;
2141
2142	for (i = 0, bit = 1; i < PERF_REG_X86_64_MAX; i++, bit <<= 1) {
2143		/* Get the PEBS gp_regs array index */
2144		int n = pebs_gp_regs[i] - 1;
2145
2146		if (n < 0)
2147			continue;
2148		/*
2149		 * Add only registers that were requested (i.e. 'regs_mask') and
2150		 * that were provided (i.e. 'mask'), and update the resulting
2151		 * mask (i.e. 'intr_regs->mask') accordingly.
2152		 */
2153		if (mask & 1 << n && regs_mask & bit) {
2154			intr_regs->mask |= bit;
2155			*pos++ = gp_regs[n];
2156		}
2157	}
2158
2159	return pos;
2160}
2161
2162#ifndef PERF_REG_X86_XMM0
2163#define PERF_REG_X86_XMM0 32
2164#endif
2165
2166static void intel_pt_add_xmm(struct regs_dump *intr_regs, u64 *pos,
2167			     const struct intel_pt_blk_items *items,
2168			     u64 regs_mask)
2169{
2170	u32 mask = items->has_xmm & (regs_mask >> PERF_REG_X86_XMM0);
2171	const u64 *xmm = items->xmm;
2172
2173	/*
2174	 * If there are any XMM registers, then there should be all of them.
2175	 * Nevertheless, follow the logic to add only registers that were
2176	 * requested (i.e. 'regs_mask') and that were provided (i.e. 'mask'),
2177	 * and update the resulting mask (i.e. 'intr_regs->mask') accordingly.
2178	 */
2179	intr_regs->mask |= (u64)mask << PERF_REG_X86_XMM0;
2180
2181	for (; mask; mask >>= 1, xmm++) {
2182		if (mask & 1)
2183			*pos++ = *xmm;
2184	}
2185}
2186
2187#define LBR_INFO_MISPRED	(1ULL << 63)
2188#define LBR_INFO_IN_TX		(1ULL << 62)
2189#define LBR_INFO_ABORT		(1ULL << 61)
2190#define LBR_INFO_CYCLES		0xffff
2191
2192/* Refer to the kernel's intel_pmu_store_pebs_lbrs() */
2193static u64 intel_pt_lbr_flags(u64 info)
2194{
2195	union {
2196		struct branch_flags flags;
2197		u64 result;
2198	} u;
2199
2200	u.result	  = 0;
2201	u.flags.mispred	  = !!(info & LBR_INFO_MISPRED);
2202	u.flags.predicted = !(info & LBR_INFO_MISPRED);
2203	u.flags.in_tx	  = !!(info & LBR_INFO_IN_TX);
2204	u.flags.abort	  = !!(info & LBR_INFO_ABORT);
2205	u.flags.cycles	  = info & LBR_INFO_CYCLES;
2206
2207	return u.result;
2208}
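
/*
 * Illustrative example: an LBR info word of (1ULL << 63) | 16 decodes above
 * to mispred = 1, predicted = 0, in_tx = 0, abort = 0 and cycles = 16.
 */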
2209
2210static void intel_pt_add_lbrs(struct branch_stack *br_stack,
2211			      const struct intel_pt_blk_items *items)
2212{
2213	u64 *to;
2214	int i;
2215
2216	br_stack->nr = 0;
2217
2218	to = &br_stack->entries[0].from;
2219
2220	for (i = INTEL_PT_LBR_0_POS; i <= INTEL_PT_LBR_2_POS; i++) {
2221		u32 mask = items->mask[i];
2222		const u64 *from = items->val[i];
2223
2224		for (; mask; mask >>= 3, from += 3) {
2225			if ((mask & 7) == 7) {
2226				*to++ = from[0];
2227				*to++ = from[1];
2228				*to++ = intel_pt_lbr_flags(from[2]);
2229				br_stack->nr += 1;
2230			}
2231		}
2232	}
2233}
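
/*
 * Note: each LBR is presented by the decoder as a (FROM, TO, INFO) triple
 * occupying 3 consecutive mask bits, which is why the loop above advances 3
 * values at a time and copies an entry only when all 3 of its bits are set
 * ((mask & 7) == 7).
 */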
2234
2235static int intel_pt_do_synth_pebs_sample(struct intel_pt_queue *ptq, struct evsel *evsel, u64 id)
2236{
2237	const struct intel_pt_blk_items *items = &ptq->state->items;
2238	struct perf_sample sample = { .ip = 0, };
2239	union perf_event *event = ptq->event_buf;
2240	struct intel_pt *pt = ptq->pt;
2241	u64 sample_type = evsel->core.attr.sample_type;
2242	u8 cpumode;
2243	u64 regs[8 * sizeof(sample.intr_regs.mask)];
2244
2245	if (intel_pt_skip_event(pt))
2246		return 0;
2247
2248	intel_pt_prep_a_sample(ptq, event, &sample);
2249
2250	sample.id = id;
2251	sample.stream_id = id;
2252
2253	if (!evsel->core.attr.freq)
2254		sample.period = evsel->core.attr.sample_period;
2255
2256	/* No support for non-zero CS base */
2257	if (items->has_ip)
2258		sample.ip = items->ip;
2259	else if (items->has_rip)
2260		sample.ip = items->rip;
2261	else
2262		sample.ip = ptq->state->from_ip;
2263
2264	cpumode = intel_pt_cpumode(ptq, sample.ip, 0);
2265
2266	event->sample.header.misc = cpumode | PERF_RECORD_MISC_EXACT_IP;
2267
2268	sample.cpumode = cpumode;
2269
2270	if (sample_type & PERF_SAMPLE_TIME) {
2271		u64 timestamp = 0;
2272
2273		if (items->has_timestamp)
2274			timestamp = items->timestamp;
2275		else if (!pt->timeless_decoding)
2276			timestamp = ptq->timestamp;
2277		if (timestamp)
2278			sample.time = tsc_to_perf_time(timestamp, &pt->tc);
2279	}
2280
2281	if (sample_type & PERF_SAMPLE_CALLCHAIN &&
2282	    pt->synth_opts.callchain) {
2283		thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
2284				     pt->synth_opts.callchain_sz, sample.ip,
2285				     pt->kernel_start);
2286		sample.callchain = ptq->chain;
2287	}
2288
2289	if (sample_type & PERF_SAMPLE_REGS_INTR &&
2290	    (items->mask[INTEL_PT_GP_REGS_POS] ||
2291	     items->mask[INTEL_PT_XMM_POS])) {
2292		u64 regs_mask = evsel->core.attr.sample_regs_intr;
2293		u64 *pos;
2294
2295		sample.intr_regs.abi = items->is_32_bit ?
2296				       PERF_SAMPLE_REGS_ABI_32 :
2297				       PERF_SAMPLE_REGS_ABI_64;
2298		sample.intr_regs.regs = regs;
2299
2300		pos = intel_pt_add_gp_regs(&sample.intr_regs, regs, items, regs_mask);
2301
2302		intel_pt_add_xmm(&sample.intr_regs, pos, items, regs_mask);
2303	}
2304
2305	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
2306		if (items->mask[INTEL_PT_LBR_0_POS] ||
2307		    items->mask[INTEL_PT_LBR_1_POS] ||
2308		    items->mask[INTEL_PT_LBR_2_POS]) {
2309			intel_pt_add_lbrs(ptq->last_branch, items);
2310		} else if (pt->synth_opts.last_branch) {
2311			thread_stack__br_sample(ptq->thread, ptq->cpu,
2312						ptq->last_branch,
2313						pt->br_stack_sz);
2314		} else {
2315			ptq->last_branch->nr = 0;
2316		}
2317		sample.branch_stack = ptq->last_branch;
2318	}
2319
2320	if (sample_type & PERF_SAMPLE_ADDR && items->has_mem_access_address)
2321		sample.addr = items->mem_access_address;
2322
2323	if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
2324		/*
2325		 * Refer to the kernel's setup_pebs_adaptive_sample_data() and
2326		 * intel_hsw_weight().
2327		 */
2328		if (items->has_mem_access_latency) {
2329			u64 weight = items->mem_access_latency >> 32;
2330
2331			/*
2332			 * Starting from SPR, the mem access latency field
2333			 * contains both cache latency [47:32] and instruction
2334			 * latency [15:0]. The cache latency is the same as the
2335			 * mem access latency on previous platforms.
2336			 *
2337			 * In practice, no memory access could last longer than
2338			 * 4G cycles, so use latency >> 32 to distinguish the
2339			 * two formats of the mem access latency field.
2340			 */
2341			if (weight > 0) {
2342				sample.weight = weight & 0xffff;
2343				sample.ins_lat = items->mem_access_latency & 0xffff;
2344			} else
2345				sample.weight = items->mem_access_latency;
2346		}
2347		if (!sample.weight && items->has_tsx_aux_info) {
2348			/* Cycles last block */
2349			sample.weight = (u32)items->tsx_aux_info;
2350		}
2351	}
2352
2353	if (sample_type & PERF_SAMPLE_TRANSACTION && items->has_tsx_aux_info) {
2354		u64 ax = items->has_rax ? items->rax : 0;
2355		/* Refer to the kernel's intel_hsw_transaction() */
2356		u64 txn = (u8)(items->tsx_aux_info >> 32);
2357
2358		/* For RTM XABORTs also log the abort code from AX */
2359		if (txn & PERF_TXN_TRANSACTION && ax & 1)
2360			txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
2361		sample.transaction = txn;
2362	}
2363
2364	return intel_pt_deliver_synth_event(pt, event, &sample, sample_type);
2365}
2366
2367static int intel_pt_synth_single_pebs_sample(struct intel_pt_queue *ptq)
2368{
2369	struct intel_pt *pt = ptq->pt;
2370	struct evsel *evsel = pt->pebs_evsel;
2371	u64 id = evsel->core.id[0];
2372
2373	return intel_pt_do_synth_pebs_sample(ptq, evsel, id);
2374}
2375
2376static int intel_pt_synth_pebs_sample(struct intel_pt_queue *ptq)
2377{
2378	const struct intel_pt_blk_items *items = &ptq->state->items;
2379	struct intel_pt_pebs_event *pe;
2380	struct intel_pt *pt = ptq->pt;
2381	int err = -EINVAL;
2382	int hw_id;
2383
2384	if (!items->has_applicable_counters || !items->applicable_counters) {
2385		if (!pt->single_pebs)
2386			pr_err("PEBS-via-PT record with no applicable_counters\n");
2387		return intel_pt_synth_single_pebs_sample(ptq);
2388	}
2389
2390	for_each_set_bit(hw_id, (unsigned long *)&items->applicable_counters, INTEL_PT_MAX_PEBS) {
2391		pe = &ptq->pebs[hw_id];
2392		if (!pe->evsel) {
2393			if (!pt->single_pebs)
2394				pr_err("PEBS-via-PT record with no matching event, hw_id %d\n",
2395				       hw_id);
2396			return intel_pt_synth_single_pebs_sample(ptq);
2397		}
2398		err = intel_pt_do_synth_pebs_sample(ptq, pe->evsel, pe->id);
2399		if (err)
2400			return err;
2401	}
2402
2403	return err;
2404}
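
/*
 * Note: items->applicable_counters is a bitmap of the PEBS hardware counters
 * to which this record applies. One sample is synthesized per counter whose
 * evsel was registered via a PERF_RECORD_AUX_OUTPUT_HW_ID event, falling
 * back to the single configured PEBS event otherwise.
 */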
2405
2406static int intel_pt_synth_events_sample(struct intel_pt_queue *ptq)
2407{
2408	struct intel_pt *pt = ptq->pt;
2409	union perf_event *event = ptq->event_buf;
2410	struct perf_sample sample = { .ip = 0, };
2411	struct {
2412		struct perf_synth_intel_evt cfe;
2413		struct perf_synth_intel_evd evd[INTEL_PT_MAX_EVDS];
2414	} raw;
2415	int i;
2416
2417	if (intel_pt_skip_event(pt))
2418		return 0;
2419
2420	intel_pt_prep_p_sample(pt, ptq, event, &sample);
2421
2422	sample.id        = ptq->pt->evt_id;
2423	sample.stream_id = ptq->pt->evt_id;
2424
2425	raw.cfe.type     = ptq->state->cfe_type;
2426	raw.cfe.reserved = 0;
2427	raw.cfe.ip       = !!(ptq->state->flags & INTEL_PT_FUP_IP);
2428	raw.cfe.vector   = ptq->state->cfe_vector;
2429	raw.cfe.evd_cnt  = ptq->state->evd_cnt;
2430
2431	for (i = 0; i < ptq->state->evd_cnt; i++) {
2432		raw.evd[i].et       = 0;
2433		raw.evd[i].evd_type = ptq->state->evd[i].type;
2434		raw.evd[i].payload  = ptq->state->evd[i].payload;
2435	}
2436
2437	sample.raw_size = perf_synth__raw_size(raw.cfe) +
2438			  ptq->state->evd_cnt * sizeof(struct perf_synth_intel_evd);
2439	sample.raw_data = perf_synth__raw_data(&raw);
2440
2441	return intel_pt_deliver_synth_event(pt, event, &sample,
2442					    pt->evt_sample_type);
2443}
2444
2445static int intel_pt_synth_iflag_chg_sample(struct intel_pt_queue *ptq)
2446{
2447	struct intel_pt *pt = ptq->pt;
2448	union perf_event *event = ptq->event_buf;
2449	struct perf_sample sample = { .ip = 0, };
2450	struct perf_synth_intel_iflag_chg raw;
2451
2452	if (intel_pt_skip_event(pt))
2453		return 0;
2454
2455	intel_pt_prep_p_sample(pt, ptq, event, &sample);
2456
2457	sample.id = ptq->pt->iflag_chg_id;
2458	sample.stream_id = ptq->pt->iflag_chg_id;
2459
2460	raw.flags = 0;
2461	raw.iflag = ptq->state->to_iflag;
2462
2463	if (ptq->state->type & INTEL_PT_BRANCH) {
2464		raw.via_branch = 1;
2465		raw.branch_ip = ptq->state->to_ip;
2466	} else {
2467		sample.addr = 0;
2468	}
2469	sample.flags = ptq->flags;
2470
2471	sample.raw_size = perf_synth__raw_size(raw);
2472	sample.raw_data = perf_synth__raw_data(&raw);
2473
2474	return intel_pt_deliver_synth_event(pt, event, &sample,
2475					    pt->iflag_chg_sample_type);
2476}
2477
2478static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
2479				pid_t pid, pid_t tid, u64 ip, u64 timestamp,
2480				pid_t machine_pid, int vcpu)
2481{
2482	bool dump_log_on_error = pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ON_ERROR;
2483	bool log_on_stdout = pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_USE_STDOUT;
2484	union perf_event event;
2485	char msg[MAX_AUXTRACE_ERROR_MSG];
2486	int err;
2487
2488	if (pt->synth_opts.error_minus_flags) {
2489		if (code == INTEL_PT_ERR_OVR &&
2490		    pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_OVERFLOW)
2491			return 0;
2492		if (code == INTEL_PT_ERR_LOST &&
2493		    pt->synth_opts.error_minus_flags & AUXTRACE_ERR_FLG_DATA_LOST)
2494			return 0;
2495	}
2496
2497	intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);
2498
2499	auxtrace_synth_guest_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
2500				   code, cpu, pid, tid, ip, msg, timestamp,
2501				   machine_pid, vcpu);
2502
2503	if (intel_pt_enable_logging && !log_on_stdout) {
2504		FILE *fp = intel_pt_log_fp();
2505
2506		if (fp)
2507			perf_event__fprintf_auxtrace_error(&event, fp);
2508	}
2509
2510	if (code != INTEL_PT_ERR_LOST && dump_log_on_error)
2511		intel_pt_log_dump_buf();
2512
2513	err = perf_session__deliver_synth_event(pt->session, &event, NULL);
2514	if (err)
2515		pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
2516		       err);
2517
2518	return err;
2519}
2520
2521static int intel_ptq_synth_error(struct intel_pt_queue *ptq,
2522				 const struct intel_pt_state *state)
2523{
2524	struct intel_pt *pt = ptq->pt;
2525	u64 tm = ptq->timestamp;
2526	pid_t machine_pid = 0;
2527	pid_t pid = ptq->pid;
2528	pid_t tid = ptq->tid;
2529	int vcpu = -1;
2530
2531	tm = pt->timeless_decoding ? 0 : tsc_to_perf_time(tm, &pt->tc);
2532
2533	if (pt->have_guest_sideband && state->from_nr) {
2534		machine_pid = ptq->guest_machine_pid;
2535		vcpu = ptq->vcpu;
2536		pid = ptq->guest_pid;
2537		tid = ptq->guest_tid;
2538	}
2539
2540	return intel_pt_synth_error(pt, state->err, ptq->cpu, pid, tid,
2541				    state->from_ip, tm, machine_pid, vcpu);
2542}
2543
2544static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
2545{
2546	struct auxtrace_queue *queue;
2547	pid_t tid = ptq->next_tid;
2548	int err;
2549
2550	if (tid == -1)
2551		return 0;
2552
2553	intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);
2554
2555	err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);
2556
2557	queue = &pt->queues.queue_array[ptq->queue_nr];
2558	intel_pt_set_pid_tid_cpu(pt, queue);
2559
2560	ptq->next_tid = -1;
2561
2562	return err;
2563}
2564
2565static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
2566{
2567	struct intel_pt *pt = ptq->pt;
2568
2569	return ip == pt->switch_ip &&
2570	       (ptq->flags & PERF_IP_FLAG_BRANCH) &&
2571	       !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
2572			       PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
2573}
2574
2575#define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \
2576			  INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT)
2577
2578static int intel_pt_sample(struct intel_pt_queue *ptq)
2579{
2580	const struct intel_pt_state *state = ptq->state;
2581	struct intel_pt *pt = ptq->pt;
2582	int err;
2583
2584	if (!ptq->have_sample)
2585		return 0;
2586
2587	ptq->have_sample = false;
2588
2589	if (pt->synth_opts.approx_ipc) {
2590		ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
2591		ptq->ipc_cyc_cnt = ptq->state->cycles;
2592		ptq->sample_ipc = true;
2593	} else {
2594		ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
2595		ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt;
2596		ptq->sample_ipc = ptq->state->flags & INTEL_PT_SAMPLE_IPC;
2597	}
2598
2599	/* Ensure guest code maps are set up */
2600	if (symbol_conf.guest_code && (state->from_nr || state->to_nr))
2601		intel_pt_get_guest(ptq);
2602
2603	/*
2604	 * Do PEBS first to allow for the possibility that the PEBS timestamp
2605	 * precedes the current timestamp.
2606	 */
2607	if (pt->sample_pebs && state->type & INTEL_PT_BLK_ITEMS) {
2608		err = intel_pt_synth_pebs_sample(ptq);
2609		if (err)
2610			return err;
2611	}
2612
2613	if (pt->synth_opts.intr_events) {
2614		if (state->type & INTEL_PT_EVT) {
2615			err = intel_pt_synth_events_sample(ptq);
2616			if (err)
2617				return err;
2618		}
2619		if (state->type & INTEL_PT_IFLAG_CHG) {
2620			err = intel_pt_synth_iflag_chg_sample(ptq);
2621			if (err)
2622				return err;
2623		}
2624	}
2625
2626	if (pt->sample_pwr_events) {
2627		if (state->type & INTEL_PT_PSB_EVT) {
2628			err = intel_pt_synth_psb_sample(ptq);
2629			if (err)
2630				return err;
2631		}
2632		if (ptq->state->cbr != ptq->cbr_seen) {
2633			err = intel_pt_synth_cbr_sample(ptq);
2634			if (err)
2635				return err;
2636		}
2637		if (state->type & INTEL_PT_PWR_EVT) {
2638			if (state->type & INTEL_PT_MWAIT_OP) {
2639				err = intel_pt_synth_mwait_sample(ptq);
2640				if (err)
2641					return err;
2642			}
2643			if (state->type & INTEL_PT_PWR_ENTRY) {
2644				err = intel_pt_synth_pwre_sample(ptq);
2645				if (err)
2646					return err;
2647			}
2648			if (state->type & INTEL_PT_EX_STOP) {
2649				err = intel_pt_synth_exstop_sample(ptq);
2650				if (err)
2651					return err;
2652			}
2653			if (state->type & INTEL_PT_PWR_EXIT) {
2654				err = intel_pt_synth_pwrx_sample(ptq);
2655				if (err)
2656					return err;
2657			}
2658		}
2659	}
2660
2661	if (state->type & INTEL_PT_INSTRUCTION) {
2662		if (pt->sample_instructions) {
2663			err = intel_pt_synth_instruction_sample(ptq);
2664			if (err)
2665				return err;
2666		}
2667		if (pt->sample_cycles) {
2668			err = intel_pt_synth_cycle_sample(ptq);
2669			if (err)
2670				return err;
2671		}
2672	}
2673
2674	if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) {
2675		err = intel_pt_synth_transaction_sample(ptq);
2676		if (err)
2677			return err;
2678	}
2679
2680	if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) {
2681		err = intel_pt_synth_ptwrite_sample(ptq);
2682		if (err)
2683			return err;
2684	}
2685
2686	if (!(state->type & INTEL_PT_BRANCH))
2687		return 0;
2688
2689	if (pt->use_thread_stack) {
2690		thread_stack__event(ptq->thread, ptq->cpu, ptq->flags,
2691				    state->from_ip, state->to_ip, ptq->insn_len,
2692				    state->trace_nr, pt->callstack,
2693				    pt->br_stack_sz_plus,
2694				    pt->mispred_all);
2695	} else {
2696		thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr);
2697	}
2698
2699	if (pt->sample_branches) {
2700		if (state->from_nr != state->to_nr &&
2701		    state->from_ip && state->to_ip) {
2702			struct intel_pt_state *st = (struct intel_pt_state *)state;
2703			u64 to_ip = st->to_ip;
2704			u64 from_ip = st->from_ip;
2705
2706			/*
2707			 * perf cannot handle having different machines for ip
2708			 * and addr, so create 2 branches.
2709			 */
2710			st->to_ip = 0;
2711			err = intel_pt_synth_branch_sample(ptq);
2712			if (err)
2713				return err;
2714			st->from_ip = 0;
2715			st->to_ip = to_ip;
2716			err = intel_pt_synth_branch_sample(ptq);
2717			st->from_ip = from_ip;
2718		} else {
2719			err = intel_pt_synth_branch_sample(ptq);
2720		}
2721		if (err)
2722			return err;
2723	}
2724
2725	if (!ptq->sync_switch)
2726		return 0;
2727
2728	if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
2729		switch (ptq->switch_state) {
2730		case INTEL_PT_SS_NOT_TRACING:
2731		case INTEL_PT_SS_UNKNOWN:
2732		case INTEL_PT_SS_EXPECTING_SWITCH_IP:
2733			err = intel_pt_next_tid(pt, ptq);
2734			if (err)
2735				return err;
2736			ptq->switch_state = INTEL_PT_SS_TRACING;
2737			break;
2738		default:
2739			ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
2740			return 1;
2741		}
2742	} else if (!state->to_ip) {
2743		ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
2744	} else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
2745		ptq->switch_state = INTEL_PT_SS_UNKNOWN;
2746	} else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
2747		   state->to_ip == pt->ptss_ip &&
2748		   (ptq->flags & PERF_IP_FLAG_CALL)) {
2749		ptq->switch_state = INTEL_PT_SS_TRACING;
2750	}
2751
2752	return 0;
2753}
2754
2755static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
2756{
2757	struct machine *machine = pt->machine;
2758	struct map *map;
2759	struct symbol *sym, *start;
2760	u64 ip, switch_ip = 0;
2761	const char *ptss;
2762
2763	if (ptss_ip)
2764		*ptss_ip = 0;
2765
2766	map = machine__kernel_map(machine);
2767	if (!map)
2768		return 0;
2769
2770	if (map__load(map))
2771		return 0;
2772
2773	start = dso__first_symbol(map__dso(map));
2774
2775	for (sym = start; sym; sym = dso__next_symbol(sym)) {
2776		if (sym->binding == STB_GLOBAL &&
2777		    !strcmp(sym->name, "__switch_to")) {
2778			ip = map__unmap_ip(map, sym->start);
2779			if (ip >= map__start(map) && ip < map__end(map)) {
2780				switch_ip = ip;
2781				break;
2782			}
2783		}
2784	}
2785
2786	if (!switch_ip || !ptss_ip)
2787		return 0;
2788
2789	if (pt->have_sched_switch == 1)
2790		ptss = "perf_trace_sched_switch";
2791	else
2792		ptss = "__perf_event_task_sched_out";
2793
2794	for (sym = start; sym; sym = dso__next_symbol(sym)) {
2795		if (!strcmp(sym->name, ptss)) {
2796			ip = map__unmap_ip(map, sym->start);
2797			if (ip >= map__start(map) && ip < map__end(map)) {
2798				*ptss_ip = ip;
2799				break;
2800			}
2801		}
2802	}
2803
2804	return switch_ip;
2805}
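
/*
 * Note: the "switch IP" (__switch_to) is where the trace itself reveals a
 * context switch, whereas ptss_ip identifies the function that generates the
 * sideband switch event; a call to it is taken as confirmation that tracing
 * is active.
 */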
2806
2807static void intel_pt_enable_sync_switch(struct intel_pt *pt)
2808{
2809	unsigned int i;
2810
2811	if (pt->sync_switch_not_supported)
2812		return;
2813
2814	pt->sync_switch = true;
2815
2816	for (i = 0; i < pt->queues.nr_queues; i++) {
2817		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
2818		struct intel_pt_queue *ptq = queue->priv;
2819
2820		if (ptq)
2821			ptq->sync_switch = true;
2822	}
2823}
2824
2825static void intel_pt_disable_sync_switch(struct intel_pt *pt)
2826{
2827	unsigned int i;
2828
2829	pt->sync_switch = false;
2830
2831	for (i = 0; i < pt->queues.nr_queues; i++) {
2832		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
2833		struct intel_pt_queue *ptq = queue->priv;
2834
2835		if (ptq) {
2836			ptq->sync_switch = false;
2837			intel_pt_next_tid(pt, ptq);
2838		}
2839	}
2840}
2841
2842/*
2843 * To filter against time ranges, it is only necessary to look at the next start
2844 * or end time.
2845 */
2846static bool intel_pt_next_time(struct intel_pt_queue *ptq)
2847{
2848	struct intel_pt *pt = ptq->pt;
2849
2850	if (ptq->sel_start) {
2851		/* Next time is an end time */
2852		ptq->sel_start = false;
2853		ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].end;
2854		return true;
2855	} else if (ptq->sel_idx + 1 < pt->range_cnt) {
2856		/* Next time is a start time */
2857		ptq->sel_start = true;
2858		ptq->sel_idx += 1;
2859		ptq->sel_timestamp = pt->time_ranges[ptq->sel_idx].start;
2860		return true;
2861	}
2862
2863	/* No next time */
2864	return false;
2865}
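
/*
 * Note: sel_timestamp alternates between range boundaries, e.g. for
 * --time 10%-20%,30%-40% the sequence is: start of range 0, end of range 0,
 * start of range 1, end of range 1, after which there is no next time.
 */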
2866
2867static int intel_pt_time_filter(struct intel_pt_queue *ptq, u64 *ff_timestamp)
2868{
2869	int err;
2870
2871	while (1) {
2872		if (ptq->sel_start) {
2873			if (ptq->timestamp >= ptq->sel_timestamp) {
2874				/* After start time, so consider next time */
2875				intel_pt_next_time(ptq);
2876				if (!ptq->sel_timestamp) {
2877					/* No end time */
2878					return 0;
2879				}
2880				/* Check against end time */
2881				continue;
2882			}
2883			/* Before start time, so fast forward */
2884			ptq->have_sample = false;
2885			if (ptq->sel_timestamp > *ff_timestamp) {
2886				if (ptq->sync_switch) {
2887					intel_pt_next_tid(ptq->pt, ptq);
2888					ptq->switch_state = INTEL_PT_SS_UNKNOWN;
2889				}
2890				*ff_timestamp = ptq->sel_timestamp;
2891				err = intel_pt_fast_forward(ptq->decoder,
2892							    ptq->sel_timestamp);
2893				if (err)
2894					return err;
2895			}
2896			return 0;
2897		} else if (ptq->timestamp > ptq->sel_timestamp) {
2898			/* After end time, so consider next time */
2899			if (!intel_pt_next_time(ptq)) {
2900				/* No next time range, so stop decoding */
2901				ptq->have_sample = false;
2902				ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
2903				return 1;
2904			}
2905			/* Check against next start time */
2906			continue;
2907		} else {
2908			/* Before end time */
2909			return 0;
2910		}
2911	}
2912}
2913
2914static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
2915{
2916	const struct intel_pt_state *state = ptq->state;
2917	struct intel_pt *pt = ptq->pt;
2918	u64 ff_timestamp = 0;
2919	int err;
2920
2921	if (!pt->kernel_start) {
2922		pt->kernel_start = machine__kernel_start(pt->machine);
2923		if (pt->per_cpu_mmaps &&
2924		    (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
2925		    !pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
2926		    !pt->sampling_mode && !pt->synth_opts.vm_time_correlation) {
2927			pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
2928			if (pt->switch_ip) {
2929				intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
2930					     pt->switch_ip, pt->ptss_ip);
2931				intel_pt_enable_sync_switch(pt);
2932			}
2933		}
2934	}
2935
2936	intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
2937		     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
2938	while (1) {
2939		err = intel_pt_sample(ptq);
2940		if (err)
2941			return err;
2942
2943		state = intel_pt_decode(ptq->decoder);
2944		if (state->err) {
2945			if (state->err == INTEL_PT_ERR_NODATA)
2946				return 1;
2947			if (ptq->sync_switch &&
2948			    state->from_ip >= pt->kernel_start) {
2949				ptq->sync_switch = false;
2950				intel_pt_next_tid(pt, ptq);
2951			}
2952			ptq->timestamp = state->est_timestamp;
2953			if (pt->synth_opts.errors) {
2954				err = intel_ptq_synth_error(ptq, state);
2955				if (err)
2956					return err;
2957			}
2958			continue;
2959		}
2960
2961		ptq->state = state;
2962		ptq->have_sample = true;
2963		intel_pt_sample_flags(ptq);
2964
2965		/* Use estimated TSC upon return to user space */
2966		if (pt->est_tsc &&
2967		    (state->from_ip >= pt->kernel_start || !state->from_ip) &&
2968		    state->to_ip && state->to_ip < pt->kernel_start) {
2969			intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
2970				     state->timestamp, state->est_timestamp);
2971			ptq->timestamp = state->est_timestamp;
2972		/* Use estimated TSC in unknown switch state */
2973		} else if (ptq->sync_switch &&
2974			   ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
2975			   intel_pt_is_switch_ip(ptq, state->to_ip) &&
2976			   ptq->next_tid == -1) {
2977			intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
2978				     state->timestamp, state->est_timestamp);
2979			ptq->timestamp = state->est_timestamp;
2980		} else if (state->timestamp > ptq->timestamp) {
2981			ptq->timestamp = state->timestamp;
2982		}
2983
2984		if (ptq->sel_timestamp) {
2985			err = intel_pt_time_filter(ptq, &ff_timestamp);
2986			if (err)
2987				return err;
2988		}
2989
2990		if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
2991			*timestamp = ptq->timestamp;
2992			return 0;
2993		}
2994	}
2995	return 0;
2996}
2997
2998static inline int intel_pt_update_queues(struct intel_pt *pt)
2999{
3000	if (pt->queues.new_data) {
3001		pt->queues.new_data = false;
3002		return intel_pt_setup_queues(pt);
3003	}
3004	return 0;
3005}
3006
3007static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp)
3008{
3009	unsigned int queue_nr;
3010	u64 ts;
3011	int ret;
3012
3013	while (1) {
3014		struct auxtrace_queue *queue;
3015		struct intel_pt_queue *ptq;
3016
3017		if (!pt->heap.heap_cnt)
3018			return 0;
3019
3020		if (pt->heap.heap_array[0].ordinal >= timestamp)
3021			return 0;
3022
3023		queue_nr = pt->heap.heap_array[0].queue_nr;
3024		queue = &pt->queues.queue_array[queue_nr];
3025		ptq = queue->priv;
3026
3027		intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
3028			     queue_nr, pt->heap.heap_array[0].ordinal,
3029			     timestamp);
3030
3031		auxtrace_heap__pop(&pt->heap);
3032
3033		if (pt->heap.heap_cnt) {
3034			ts = pt->heap.heap_array[0].ordinal + 1;
3035			if (ts > timestamp)
3036				ts = timestamp;
3037		} else {
3038			ts = timestamp;
3039		}
3040
3041		intel_pt_set_pid_tid_cpu(pt, queue);
3042
3043		ret = intel_pt_run_decoder(ptq, &ts);
3044
3045		if (ret < 0) {
3046			auxtrace_heap__add(&pt->heap, queue_nr, ts);
3047			return ret;
3048		}
3049
3050		if (!ret) {
3051			ret = auxtrace_heap__add(&pt->heap, queue_nr, ts);
3052			if (ret < 0)
3053				return ret;
3054		} else {
3055			ptq->on_heap = false;
3056		}
3057	}
3058
3059	return 0;
3060}
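
/*
 * Note: the auxtrace heap orders queues by their next timestamp, so each
 * iteration above pops the oldest queue and lets it decode up to just past
 * the next-oldest queue's timestamp (ts), preserving a global time order
 * across queues.
 */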
3061
3062static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
3063					    u64 time_)
3064{
3065	struct auxtrace_queues *queues = &pt->queues;
3066	unsigned int i;
3067	u64 ts = 0;
3068
3069	for (i = 0; i < queues->nr_queues; i++) {
3070		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
3071		struct intel_pt_queue *ptq = queue->priv;
3072
3073		if (ptq && (tid == -1 || ptq->tid == tid)) {
3074			ptq->time = time_;
3075			intel_pt_set_pid_tid_cpu(pt, queue);
3076			intel_pt_run_decoder(ptq, &ts);
3077		}
3078	}
3079	return 0;
3080}
3081
3082static void intel_pt_sample_set_pid_tid_cpu(struct intel_pt_queue *ptq,
3083					    struct auxtrace_queue *queue,
3084					    struct perf_sample *sample)
3085{
3086	struct machine *m = ptq->pt->machine;
3087
3088	ptq->pid = sample->pid;
3089	ptq->tid = sample->tid;
3090	ptq->cpu = queue->cpu;
3091
3092	intel_pt_log("queue %u cpu %d pid %d tid %d\n",
3093		     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
3094
3095	thread__zput(ptq->thread);
3096
3097	if (ptq->tid == -1)
3098		return;
3099
3100	if (ptq->pid == -1) {
3101		ptq->thread = machine__find_thread(m, -1, ptq->tid);
3102		if (ptq->thread)
3103			ptq->pid = thread__pid(ptq->thread);
3104		return;
3105	}
3106
3107	ptq->thread = machine__findnew_thread(m, ptq->pid, ptq->tid);
3108}
3109
3110static int intel_pt_process_timeless_sample(struct intel_pt *pt,
3111					    struct perf_sample *sample)
3112{
3113	struct auxtrace_queue *queue;
3114	struct intel_pt_queue *ptq;
3115	u64 ts = 0;
3116
3117	queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session);
3118	if (!queue)
3119		return -EINVAL;
3120
3121	ptq = queue->priv;
3122	if (!ptq)
3123		return 0;
3124
3125	ptq->stop = false;
3126	ptq->time = sample->time;
3127	intel_pt_sample_set_pid_tid_cpu(ptq, queue, sample);
3128	intel_pt_run_decoder(ptq, &ts);
3129	return 0;
3130}
3131
3132static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
3133{
3134	return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
3135				    sample->pid, sample->tid, 0, sample->time,
3136				    sample->machine_pid, sample->vcpu);
3137}
3138
3139static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
3140{
3141	unsigned i, j;
3142
3143	if (cpu < 0 || !pt->queues.nr_queues)
3144		return NULL;
3145
3146	if ((unsigned)cpu >= pt->queues.nr_queues)
3147		i = pt->queues.nr_queues - 1;
3148	else
3149		i = cpu;
3150
3151	if (pt->queues.queue_array[i].cpu == cpu)
3152		return pt->queues.queue_array[i].priv;
3153
3154	for (j = 0; i > 0; j++) {
3155		if (pt->queues.queue_array[--i].cpu == cpu)
3156			return pt->queues.queue_array[i].priv;
3157	}
3158
3159	for (; j < pt->queues.nr_queues; j++) {
3160		if (pt->queues.queue_array[j].cpu == cpu)
3161			return pt->queues.queue_array[j].priv;
3162	}
3163
3164	return NULL;
3165}
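
/*
 * Note: with per-cpu mmaps, queue numbers normally correspond to CPU
 * numbers, so the lookup above starts at queue_array[cpu] and scans
 * downwards then upwards only as a fallback.
 */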
3166
3167static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
3168				u64 timestamp)
3169{
3170	struct intel_pt_queue *ptq;
3171	int err;
3172
3173	if (!pt->sync_switch)
3174		return 1;
3175
3176	ptq = intel_pt_cpu_to_ptq(pt, cpu);
3177	if (!ptq || !ptq->sync_switch)
3178		return 1;
3179
3180	switch (ptq->switch_state) {
3181	case INTEL_PT_SS_NOT_TRACING:
3182		break;
3183	case INTEL_PT_SS_UNKNOWN:
3184	case INTEL_PT_SS_TRACING:
3185		ptq->next_tid = tid;
3186		ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
3187		return 0;
3188	case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
3189		if (!ptq->on_heap) {
3190			ptq->timestamp = perf_time_to_tsc(timestamp,
3191							  &pt->tc);
3192			err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
3193						 ptq->timestamp);
3194			if (err)
3195				return err;
3196			ptq->on_heap = true;
3197		}
3198		ptq->switch_state = INTEL_PT_SS_TRACING;
3199		break;
3200	case INTEL_PT_SS_EXPECTING_SWITCH_IP:
3201		intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
3202		break;
3203	default:
3204		break;
3205	}
3206
3207	ptq->next_tid = -1;
3208
3209	return 1;
3210}
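
/*
 * Rough sketch of the sync_switch states handled above: a switch event seen
 * in UNKNOWN or TRACING state records next_tid and waits for the trace to
 * reach the kernel switch IP (EXPECTING_SWITCH_IP); if instead the trace
 * reaches the switch IP first, decoding pauses (EXPECTING_SWITCH_EVENT)
 * until the switch event arrives and puts the queue back on the heap.
 */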
3211
3212#ifdef HAVE_LIBTRACEEVENT
3213static int intel_pt_process_switch(struct intel_pt *pt,
3214				   struct perf_sample *sample)
3215{
3216	pid_t tid;
3217	int cpu, ret;
3218	struct evsel *evsel = evlist__id2evsel(pt->session->evlist, sample->id);
3219
3220	if (evsel != pt->switch_evsel)
3221		return 0;
3222
3223	tid = evsel__intval(evsel, sample, "next_pid");
3224	cpu = sample->cpu;
3225
3226	intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
3227		     cpu, tid, sample->time, perf_time_to_tsc(sample->time,
3228		     &pt->tc));
3229
3230	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
3231	if (ret <= 0)
3232		return ret;
3233
3234	return machine__set_current_tid(pt->machine, cpu, -1, tid);
3235}
3236#endif /* HAVE_LIBTRACEEVENT */
3237
3238static int intel_pt_context_switch_in(struct intel_pt *pt,
3239				      struct perf_sample *sample)
3240{
3241	pid_t pid = sample->pid;
3242	pid_t tid = sample->tid;
3243	int cpu = sample->cpu;
3244
3245	if (pt->sync_switch) {
3246		struct intel_pt_queue *ptq;
3247
3248		ptq = intel_pt_cpu_to_ptq(pt, cpu);
3249		if (ptq && ptq->sync_switch) {
3250			ptq->next_tid = -1;
3251			switch (ptq->switch_state) {
3252			case INTEL_PT_SS_NOT_TRACING:
3253			case INTEL_PT_SS_UNKNOWN:
3254			case INTEL_PT_SS_TRACING:
3255				break;
3256			case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
3257			case INTEL_PT_SS_EXPECTING_SWITCH_IP:
3258				ptq->switch_state = INTEL_PT_SS_TRACING;
3259				break;
3260			default:
3261				break;
3262			}
3263		}
3264	}
3265
3266	/*
3267	 * If the current tid has not been updated yet, ensure it is updated
3268	 * now that a "switch in" event has occurred.
3269	 */
3270	if (machine__get_current_tid(pt->machine, cpu) == tid)
3271		return 0;
3272
3273	return machine__set_current_tid(pt->machine, cpu, pid, tid);
3274}
3275
3276static int intel_pt_guest_context_switch(struct intel_pt *pt,
3277					 union perf_event *event,
3278					 struct perf_sample *sample)
3279{
3280	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
3281	struct machines *machines = &pt->session->machines;
3282	struct machine *machine = machines__find(machines, sample->machine_pid);
3283
3284	pt->have_guest_sideband = true;
3285
3286	/*
3287	 * sync_switch cannot handle guest machines at present, so just disable
3288	 * it.
3289	 */
3290	pt->sync_switch_not_supported = true;
3291	if (pt->sync_switch)
3292		intel_pt_disable_sync_switch(pt);
3293
3294	if (out)
3295		return 0;
3296
3297	if (!machine)
3298		return -EINVAL;
3299
3300	return machine__set_current_tid(machine, sample->vcpu, sample->pid, sample->tid);
3301}
3302
3303static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
3304				   struct perf_sample *sample)
3305{
3306	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
3307	pid_t pid, tid;
3308	int cpu, ret;
3309
3310	if (perf_event__is_guest(event))
3311		return intel_pt_guest_context_switch(pt, event, sample);
3312
3313	cpu = sample->cpu;
3314
3315	if (pt->have_sched_switch == 3) {
3316		if (!out)
3317			return intel_pt_context_switch_in(pt, sample);
3318		if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
3319			pr_err("Expecting CPU-wide context switch event\n");
3320			return -EINVAL;
3321		}
3322		pid = event->context_switch.next_prev_pid;
3323		tid = event->context_switch.next_prev_tid;
3324	} else {
3325		if (out)
3326			return 0;
3327		pid = sample->pid;
3328		tid = sample->tid;
3329	}
3330
3331	if (tid == -1)
3332		intel_pt_log("context_switch event has no tid\n");
3333
3334	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
3335	if (ret <= 0)
3336		return ret;
3337
3338	return machine__set_current_tid(pt->machine, cpu, pid, tid);
3339}
3340
3341static int intel_pt_process_itrace_start(struct intel_pt *pt,
3342					 union perf_event *event,
3343					 struct perf_sample *sample)
3344{
3345	if (!pt->per_cpu_mmaps)
3346		return 0;
3347
3348	intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
3349		     sample->cpu, event->itrace_start.pid,
3350		     event->itrace_start.tid, sample->time,
3351		     perf_time_to_tsc(sample->time, &pt->tc));
3352
3353	return machine__set_current_tid(pt->machine, sample->cpu,
3354					event->itrace_start.pid,
3355					event->itrace_start.tid);
3356}
3357
3358static int intel_pt_process_aux_output_hw_id(struct intel_pt *pt,
3359					     union perf_event *event,
3360					     struct perf_sample *sample)
3361{
3362	u64 hw_id = event->aux_output_hw_id.hw_id;
3363	struct auxtrace_queue *queue;
3364	struct intel_pt_queue *ptq;
3365	struct evsel *evsel;
3366
3367	queue = auxtrace_queues__sample_queue(&pt->queues, sample, pt->session);
3368	evsel = evlist__id2evsel_strict(pt->session->evlist, sample->id);
3369	if (!queue || !queue->priv || !evsel || hw_id >= INTEL_PT_MAX_PEBS) {
3370		pr_err("Bad AUX output hardware ID\n");
3371		return -EINVAL;
3372	}
3373
3374	ptq = queue->priv;
3375
3376	ptq->pebs[hw_id].evsel = evsel;
3377	ptq->pebs[hw_id].id = sample->id;
3378
3379	return 0;
3380}
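
/*
 * A PERF_RECORD_AUX_OUTPUT_HW_ID event ties a hardware-assigned PEBS index to
 * a perf event id, so that PEBS-via-PT records found in the trace can later be
 * attributed to the right evsel. For example (illustrative values only),
 * hw_id 0 arriving with sample->id 42 means any decoded PEBS record carrying
 * hardware index 0 is synthesized against the evsel that owns id 42.
 */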
3381
3382static int intel_pt_find_map(struct thread *thread, u8 cpumode, u64 addr,
3383			     struct addr_location *al)
3384{
3385	if (!al->map || addr < map__start(al->map) || addr >= map__end(al->map)) {
3386		if (!thread__find_map(thread, cpumode, addr, al))
3387			return -1;
3388	}
3389
3390	return 0;
3391}
3392
3393/* Invalidate all instruction cache entries that overlap the text poke */
3394static int intel_pt_text_poke(struct intel_pt *pt, union perf_event *event)
3395{
3396	u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
3397	u64 addr = event->text_poke.addr + event->text_poke.new_len - 1;
3398	/* Assume the text poke begins in a basic block of no more than 4096 bytes */
3399	int cnt = 4096 + event->text_poke.new_len;
3400	struct thread *thread = pt->unknown_thread;
3401	struct addr_location al;
3402	struct machine *machine = pt->machine;
3403	struct intel_pt_cache_entry *e;
3404	u64 offset;
3405	int ret = 0;
3406
3407	addr_location__init(&al);
3408	if (!event->text_poke.new_len)
3409		goto out;
3410
3411	for (; cnt; cnt--, addr--) {
3412		struct dso *dso;
3413
3414		if (intel_pt_find_map(thread, cpumode, addr, &al)) {
3415			if (addr < event->text_poke.addr)
3416				goto out;
3417			continue;
3418		}
3419
3420		dso = map__dso(al.map);
3421		if (!dso || !dso->auxtrace_cache)
3422			continue;
3423
3424		offset = map__map_ip(al.map, addr);
3425
3426		e = intel_pt_cache_lookup(dso, machine, offset);
3427		if (!e)
3428			continue;
3429
3430		if (addr + e->byte_cnt + e->length <= event->text_poke.addr) {
3431			/*
3432		 * No overlap. Working backwards, there cannot be another
3433			 * basic block that overlaps the text poke if there is a
3434			 * branch instruction before the text poke address.
3435			 */
3436			if (e->branch != INTEL_PT_BR_NO_BRANCH)
3437				goto out;
3438		} else {
3439			intel_pt_cache_invalidate(dso, machine, offset);
3440			intel_pt_log("Invalidated instruction cache for %s at %#"PRIx64"\n",
3441				     dso->long_name, addr);
3442		}
3443	}
3444out:
3445	addr_location__exit(&al);
3446	return ret;
3447}
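
/*
 * Illustrative walk-through (hypothetical addresses): for a text poke at
 * 0x1000 with new_len 4, the scan above starts at 0x1003 and works backwards
 * over at most 4096 + 4 bytes. A cached basic block found at the scan address
 * whose span (e->byte_cnt + e->length) ends at or before 0x1000 does not
 * overlap the poke, and if that block ends in a branch no earlier block can
 * reach into the poked bytes, so the scan stops early. Any overlapping entry
 * is invalidated so the poked code is decoded afresh.
 */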
3448
3449static int intel_pt_process_event(struct perf_session *session,
3450				  union perf_event *event,
3451				  struct perf_sample *sample,
3452				  struct perf_tool *tool)
3453{
3454	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3455					   auxtrace);
3456	u64 timestamp;
3457	int err = 0;
3458
3459	if (dump_trace)
3460		return 0;
3461
3462	if (!tool->ordered_events) {
3463		pr_err("Intel Processor Trace requires ordered events\n");
3464		return -EINVAL;
3465	}
3466
3467	if (sample->time && sample->time != (u64)-1)
3468		timestamp = perf_time_to_tsc(sample->time, &pt->tc);
3469	else
3470		timestamp = 0;
3471
3472	if (timestamp || pt->timeless_decoding) {
3473		err = intel_pt_update_queues(pt);
3474		if (err)
3475			return err;
3476	}
3477
3478	if (pt->timeless_decoding) {
3479		if (pt->sampling_mode) {
3480			if (sample->aux_sample.size)
3481				err = intel_pt_process_timeless_sample(pt,
3482								       sample);
3483		} else if (event->header.type == PERF_RECORD_EXIT) {
3484			err = intel_pt_process_timeless_queues(pt,
3485							       event->fork.tid,
3486							       sample->time);
3487		}
3488	} else if (timestamp) {
3489		if (!pt->first_timestamp)
3490			intel_pt_first_timestamp(pt, timestamp);
3491		err = intel_pt_process_queues(pt, timestamp);
3492	}
3493	if (err)
3494		return err;
3495
3496	if (event->header.type == PERF_RECORD_SAMPLE) {
3497		if (pt->synth_opts.add_callchain && !sample->callchain)
3498			intel_pt_add_callchain(pt, sample);
3499		if (pt->synth_opts.add_last_branch && !sample->branch_stack)
3500			intel_pt_add_br_stack(pt, sample);
3501	}
3502
3503	if (event->header.type == PERF_RECORD_AUX &&
3504	    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
3505	    pt->synth_opts.errors) {
3506		err = intel_pt_lost(pt, sample);
3507		if (err)
3508			return err;
3509	}
3510
3511#ifdef HAVE_LIBTRACEEVENT
3512	if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE)
3513		err = intel_pt_process_switch(pt, sample);
3514	else
3515#endif
3516	if (event->header.type == PERF_RECORD_ITRACE_START)
3517		err = intel_pt_process_itrace_start(pt, event, sample);
3518	else if (event->header.type == PERF_RECORD_AUX_OUTPUT_HW_ID)
3519		err = intel_pt_process_aux_output_hw_id(pt, event, sample);
3520	else if (event->header.type == PERF_RECORD_SWITCH ||
3521		 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
3522		err = intel_pt_context_switch(pt, event, sample);
3523
3524	if (!err && event->header.type == PERF_RECORD_TEXT_POKE)
3525		err = intel_pt_text_poke(pt, event);
3526
3527	if (intel_pt_enable_logging && intel_pt_log_events(pt, sample->time)) {
3528		intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ",
3529			     event->header.type, sample->cpu, sample->time, timestamp);
3530		intel_pt_log_event(event);
3531	}
3532
3533	return err;
3534}
3535
3536static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
3537{
3538	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3539					   auxtrace);
3540	int ret;
3541
3542	if (dump_trace)
3543		return 0;
3544
3545	if (!tool->ordered_events)
3546		return -EINVAL;
3547
3548	ret = intel_pt_update_queues(pt);
3549	if (ret < 0)
3550		return ret;
3551
3552	if (pt->timeless_decoding)
3553		return intel_pt_process_timeless_queues(pt, -1,
3554							MAX_TIMESTAMP - 1);
3555
3556	return intel_pt_process_queues(pt, MAX_TIMESTAMP);
3557}
3558
3559static void intel_pt_free_events(struct perf_session *session)
3560{
3561	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3562					   auxtrace);
3563	struct auxtrace_queues *queues = &pt->queues;
3564	unsigned int i;
3565
3566	for (i = 0; i < queues->nr_queues; i++) {
3567		intel_pt_free_queue(queues->queue_array[i].priv);
3568		queues->queue_array[i].priv = NULL;
3569	}
3570	intel_pt_log_disable();
3571	auxtrace_queues__free(queues);
3572}
3573
3574static void intel_pt_free(struct perf_session *session)
3575{
3576	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3577					   auxtrace);
3578
3579	auxtrace_heap__free(&pt->heap);
3580	intel_pt_free_events(session);
3581	session->auxtrace = NULL;
3582	intel_pt_free_vmcs_info(pt);
3583	thread__put(pt->unknown_thread);
3584	addr_filters__exit(&pt->filts);
3585	zfree(&pt->chain);
3586	zfree(&pt->filter);
3587	zfree(&pt->time_ranges);
3588	zfree(&pt->br_stack);
3589	free(pt);
3590}
3591
3592static bool intel_pt_evsel_is_auxtrace(struct perf_session *session,
3593				       struct evsel *evsel)
3594{
3595	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3596					   auxtrace);
3597
3598	return evsel->core.attr.type == pt->pmu_type;
3599}
3600
3601static int intel_pt_process_auxtrace_event(struct perf_session *session,
3602					   union perf_event *event,
3603					   struct perf_tool *tool __maybe_unused)
3604{
3605	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3606					   auxtrace);
3607
3608	if (!pt->data_queued) {
3609		struct auxtrace_buffer *buffer;
3610		off_t data_offset;
3611		int fd = perf_data__fd(session->data);
3612		int err;
3613
3614		if (perf_data__is_pipe(session->data)) {
3615			data_offset = 0;
3616		} else {
3617			data_offset = lseek(fd, 0, SEEK_CUR);
3618			if (data_offset == -1)
3619				return -errno;
3620		}
3621
3622		err = auxtrace_queues__add_event(&pt->queues, session, event,
3623						 data_offset, &buffer);
3624		if (err)
3625			return err;
3626
3627		/* Dump here now that we have copied a piped trace out of the pipe */
3628		if (dump_trace) {
3629			if (auxtrace_buffer__get_data(buffer, fd)) {
3630				intel_pt_dump_event(pt, buffer->data,
3631						    buffer->size);
3632				auxtrace_buffer__put_data(buffer);
3633			}
3634		}
3635	}
3636
3637	return 0;
3638}
3639
3640static int intel_pt_queue_data(struct perf_session *session,
3641			       struct perf_sample *sample,
3642			       union perf_event *event, u64 data_offset)
3643{
3644	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
3645					   auxtrace);
3646	u64 timestamp;
3647
3648	if (event) {
3649		return auxtrace_queues__add_event(&pt->queues, session, event,
3650						  data_offset, NULL);
3651	}
3652
3653	if (sample->time && sample->time != (u64)-1)
3654		timestamp = perf_time_to_tsc(sample->time, &pt->tc);
3655	else
3656		timestamp = 0;
3657
3658	return auxtrace_queues__add_sample(&pt->queues, session, sample,
3659					   data_offset, timestamp);
3660}
3661
3662struct intel_pt_synth {
3663	struct perf_tool dummy_tool;
3664	struct perf_session *session;
3665};
3666
3667static int intel_pt_event_synth(struct perf_tool *tool,
3668				union perf_event *event,
3669				struct perf_sample *sample __maybe_unused,
3670				struct machine *machine __maybe_unused)
3671{
3672	struct intel_pt_synth *intel_pt_synth =
3673			container_of(tool, struct intel_pt_synth, dummy_tool);
3674
3675	return perf_session__deliver_synth_event(intel_pt_synth->session, event,
3676						 NULL);
3677}
3678
3679static int intel_pt_synth_event(struct perf_session *session, const char *name,
3680				struct perf_event_attr *attr, u64 id)
3681{
3682	struct intel_pt_synth intel_pt_synth;
3683	int err;
3684
3685	pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
3686		 name, id, (u64)attr->sample_type);
3687
3688	memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
3689	intel_pt_synth.session = session;
3690
3691	err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
3692					  &id, intel_pt_event_synth);
3693	if (err)
3694		pr_err("%s: failed to synthesize '%s' event type\n",
3695		       __func__, name);
3696
3697	return err;
3698}
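
/*
 * intel_pt_synth_event() injects a synthetic attribute (a
 * PERF_RECORD_HEADER_ATTR-style event) into the session via the dummy tool
 * above, so that samples later synthesized with this id can be resolved to an
 * evsel like any other recorded event.
 */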
3699
3700static void intel_pt_set_event_name(struct evlist *evlist, u64 id,
3701				    const char *name)
3702{
3703	struct evsel *evsel;
3704
3705	evlist__for_each_entry(evlist, evsel) {
3706		if (evsel->core.id && evsel->core.id[0] == id) {
3707			if (evsel->name)
3708				zfree(&evsel->name);
3709			evsel->name = strdup(name);
3710			break;
3711		}
3712	}
3713}
3714
3715static struct evsel *intel_pt_evsel(struct intel_pt *pt,
3716					 struct evlist *evlist)
3717{
3718	struct evsel *evsel;
3719
3720	evlist__for_each_entry(evlist, evsel) {
3721		if (evsel->core.attr.type == pt->pmu_type && evsel->core.ids)
3722			return evsel;
3723	}
3724
3725	return NULL;
3726}
3727
3728static int intel_pt_synth_events(struct intel_pt *pt,
3729				 struct perf_session *session)
3730{
3731	struct evlist *evlist = session->evlist;
3732	struct evsel *evsel = intel_pt_evsel(pt, evlist);
3733	struct perf_event_attr attr;
3734	u64 id;
3735	int err;
3736
3737	if (!evsel) {
3738		pr_debug("There are no selected events with Intel Processor Trace data\n");
3739		return 0;
3740	}
3741
3742	memset(&attr, 0, sizeof(struct perf_event_attr));
3743	attr.size = sizeof(struct perf_event_attr);
3744	attr.type = PERF_TYPE_HARDWARE;
3745	attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
3746	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
3747			    PERF_SAMPLE_PERIOD;
3748	if (pt->timeless_decoding)
3749		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
3750	else
3751		attr.sample_type |= PERF_SAMPLE_TIME;
3752	if (!pt->per_cpu_mmaps)
3753		attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
3754	attr.exclude_user = evsel->core.attr.exclude_user;
3755	attr.exclude_kernel = evsel->core.attr.exclude_kernel;
3756	attr.exclude_hv = evsel->core.attr.exclude_hv;
3757	attr.exclude_host = evsel->core.attr.exclude_host;
3758	attr.exclude_guest = evsel->core.attr.exclude_guest;
3759	attr.sample_id_all = evsel->core.attr.sample_id_all;
3760	attr.read_format = evsel->core.attr.read_format;
3761
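	/*
	 * Base synthesized event ids well away from the recorded events' ids;
	 * the + 1000000000 offset is presumably chosen simply to avoid
	 * clashing with ids already allocated for real events.
	 */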
3762	id = evsel->core.id[0] + 1000000000;
3763	if (!id)
3764		id = 1;
3765
3766	if (pt->synth_opts.branches) {
3767		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
3768		attr.sample_period = 1;
3769		attr.sample_type |= PERF_SAMPLE_ADDR;
3770		err = intel_pt_synth_event(session, "branches", &attr, id);
3771		if (err)
3772			return err;
3773		pt->sample_branches = true;
3774		pt->branches_sample_type = attr.sample_type;
3775		pt->branches_id = id;
3776		id += 1;
3777		attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
3778	}
3779
3780	if (pt->synth_opts.callchain)
3781		attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
3782	if (pt->synth_opts.last_branch) {
3783		attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
3784		/*
3785		 * We don't use the hardware index, but the sample generation
3786		 * code uses the new format branch_stack with this field,
3787		 * so the event attributes must indicate that it's present.
3788		 */
3789		attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
3790	}
3791
3792	if (pt->synth_opts.instructions) {
3793		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
3794		if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
3795			attr.sample_period =
3796				intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
3797		else
3798			attr.sample_period = pt->synth_opts.period;
3799		err = intel_pt_synth_event(session, "instructions", &attr, id);
3800		if (err)
3801			return err;
3802		pt->sample_instructions = true;
3803		pt->instructions_sample_type = attr.sample_type;
3804		pt->instructions_id = id;
3805		id += 1;
3806	}
3807
3808	if (pt->synth_opts.cycles) {
3809		attr.config = PERF_COUNT_HW_CPU_CYCLES;
3810		if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
3811			attr.sample_period =
3812				intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
3813		else
3814			attr.sample_period = pt->synth_opts.period;
3815		err = intel_pt_synth_event(session, "cycles", &attr, id);
3816		if (err)
3817			return err;
3818		pt->sample_cycles = true;
3819		pt->cycles_sample_type = attr.sample_type;
3820		pt->cycles_id = id;
3821		id += 1;
3822	}
3823
3824	attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD;
3825	attr.sample_period = 1;
3826
3827	if (pt->synth_opts.transactions) {
3828		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
3829		err = intel_pt_synth_event(session, "transactions", &attr, id);
3830		if (err)
3831			return err;
3832		pt->sample_transactions = true;
3833		pt->transactions_sample_type = attr.sample_type;
3834		pt->transactions_id = id;
3835		intel_pt_set_event_name(evlist, id, "transactions");
3836		id += 1;
3837	}
3838
3839	attr.type = PERF_TYPE_SYNTH;
3840	attr.sample_type |= PERF_SAMPLE_RAW;
3841
3842	if (pt->synth_opts.ptwrites) {
3843		attr.config = PERF_SYNTH_INTEL_PTWRITE;
3844		err = intel_pt_synth_event(session, "ptwrite", &attr, id);
3845		if (err)
3846			return err;
3847		pt->sample_ptwrites = true;
3848		pt->ptwrites_sample_type = attr.sample_type;
3849		pt->ptwrites_id = id;
3850		intel_pt_set_event_name(evlist, id, "ptwrite");
3851		id += 1;
3852	}
3853
3854	if (pt->synth_opts.pwr_events) {
3855		pt->sample_pwr_events = true;
3856		pt->pwr_events_sample_type = attr.sample_type;
3857
3858		attr.config = PERF_SYNTH_INTEL_CBR;
3859		err = intel_pt_synth_event(session, "cbr", &attr, id);
3860		if (err)
3861			return err;
3862		pt->cbr_id = id;
3863		intel_pt_set_event_name(evlist, id, "cbr");
3864		id += 1;
3865
3866		attr.config = PERF_SYNTH_INTEL_PSB;
3867		err = intel_pt_synth_event(session, "psb", &attr, id);
3868		if (err)
3869			return err;
3870		pt->psb_id = id;
3871		intel_pt_set_event_name(evlist, id, "psb");
3872		id += 1;
3873	}
3874
3875	if (pt->synth_opts.pwr_events && (evsel->core.attr.config & INTEL_PT_CFG_PWR_EVT_EN)) {
3876		attr.config = PERF_SYNTH_INTEL_MWAIT;
3877		err = intel_pt_synth_event(session, "mwait", &attr, id);
3878		if (err)
3879			return err;
3880		pt->mwait_id = id;
3881		intel_pt_set_event_name(evlist, id, "mwait");
3882		id += 1;
3883
3884		attr.config = PERF_SYNTH_INTEL_PWRE;
3885		err = intel_pt_synth_event(session, "pwre", &attr, id);
3886		if (err)
3887			return err;
3888		pt->pwre_id = id;
3889		intel_pt_set_event_name(evlist, id, "pwre");
3890		id += 1;
3891
3892		attr.config = PERF_SYNTH_INTEL_EXSTOP;
3893		err = intel_pt_synth_event(session, "exstop", &attr, id);
3894		if (err)
3895			return err;
3896		pt->exstop_id = id;
3897		intel_pt_set_event_name(evlist, id, "exstop");
3898		id += 1;
3899
3900		attr.config = PERF_SYNTH_INTEL_PWRX;
3901		err = intel_pt_synth_event(session, "pwrx", &attr, id);
3902		if (err)
3903			return err;
3904		pt->pwrx_id = id;
3905		intel_pt_set_event_name(evlist, id, "pwrx");
3906		id += 1;
3907	}
3908
3909	if (pt->synth_opts.intr_events && (evsel->core.attr.config & INTEL_PT_CFG_EVT_EN)) {
3910		attr.config = PERF_SYNTH_INTEL_EVT;
3911		err = intel_pt_synth_event(session, "evt", &attr, id);
3912		if (err)
3913			return err;
3914		pt->evt_sample_type = attr.sample_type;
3915		pt->evt_id = id;
3916		intel_pt_set_event_name(evlist, id, "evt");
3917		id += 1;
3918	}
3919
3920	if (pt->synth_opts.intr_events && pt->cap_event_trace) {
3921		attr.config = PERF_SYNTH_INTEL_IFLAG_CHG;
3922		err = intel_pt_synth_event(session, "iflag", &attr, id);
3923		if (err)
3924			return err;
3925		pt->iflag_chg_sample_type = attr.sample_type;
3926		pt->iflag_chg_id = id;
3927		intel_pt_set_event_name(evlist, id, "iflag");
3928		id += 1;
3929	}
3930
3931	return 0;
3932}
3933
3934static void intel_pt_setup_pebs_events(struct intel_pt *pt)
3935{
3936	struct evsel *evsel;
3937
3938	if (!pt->synth_opts.other_events)
3939		return;
3940
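	/*
	 * With exactly one aux_output event, PEBS-via-PT samples can be
	 * attributed to it directly (single_pebs). If a second aux_output
	 * event is found, single_pebs is cleared and attribution instead
	 * relies on the PERF_RECORD_AUX_OUTPUT_HW_ID mapping set up in
	 * intel_pt_process_aux_output_hw_id().
	 */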
3941	evlist__for_each_entry(pt->session->evlist, evsel) {
3942		if (evsel->core.attr.aux_output && evsel->core.id) {
3943			if (pt->single_pebs) {
3944				pt->single_pebs = false;
3945				return;
3946			}
3947			pt->single_pebs = true;
3948			pt->sample_pebs = true;
3949			pt->pebs_evsel = evsel;
3950		}
3951	}
3952}
3953
3954static struct evsel *intel_pt_find_sched_switch(struct evlist *evlist)
3955{
3956	struct evsel *evsel;
3957
3958	evlist__for_each_entry_reverse(evlist, evsel) {
3959		const char *name = evsel__name(evsel);
3960
3961		if (!strcmp(name, "sched:sched_switch"))
3962			return evsel;
3963	}
3964
3965	return NULL;
3966}
3967
3968static bool intel_pt_find_switch(struct evlist *evlist)
3969{
3970	struct evsel *evsel;
3971
3972	evlist__for_each_entry(evlist, evsel) {
3973		if (evsel->core.attr.context_switch)
3974			return true;
3975	}
3976
3977	return false;
3978}
3979
3980static int intel_pt_perf_config(const char *var, const char *value, void *data)
3981{
3982	struct intel_pt *pt = data;
3983
3984	if (!strcmp(var, "intel-pt.mispred-all"))
3985		pt->mispred_all = perf_config_bool(var, value);
3986
3987	if (!strcmp(var, "intel-pt.max-loops"))
3988		perf_config_int(&pt->max_loops, var, value);
3989
3990	return 0;
3991}
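
/*
 * These variables live in the perf config file. A minimal ~/.perfconfig
 * sketch (values illustrative only):
 *
 *	[intel-pt]
 *		mispred-all = true
 *		max-loops = 100000
 */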
3992
3993	/* Find the least TSC which converts to ns or later */
3994static u64 intel_pt_tsc_start(u64 ns, struct intel_pt *pt)
3995{
3996	u64 tsc, tm;
3997
3998	tsc = perf_time_to_tsc(ns, &pt->tc);
3999
4000	while (1) {
4001		tm = tsc_to_perf_time(tsc, &pt->tc);
4002		if (tm < ns)
4003			break;
4004		tsc -= 1;
4005	}
4006
4007	while (tm < ns)
4008		tm = tsc_to_perf_time(++tsc, &pt->tc);
4009
4010	return tsc;
4011}
4012
4013	/* Find the greatest TSC which converts to ns or earlier */
4014static u64 intel_pt_tsc_end(u64 ns, struct intel_pt *pt)
4015{
4016	u64 tsc, tm;
4017
4018	tsc = perf_time_to_tsc(ns, &pt->tc);
4019
4020	while (1) {
4021		tm = tsc_to_perf_time(tsc, &pt->tc);
4022		if (tm > ns)
4023			break;
4024		tsc += 1;
4025	}
4026
4027	while (tm > ns)
4028		tm = tsc_to_perf_time(--tsc, &pt->tc);
4029
4030	return tsc;
4031}
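
/*
 * Worked example for the two helpers above (illustrative numbers): suppose
 * successive TSC values 100, 101, 102 convert back to 9ns, 10ns, 11ns. For
 * ns = 10, intel_pt_tsc_start() first steps down until the conversion is
 * strictly earlier than 10ns (tsc 100 -> 9ns), then steps up to the first
 * value converting to 10ns or later, returning 101. intel_pt_tsc_end()
 * mirrors this and would return the greatest TSC converting to 10ns or
 * earlier. The two loops apparently compensate for rounding in the
 * TSC <-> perf-time conversion.
 */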
4032
4033static int intel_pt_setup_time_ranges(struct intel_pt *pt,
4034				      struct itrace_synth_opts *opts)
4035{
4036	struct perf_time_interval *p = opts->ptime_range;
4037	int n = opts->range_num;
4038	int i;
4039
4040	if (!n || !p || pt->timeless_decoding)
4041		return 0;
4042
4043	pt->time_ranges = calloc(n, sizeof(struct range));
4044	if (!pt->time_ranges)
4045		return -ENOMEM;
4046
4047	pt->range_cnt = n;
4048
4049	intel_pt_log("%s: %u range(s)\n", __func__, n);
4050
4051	for (i = 0; i < n; i++) {
4052		struct range *r = &pt->time_ranges[i];
4053		u64 ts = p[i].start;
4054		u64 te = p[i].end;
4055
4056		/*
4057		 * Take care to ensure the TSC range matches the perf-time range
4058		 * when converted back to perf-time.
4059		 */
4060		r->start = ts ? intel_pt_tsc_start(ts, pt) : 0;
4061		r->end   = te ? intel_pt_tsc_end(te, pt) : 0;
4062
4063		intel_pt_log("range %d: perf time interval: %"PRIu64" to %"PRIu64"\n",
4064			     i, ts, te);
4065		intel_pt_log("range %d: TSC time interval: %#"PRIx64" to %#"PRIx64"\n",
4066			     i, r->start, r->end);
4067	}
4068
4069	return 0;
4070}
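
/*
 * The perf-time ranges typically come from the --time option (e.g.
 * "--time 10.5,11.5" for a single interval); here each endpoint is mapped to
 * a TSC value so the decoder can compare directly against trace timestamps.
 */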
4071
4072static int intel_pt_parse_vm_tm_corr_arg(struct intel_pt *pt, char **args)
4073{
4074	struct intel_pt_vmcs_info *vmcs_info;
4075	u64 tsc_offset, vmcs;
4076	char *p = *args;
4077
4078	errno = 0;
4079
4080	p = skip_spaces(p);
4081	if (!*p)
4082		return 1;
4083
4084	tsc_offset = strtoull(p, &p, 0);
4085	if (errno)
4086		return -errno;
4087	p = skip_spaces(p);
4088	if (*p != ':') {
4089		pt->dflt_tsc_offset = tsc_offset;
4090		*args = p;
4091		return 0;
4092	}
4093	p += 1;
4094	while (1) {
4095		vmcs = strtoull(p, &p, 0);
4096		if (errno)
4097			return -errno;
4098		if (!vmcs)
4099			return -EINVAL;
4100		vmcs_info = intel_pt_findnew_vmcs(&pt->vmcs_info, vmcs, tsc_offset);
4101		if (!vmcs_info)
4102			return -ENOMEM;
4103		p = skip_spaces(p);
4104		if (*p != ',')
4105			break;
4106		p += 1;
4107	}
4108	*args = p;
4109	return 0;
4110}
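
/*
 * The argument string parsed above is a space-separated list of
 * "<tsc_offset>[:<vmcs>[,<vmcs>]...]" groups. For example (illustrative
 * values), "0x1000 0x2000:0x3fffc0,0x3fffd0" sets a default TSC offset of
 * 0x1000 and an offset of 0x2000 for the two listed VMCS addresses.
 */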
4111
4112static int intel_pt_parse_vm_tm_corr_args(struct intel_pt *pt)
4113{
4114	char *args = pt->synth_opts.vm_tm_corr_args;
4115	int ret;
4116
4117	if (!args)
4118		return 0;
4119
4120	do {
4121		ret = intel_pt_parse_vm_tm_corr_arg(pt, &args);
4122	} while (!ret);
4123
4124	if (ret < 0) {
4125		pr_err("Failed to parse VM Time Correlation options\n");
4126		return ret;
4127	}
4128
4129	return 0;
4130}
4131
4132static const char * const intel_pt_info_fmts[] = {
4133	[INTEL_PT_PMU_TYPE]		= "  PMU Type            %"PRId64"\n",
4134	[INTEL_PT_TIME_SHIFT]		= "  Time Shift          %"PRIu64"\n",
4135	[INTEL_PT_TIME_MULT]		= "  Time Multiplier     %"PRIu64"\n",
4136	[INTEL_PT_TIME_ZERO]		= "  Time Zero           %"PRIu64"\n",
4137	[INTEL_PT_CAP_USER_TIME_ZERO]	= "  Cap Time Zero       %"PRId64"\n",
4138	[INTEL_PT_TSC_BIT]		= "  TSC bit             %#"PRIx64"\n",
4139	[INTEL_PT_NORETCOMP_BIT]	= "  NoRETComp bit       %#"PRIx64"\n",
4140	[INTEL_PT_HAVE_SCHED_SWITCH]	= "  Have sched_switch   %"PRId64"\n",
4141	[INTEL_PT_SNAPSHOT_MODE]	= "  Snapshot mode       %"PRId64"\n",
4142	[INTEL_PT_PER_CPU_MMAPS]	= "  Per-cpu maps        %"PRId64"\n",
4143	[INTEL_PT_MTC_BIT]		= "  MTC bit             %#"PRIx64"\n",
4144	[INTEL_PT_MTC_FREQ_BITS]	= "  MTC freq bits       %#"PRIx64"\n",
4145	[INTEL_PT_TSC_CTC_N]		= "  TSC:CTC numerator   %"PRIu64"\n",
4146	[INTEL_PT_TSC_CTC_D]		= "  TSC:CTC denominator %"PRIu64"\n",
4147	[INTEL_PT_CYC_BIT]		= "  CYC bit             %#"PRIx64"\n",
4148	[INTEL_PT_MAX_NONTURBO_RATIO]	= "  Max non-turbo ratio %"PRIu64"\n",
4149	[INTEL_PT_FILTER_STR_LEN]	= "  Filter string len.  %"PRIu64"\n",
4150};
4151
4152static void intel_pt_print_info(__u64 *arr, int start, int finish)
4153{
4154	int i;
4155
4156	if (!dump_trace)
4157		return;
4158
4159	for (i = start; i <= finish; i++) {
4160		const char *fmt = intel_pt_info_fmts[i];
4161
4162		if (fmt)
4163			fprintf(stdout, fmt, arr[i]);
4164	}
4165}
4166
4167static void intel_pt_print_info_str(const char *name, const char *str)
4168{
4169	if (!dump_trace)
4170		return;
4171
4172	fprintf(stdout, "  %-20s%s\n", name, str ? str : "");
4173}
4174
4175static bool intel_pt_has(struct perf_record_auxtrace_info *auxtrace_info, int pos)
4176{
4177	return auxtrace_info->header.size >=
4178		sizeof(struct perf_record_auxtrace_info) + (sizeof(u64) * (pos + 1));
4179}
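
/*
 * intel_pt_has() makes the priv[] array effectively versioned: data recorded
 * by an older perf has a smaller header.size, so optional fields such as the
 * CYC bit or the max non-turbo ratio are only read (below) when the header is
 * large enough to contain them.
 */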
4180
4181int intel_pt_process_auxtrace_info(union perf_event *event,
4182				   struct perf_session *session)
4183{
4184	struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
4185	size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
4186	struct intel_pt *pt;
4187	void *info_end;
4188	__u64 *info;
4189	int err;
4190
4191	if (auxtrace_info->header.size < sizeof(struct perf_record_auxtrace_info) +
4192					min_sz)
4193		return -EINVAL;
4194
4195	pt = zalloc(sizeof(struct intel_pt));
4196	if (!pt)
4197		return -ENOMEM;
4198
4199	pt->vmcs_info = RB_ROOT;
4200
4201	addr_filters__init(&pt->filts);
4202
4203	err = perf_config(intel_pt_perf_config, pt);
4204	if (err)
4205		goto err_free;
4206
4207	err = auxtrace_queues__init(&pt->queues);
4208	if (err)
4209		goto err_free;
4210
4211	if (session->itrace_synth_opts->set) {
4212		pt->synth_opts = *session->itrace_synth_opts;
4213	} else {
4214		struct itrace_synth_opts *opts = session->itrace_synth_opts;
4215
4216		itrace_synth_opts__set_default(&pt->synth_opts, opts->default_no_sample);
4217		if (!opts->default_no_sample && !opts->inject) {
4218			pt->synth_opts.branches = false;
4219			pt->synth_opts.callchain = true;
4220			pt->synth_opts.add_callchain = true;
4221		}
4222		pt->synth_opts.thread_stack = opts->thread_stack;
4223	}
4224
4225	if (!(pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_USE_STDOUT))
4226		intel_pt_log_set_name(INTEL_PT_PMU_NAME);
4227
4228	pt->session = session;
4229	pt->machine = &session->machines.host; /* No kvm support */
4230	pt->auxtrace_type = auxtrace_info->type;
4231	pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
4232	pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
4233	pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
4234	pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
4235	pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
4236	pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
4237	pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
4238	pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
4239	pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
4240	pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
4241	intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
4242			    INTEL_PT_PER_CPU_MMAPS);
4243
4244	if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) {
4245		pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
4246		pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
4247		pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
4248		pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
4249		pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
4250		intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
4251				    INTEL_PT_CYC_BIT);
4252	}
4253
4254	if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) {
4255		pt->max_non_turbo_ratio =
4256			auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO];
4257		intel_pt_print_info(&auxtrace_info->priv[0],
4258				    INTEL_PT_MAX_NONTURBO_RATIO,
4259				    INTEL_PT_MAX_NONTURBO_RATIO);
4260	}
4261
4262	info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;
4263	info_end = (void *)auxtrace_info + auxtrace_info->header.size;
4264
4265	if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) {
4266		size_t len;
4267
4268		len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN];
4269		intel_pt_print_info(&auxtrace_info->priv[0],
4270				    INTEL_PT_FILTER_STR_LEN,
4271				    INTEL_PT_FILTER_STR_LEN);
4272		if (len) {
4273			const char *filter = (const char *)info;
4274
4275			len = roundup(len + 1, 8);
4276			info += len >> 3;
4277			if ((void *)info > info_end) {
4278				pr_err("%s: bad filter string length\n", __func__);
4279				err = -EINVAL;
4280				goto err_free_queues;
4281			}
4282			pt->filter = memdup(filter, len);
4283			if (!pt->filter) {
4284				err = -ENOMEM;
4285				goto err_free_queues;
4286			}
4287			if (session->header.needs_swap)
4288				mem_bswap_64(pt->filter, len);
4289			if (pt->filter[len - 1]) {
4290				pr_err("%s: filter string not null terminated\n", __func__);
4291				err = -EINVAL;
4292				goto err_free_queues;
4293			}
4294			err = addr_filters__parse_bare_filter(&pt->filts,
4295							      filter);
4296			if (err)
4297				goto err_free_queues;
4298		}
4299		intel_pt_print_info_str("Filter string", pt->filter);
4300	}
4301
4302	if ((void *)info < info_end) {
4303		pt->cap_event_trace = *info++;
4304		if (dump_trace)
4305			fprintf(stdout, "  Cap Event Trace     %d\n",
4306				pt->cap_event_trace);
4307	}
4308
4309	pt->timeless_decoding = intel_pt_timeless_decoding(pt);
4310	if (pt->timeless_decoding && !pt->tc.time_mult)
4311		pt->tc.time_mult = 1;
4312	pt->have_tsc = intel_pt_have_tsc(pt);
4313	pt->sampling_mode = intel_pt_sampling_mode(pt);
4314	pt->est_tsc = !pt->timeless_decoding;
4315
4316	if (pt->synth_opts.vm_time_correlation) {
4317		if (pt->timeless_decoding) {
4318			pr_err("Intel PT has no time information for VM Time Correlation\n");
4319			err = -EINVAL;
4320			goto err_free_queues;
4321		}
4322		if (session->itrace_synth_opts->ptime_range) {
4323			pr_err("Time ranges cannot be specified with VM Time Correlation\n");
4324			err = -EINVAL;
4325			goto err_free_queues;
4326		}
4327		/* Currently TSC Offset is calculated using MTC packets */
4328		if (!intel_pt_have_mtc(pt)) {
4329			pr_err("MTC packets must have been enabled for VM Time Correlation\n");
4330			err = -EINVAL;
4331			goto err_free_queues;
4332		}
4333		err = intel_pt_parse_vm_tm_corr_args(pt);
4334		if (err)
4335			goto err_free_queues;
4336	}
4337
4338	pt->unknown_thread = thread__new(999999999, 999999999);
4339	if (!pt->unknown_thread) {
4340		err = -ENOMEM;
4341		goto err_free_queues;
4342	}
4343
4344	err = thread__set_comm(pt->unknown_thread, "unknown", 0);
4345	if (err)
4346		goto err_delete_thread;
4347	if (thread__init_maps(pt->unknown_thread, pt->machine)) {
4348		err = -ENOMEM;
4349		goto err_delete_thread;
4350	}
4351
4352	pt->auxtrace.process_event = intel_pt_process_event;
4353	pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
4354	pt->auxtrace.queue_data = intel_pt_queue_data;
4355	pt->auxtrace.dump_auxtrace_sample = intel_pt_dump_sample;
4356	pt->auxtrace.flush_events = intel_pt_flush;
4357	pt->auxtrace.free_events = intel_pt_free_events;
4358	pt->auxtrace.free = intel_pt_free;
4359	pt->auxtrace.evsel_is_auxtrace = intel_pt_evsel_is_auxtrace;
4360	session->auxtrace = &pt->auxtrace;
4361
4362	if (dump_trace)
4363		return 0;
4364
4365	if (pt->have_sched_switch == 1) {
4366		pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
4367		if (!pt->switch_evsel) {
4368			pr_err("%s: missing sched_switch event\n", __func__);
4369			err = -EINVAL;
4370			goto err_delete_thread;
4371		}
4372	} else if (pt->have_sched_switch == 2 &&
4373		   !intel_pt_find_switch(session->evlist)) {
4374		pr_err("%s: missing context_switch attribute flag\n", __func__);
4375		err = -EINVAL;
4376		goto err_delete_thread;
4377	}
4378
4379	if (pt->synth_opts.log) {
4380		bool log_on_error = pt->synth_opts.log_plus_flags & AUXTRACE_LOG_FLG_ON_ERROR;
4381		unsigned int log_on_error_size = pt->synth_opts.log_on_error_size;
4382
4383		intel_pt_log_enable(log_on_error, log_on_error_size);
4384	}
4385
4386	/* Maximum non-turbo ratio is TSC freq / 100 MHz */
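	/*
	 * For example (illustrative): a 3 GHz TSC gives tsc_freq 3000000000,
	 * so max_non_turbo_ratio = (3000000000 + 50000000) / 100000000 = 30,
	 * and cbr2khz = 3000000000 / 30 / 1000 = 100000, i.e. each core bus
	 * ratio step corresponds to 100 MHz.
	 */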
4387	if (pt->tc.time_mult) {
4388		u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);
4389
4390		if (!pt->max_non_turbo_ratio)
4391			pt->max_non_turbo_ratio =
4392					(tsc_freq + 50000000) / 100000000;
4393		intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
4394		intel_pt_log("Maximum non-turbo ratio %u\n",
4395			     pt->max_non_turbo_ratio);
4396		pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000;
4397	}
4398
4399	err = intel_pt_setup_time_ranges(pt, session->itrace_synth_opts);
4400	if (err)
4401		goto err_delete_thread;
4402
4403	if (pt->synth_opts.calls)
4404		pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
4405				       PERF_IP_FLAG_TRACE_END;
4406	if (pt->synth_opts.returns)
4407		pt->branches_filter |= PERF_IP_FLAG_RETURN |
4408				       PERF_IP_FLAG_TRACE_BEGIN;
4409
4410	if ((pt->synth_opts.callchain || pt->synth_opts.add_callchain) &&
4411	    !symbol_conf.use_callchain) {
4412		symbol_conf.use_callchain = true;
4413		if (callchain_register_param(&callchain_param) < 0) {
4414			symbol_conf.use_callchain = false;
4415			pt->synth_opts.callchain = false;
4416			pt->synth_opts.add_callchain = false;
4417		}
4418	}
4419
4420	if (pt->synth_opts.add_callchain) {
4421		err = intel_pt_callchain_init(pt);
4422		if (err)
4423			goto err_delete_thread;
4424	}
4425
4426	if (pt->synth_opts.last_branch || pt->synth_opts.add_last_branch) {
4427		pt->br_stack_sz = pt->synth_opts.last_branch_sz;
4428		pt->br_stack_sz_plus = pt->br_stack_sz;
4429	}
4430
4431	if (pt->synth_opts.add_last_branch) {
4432		err = intel_pt_br_stack_init(pt);
4433		if (err)
4434			goto err_delete_thread;
4435		/*
4436		 * Additional branch stack size to cater for tracing from the
4437		 * actual sample ip to where the sample time is recorded.
4438		 * Measured at about 200 branches, but generously set to 1024.
4439		 * If kernel space is not being traced, then add just 1 for the
4440		 * branch to kernel space.
4441		 */
4442		if (intel_pt_tracing_kernel(pt))
4443			pt->br_stack_sz_plus += 1024;
4444		else
4445			pt->br_stack_sz_plus += 1;
4446	}
4447
4448	pt->use_thread_stack = pt->synth_opts.callchain ||
4449			       pt->synth_opts.add_callchain ||
4450			       pt->synth_opts.thread_stack ||
4451			       pt->synth_opts.last_branch ||
4452			       pt->synth_opts.add_last_branch;
4453
4454	pt->callstack = pt->synth_opts.callchain ||
4455			pt->synth_opts.add_callchain ||
4456			pt->synth_opts.thread_stack;
4457
4458	err = intel_pt_synth_events(pt, session);
4459	if (err)
4460		goto err_delete_thread;
4461
4462	intel_pt_setup_pebs_events(pt);
4463
4464	if (perf_data__is_pipe(session->data)) {
4465		pr_warning("WARNING: Intel PT with pipe mode is not recommended.\n"
4466			   "         The output cannot be relied upon.  In particular,\n"
4467			   "         timestamps and the order of events may be incorrect.\n");
4468	}
4469
4470	if (pt->sampling_mode || list_empty(&session->auxtrace_index))
4471		err = auxtrace_queue_data(session, true, true);
4472	else
4473		err = auxtrace_queues__process_index(&pt->queues, session);
4474	if (err)
4475		goto err_delete_thread;
4476
4477	if (pt->queues.populated)
4478		pt->data_queued = true;
4479
4480	if (pt->timeless_decoding)
4481		pr_debug2("Intel PT decoding without timestamps\n");
4482
4483	return 0;
4484
4485err_delete_thread:
4486	zfree(&pt->chain);
4487	thread__zput(pt->unknown_thread);
4488err_free_queues:
4489	intel_pt_log_disable();
4490	auxtrace_queues__free(&pt->queues);
4491	session->auxtrace = NULL;
4492err_free:
4493	addr_filters__exit(&pt->filts);
4494	zfree(&pt->filter);
4495	zfree(&pt->time_ranges);
4496	free(pt);
4497	return err;
4498}