   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright(C) 2015-2018 Linaro Limited.
   4 *
   5 * Author: Tor Jeremiassen <tor@ti.com>
   6 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
   7 */
   8
   9#include <linux/bitops.h>
  10#include <linux/coresight-pmu.h>
  11#include <linux/err.h>
  12#include <linux/kernel.h>
  13#include <linux/log2.h>
  14#include <linux/types.h>
  15#include <linux/zalloc.h>
  16
  17#include <opencsd/ocsd_if_types.h>
  18#include <stdlib.h>
  19
  20#include "auxtrace.h"
  21#include "color.h"
  22#include "cs-etm.h"
  23#include "cs-etm-decoder/cs-etm-decoder.h"
  24#include "debug.h"
  25#include "dso.h"
  26#include "evlist.h"
  27#include "intlist.h"
  28#include "machine.h"
  29#include "map.h"
  30#include "perf.h"
  31#include "session.h"
  32#include "map_symbol.h"
  33#include "branch.h"
  34#include "symbol.h"
  35#include "tool.h"
  36#include "thread.h"
  37#include "thread-stack.h"
  38#include <tools/libc_compat.h>
  39#include "util/synthetic-events.h"
  40
  41struct cs_etm_auxtrace {
  42	struct auxtrace auxtrace;
  43	struct auxtrace_queues queues;
  44	struct auxtrace_heap heap;
  45	struct itrace_synth_opts synth_opts;
  46	struct perf_session *session;
  47	struct machine *machine;
  48	struct thread *unknown_thread;
  49
  50	u8 timeless_decoding;
  51	u8 snapshot_mode;
  52	u8 data_queued;
  53
  54	int num_cpu;
  55	u64 latest_kernel_timestamp;
  56	u32 auxtrace_type;
  57	u64 branches_sample_type;
  58	u64 branches_id;
  59	u64 instructions_sample_type;
  60	u64 instructions_sample_period;
  61	u64 instructions_id;
  62	u64 **metadata;
  63	unsigned int pmu_type;
  64};
  65
  66struct cs_etm_traceid_queue {
  67	u8 trace_chan_id;
  68	pid_t pid, tid;
  69	u64 period_instructions;
  70	size_t last_branch_pos;
  71	union perf_event *event_buf;
  72	struct thread *thread;
  73	struct branch_stack *last_branch;
  74	struct branch_stack *last_branch_rb;
  75	struct cs_etm_packet *prev_packet;
  76	struct cs_etm_packet *packet;
  77	struct cs_etm_packet_queue packet_queue;
  78};
  79
  80struct cs_etm_queue {
  81	struct cs_etm_auxtrace *etm;
  82	struct cs_etm_decoder *decoder;
  83	struct auxtrace_buffer *buffer;
  84	unsigned int queue_nr;
  85	u8 pending_timestamp_chan_id;
  86	u64 offset;
  87	const unsigned char *buf;
  88	size_t buf_len, buf_used;
  89	/* Conversion between traceID and index in traceid_queues array */
  90	struct intlist *traceid_queues_list;
  91	struct cs_etm_traceid_queue **traceid_queues;
  92};
  93
  94/* RB tree for quick conversion between traceID and metadata pointers */
  95static struct intlist *traceid_list;
  96
  97static int cs_etm__process_queues(struct cs_etm_auxtrace *etm);
  98static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
  99					   pid_t tid);
 100static int cs_etm__get_data_block(struct cs_etm_queue *etmq);
 101static int cs_etm__decode_data_block(struct cs_etm_queue *etmq);
 102
  103/* A PTM has ETMIDR[11:8] set to 0b0011 */
 104#define ETMIDR_PTM_VERSION 0x00000300
 105
 106/*
 107 * A struct auxtrace_heap_item only has a queue_nr and a timestamp to
  108 * work with.  One option is to modify the auxtrace_heap_XYZ() API or simply
  109 * encode the etm queue number as the upper 16 bits and the channel as
  110 * the lower 16 bits.
 111 */
 112#define TO_CS_QUEUE_NR(queue_nr, trace_chan_id)	\
 113		      (queue_nr << 16 | trace_chan_id)
 114#define TO_QUEUE_NR(cs_queue_nr) (cs_queue_nr >> 16)
 115#define TO_TRACE_CHAN_ID(cs_queue_nr) (cs_queue_nr & 0x0000ffff)
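/*
 * For example (illustrative values): TO_CS_QUEUE_NR(2, 0x10) yields
 * 0x20010, from which TO_QUEUE_NR() recovers queue number 2 and
 * TO_TRACE_CHAN_ID() recovers trace channel ID 0x10.
 */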
 116
 117static u32 cs_etm__get_v7_protocol_version(u32 etmidr)
 118{
 119	etmidr &= ETMIDR_PTM_VERSION;
 120
 121	if (etmidr == ETMIDR_PTM_VERSION)
 122		return CS_ETM_PROTO_PTM;
 123
 124	return CS_ETM_PROTO_ETMV3;
 125}
 126
 127static int cs_etm__get_magic(u8 trace_chan_id, u64 *magic)
 128{
 129	struct int_node *inode;
 130	u64 *metadata;
 131
 132	inode = intlist__find(traceid_list, trace_chan_id);
 133	if (!inode)
 134		return -EINVAL;
 135
 136	metadata = inode->priv;
 137	*magic = metadata[CS_ETM_MAGIC];
 138	return 0;
 139}
 140
 141int cs_etm__get_cpu(u8 trace_chan_id, int *cpu)
 142{
 143	struct int_node *inode;
 144	u64 *metadata;
 145
 146	inode = intlist__find(traceid_list, trace_chan_id);
 147	if (!inode)
 148		return -EINVAL;
 149
 150	metadata = inode->priv;
 151	*cpu = (int)metadata[CS_ETM_CPU];
 152	return 0;
 153}
 154
 155/*
  156 * The returned PID format is represented by two bits:
 157 *
 158 *   Bit ETM_OPT_CTXTID: CONTEXTIDR or CONTEXTIDR_EL1 is traced;
 159 *   Bit ETM_OPT_CTXTID2: CONTEXTIDR_EL2 is traced.
 160 *
 161 * It's possible that the two bits ETM_OPT_CTXTID and ETM_OPT_CTXTID2
 162 * are enabled at the same time when the session runs on an EL2 kernel.
  163 * This means both CONTEXTIDR_EL1 and CONTEXTIDR_EL2 will be
  164 * recorded in the trace data; the tool will selectively use
  165 * CONTEXTIDR_EL2 as the PID.
 166 */
 167int cs_etm__get_pid_fmt(u8 trace_chan_id, u64 *pid_fmt)
 168{
 169	struct int_node *inode;
 170	u64 *metadata, val;
 171
 172	inode = intlist__find(traceid_list, trace_chan_id);
 173	if (!inode)
 174		return -EINVAL;
 175
 176	metadata = inode->priv;
 177
 178	if (metadata[CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
 179		val = metadata[CS_ETM_ETMCR];
 180		/* CONTEXTIDR is traced */
 181		if (val & BIT(ETM_OPT_CTXTID))
 182			*pid_fmt = BIT(ETM_OPT_CTXTID);
 183	} else {
 184		val = metadata[CS_ETMV4_TRCCONFIGR];
 185		/* CONTEXTIDR_EL2 is traced */
 186		if (val & (BIT(ETM4_CFG_BIT_VMID) | BIT(ETM4_CFG_BIT_VMID_OPT)))
 187			*pid_fmt = BIT(ETM_OPT_CTXTID2);
 188		/* CONTEXTIDR_EL1 is traced */
 189		else if (val & BIT(ETM4_CFG_BIT_CTXTID))
 190			*pid_fmt = BIT(ETM_OPT_CTXTID);
 191	}
 192
 193	return 0;
 194}
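/*
 * For example, with ETMv4 metadata whose TRCCONFIGR has a VMID tracing
 * bit set, the function above sets *pid_fmt = BIT(ETM_OPT_CTXTID2),
 * i.e. the PID is taken from CONTEXTIDR_EL2; if only the CTXTID bit is
 * set it sets BIT(ETM_OPT_CTXTID), i.e. CONTEXTIDR/CONTEXTIDR_EL1.
 */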
 195
 196void cs_etm__etmq_set_traceid_queue_timestamp(struct cs_etm_queue *etmq,
 197					      u8 trace_chan_id)
 198{
 199	/*
 200	 * When a timestamp packet is encountered the backend code
 201	 * is stopped so that the front end has time to process packets
 202	 * that were accumulated in the traceID queue.  Since there can
 203	 * be more than one channel per cs_etm_queue, we need to specify
 204	 * what traceID queue needs servicing.
 205	 */
 206	etmq->pending_timestamp_chan_id = trace_chan_id;
 207}
 208
 209static u64 cs_etm__etmq_get_timestamp(struct cs_etm_queue *etmq,
 210				      u8 *trace_chan_id)
 211{
 212	struct cs_etm_packet_queue *packet_queue;
 213
 214	if (!etmq->pending_timestamp_chan_id)
 215		return 0;
 216
 217	if (trace_chan_id)
 218		*trace_chan_id = etmq->pending_timestamp_chan_id;
 219
 220	packet_queue = cs_etm__etmq_get_packet_queue(etmq,
 221						     etmq->pending_timestamp_chan_id);
 222	if (!packet_queue)
 223		return 0;
 224
 225	/* Acknowledge pending status */
 226	etmq->pending_timestamp_chan_id = 0;
 227
 228	/* See function cs_etm_decoder__do_{hard|soft}_timestamp() */
 229	return packet_queue->cs_timestamp;
 230}
 231
 232static void cs_etm__clear_packet_queue(struct cs_etm_packet_queue *queue)
 233{
 234	int i;
 235
 236	queue->head = 0;
 237	queue->tail = 0;
 238	queue->packet_count = 0;
 239	for (i = 0; i < CS_ETM_PACKET_MAX_BUFFER; i++) {
 240		queue->packet_buffer[i].isa = CS_ETM_ISA_UNKNOWN;
 241		queue->packet_buffer[i].start_addr = CS_ETM_INVAL_ADDR;
 242		queue->packet_buffer[i].end_addr = CS_ETM_INVAL_ADDR;
 243		queue->packet_buffer[i].instr_count = 0;
 244		queue->packet_buffer[i].last_instr_taken_branch = false;
 245		queue->packet_buffer[i].last_instr_size = 0;
 246		queue->packet_buffer[i].last_instr_type = 0;
 247		queue->packet_buffer[i].last_instr_subtype = 0;
 248		queue->packet_buffer[i].last_instr_cond = 0;
 249		queue->packet_buffer[i].flags = 0;
 250		queue->packet_buffer[i].exception_number = UINT32_MAX;
 251		queue->packet_buffer[i].trace_chan_id = UINT8_MAX;
 252		queue->packet_buffer[i].cpu = INT_MIN;
 253	}
 254}
 255
 256static void cs_etm__clear_all_packet_queues(struct cs_etm_queue *etmq)
 257{
 258	int idx;
 259	struct int_node *inode;
 260	struct cs_etm_traceid_queue *tidq;
 261	struct intlist *traceid_queues_list = etmq->traceid_queues_list;
 262
 263	intlist__for_each_entry(inode, traceid_queues_list) {
 264		idx = (int)(intptr_t)inode->priv;
 265		tidq = etmq->traceid_queues[idx];
 266		cs_etm__clear_packet_queue(&tidq->packet_queue);
 267	}
 268}
 269
 270static int cs_etm__init_traceid_queue(struct cs_etm_queue *etmq,
 271				      struct cs_etm_traceid_queue *tidq,
 272				      u8 trace_chan_id)
 273{
 274	int rc = -ENOMEM;
 275	struct auxtrace_queue *queue;
 276	struct cs_etm_auxtrace *etm = etmq->etm;
 277
 278	cs_etm__clear_packet_queue(&tidq->packet_queue);
 279
 280	queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
 281	tidq->tid = queue->tid;
 282	tidq->pid = -1;
 283	tidq->trace_chan_id = trace_chan_id;
 284
 285	tidq->packet = zalloc(sizeof(struct cs_etm_packet));
 286	if (!tidq->packet)
 287		goto out;
 288
 289	tidq->prev_packet = zalloc(sizeof(struct cs_etm_packet));
 290	if (!tidq->prev_packet)
 291		goto out_free;
 292
 293	if (etm->synth_opts.last_branch) {
 294		size_t sz = sizeof(struct branch_stack);
 295
 296		sz += etm->synth_opts.last_branch_sz *
 297		      sizeof(struct branch_entry);
 298		tidq->last_branch = zalloc(sz);
 299		if (!tidq->last_branch)
 300			goto out_free;
 301		tidq->last_branch_rb = zalloc(sz);
 302		if (!tidq->last_branch_rb)
 303			goto out_free;
 304	}
 305
 306	tidq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
 307	if (!tidq->event_buf)
 308		goto out_free;
 309
 310	return 0;
 311
 312out_free:
 313	zfree(&tidq->last_branch_rb);
 314	zfree(&tidq->last_branch);
 315	zfree(&tidq->prev_packet);
 316	zfree(&tidq->packet);
 317out:
 318	return rc;
 319}
 320
 321static struct cs_etm_traceid_queue
 322*cs_etm__etmq_get_traceid_queue(struct cs_etm_queue *etmq, u8 trace_chan_id)
 323{
 324	int idx;
 325	struct int_node *inode;
 326	struct intlist *traceid_queues_list;
 327	struct cs_etm_traceid_queue *tidq, **traceid_queues;
 328	struct cs_etm_auxtrace *etm = etmq->etm;
 329
 330	if (etm->timeless_decoding)
 331		trace_chan_id = CS_ETM_PER_THREAD_TRACEID;
 332
 333	traceid_queues_list = etmq->traceid_queues_list;
 334
 335	/*
  336	 * Check if the traceid_queue exists for this traceID by looking
 337	 * in the queue list.
 338	 */
 339	inode = intlist__find(traceid_queues_list, trace_chan_id);
 340	if (inode) {
 341		idx = (int)(intptr_t)inode->priv;
 342		return etmq->traceid_queues[idx];
 343	}
 344
 345	/* We couldn't find a traceid_queue for this traceID, allocate one */
 346	tidq = malloc(sizeof(*tidq));
 347	if (!tidq)
 348		return NULL;
 349
 350	memset(tidq, 0, sizeof(*tidq));
 351
 352	/* Get a valid index for the new traceid_queue */
 353	idx = intlist__nr_entries(traceid_queues_list);
  354	/* Memory for the inode is freed in cs_etm__free_traceid_queues() */
 355	inode = intlist__findnew(traceid_queues_list, trace_chan_id);
 356	if (!inode)
 357		goto out_free;
 358
 359	/* Associate this traceID with this index */
 360	inode->priv = (void *)(intptr_t)idx;
 361
 362	if (cs_etm__init_traceid_queue(etmq, tidq, trace_chan_id))
 363		goto out_free;
 364
 365	/* Grow the traceid_queues array by one unit */
 366	traceid_queues = etmq->traceid_queues;
 367	traceid_queues = reallocarray(traceid_queues,
 368				      idx + 1,
 369				      sizeof(*traceid_queues));
 370
 371	/*
 372	 * On failure reallocarray() returns NULL and the original block of
 373	 * memory is left untouched.
 374	 */
 375	if (!traceid_queues)
 376		goto out_free;
 377
 378	traceid_queues[idx] = tidq;
 379	etmq->traceid_queues = traceid_queues;
 380
 381	return etmq->traceid_queues[idx];
 382
 383out_free:
 384	/*
 385	 * Function intlist__remove() removes the inode from the list
  386	 * and deletes the memory associated with it.
 387	 */
 388	intlist__remove(traceid_queues_list, inode);
 389	free(tidq);
 390
 391	return NULL;
 392}
 393
 394struct cs_etm_packet_queue
 395*cs_etm__etmq_get_packet_queue(struct cs_etm_queue *etmq, u8 trace_chan_id)
 396{
 397	struct cs_etm_traceid_queue *tidq;
 398
 399	tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
 400	if (tidq)
 401		return &tidq->packet_queue;
 402
 403	return NULL;
 404}
 405
 406static void cs_etm__packet_swap(struct cs_etm_auxtrace *etm,
 407				struct cs_etm_traceid_queue *tidq)
 408{
 409	struct cs_etm_packet *tmp;
 410
 411	if (etm->synth_opts.branches || etm->synth_opts.last_branch ||
 412	    etm->synth_opts.instructions) {
 413		/*
 414		 * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
 415		 * the next incoming packet.
 416		 */
 417		tmp = tidq->packet;
 418		tidq->packet = tidq->prev_packet;
 419		tidq->prev_packet = tmp;
 420	}
 421}
 422
 423static void cs_etm__packet_dump(const char *pkt_string)
 424{
 425	const char *color = PERF_COLOR_BLUE;
 426	int len = strlen(pkt_string);
 427
 428	if (len && (pkt_string[len-1] == '\n'))
 429		color_fprintf(stdout, color, "	%s", pkt_string);
 430	else
 431		color_fprintf(stdout, color, "	%s\n", pkt_string);
 432
 433	fflush(stdout);
 434}
 435
 436static void cs_etm__set_trace_param_etmv3(struct cs_etm_trace_params *t_params,
 437					  struct cs_etm_auxtrace *etm, int idx,
 438					  u32 etmidr)
 439{
 440	u64 **metadata = etm->metadata;
 441
 442	t_params[idx].protocol = cs_etm__get_v7_protocol_version(etmidr);
 443	t_params[idx].etmv3.reg_ctrl = metadata[idx][CS_ETM_ETMCR];
 444	t_params[idx].etmv3.reg_trc_id = metadata[idx][CS_ETM_ETMTRACEIDR];
 445}
 446
 447static void cs_etm__set_trace_param_etmv4(struct cs_etm_trace_params *t_params,
 448					  struct cs_etm_auxtrace *etm, int idx)
 449{
 450	u64 **metadata = etm->metadata;
 451
 452	t_params[idx].protocol = CS_ETM_PROTO_ETMV4i;
 453	t_params[idx].etmv4.reg_idr0 = metadata[idx][CS_ETMV4_TRCIDR0];
 454	t_params[idx].etmv4.reg_idr1 = metadata[idx][CS_ETMV4_TRCIDR1];
 455	t_params[idx].etmv4.reg_idr2 = metadata[idx][CS_ETMV4_TRCIDR2];
 456	t_params[idx].etmv4.reg_idr8 = metadata[idx][CS_ETMV4_TRCIDR8];
 457	t_params[idx].etmv4.reg_configr = metadata[idx][CS_ETMV4_TRCCONFIGR];
 458	t_params[idx].etmv4.reg_traceidr = metadata[idx][CS_ETMV4_TRCTRACEIDR];
 459}
 460
 461static void cs_etm__set_trace_param_ete(struct cs_etm_trace_params *t_params,
 462					  struct cs_etm_auxtrace *etm, int idx)
 463{
 464	u64 **metadata = etm->metadata;
 465
 466	t_params[idx].protocol = CS_ETM_PROTO_ETE;
 467	t_params[idx].ete.reg_idr0 = metadata[idx][CS_ETMV4_TRCIDR0];
 468	t_params[idx].ete.reg_idr1 = metadata[idx][CS_ETMV4_TRCIDR1];
 469	t_params[idx].ete.reg_idr2 = metadata[idx][CS_ETMV4_TRCIDR2];
 470	t_params[idx].ete.reg_idr8 = metadata[idx][CS_ETMV4_TRCIDR8];
 471	t_params[idx].ete.reg_configr = metadata[idx][CS_ETMV4_TRCCONFIGR];
 472	t_params[idx].ete.reg_traceidr = metadata[idx][CS_ETMV4_TRCTRACEIDR];
 473	t_params[idx].ete.reg_devarch = metadata[idx][CS_ETE_TRCDEVARCH];
 474}
 475
 476static int cs_etm__init_trace_params(struct cs_etm_trace_params *t_params,
 477				     struct cs_etm_auxtrace *etm,
 478				     int decoders)
 479{
 480	int i;
 481	u32 etmidr;
 482	u64 architecture;
 483
 484	for (i = 0; i < decoders; i++) {
 485		architecture = etm->metadata[i][CS_ETM_MAGIC];
 486
 487		switch (architecture) {
 488		case __perf_cs_etmv3_magic:
 489			etmidr = etm->metadata[i][CS_ETM_ETMIDR];
 490			cs_etm__set_trace_param_etmv3(t_params, etm, i, etmidr);
 491			break;
 492		case __perf_cs_etmv4_magic:
 493			cs_etm__set_trace_param_etmv4(t_params, etm, i);
 494			break;
 495		case __perf_cs_ete_magic:
 496			cs_etm__set_trace_param_ete(t_params, etm, i);
 497			break;
 498		default:
 499			return -EINVAL;
 500		}
 501	}
 502
 503	return 0;
 504}
 505
 506static int cs_etm__init_decoder_params(struct cs_etm_decoder_params *d_params,
 507				       struct cs_etm_queue *etmq,
 508				       enum cs_etm_decoder_operation mode,
 509				       bool formatted)
 510{
 511	int ret = -EINVAL;
 512
 513	if (!(mode < CS_ETM_OPERATION_MAX))
 514		goto out;
 515
 516	d_params->packet_printer = cs_etm__packet_dump;
 517	d_params->operation = mode;
 518	d_params->data = etmq;
 519	d_params->formatted = formatted;
 520	d_params->fsyncs = false;
 521	d_params->hsyncs = false;
 522	d_params->frame_aligned = true;
 523
 524	ret = 0;
 525out:
 526	return ret;
 527}
 528
 529static void cs_etm__dump_event(struct cs_etm_queue *etmq,
 530			       struct auxtrace_buffer *buffer)
 531{
 532	int ret;
 533	const char *color = PERF_COLOR_BLUE;
 534	size_t buffer_used = 0;
 535
 536	fprintf(stdout, "\n");
 537	color_fprintf(stdout, color,
 538		     ". ... CoreSight %s Trace data: size %#zx bytes\n",
 539		     cs_etm_decoder__get_name(etmq->decoder), buffer->size);
 540
 541	do {
 542		size_t consumed;
 543
 544		ret = cs_etm_decoder__process_data_block(
 545				etmq->decoder, buffer->offset,
 546				&((u8 *)buffer->data)[buffer_used],
 547				buffer->size - buffer_used, &consumed);
 548		if (ret)
 549			break;
 550
 551		buffer_used += consumed;
 552	} while (buffer_used < buffer->size);
 553
 554	cs_etm_decoder__reset(etmq->decoder);
 555}
 556
 557static int cs_etm__flush_events(struct perf_session *session,
 558				struct perf_tool *tool)
 559{
 560	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
 561						   struct cs_etm_auxtrace,
 562						   auxtrace);
 563	if (dump_trace)
 564		return 0;
 565
 566	if (!tool->ordered_events)
 567		return -EINVAL;
 568
 569	if (etm->timeless_decoding)
 570		return cs_etm__process_timeless_queues(etm, -1);
 571
 572	return cs_etm__process_queues(etm);
 573}
 574
 575static void cs_etm__free_traceid_queues(struct cs_etm_queue *etmq)
 576{
 577	int idx;
 578	uintptr_t priv;
 579	struct int_node *inode, *tmp;
 580	struct cs_etm_traceid_queue *tidq;
 581	struct intlist *traceid_queues_list = etmq->traceid_queues_list;
 582
 583	intlist__for_each_entry_safe(inode, tmp, traceid_queues_list) {
 584		priv = (uintptr_t)inode->priv;
 585		idx = priv;
 586
 587		/* Free this traceid_queue from the array */
 588		tidq = etmq->traceid_queues[idx];
 589		thread__zput(tidq->thread);
 590		zfree(&tidq->event_buf);
 591		zfree(&tidq->last_branch);
 592		zfree(&tidq->last_branch_rb);
 593		zfree(&tidq->prev_packet);
 594		zfree(&tidq->packet);
 595		zfree(&tidq);
 596
 597		/*
 598		 * Function intlist__remove() removes the inode from the list
  599		 * and deletes the memory associated with it.
 600		 */
 601		intlist__remove(traceid_queues_list, inode);
 602	}
 603
 604	/* Then the RB tree itself */
 605	intlist__delete(traceid_queues_list);
 606	etmq->traceid_queues_list = NULL;
 607
 608	/* finally free the traceid_queues array */
 609	zfree(&etmq->traceid_queues);
 610}
 611
 612static void cs_etm__free_queue(void *priv)
 613{
 614	struct cs_etm_queue *etmq = priv;
 615
 616	if (!etmq)
 617		return;
 618
 619	cs_etm_decoder__free(etmq->decoder);
 620	cs_etm__free_traceid_queues(etmq);
 621	free(etmq);
 622}
 623
 624static void cs_etm__free_events(struct perf_session *session)
 625{
 626	unsigned int i;
 627	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
 628						   struct cs_etm_auxtrace,
 629						   auxtrace);
 630	struct auxtrace_queues *queues = &aux->queues;
 631
 632	for (i = 0; i < queues->nr_queues; i++) {
 633		cs_etm__free_queue(queues->queue_array[i].priv);
 634		queues->queue_array[i].priv = NULL;
 635	}
 636
 637	auxtrace_queues__free(queues);
 638}
 639
 640static void cs_etm__free(struct perf_session *session)
 641{
 642	int i;
 643	struct int_node *inode, *tmp;
 644	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
 645						   struct cs_etm_auxtrace,
 646						   auxtrace);
 647	cs_etm__free_events(session);
 648	session->auxtrace = NULL;
 649
 650	/* First remove all traceID/metadata nodes for the RB tree */
 651	intlist__for_each_entry_safe(inode, tmp, traceid_list)
 652		intlist__remove(traceid_list, inode);
 653	/* Then the RB tree itself */
 654	intlist__delete(traceid_list);
 655
 656	for (i = 0; i < aux->num_cpu; i++)
 657		zfree(&aux->metadata[i]);
 658
 659	thread__zput(aux->unknown_thread);
 660	zfree(&aux->metadata);
 661	zfree(&aux);
 662}
 663
 664static bool cs_etm__evsel_is_auxtrace(struct perf_session *session,
 665				      struct evsel *evsel)
 666{
 667	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
 668						   struct cs_etm_auxtrace,
 669						   auxtrace);
 670
 671	return evsel->core.attr.type == aux->pmu_type;
 672}
 673
 674static u8 cs_etm__cpu_mode(struct cs_etm_queue *etmq, u64 address)
 675{
 676	struct machine *machine;
 677
 678	machine = etmq->etm->machine;
 679
 680	if (address >= machine__kernel_start(machine)) {
 681		if (machine__is_host(machine))
 682			return PERF_RECORD_MISC_KERNEL;
 683		else
 684			return PERF_RECORD_MISC_GUEST_KERNEL;
 685	} else {
 686		if (machine__is_host(machine))
 687			return PERF_RECORD_MISC_USER;
 688		else if (perf_guest)
 689			return PERF_RECORD_MISC_GUEST_USER;
 690		else
 691			return PERF_RECORD_MISC_HYPERVISOR;
 692	}
 693}
 694
 695static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u8 trace_chan_id,
 696			      u64 address, size_t size, u8 *buffer)
 697{
 698	u8  cpumode;
 699	u64 offset;
 700	int len;
 701	struct thread *thread;
 702	struct machine *machine;
 703	struct addr_location al;
 704	struct cs_etm_traceid_queue *tidq;
 705
 706	if (!etmq)
 707		return 0;
 708
 709	machine = etmq->etm->machine;
 710	cpumode = cs_etm__cpu_mode(etmq, address);
 711	tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
 712	if (!tidq)
 713		return 0;
 714
 715	thread = tidq->thread;
 716	if (!thread) {
 717		if (cpumode != PERF_RECORD_MISC_KERNEL)
 718			return 0;
 719		thread = etmq->etm->unknown_thread;
 720	}
 721
 722	if (!thread__find_map(thread, cpumode, address, &al) || !al.map->dso)
 723		return 0;
 724
 725	if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
 726	    dso__data_status_seen(al.map->dso, DSO_DATA_STATUS_SEEN_ITRACE))
 727		return 0;
 728
 729	offset = al.map->map_ip(al.map, address);
 730
 731	map__load(al.map);
 732
 733	len = dso__data_read_offset(al.map->dso, machine, offset, buffer, size);
 734
 735	if (len <= 0) {
 736		ui__warning_once("CS ETM Trace: Missing DSO. Use 'perf archive' or debuginfod to export data from the traced system.\n"
 737				 "              Enable CONFIG_PROC_KCORE or use option '-k /path/to/vmlinux' for kernel symbols.\n");
 738		if (!al.map->dso->auxtrace_warned) {
 739			pr_err("CS ETM Trace: Debug data not found for address %#"PRIx64" in %s\n",
 740				    address,
 741				    al.map->dso->long_name ? al.map->dso->long_name : "Unknown");
 742			al.map->dso->auxtrace_warned = true;
 743		}
 744		return 0;
 745	}
 746
 747	return len;
 748}
 749
 750static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
 751						bool formatted)
 752{
 753	struct cs_etm_decoder_params d_params;
 754	struct cs_etm_trace_params  *t_params = NULL;
 755	struct cs_etm_queue *etmq;
 756	/*
 757	 * Each queue can only contain data from one CPU when unformatted, so only one decoder is
 758	 * needed.
 759	 */
 760	int decoders = formatted ? etm->num_cpu : 1;
 761
 762	etmq = zalloc(sizeof(*etmq));
 763	if (!etmq)
 764		return NULL;
 765
 766	etmq->traceid_queues_list = intlist__new(NULL);
 767	if (!etmq->traceid_queues_list)
 768		goto out_free;
 769
 770	/* Use metadata to fill in trace parameters for trace decoder */
 771	t_params = zalloc(sizeof(*t_params) * decoders);
 772
 773	if (!t_params)
 774		goto out_free;
 775
 776	if (cs_etm__init_trace_params(t_params, etm, decoders))
 777		goto out_free;
 778
 779	/* Set decoder parameters to decode trace packets */
 780	if (cs_etm__init_decoder_params(&d_params, etmq,
 781					dump_trace ? CS_ETM_OPERATION_PRINT :
 782						     CS_ETM_OPERATION_DECODE,
 783					formatted))
 784		goto out_free;
 785
 786	etmq->decoder = cs_etm_decoder__new(decoders, &d_params,
 787					    t_params);
 788
 789	if (!etmq->decoder)
 790		goto out_free;
 791
 792	/*
 793	 * Register a function to handle all memory accesses required by
 794	 * the trace decoder library.
 795	 */
 796	if (cs_etm_decoder__add_mem_access_cb(etmq->decoder,
 797					      0x0L, ((u64) -1L),
 798					      cs_etm__mem_access))
 799		goto out_free_decoder;
 800
 801	zfree(&t_params);
 802	return etmq;
 803
 804out_free_decoder:
 805	cs_etm_decoder__free(etmq->decoder);
 806out_free:
 807	intlist__delete(etmq->traceid_queues_list);
 808	free(etmq);
 809
 810	return NULL;
 811}
 812
 813static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
 814			       struct auxtrace_queue *queue,
 815			       unsigned int queue_nr,
 816			       bool formatted)
 817{
 818	struct cs_etm_queue *etmq = queue->priv;
 819
 820	if (list_empty(&queue->head) || etmq)
 821		return 0;
 822
 823	etmq = cs_etm__alloc_queue(etm, formatted);
 824
 825	if (!etmq)
 826		return -ENOMEM;
 827
 828	queue->priv = etmq;
 829	etmq->etm = etm;
 830	etmq->queue_nr = queue_nr;
 831	etmq->offset = 0;
 832
 833	return 0;
 834}
 835
 836static int cs_etm__queue_first_cs_timestamp(struct cs_etm_auxtrace *etm,
 837					    struct cs_etm_queue *etmq,
 838					    unsigned int queue_nr)
 839{
 840	int ret = 0;
 841	unsigned int cs_queue_nr;
 842	u8 trace_chan_id;
 843	u64 cs_timestamp;
 844
 845	/*
 846	 * We are under a CPU-wide trace scenario.  As such we need to know
 847	 * when the code that generated the traces started to execute so that
 848	 * it can be correlated with execution on other CPUs.  So we get a
 849	 * handle on the beginning of traces and decode until we find a
 850	 * timestamp.  The timestamp is then added to the auxtrace min heap
 851	 * in order to know what nibble (of all the etmqs) to decode first.
 852	 */
 853	while (1) {
 854		/*
 855		 * Fetch an aux_buffer from this etmq.  Bail if no more
 856		 * blocks or an error has been encountered.
 857		 */
 858		ret = cs_etm__get_data_block(etmq);
 859		if (ret <= 0)
 860			goto out;
 861
 862		/*
 863		 * Run decoder on the trace block.  The decoder will stop when
 864		 * encountering a CS timestamp, a full packet queue or the end of
 865		 * trace for that block.
 866		 */
 867		ret = cs_etm__decode_data_block(etmq);
 868		if (ret)
 869			goto out;
 870
 871		/*
 872		 * Function cs_etm_decoder__do_{hard|soft}_timestamp() does all
 873		 * the timestamp calculation for us.
 874		 */
 875		cs_timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);
 876
 877		/* We found a timestamp, no need to continue. */
 878		if (cs_timestamp)
 879			break;
 880
 881		/*
 882		 * We didn't find a timestamp so empty all the traceid packet
 883		 * queues before looking for another timestamp packet, either
 884		 * in the current data block or a new one.  Packets that were
 885		 * just decoded are useless since no timestamp has been
 886		 * associated with them.  As such simply discard them.
 887		 */
 888		cs_etm__clear_all_packet_queues(etmq);
 889	}
 890
 891	/*
 892	 * We have a timestamp.  Add it to the min heap to reflect when
 893	 * instructions conveyed by the range packets of this traceID queue
 894	 * started to execute.  Once the same has been done for all the traceID
  895	 * queues of each etmq, rendering and decoding can start in
 896	 * chronological order.
 897	 *
 898	 * Note that packets decoded above are still in the traceID's packet
 899	 * queue and will be processed in cs_etm__process_queues().
 900	 */
 901	cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
 902	ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
 903out:
 904	return ret;
 905}
 906
 907static inline
 908void cs_etm__copy_last_branch_rb(struct cs_etm_queue *etmq,
 909				 struct cs_etm_traceid_queue *tidq)
 910{
 911	struct branch_stack *bs_src = tidq->last_branch_rb;
 912	struct branch_stack *bs_dst = tidq->last_branch;
 913	size_t nr = 0;
 914
 915	/*
 916	 * Set the number of records before early exit: ->nr is used to
 917	 * determine how many branches to copy from ->entries.
 918	 */
 919	bs_dst->nr = bs_src->nr;
 920
 921	/*
 922	 * Early exit when there is nothing to copy.
 923	 */
 924	if (!bs_src->nr)
 925		return;
 926
 927	/*
 928	 * As bs_src->entries is a circular buffer, we need to copy from it in
 929	 * two steps.  First, copy the branches from the most recently inserted
 930	 * branch ->last_branch_pos until the end of bs_src->entries buffer.
 931	 */
 932	nr = etmq->etm->synth_opts.last_branch_sz - tidq->last_branch_pos;
 933	memcpy(&bs_dst->entries[0],
 934	       &bs_src->entries[tidq->last_branch_pos],
 935	       sizeof(struct branch_entry) * nr);
 936
 937	/*
 938	 * If we wrapped around at least once, the branches from the beginning
 939	 * of the bs_src->entries buffer and until the ->last_branch_pos element
 940	 * are older valid branches: copy them over.  The total number of
 941	 * branches copied over will be equal to the number of branches asked by
 942	 * the user in last_branch_sz.
 943	 */
 944	if (bs_src->nr >= etmq->etm->synth_opts.last_branch_sz) {
 945		memcpy(&bs_dst->entries[nr],
 946		       &bs_src->entries[0],
 947		       sizeof(struct branch_entry) * tidq->last_branch_pos);
 948	}
 949}
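/*
 * Worked example of the two-step copy above (illustrative numbers): with
 * last_branch_sz = 4 and last_branch_pos = 1 after the ring has wrapped,
 * the first memcpy() moves entries[1..3] (newest first) into
 * bs_dst->entries[0..2] and the second moves entries[0] (the oldest) into
 * bs_dst->entries[3], so the flat copy is ordered from the most recent
 * branch to the oldest.
 */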
 950
 951static inline
 952void cs_etm__reset_last_branch_rb(struct cs_etm_traceid_queue *tidq)
 953{
 954	tidq->last_branch_pos = 0;
 955	tidq->last_branch_rb->nr = 0;
 956}
 957
 958static inline int cs_etm__t32_instr_size(struct cs_etm_queue *etmq,
 959					 u8 trace_chan_id, u64 addr)
 960{
 961	u8 instrBytes[2];
 962
 963	cs_etm__mem_access(etmq, trace_chan_id, addr,
 964			   ARRAY_SIZE(instrBytes), instrBytes);
 965	/*
 966	 * T32 instruction size is indicated by bits[15:11] of the first
 967	 * 16-bit word of the instruction: 0b11101, 0b11110 and 0b11111
 968	 * denote a 32-bit instruction.
 969	 */
 970	return ((instrBytes[1] & 0xF8) >= 0xE8) ? 4 : 2;
 971}
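/*
 * For example (assuming the usual little-endian instruction storage):
 * a 32-bit BL whose first halfword is 0xF000 yields instrBytes[1] = 0xF0,
 * and 0xF0 & 0xF8 = 0xF0 >= 0xE8, so the function above returns 4; a
 * 16-bit 'MOVS r0, #0' (0x2000) yields instrBytes[1] = 0x20, which fails
 * the test, so it returns 2.
 */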
 972
 973static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet)
 974{
 975	/* Returns 0 for the CS_ETM_DISCONTINUITY packet */
 976	if (packet->sample_type == CS_ETM_DISCONTINUITY)
 977		return 0;
 978
 979	return packet->start_addr;
 980}
 981
 982static inline
 983u64 cs_etm__last_executed_instr(const struct cs_etm_packet *packet)
 984{
 985	/* Returns 0 for the CS_ETM_DISCONTINUITY packet */
 986	if (packet->sample_type == CS_ETM_DISCONTINUITY)
 987		return 0;
 988
 989	return packet->end_addr - packet->last_instr_size;
 990}
 991
 992static inline u64 cs_etm__instr_addr(struct cs_etm_queue *etmq,
 993				     u64 trace_chan_id,
 994				     const struct cs_etm_packet *packet,
 995				     u64 offset)
 996{
 997	if (packet->isa == CS_ETM_ISA_T32) {
 998		u64 addr = packet->start_addr;
 999
1000		while (offset) {
1001			addr += cs_etm__t32_instr_size(etmq,
1002						       trace_chan_id, addr);
1003			offset--;
1004		}
1005		return addr;
1006	}
1007
1008	/* Assume a 4 byte instruction size (A32/A64) */
1009	return packet->start_addr + offset * 4;
1010}
1011
1012static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq,
1013					  struct cs_etm_traceid_queue *tidq)
1014{
1015	struct branch_stack *bs = tidq->last_branch_rb;
1016	struct branch_entry *be;
1017
1018	/*
1019	 * The branches are recorded in a circular buffer in reverse
1020	 * chronological order: we start recording from the last element of the
1021	 * buffer down.  After writing the first element of the stack, move the
1022	 * insert position back to the end of the buffer.
1023	 */
1024	if (!tidq->last_branch_pos)
1025		tidq->last_branch_pos = etmq->etm->synth_opts.last_branch_sz;
1026
1027	tidq->last_branch_pos -= 1;
1028
1029	be       = &bs->entries[tidq->last_branch_pos];
1030	be->from = cs_etm__last_executed_instr(tidq->prev_packet);
1031	be->to	 = cs_etm__first_executed_instr(tidq->packet);
1032	/* No support for mispredict */
1033	be->flags.mispred = 0;
1034	be->flags.predicted = 1;
1035
1036	/*
1037	 * Increment bs->nr until reaching the number of last branches asked by
1038	 * the user on the command line.
1039	 */
1040	if (bs->nr < etmq->etm->synth_opts.last_branch_sz)
1041		bs->nr += 1;
1042}
1043
1044static int cs_etm__inject_event(union perf_event *event,
1045			       struct perf_sample *sample, u64 type)
1046{
1047	event->header.size = perf_event__sample_event_size(sample, type, 0);
1048	return perf_event__synthesize_sample(event, type, 0, sample);
1049}
1050
1051
1052static int
1053cs_etm__get_trace(struct cs_etm_queue *etmq)
1054{
1055	struct auxtrace_buffer *aux_buffer = etmq->buffer;
1056	struct auxtrace_buffer *old_buffer = aux_buffer;
1057	struct auxtrace_queue *queue;
1058
1059	queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
1060
1061	aux_buffer = auxtrace_buffer__next(queue, aux_buffer);
1062
1063	/* If no more data, drop the previous auxtrace_buffer and return */
1064	if (!aux_buffer) {
1065		if (old_buffer)
1066			auxtrace_buffer__drop_data(old_buffer);
1067		etmq->buf_len = 0;
1068		return 0;
1069	}
1070
1071	etmq->buffer = aux_buffer;
1072
1073	/* If the aux_buffer doesn't have data associated, try to load it */
1074	if (!aux_buffer->data) {
1075		/* get the file desc associated with the perf data file */
1076		int fd = perf_data__fd(etmq->etm->session->data);
1077
1078		aux_buffer->data = auxtrace_buffer__get_data(aux_buffer, fd);
1079		if (!aux_buffer->data)
1080			return -ENOMEM;
1081	}
1082
1083	/* If valid, drop the previous buffer */
1084	if (old_buffer)
1085		auxtrace_buffer__drop_data(old_buffer);
1086
1087	etmq->buf_used = 0;
1088	etmq->buf_len = aux_buffer->size;
1089	etmq->buf = aux_buffer->data;
1090
1091	return etmq->buf_len;
1092}
1093
1094static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
1095				    struct cs_etm_traceid_queue *tidq)
1096{
1097	if ((!tidq->thread) && (tidq->tid != -1))
1098		tidq->thread = machine__find_thread(etm->machine, -1,
1099						    tidq->tid);
1100
1101	if (tidq->thread)
1102		tidq->pid = tidq->thread->pid_;
1103}
1104
1105int cs_etm__etmq_set_tid(struct cs_etm_queue *etmq,
1106			 pid_t tid, u8 trace_chan_id)
1107{
1108	int cpu, err = -EINVAL;
1109	struct cs_etm_auxtrace *etm = etmq->etm;
1110	struct cs_etm_traceid_queue *tidq;
1111
1112	tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
1113	if (!tidq)
1114		return err;
1115
1116	if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
1117		return err;
1118
1119	err = machine__set_current_tid(etm->machine, cpu, tid, tid);
1120	if (err)
1121		return err;
1122
1123	tidq->tid = tid;
1124	thread__zput(tidq->thread);
1125
1126	cs_etm__set_pid_tid_cpu(etm, tidq);
1127	return 0;
1128}
1129
1130bool cs_etm__etmq_is_timeless(struct cs_etm_queue *etmq)
1131{
1132	return !!etmq->etm->timeless_decoding;
1133}
1134
1135static void cs_etm__copy_insn(struct cs_etm_queue *etmq,
1136			      u64 trace_chan_id,
1137			      const struct cs_etm_packet *packet,
1138			      struct perf_sample *sample)
1139{
1140	/*
1141	 * It's pointless to read instructions for the CS_ETM_DISCONTINUITY
1142	 * packet, so directly bail out with 'insn_len' = 0.
1143	 */
1144	if (packet->sample_type == CS_ETM_DISCONTINUITY) {
1145		sample->insn_len = 0;
1146		return;
1147	}
1148
1149	/*
 1150	 * T32 instruction size might be 32-bit or 16-bit; determine it by calling
1151	 * cs_etm__t32_instr_size().
1152	 */
1153	if (packet->isa == CS_ETM_ISA_T32)
1154		sample->insn_len = cs_etm__t32_instr_size(etmq, trace_chan_id,
1155							  sample->ip);
1156	/* Otherwise, A64 and A32 instruction size are always 32-bit. */
1157	else
1158		sample->insn_len = 4;
1159
1160	cs_etm__mem_access(etmq, trace_chan_id, sample->ip,
1161			   sample->insn_len, (void *)sample->insn);
1162}
1163
1164static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
1165					    struct cs_etm_traceid_queue *tidq,
1166					    u64 addr, u64 period)
1167{
1168	int ret = 0;
1169	struct cs_etm_auxtrace *etm = etmq->etm;
1170	union perf_event *event = tidq->event_buf;
1171	struct perf_sample sample = {.ip = 0,};
1172
1173	event->sample.header.type = PERF_RECORD_SAMPLE;
1174	event->sample.header.misc = cs_etm__cpu_mode(etmq, addr);
1175	event->sample.header.size = sizeof(struct perf_event_header);
1176
1177	if (!etm->timeless_decoding)
1178		sample.time = etm->latest_kernel_timestamp;
1179	sample.ip = addr;
1180	sample.pid = tidq->pid;
1181	sample.tid = tidq->tid;
1182	sample.id = etmq->etm->instructions_id;
1183	sample.stream_id = etmq->etm->instructions_id;
1184	sample.period = period;
1185	sample.cpu = tidq->packet->cpu;
1186	sample.flags = tidq->prev_packet->flags;
1187	sample.cpumode = event->sample.header.misc;
1188
1189	cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->packet, &sample);
1190
1191	if (etm->synth_opts.last_branch)
1192		sample.branch_stack = tidq->last_branch;
1193
1194	if (etm->synth_opts.inject) {
1195		ret = cs_etm__inject_event(event, &sample,
1196					   etm->instructions_sample_type);
1197		if (ret)
1198			return ret;
1199	}
1200
1201	ret = perf_session__deliver_synth_event(etm->session, event, &sample);
1202
1203	if (ret)
1204		pr_err(
1205			"CS ETM Trace: failed to deliver instruction event, error %d\n",
1206			ret);
1207
1208	return ret;
1209}
1210
1211/*
1212 * The cs etm packet encodes an instruction range between a branch target
 1213 * and the next taken branch. Generate a sample accordingly.
1214 */
1215static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq,
1216				       struct cs_etm_traceid_queue *tidq)
1217{
1218	int ret = 0;
1219	struct cs_etm_auxtrace *etm = etmq->etm;
1220	struct perf_sample sample = {.ip = 0,};
1221	union perf_event *event = tidq->event_buf;
1222	struct dummy_branch_stack {
1223		u64			nr;
1224		u64			hw_idx;
1225		struct branch_entry	entries;
1226	} dummy_bs;
1227	u64 ip;
1228
1229	ip = cs_etm__last_executed_instr(tidq->prev_packet);
1230
1231	event->sample.header.type = PERF_RECORD_SAMPLE;
1232	event->sample.header.misc = cs_etm__cpu_mode(etmq, ip);
1233	event->sample.header.size = sizeof(struct perf_event_header);
1234
1235	if (!etm->timeless_decoding)
1236		sample.time = etm->latest_kernel_timestamp;
1237	sample.ip = ip;
1238	sample.pid = tidq->pid;
1239	sample.tid = tidq->tid;
1240	sample.addr = cs_etm__first_executed_instr(tidq->packet);
1241	sample.id = etmq->etm->branches_id;
1242	sample.stream_id = etmq->etm->branches_id;
1243	sample.period = 1;
1244	sample.cpu = tidq->packet->cpu;
1245	sample.flags = tidq->prev_packet->flags;
1246	sample.cpumode = event->sample.header.misc;
1247
1248	cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->prev_packet,
1249			  &sample);
1250
1251	/*
1252	 * perf report cannot handle events without a branch stack
1253	 */
1254	if (etm->synth_opts.last_branch) {
1255		dummy_bs = (struct dummy_branch_stack){
1256			.nr = 1,
1257			.hw_idx = -1ULL,
1258			.entries = {
1259				.from = sample.ip,
1260				.to = sample.addr,
1261			},
1262		};
1263		sample.branch_stack = (struct branch_stack *)&dummy_bs;
1264	}
1265
1266	if (etm->synth_opts.inject) {
1267		ret = cs_etm__inject_event(event, &sample,
1268					   etm->branches_sample_type);
1269		if (ret)
1270			return ret;
1271	}
1272
1273	ret = perf_session__deliver_synth_event(etm->session, event, &sample);
1274
1275	if (ret)
1276		pr_err(
 1277		"CS ETM Trace: failed to deliver branch event, error %d\n",
1278		ret);
1279
1280	return ret;
1281}
1282
1283struct cs_etm_synth {
1284	struct perf_tool dummy_tool;
1285	struct perf_session *session;
1286};
1287
1288static int cs_etm__event_synth(struct perf_tool *tool,
1289			       union perf_event *event,
1290			       struct perf_sample *sample __maybe_unused,
1291			       struct machine *machine __maybe_unused)
1292{
1293	struct cs_etm_synth *cs_etm_synth =
1294		      container_of(tool, struct cs_etm_synth, dummy_tool);
1295
1296	return perf_session__deliver_synth_event(cs_etm_synth->session,
1297						 event, NULL);
1298}
1299
1300static int cs_etm__synth_event(struct perf_session *session,
1301			       struct perf_event_attr *attr, u64 id)
1302{
1303	struct cs_etm_synth cs_etm_synth;
1304
1305	memset(&cs_etm_synth, 0, sizeof(struct cs_etm_synth));
1306	cs_etm_synth.session = session;
1307
1308	return perf_event__synthesize_attr(&cs_etm_synth.dummy_tool, attr, 1,
1309					   &id, cs_etm__event_synth);
1310}
1311
1312static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
1313				struct perf_session *session)
1314{
1315	struct evlist *evlist = session->evlist;
1316	struct evsel *evsel;
1317	struct perf_event_attr attr;
1318	bool found = false;
1319	u64 id;
1320	int err;
1321
1322	evlist__for_each_entry(evlist, evsel) {
1323		if (evsel->core.attr.type == etm->pmu_type) {
1324			found = true;
1325			break;
1326		}
1327	}
1328
1329	if (!found) {
1330		pr_debug("No selected events with CoreSight Trace data\n");
1331		return 0;
1332	}
1333
1334	memset(&attr, 0, sizeof(struct perf_event_attr));
1335	attr.size = sizeof(struct perf_event_attr);
1336	attr.type = PERF_TYPE_HARDWARE;
1337	attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
1338	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
1339			    PERF_SAMPLE_PERIOD;
1340	if (etm->timeless_decoding)
1341		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
1342	else
1343		attr.sample_type |= PERF_SAMPLE_TIME;
1344
1345	attr.exclude_user = evsel->core.attr.exclude_user;
1346	attr.exclude_kernel = evsel->core.attr.exclude_kernel;
1347	attr.exclude_hv = evsel->core.attr.exclude_hv;
1348	attr.exclude_host = evsel->core.attr.exclude_host;
1349	attr.exclude_guest = evsel->core.attr.exclude_guest;
1350	attr.sample_id_all = evsel->core.attr.sample_id_all;
1351	attr.read_format = evsel->core.attr.read_format;
1352
1353	/* create new id val to be a fixed offset from evsel id */
1354	id = evsel->core.id[0] + 1000000000;
1355
1356	if (!id)
1357		id = 1;
1358
1359	if (etm->synth_opts.branches) {
1360		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
1361		attr.sample_period = 1;
1362		attr.sample_type |= PERF_SAMPLE_ADDR;
1363		err = cs_etm__synth_event(session, &attr, id);
1364		if (err)
1365			return err;
1366		etm->branches_sample_type = attr.sample_type;
1367		etm->branches_id = id;
1368		id += 1;
1369		attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
1370	}
1371
1372	if (etm->synth_opts.last_branch) {
1373		attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
1374		/*
1375		 * We don't use the hardware index, but the sample generation
1376		 * code uses the new format branch_stack with this field,
1377		 * so the event attributes must indicate that it's present.
1378		 */
1379		attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
1380	}
1381
1382	if (etm->synth_opts.instructions) {
1383		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
1384		attr.sample_period = etm->synth_opts.period;
1385		etm->instructions_sample_period = attr.sample_period;
1386		err = cs_etm__synth_event(session, &attr, id);
1387		if (err)
1388			return err;
1389		etm->instructions_sample_type = attr.sample_type;
1390		etm->instructions_id = id;
1391		id += 1;
1392	}
1393
1394	return 0;
1395}
1396
1397static int cs_etm__sample(struct cs_etm_queue *etmq,
1398			  struct cs_etm_traceid_queue *tidq)
1399{
1400	struct cs_etm_auxtrace *etm = etmq->etm;
1401	int ret;
1402	u8 trace_chan_id = tidq->trace_chan_id;
1403	u64 instrs_prev;
1404
1405	/* Get instructions remainder from previous packet */
1406	instrs_prev = tidq->period_instructions;
1407
1408	tidq->period_instructions += tidq->packet->instr_count;
1409
1410	/*
1411	 * Record a branch when the last instruction in
1412	 * PREV_PACKET is a branch.
1413	 */
1414	if (etm->synth_opts.last_branch &&
1415	    tidq->prev_packet->sample_type == CS_ETM_RANGE &&
1416	    tidq->prev_packet->last_instr_taken_branch)
1417		cs_etm__update_last_branch_rb(etmq, tidq);
1418
1419	if (etm->synth_opts.instructions &&
1420	    tidq->period_instructions >= etm->instructions_sample_period) {
1421		/*
1422		 * Emit instruction sample periodically
1423		 * TODO: allow period to be defined in cycles and clock time
1424		 */
1425
1426		/*
1427		 * Below diagram demonstrates the instruction samples
1428		 * generation flows:
1429		 *
1430		 *    Instrs     Instrs       Instrs       Instrs
1431		 *   Sample(n)  Sample(n+1)  Sample(n+2)  Sample(n+3)
1432		 *    |            |            |            |
1433		 *    V            V            V            V
1434		 *   --------------------------------------------------
1435		 *            ^                                  ^
1436		 *            |                                  |
1437		 *         Period                             Period
1438		 *    instructions(Pi)                   instructions(Pi')
1439		 *
1440		 *            |                                  |
1441		 *            \---------------- -----------------/
1442		 *                             V
1443		 *                 tidq->packet->instr_count
1444		 *
1445		 * Instrs Sample(n...) are the synthesised samples occurring
1446		 * every etm->instructions_sample_period instructions - as
 1447		 * defined on the perf command line.  Sample(n) is the
1448		 * last sample before the current etm packet, n+1 to n+3
1449		 * samples are generated from the current etm packet.
1450		 *
1451		 * tidq->packet->instr_count represents the number of
1452		 * instructions in the current etm packet.
1453		 *
1454		 * Period instructions (Pi) contains the number of
1455		 * instructions executed after the sample point(n) from the
1456		 * previous etm packet.  This will always be less than
1457		 * etm->instructions_sample_period.
1458		 *
 1459		 * When generating new samples, sample(n+1) combines instructions
 1460		 * from two parts: the tail of the old packet and the head of the
 1461		 * newly arrived packet; sample(n+2) and sample(n+3) each consume
 1462		 * a full sample period of instructions.  After sample(n+3), the
 1463		 * remaining instructions will be used by a later packet and are
 1464		 * assigned to tidq->period_instructions for the next round of
 1465		 * calculation.
1466		 */
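		/*
		 * Numeric example (illustrative values): with a sample
		 * period of 100, instrs_prev = 30 and
		 * tidq->packet->instr_count = 250, period_instructions is
		 * 280 and offset starts at 70.  The loop below emits samples
		 * at packet offsets 69 and 169, and the remaining 80
		 * instructions are carried over in tidq->period_instructions
		 * for the next packet.
		 */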
1467
1468		/*
1469		 * Get the initial offset into the current packet instructions;
1470		 * entry conditions ensure that instrs_prev is less than
1471		 * etm->instructions_sample_period.
1472		 */
1473		u64 offset = etm->instructions_sample_period - instrs_prev;
1474		u64 addr;
1475
1476		/* Prepare last branches for instruction sample */
1477		if (etm->synth_opts.last_branch)
1478			cs_etm__copy_last_branch_rb(etmq, tidq);
1479
1480		while (tidq->period_instructions >=
1481				etm->instructions_sample_period) {
1482			/*
1483			 * Calculate the address of the sampled instruction (-1
1484			 * as sample is reported as though instruction has just
1485			 * been executed, but PC has not advanced to next
1486			 * instruction)
1487			 */
1488			addr = cs_etm__instr_addr(etmq, trace_chan_id,
1489						  tidq->packet, offset - 1);
1490			ret = cs_etm__synth_instruction_sample(
1491				etmq, tidq, addr,
1492				etm->instructions_sample_period);
1493			if (ret)
1494				return ret;
1495
1496			offset += etm->instructions_sample_period;
1497			tidq->period_instructions -=
1498				etm->instructions_sample_period;
1499		}
1500	}
1501
1502	if (etm->synth_opts.branches) {
1503		bool generate_sample = false;
1504
1505		/* Generate sample for tracing on packet */
1506		if (tidq->prev_packet->sample_type == CS_ETM_DISCONTINUITY)
1507			generate_sample = true;
1508
1509		/* Generate sample for branch taken packet */
1510		if (tidq->prev_packet->sample_type == CS_ETM_RANGE &&
1511		    tidq->prev_packet->last_instr_taken_branch)
1512			generate_sample = true;
1513
1514		if (generate_sample) {
1515			ret = cs_etm__synth_branch_sample(etmq, tidq);
1516			if (ret)
1517				return ret;
1518		}
1519	}
1520
1521	cs_etm__packet_swap(etm, tidq);
1522
1523	return 0;
1524}
1525
1526static int cs_etm__exception(struct cs_etm_traceid_queue *tidq)
1527{
1528	/*
 1529	 * When an exception packet is inserted, force
 1530	 * 'prev_packet->last_instr_taken_branch' to true, whether or not the
 1531	 * last instruction in the previous range packet was a taken branch.
 1532	 * This ensures a branch sample is generated for the instruction range
 1533	 * executed before the exception traps to the kernel or returns.
1534	 *
 1535	 * The exception packet contains dummy address values, so don't
 1536	 * swap PACKET with PREV_PACKET.  This keeps PREV_PACKET useful
1537	 * for generating instruction and branch samples.
1538	 */
1539	if (tidq->prev_packet->sample_type == CS_ETM_RANGE)
1540		tidq->prev_packet->last_instr_taken_branch = true;
1541
1542	return 0;
1543}
1544
1545static int cs_etm__flush(struct cs_etm_queue *etmq,
1546			 struct cs_etm_traceid_queue *tidq)
1547{
1548	int err = 0;
1549	struct cs_etm_auxtrace *etm = etmq->etm;
1550
1551	/* Handle start tracing packet */
1552	if (tidq->prev_packet->sample_type == CS_ETM_EMPTY)
1553		goto swap_packet;
1554
1555	if (etmq->etm->synth_opts.last_branch &&
1556	    etmq->etm->synth_opts.instructions &&
1557	    tidq->prev_packet->sample_type == CS_ETM_RANGE) {
1558		u64 addr;
1559
1560		/* Prepare last branches for instruction sample */
1561		cs_etm__copy_last_branch_rb(etmq, tidq);
1562
1563		/*
1564		 * Generate a last branch event for the branches left in the
1565		 * circular buffer at the end of the trace.
1566		 *
1567		 * Use the address of the end of the last reported execution
1568		 * range
1569		 */
1570		addr = cs_etm__last_executed_instr(tidq->prev_packet);
1571
1572		err = cs_etm__synth_instruction_sample(
1573			etmq, tidq, addr,
1574			tidq->period_instructions);
1575		if (err)
1576			return err;
1577
1578		tidq->period_instructions = 0;
1579
1580	}
1581
1582	if (etm->synth_opts.branches &&
1583	    tidq->prev_packet->sample_type == CS_ETM_RANGE) {
1584		err = cs_etm__synth_branch_sample(etmq, tidq);
1585		if (err)
1586			return err;
1587	}
1588
1589swap_packet:
1590	cs_etm__packet_swap(etm, tidq);
1591
1592	/* Reset last branches after flush the trace */
1593	if (etm->synth_opts.last_branch)
1594		cs_etm__reset_last_branch_rb(tidq);
1595
1596	return err;
1597}
1598
1599static int cs_etm__end_block(struct cs_etm_queue *etmq,
1600			     struct cs_etm_traceid_queue *tidq)
1601{
1602	int err;
1603
1604	/*
 1605	 * No new packet is coming and 'etmq->packet' contains the stale
 1606	 * packet left over from the previous packet swap, so skip branch
 1607	 * sample generation to avoid using the stale packet.
1608	 *
1609	 * For this case only flush branch stack and generate a last branch
1610	 * event for the branches left in the circular buffer at the end of
1611	 * the trace.
1612	 */
1613	if (etmq->etm->synth_opts.last_branch &&
1614	    etmq->etm->synth_opts.instructions &&
1615	    tidq->prev_packet->sample_type == CS_ETM_RANGE) {
1616		u64 addr;
1617
1618		/* Prepare last branches for instruction sample */
1619		cs_etm__copy_last_branch_rb(etmq, tidq);
1620
1621		/*
1622		 * Use the address of the end of the last reported execution
1623		 * range.
1624		 */
1625		addr = cs_etm__last_executed_instr(tidq->prev_packet);
1626
1627		err = cs_etm__synth_instruction_sample(
1628			etmq, tidq, addr,
1629			tidq->period_instructions);
1630		if (err)
1631			return err;
1632
1633		tidq->period_instructions = 0;
1634	}
1635
1636	return 0;
1637}
1638/*
1639 * cs_etm__get_data_block: Fetch a block from the auxtrace_buffer queue
1640 *			   if need be.
1641 * Returns:	< 0	if error
1642 *		= 0	if no more auxtrace_buffer to read
1643 *		> 0	if the current buffer isn't empty yet
1644 */
1645static int cs_etm__get_data_block(struct cs_etm_queue *etmq)
1646{
1647	int ret;
1648
1649	if (!etmq->buf_len) {
1650		ret = cs_etm__get_trace(etmq);
1651		if (ret <= 0)
1652			return ret;
1653		/*
1654		 * We cannot assume consecutive blocks in the data file
1655		 * are contiguous, reset the decoder to force re-sync.
1656		 */
1657		ret = cs_etm_decoder__reset(etmq->decoder);
1658		if (ret)
1659			return ret;
1660	}
1661
1662	return etmq->buf_len;
1663}
1664
1665static bool cs_etm__is_svc_instr(struct cs_etm_queue *etmq, u8 trace_chan_id,
1666				 struct cs_etm_packet *packet,
1667				 u64 end_addr)
1668{
1669	/* Initialise to keep compiler happy */
1670	u16 instr16 = 0;
1671	u32 instr32 = 0;
1672	u64 addr;
1673
1674	switch (packet->isa) {
1675	case CS_ETM_ISA_T32:
1676		/*
1677		 * The SVC of T32 is defined in ARM DDI 0487D.a, F5.1.247:
1678		 *
1679		 *  b'15         b'8
1680		 * +-----------------+--------+
1681		 * | 1 1 0 1 1 1 1 1 |  imm8  |
1682		 * +-----------------+--------+
1683		 *
 1684		 * The specification only defines SVC as a 16-bit T32 instruction
 1685		 * and has no 32-bit encoding, so only read 2 bytes below as the
 1686		 * T32 instruction size.
1687		 */
1688		addr = end_addr - 2;
1689		cs_etm__mem_access(etmq, trace_chan_id, addr,
1690				   sizeof(instr16), (u8 *)&instr16);
1691		if ((instr16 & 0xFF00) == 0xDF00)
1692			return true;
1693
1694		break;
1695	case CS_ETM_ISA_A32:
1696		/*
1697		 * The SVC of A32 is defined in ARM DDI 0487D.a, F5.1.247:
1698		 *
1699		 *  b'31 b'28 b'27 b'24
1700		 * +---------+---------+-------------------------+
1701		 * |  !1111  | 1 1 1 1 |        imm24            |
1702		 * +---------+---------+-------------------------+
1703		 */
1704		addr = end_addr - 4;
1705		cs_etm__mem_access(etmq, trace_chan_id, addr,
1706				   sizeof(instr32), (u8 *)&instr32);
1707		if ((instr32 & 0x0F000000) == 0x0F000000 &&
1708		    (instr32 & 0xF0000000) != 0xF0000000)
1709			return true;
1710
1711		break;
1712	case CS_ETM_ISA_A64:
1713		/*
1714		 * The SVC of A64 is defined in ARM DDI 0487D.a, C6.2.294:
1715		 *
1716		 *  b'31               b'21           b'4     b'0
1717		 * +-----------------------+---------+-----------+
1718		 * | 1 1 0 1 0 1 0 0 0 0 0 |  imm16  | 0 0 0 0 1 |
1719		 * +-----------------------+---------+-----------+
1720		 */
1721		addr = end_addr - 4;
1722		cs_etm__mem_access(etmq, trace_chan_id, addr,
1723				   sizeof(instr32), (u8 *)&instr32);
1724		if ((instr32 & 0xFFE0001F) == 0xd4000001)
1725			return true;
1726
1727		break;
1728	case CS_ETM_ISA_UNKNOWN:
1729	default:
1730		break;
1731	}
1732
1733	return false;
1734}
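/*
 * For reference, the checks above match the canonical SVC encodings:
 * the T32 'SVC #0' halfword 0xDF00 satisfies (instr16 & 0xFF00) == 0xDF00,
 * and the A64 'SVC #0' word 0xD4000001 satisfies
 * (instr32 & 0xFFE0001F) == 0xd4000001.
 */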
1735
1736static bool cs_etm__is_syscall(struct cs_etm_queue *etmq,
1737			       struct cs_etm_traceid_queue *tidq, u64 magic)
1738{
1739	u8 trace_chan_id = tidq->trace_chan_id;
1740	struct cs_etm_packet *packet = tidq->packet;
1741	struct cs_etm_packet *prev_packet = tidq->prev_packet;
1742
1743	if (magic == __perf_cs_etmv3_magic)
1744		if (packet->exception_number == CS_ETMV3_EXC_SVC)
1745			return true;
1746
1747	/*
1748	 * ETMv4 exception type CS_ETMV4_EXC_CALL covers SVC, SMC and
 1749	 * HVC cases; need to check whether it is an SVC instruction based on
 1750	 * the packet address.
1751	 */
1752	if (magic == __perf_cs_etmv4_magic) {
1753		if (packet->exception_number == CS_ETMV4_EXC_CALL &&
1754		    cs_etm__is_svc_instr(etmq, trace_chan_id, prev_packet,
1755					 prev_packet->end_addr))
1756			return true;
1757	}
1758
1759	return false;
1760}
1761
1762static bool cs_etm__is_async_exception(struct cs_etm_traceid_queue *tidq,
1763				       u64 magic)
1764{
1765	struct cs_etm_packet *packet = tidq->packet;
1766
1767	if (magic == __perf_cs_etmv3_magic)
1768		if (packet->exception_number == CS_ETMV3_EXC_DEBUG_HALT ||
1769		    packet->exception_number == CS_ETMV3_EXC_ASYNC_DATA_ABORT ||
1770		    packet->exception_number == CS_ETMV3_EXC_PE_RESET ||
1771		    packet->exception_number == CS_ETMV3_EXC_IRQ ||
1772		    packet->exception_number == CS_ETMV3_EXC_FIQ)
1773			return true;
1774
1775	if (magic == __perf_cs_etmv4_magic)
1776		if (packet->exception_number == CS_ETMV4_EXC_RESET ||
1777		    packet->exception_number == CS_ETMV4_EXC_DEBUG_HALT ||
1778		    packet->exception_number == CS_ETMV4_EXC_SYSTEM_ERROR ||
1779		    packet->exception_number == CS_ETMV4_EXC_INST_DEBUG ||
1780		    packet->exception_number == CS_ETMV4_EXC_DATA_DEBUG ||
1781		    packet->exception_number == CS_ETMV4_EXC_IRQ ||
1782		    packet->exception_number == CS_ETMV4_EXC_FIQ)
1783			return true;
1784
1785	return false;
1786}
1787
1788static bool cs_etm__is_sync_exception(struct cs_etm_queue *etmq,
1789				      struct cs_etm_traceid_queue *tidq,
1790				      u64 magic)
1791{
1792	u8 trace_chan_id = tidq->trace_chan_id;
1793	struct cs_etm_packet *packet = tidq->packet;
1794	struct cs_etm_packet *prev_packet = tidq->prev_packet;
1795
1796	if (magic == __perf_cs_etmv3_magic)
1797		if (packet->exception_number == CS_ETMV3_EXC_SMC ||
1798		    packet->exception_number == CS_ETMV3_EXC_HYP ||
1799		    packet->exception_number == CS_ETMV3_EXC_JAZELLE_THUMBEE ||
1800		    packet->exception_number == CS_ETMV3_EXC_UNDEFINED_INSTR ||
1801		    packet->exception_number == CS_ETMV3_EXC_PREFETCH_ABORT ||
1802		    packet->exception_number == CS_ETMV3_EXC_DATA_FAULT ||
1803		    packet->exception_number == CS_ETMV3_EXC_GENERIC)
1804			return true;
1805
1806	if (magic == __perf_cs_etmv4_magic) {
1807		if (packet->exception_number == CS_ETMV4_EXC_TRAP ||
1808		    packet->exception_number == CS_ETMV4_EXC_ALIGNMENT ||
1809		    packet->exception_number == CS_ETMV4_EXC_INST_FAULT ||
1810		    packet->exception_number == CS_ETMV4_EXC_DATA_FAULT)
1811			return true;
1812
1813		/*
1814		 * For CS_ETMV4_EXC_CALL, instructions other than SVC
1815		 * (i.e. SMC, HVC) are taken as sync exceptions.
1816		 */
1817		if (packet->exception_number == CS_ETMV4_EXC_CALL &&
1818		    !cs_etm__is_svc_instr(etmq, trace_chan_id, prev_packet,
1819					  prev_packet->end_addr))
1820			return true;
1821
1822		/*
1823		 * ETMv4 has 5 bits for the exception number; if the number
1824		 * is in the range ( CS_ETMV4_EXC_FIQ, CS_ETMV4_EXC_END ]
1825		 * it is an implementation defined exception.
1826		 *
1827		 * In this case, simply treat it as a sync exception.
1828		 */
1829		if (packet->exception_number > CS_ETMV4_EXC_FIQ &&
1830		    packet->exception_number <= CS_ETMV4_EXC_END)
1831			return true;
1832	}
1833
1834	return false;
1835}
1836
1837static int cs_etm__set_sample_flags(struct cs_etm_queue *etmq,
1838				    struct cs_etm_traceid_queue *tidq)
1839{
1840	struct cs_etm_packet *packet = tidq->packet;
1841	struct cs_etm_packet *prev_packet = tidq->prev_packet;
1842	u8 trace_chan_id = tidq->trace_chan_id;
1843	u64 magic;
1844	int ret;
1845
1846	switch (packet->sample_type) {
1847	case CS_ETM_RANGE:
1848		/*
1849		 * Immediate branch instruction with neither link nor
1850		 * return flag; it's a normal branch within
1851		 * the function.
1852		 */
1853		if (packet->last_instr_type == OCSD_INSTR_BR &&
1854		    packet->last_instr_subtype == OCSD_S_INSTR_NONE) {
1855			packet->flags = PERF_IP_FLAG_BRANCH;
1856
1857			if (packet->last_instr_cond)
1858				packet->flags |= PERF_IP_FLAG_CONDITIONAL;
1859		}
1860
1861		/*
1862		 * Immediate branch instruction with link (e.g. BL); this is
1863		 * a branch instruction for a function call.
1864		 */
1865		if (packet->last_instr_type == OCSD_INSTR_BR &&
1866		    packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
1867			packet->flags = PERF_IP_FLAG_BRANCH |
1868					PERF_IP_FLAG_CALL;
1869
1870		/*
1871		 * Indirect branch instruction with link (e.g. BLR); this is
1872		 * a branch instruction for a function call.
1873		 */
1874		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
1875		    packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
1876			packet->flags = PERF_IP_FLAG_BRANCH |
1877					PERF_IP_FLAG_CALL;
1878
1879		/*
1880		 * Indirect branch instruction with subtype
1881		 * OCSD_S_INSTR_V7_IMPLIED_RET; this is an explicit hint of a
1882		 * function return for A32/T32.
1883		 */
1884		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
1885		    packet->last_instr_subtype == OCSD_S_INSTR_V7_IMPLIED_RET)
1886			packet->flags = PERF_IP_FLAG_BRANCH |
1887					PERF_IP_FLAG_RETURN;
1888
1889		/*
1890		 * Indirect branch instruction without link (e.g. BR); usually
1891		 * this is used for a function return, especially for functions
1892		 * within dynamically linked libraries.
1893		 */
1894		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
1895		    packet->last_instr_subtype == OCSD_S_INSTR_NONE)
1896			packet->flags = PERF_IP_FLAG_BRANCH |
1897					PERF_IP_FLAG_RETURN;
1898
1899		/* Return instruction for function return. */
1900		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
1901		    packet->last_instr_subtype == OCSD_S_INSTR_V8_RET)
1902			packet->flags = PERF_IP_FLAG_BRANCH |
1903					PERF_IP_FLAG_RETURN;
1904
1905		/*
1906		 * The decoder might insert a discontinuity in the middle of
1907		 * instruction packets; fix up prev_packet with the
1908		 * PERF_IP_FLAG_TRACE_BEGIN flag to indicate the trace is restarting.
1909		 */
1910		if (prev_packet->sample_type == CS_ETM_DISCONTINUITY)
1911			prev_packet->flags |= PERF_IP_FLAG_BRANCH |
1912					      PERF_IP_FLAG_TRACE_BEGIN;
1913
1914		/*
1915		 * If the previous packet is an exception return packet
1916		 * and the return address just follows an SVC instruction,
1917		 * calibrate the previous packet's sample flags to
1918		 * PERF_IP_FLAG_SYSCALLRET.
1919		 */
1920		if (prev_packet->flags == (PERF_IP_FLAG_BRANCH |
1921					   PERF_IP_FLAG_RETURN |
1922					   PERF_IP_FLAG_INTERRUPT) &&
1923		    cs_etm__is_svc_instr(etmq, trace_chan_id,
1924					 packet, packet->start_addr))
1925			prev_packet->flags = PERF_IP_FLAG_BRANCH |
1926					     PERF_IP_FLAG_RETURN |
1927					     PERF_IP_FLAG_SYSCALLRET;
1928		break;
1929	case CS_ETM_DISCONTINUITY:
1930		/*
1931		 * The trace is discontinuous; if the previous packet is an
1932		 * instruction packet, set the PERF_IP_FLAG_TRACE_END flag
1933		 * on the previous packet.
1934		 */
1935		if (prev_packet->sample_type == CS_ETM_RANGE)
1936			prev_packet->flags |= PERF_IP_FLAG_BRANCH |
1937					      PERF_IP_FLAG_TRACE_END;
1938		break;
1939	case CS_ETM_EXCEPTION:
1940		ret = cs_etm__get_magic(packet->trace_chan_id, &magic);
1941		if (ret)
1942			return ret;
1943
1944		/* The exception is for system call. */
1945		if (cs_etm__is_syscall(etmq, tidq, magic))
1946			packet->flags = PERF_IP_FLAG_BRANCH |
1947					PERF_IP_FLAG_CALL |
1948					PERF_IP_FLAG_SYSCALLRET;
1949		/*
1950		 * The exception is triggered by an external signal from the bus,
1951		 * interrupt controller, debug module, or a PE reset or halt.
1952		 */
1953		else if (cs_etm__is_async_exception(tidq, magic))
1954			packet->flags = PERF_IP_FLAG_BRANCH |
1955					PERF_IP_FLAG_CALL |
1956					PERF_IP_FLAG_ASYNC |
1957					PERF_IP_FLAG_INTERRUPT;
1958		/*
1959		 * Otherwise, the exception is caused by a trap, an instruction
1960		 * or data fault, or an alignment error.
1961		 */
1962		else if (cs_etm__is_sync_exception(etmq, tidq, magic))
1963			packet->flags = PERF_IP_FLAG_BRANCH |
1964					PERF_IP_FLAG_CALL |
1965					PERF_IP_FLAG_INTERRUPT;
1966
1967		/*
1968		 * An exception packet is not used standalone for generating
1969		 * samples; it is affiliated with the previous instruction
1970		 * range packet.  When the exception packet is inserted, set
1971		 * the previous range packet's flags to tell perf it is an
1972		 * exception taken branch.
1973		 */
1974		if (prev_packet->sample_type == CS_ETM_RANGE)
1975			prev_packet->flags = packet->flags;
1976		break;
1977	case CS_ETM_EXCEPTION_RET:
1978		/*
1979		 * An exception return packet is not used standalone for
1980		 * generating samples; it is affiliated with the previous
1981		 * instruction range packet.  When the exception return packet
1982		 * is inserted, set the previous range packet's flags to tell
1983		 * perf it is an exception return branch.
1984		 *
1985		 * The exception return can be for either a system call or
1986		 * another exception type; unfortunately the packet doesn't
1987		 * contain exception type related info so we cannot decide
1988		 * the exception type purely from the exception return packet.
1989		 * Recording the exception number from the exception packet and
1990		 * reusing it for the exception return packet is not reliable
1991		 * because the trace can be discontinuous or the interrupt can
1992		 * be nested; in those two cases the recorded exception number
1993		 * cannot be used for the exception return packet.
1994		 *
1995		 * For an exception return packet we only need to distinguish
1996		 * whether it is for a system call or for another type.  That
1997		 * decision can be deferred until the next packet, which
1998		 * contains the return address; based on the return address we
1999		 * can read out the previous instruction, check whether it is a
2000		 * system call instruction and then calibrate the sample flags
2001		 * as needed.
2002		 */
2003		if (prev_packet->sample_type == CS_ETM_RANGE)
2004			prev_packet->flags = PERF_IP_FLAG_BRANCH |
2005					     PERF_IP_FLAG_RETURN |
2006					     PERF_IP_FLAG_INTERRUPT;
2007		break;
2008	case CS_ETM_EMPTY:
2009	default:
2010		break;
2011	}
2012
2013	return 0;
2014}
2015
2016static int cs_etm__decode_data_block(struct cs_etm_queue *etmq)
2017{
2018	int ret = 0;
2019	size_t processed = 0;
2020
2021	/*
2022	 * Packets are decoded and added to the decoder's packet queue
2023	 * until the decoder packet processing callback has requested that
2024	 * processing stops or there is nothing left in the buffer.  The
2025	 * normal conditions that stop processing are a timestamp packet or
2026	 * a full decoder packet queue.
2027	 */
2028	ret = cs_etm_decoder__process_data_block(etmq->decoder,
2029						 etmq->offset,
2030						 &etmq->buf[etmq->buf_used],
2031						 etmq->buf_len,
2032						 &processed);
2033	if (ret)
2034		goto out;
2035
2036	etmq->offset += processed;
2037	etmq->buf_used += processed;
2038	etmq->buf_len -= processed;
2039
2040out:
2041	return ret;
2042}
2043
2044static int cs_etm__process_traceid_queue(struct cs_etm_queue *etmq,
2045					 struct cs_etm_traceid_queue *tidq)
2046{
2047	int ret;
2048	struct cs_etm_packet_queue *packet_queue;
2049
2050	packet_queue = &tidq->packet_queue;
2051
2052	/* Process each packet in this chunk */
2053	while (1) {
2054		ret = cs_etm_decoder__get_packet(packet_queue,
2055						 tidq->packet);
2056		if (ret <= 0)
2057			/*
2058			 * Stop processing this chunk on
2059			 * end of data or error
2060			 */
2061			break;
2062
2063		/*
2064		 * Packet addresses are swapped during packet
2065		 * handling within the switch() statement below,
2066		 * so the sample flags must be set before the
2067		 * switch() statement in order to use the address
2068		 * information prior to the swap.
2069		 */
2070		ret = cs_etm__set_sample_flags(etmq, tidq);
2071		if (ret < 0)
2072			break;
2073
2074		switch (tidq->packet->sample_type) {
2075		case CS_ETM_RANGE:
2076			/*
2077			 * If the packet contains an instruction
2078			 * range, generate instruction sequence
2079			 * events.
2080			 */
2081			cs_etm__sample(etmq, tidq);
2082			break;
2083		case CS_ETM_EXCEPTION:
2084		case CS_ETM_EXCEPTION_RET:
2085			/*
2086			 * When an exception packet arrives,
2087			 * make sure the previous instruction
2088			 * range packet is handled properly.
2089			 */
2090			cs_etm__exception(tidq);
2091			break;
2092		case CS_ETM_DISCONTINUITY:
2093			/*
2094			 * Discontinuity in trace, flush
2095			 * previous branch stack
2096			 */
2097			cs_etm__flush(etmq, tidq);
2098			break;
2099		case CS_ETM_EMPTY:
2100			/*
2101			 * We should not receive an empty
2102			 * packet; report an error.
2103			 */
2104			pr_err("CS ETM Trace: empty packet\n");
2105			return -EINVAL;
2106		default:
2107			break;
2108		}
2109	}
2110
2111	return ret;
2112}
2113
2114static void cs_etm__clear_all_traceid_queues(struct cs_etm_queue *etmq)
2115{
2116	int idx;
2117	struct int_node *inode;
2118	struct cs_etm_traceid_queue *tidq;
2119	struct intlist *traceid_queues_list = etmq->traceid_queues_list;
2120
2121	intlist__for_each_entry(inode, traceid_queues_list) {
2122		idx = (int)(intptr_t)inode->priv;
2123		tidq = etmq->traceid_queues[idx];
2124
2125		/* Ignore return value */
2126		cs_etm__process_traceid_queue(etmq, tidq);
2127
2128		/*
2129		 * Generate an instruction sample with the remaining
2130		 * branch stack entries.
2131		 */
2132		cs_etm__flush(etmq, tidq);
2133	}
2134}
2135
2136static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
2137{
2138	int err = 0;
2139	struct cs_etm_traceid_queue *tidq;
2140
2141	tidq = cs_etm__etmq_get_traceid_queue(etmq, CS_ETM_PER_THREAD_TRACEID);
2142	if (!tidq)
2143		return -EINVAL;
2144
2145	/* Go through each buffer in the queue and decode them one by one */
2146	while (1) {
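		/*
		 * Fetch the next auxtrace buffer for this queue.  A return
		 * value <= 0 means no more data (0) or an error (< 0); in
		 * either case stop decoding this queue.
		 */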
2147		err = cs_etm__get_data_block(etmq);
2148		if (err <= 0)
2149			return err;
2150
2151		/* Run trace decoder until buffer consumed or end of trace */
2152		do {
2153			err = cs_etm__decode_data_block(etmq);
2154			if (err)
2155				return err;
2156
2157			/*
2158			 * Process each packet in this chunk; if an error occurs
2159			 * there is nothing to do other than hope the next one
2160			 * will be better.
2161			 */
2162			err = cs_etm__process_traceid_queue(etmq, tidq);
2163
2164		} while (etmq->buf_len);
2165
2166		if (err == 0)
2167			/* Flush any remaining branch stack entries */
2168			err = cs_etm__end_block(etmq, tidq);
2169	}
2170
2171	return err;
2172}
2173
2174static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
2175					   pid_t tid)
2176{
2177	unsigned int i;
2178	struct auxtrace_queues *queues = &etm->queues;
2179
2180	for (i = 0; i < queues->nr_queues; i++) {
2181		struct auxtrace_queue *queue = &etm->queues.queue_array[i];
2182		struct cs_etm_queue *etmq = queue->priv;
2183		struct cs_etm_traceid_queue *tidq;
2184
2185		if (!etmq)
2186			continue;
2187
2188		tidq = cs_etm__etmq_get_traceid_queue(etmq,
2189						CS_ETM_PER_THREAD_TRACEID);
2190
2191		if (!tidq)
2192			continue;
2193
2194		if ((tid == -1) || (tidq->tid == tid)) {
2195			cs_etm__set_pid_tid_cpu(etm, tidq);
2196			cs_etm__run_decoder(etmq);
2197		}
2198	}
2199
2200	return 0;
2201}
2202
2203static int cs_etm__process_queues(struct cs_etm_auxtrace *etm)
2204{
2205	int ret = 0;
2206	unsigned int cs_queue_nr, queue_nr, i;
2207	u8 trace_chan_id;
2208	u64 cs_timestamp;
2209	struct auxtrace_queue *queue;
2210	struct cs_etm_queue *etmq;
2211	struct cs_etm_traceid_queue *tidq;
2212
2213	/*
2214	 * Pre-populate the heap with one entry from each queue so that we can
2215	 * start processing in time order across all queues.
2216	 */
2217	for (i = 0; i < etm->queues.nr_queues; i++) {
2218		etmq = etm->queues.queue_array[i].priv;
2219		if (!etmq)
2220			continue;
2221
2222		ret = cs_etm__queue_first_cs_timestamp(etm, etmq, i);
2223		if (ret)
2224			return ret;
2225	}
2226
2227	while (1) {
2228		if (!etm->heap.heap_cnt)
2229			goto out;
2230
2231		/* Take the entry at the top of the min heap */
2232		cs_queue_nr = etm->heap.heap_array[0].queue_nr;
2233		queue_nr = TO_QUEUE_NR(cs_queue_nr);
2234		trace_chan_id = TO_TRACE_CHAN_ID(cs_queue_nr);
2235		queue = &etm->queues.queue_array[queue_nr];
2236		etmq = queue->priv;
2237
2238		/*
2239		 * Remove the top entry from the heap since we are about
2240		 * to process it.
2241		 */
2242		auxtrace_heap__pop(&etm->heap);
2243
2244		tidq  = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
2245		if (!tidq) {
2246			/*
2247			 * No traceID queue has been allocated for this traceID,
2248			 * which means something somewhere went very wrong.  There
2249			 * is no choice but to exit.
2250			 */
2251			ret = -EINVAL;
2252			goto out;
2253		}
2254
2255		/*
2256		 * Packets associated with this timestamp are already in
2257		 * the etmq's traceID queue, so process them.
2258		 */
2259		ret = cs_etm__process_traceid_queue(etmq, tidq);
2260		if (ret < 0)
2261			goto out;
2262
2263		/*
2264		 * Packets for this timestamp have been processed, time to
2265		 * move on to the next timestamp, fetching a new auxtrace_buffer
2266		 * if need be.
2267		 */
2268refetch:
2269		ret = cs_etm__get_data_block(etmq);
2270		if (ret < 0)
2271			goto out;
2272
2273		/*
2274		 * No more auxtrace_buffers to process in this etmq, simply
2275		 * move on to another entry in the auxtrace_heap.
2276		 */
2277		if (!ret)
2278			continue;
2279
2280		ret = cs_etm__decode_data_block(etmq);
2281		if (ret)
2282			goto out;
2283
2284		cs_timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);
2285
2286		if (!cs_timestamp) {
2287			/*
2288			 * Function cs_etm__decode_data_block() returns when
2289			 * there are no more traces to decode in the current
2290			 * auxtrace_buffer OR when a timestamp has been
2291			 * encountered on any of the traceID queues.  Since we
2292			 * did not get a timestamp, there are no more traces to
2293			 * process in this auxtrace_buffer.  As such, empty and
2294			 * flush all traceID queues.
2295			 */
2296			cs_etm__clear_all_traceid_queues(etmq);
2297
2298			/* Fetch another auxtrace_buffer for this etmq */
2299			goto refetch;
2300		}
2301
2302		/*
2303		 * Add to the min heap the timestamp for packets that have
2304		 * just been decoded.  They will be processed and synthesized
2305		 * during the next call to cs_etm__process_traceid_queue() for
2306		 * this queue/traceID.
2307		 */
2308		cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
2309		ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
2310	}
2311
2312out:
2313	return ret;
2314}
2315
2316static int cs_etm__process_itrace_start(struct cs_etm_auxtrace *etm,
2317					union perf_event *event)
2318{
2319	struct thread *th;
2320
2321	if (etm->timeless_decoding)
2322		return 0;
2323
2324	/*
2325	 * Add the tid/pid to the log so that we can get a match when
2326	 * we get a contextID from the decoder.
2327	 */
2328	th = machine__findnew_thread(etm->machine,
2329				     event->itrace_start.pid,
2330				     event->itrace_start.tid);
2331	if (!th)
2332		return -ENOMEM;
2333
2334	thread__put(th);
2335
2336	return 0;
2337}
2338
2339static int cs_etm__process_switch_cpu_wide(struct cs_etm_auxtrace *etm,
2340					   union perf_event *event)
2341{
2342	struct thread *th;
2343	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
2344
2345	/*
2346	 * Context switches in per-thread mode are irrelevant since perf
2347	 * will start/stop tracing as the process is scheduled.
2348	 */
2349	if (etm->timeless_decoding)
2350		return 0;
2351
2352	/*
2353	 * SWITCH_IN events carry the next process to be switched out while
2354	 * SWITCH_OUT events carry the process to be switched in.  As such
2355	 * we don't care about IN events.
2356	 */
2357	if (!out)
2358		return 0;
2359
2360	/*
2361	 * Add the tid/pid to the log so that we can get a match when
2362	 * we get a contextID from the decoder.
2363	 */
2364	th = machine__findnew_thread(etm->machine,
2365				     event->context_switch.next_prev_pid,
2366				     event->context_switch.next_prev_tid);
2367	if (!th)
2368		return -ENOMEM;
2369
2370	thread__put(th);
2371
2372	return 0;
2373}
2374
2375static int cs_etm__process_event(struct perf_session *session,
2376				 union perf_event *event,
2377				 struct perf_sample *sample,
2378				 struct perf_tool *tool)
2379{
2380	u64 sample_kernel_timestamp;
2381	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
2382						   struct cs_etm_auxtrace,
2383						   auxtrace);
2384
2385	if (dump_trace)
2386		return 0;
2387
2388	if (!tool->ordered_events) {
2389		pr_err("CoreSight ETM Trace requires ordered events\n");
2390		return -EINVAL;
2391	}
2392
2393	if (sample->time && (sample->time != (u64) -1))
2394		sample_kernel_timestamp = sample->time;
2395	else
2396		sample_kernel_timestamp = 0;
2397
2398	/*
2399	 * Don't wait for cs_etm__flush_events() in per-thread/timeless mode to start the decode. We
2400	 * need the tid of the PERF_RECORD_EXIT event to assign to the synthesised samples because
2401	 * ETM_OPT_CTXTID is not enabled.
2402	 */
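	/* PERF_RECORD_EXIT reuses the fork event layout, hence event->fork.tid below. */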
2403	if (etm->timeless_decoding &&
2404	    event->header.type == PERF_RECORD_EXIT)
2405		return cs_etm__process_timeless_queues(etm,
2406						       event->fork.tid);
2407
2408	if (event->header.type == PERF_RECORD_ITRACE_START)
2409		return cs_etm__process_itrace_start(etm, event);
2410	else if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
2411		return cs_etm__process_switch_cpu_wide(etm, event);
2412
2413	if (!etm->timeless_decoding && event->header.type == PERF_RECORD_AUX) {
2414		/*
2415		 * Record the latest kernel timestamp available in the header
2416		 * for samples so that synthesised samples occur from this point
2417		 * onwards.
2418		 */
2419		etm->latest_kernel_timestamp = sample_kernel_timestamp;
2420	}
2421
2422	return 0;
2423}
2424
2425static void dump_queued_data(struct cs_etm_auxtrace *etm,
2426			     struct perf_record_auxtrace *event)
2427{
2428	struct auxtrace_buffer *buf;
2429	unsigned int i;
2430	/*
2431	 * Find all buffers with same reference in the queues and dump them.
2432	 * This is because the queues can contain multiple entries of the same
2433	 * buffer that were split on aux records.
2434	 */
2435	for (i = 0; i < etm->queues.nr_queues; ++i)
2436		list_for_each_entry(buf, &etm->queues.queue_array[i].head, list)
2437			if (buf->reference == event->reference)
2438				cs_etm__dump_event(etm->queues.queue_array[i].priv, buf);
2439}
2440
2441static int cs_etm__process_auxtrace_event(struct perf_session *session,
2442					  union perf_event *event,
2443					  struct perf_tool *tool __maybe_unused)
2444{
2445	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
2446						   struct cs_etm_auxtrace,
2447						   auxtrace);
2448	if (!etm->data_queued) {
2449		struct auxtrace_buffer *buffer;
2450		off_t  data_offset;
2451		int fd = perf_data__fd(session->data);
2452		bool is_pipe = perf_data__is_pipe(session->data);
2453		int err;
2454		int idx = event->auxtrace.idx;
2455
2456		if (is_pipe)
2457			data_offset = 0;
2458		else {
2459			data_offset = lseek(fd, 0, SEEK_CUR);
2460			if (data_offset == -1)
2461				return -errno;
2462		}
2463
2464		err = auxtrace_queues__add_event(&etm->queues, session,
2465						 event, data_offset, &buffer);
2466		if (err)
2467			return err;
2468
2469		/*
2470		 * Knowing if the trace is formatted or not requires a lookup of
2471		 * the aux record so only works in non-piped mode where data is
2472		 * queued in cs_etm__queue_aux_records(). Always assume
2473		 * formatted in piped mode (true).
2474		 */
2475		err = cs_etm__setup_queue(etm, &etm->queues.queue_array[idx],
2476					  idx, true);
2477		if (err)
2478			return err;
2479
2480		if (dump_trace)
2481			if (auxtrace_buffer__get_data(buffer, fd)) {
2482				cs_etm__dump_event(etm->queues.queue_array[idx].priv, buffer);
2483				auxtrace_buffer__put_data(buffer);
2484			}
2485	} else if (dump_trace)
2486		dump_queued_data(etm, &event->auxtrace);
2487
2488	return 0;
2489}
2490
2491static bool cs_etm__is_timeless_decoding(struct cs_etm_auxtrace *etm)
2492{
2493	struct evsel *evsel;
2494	struct evlist *evlist = etm->session->evlist;
2495	bool timeless_decoding = true;
2496
2497	/* Override timeless mode with user input from --itrace=Z */
2498	if (etm->synth_opts.timeless_decoding)
2499		return true;
2500
2501	/*
2502	 * Cycle through the list of events and disable timeless decoding
2503	 * if we find one with the time bit set.
2504	 */
2505	evlist__for_each_entry(evlist, evsel) {
2506		if ((evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
2507			timeless_decoding = false;
2508	}
2509
2510	return timeless_decoding;
2511}
2512
2513/*
2514 * Read a single cpu parameter block from the auxtrace_info priv block.
2515 *
2516 * For version 1 there is a per cpu nr_params entry. If we are handling
2517 * a version 1 file, then there may be fewer, the same, or more params
2518 * indicated by this value than the compile time number we understand.
2519 *
2520 * For a version 0 info block, there are a fixed number, and we need to
2521 * fill out the nr_param value in the metadata we create.
2522 */
2523static u64 *cs_etm__create_meta_blk(u64 *buff_in, int *buff_in_offset,
2524				    int out_blk_size, int nr_params_v0)
2525{
2526	u64 *metadata = NULL;
2527	int hdr_version;
2528	int nr_in_params, nr_out_params, nr_cmn_params;
2529	int i, k;
2530
2531	metadata = zalloc(sizeof(*metadata) * out_blk_size);
2532	if (!metadata)
2533		return NULL;
2534
2535	/* read block current index & version */
2536	i = *buff_in_offset;
2537	hdr_version = buff_in[CS_HEADER_VERSION];
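	/*
	 * The version is read from the global header at the start of the
	 * priv block, not from the per-cpu block at offset 'i'.
	 */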
2538
2539	if (!hdr_version) {
2540	/* read version 0 info block into a version 1 metadata block  */
2541		nr_in_params = nr_params_v0;
2542		metadata[CS_ETM_MAGIC] = buff_in[i + CS_ETM_MAGIC];
2543		metadata[CS_ETM_CPU] = buff_in[i + CS_ETM_CPU];
2544		metadata[CS_ETM_NR_TRC_PARAMS] = nr_in_params;
2545		/* remaining block params at offset +1 from source */
2546		for (k = CS_ETM_COMMON_BLK_MAX_V1 - 1; k < nr_in_params; k++)
2547			metadata[k + 1] = buff_in[i + k];
2548		/* version 0 has 2 common params */
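		/* (the common params are CS_ETM_MAGIC and CS_ETM_CPU) */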
2549		nr_cmn_params = 2;
2550	} else {
2551	/* read version 1 info block - input and output nr_params may differ */
2552		/* version 1 has 3 common params */
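		/* (CS_ETM_MAGIC, CS_ETM_CPU and CS_ETM_NR_TRC_PARAMS) */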
2553		nr_cmn_params = 3;
2554		nr_in_params = buff_in[i + CS_ETM_NR_TRC_PARAMS];
2555
2556		/* if input has more params than output - skip excess */
2557		nr_out_params = nr_in_params + nr_cmn_params;
2558		if (nr_out_params > out_blk_size)
2559			nr_out_params = out_blk_size;
2560
2561		for (k = CS_ETM_MAGIC; k < nr_out_params; k++)
2562			metadata[k] = buff_in[i + k];
2563
2564		/* record the actual nr params we copied */
2565		metadata[CS_ETM_NR_TRC_PARAMS] = nr_out_params - nr_cmn_params;
2566	}
2567
2568	/* adjust in offset by number of in params used */
2569	i += nr_in_params + nr_cmn_params;
2570	*buff_in_offset = i;
2571	return metadata;
2572}
2573
2574/**
2575 * Puts a fragment of an auxtrace buffer into the auxtrace queues based
2576 * on the bounds of aux_event, if it matches with the buffer that's at
2577 * file_offset.
2578 *
2579 * Normally, whole auxtrace buffers would be added to the queue. But we
2580 * want to reset the decoder for every PERF_RECORD_AUX event, and the decoder
2581 * is reset across each buffer, so splitting the buffers up in advance has
2582 * the same effect.
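 *
 * Returns 0 on success, 1 if the aux event was not inside this buffer
 * ('not found'), or a negative error code.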
2583 */
2584static int cs_etm__queue_aux_fragment(struct perf_session *session, off_t file_offset, size_t sz,
2585				      struct perf_record_aux *aux_event, struct perf_sample *sample)
2586{
2587	int err;
2588	char buf[PERF_SAMPLE_MAX_SIZE];
2589	union perf_event *auxtrace_event_union;
2590	struct perf_record_auxtrace *auxtrace_event;
2591	union perf_event auxtrace_fragment;
2592	__u64 aux_offset, aux_size;
2593	__u32 idx;
2594	bool formatted;
2595
2596	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
2597						   struct cs_etm_auxtrace,
2598						   auxtrace);
2599
2600	/*
2601	 * There should be a PERF_RECORD_AUXTRACE event at the file_offset that we got
2602	 * from looping through the auxtrace index.
2603	 */
2604	err = perf_session__peek_event(session, file_offset, buf,
2605				       PERF_SAMPLE_MAX_SIZE, &auxtrace_event_union, NULL);
2606	if (err)
2607		return err;
2608	auxtrace_event = &auxtrace_event_union->auxtrace;
2609	if (auxtrace_event->header.type != PERF_RECORD_AUXTRACE)
2610		return -EINVAL;
2611
2612	if (auxtrace_event->header.size < sizeof(struct perf_record_auxtrace) ||
2613		auxtrace_event->header.size != sz) {
2614		return -EINVAL;
2615	}
2616
2617	/*
2618	 * In per-thread mode, CPU is set to -1, but TID will be set instead. See
2619	 * auxtrace_mmap_params__set_idx(). Return 'not found' if neither CPU nor TID match.
2620	 */
2621	if ((auxtrace_event->cpu == (__u32) -1 && auxtrace_event->tid != sample->tid) ||
2622			auxtrace_event->cpu != sample->cpu)
2623		return 1;
2624
2625	if (aux_event->flags & PERF_AUX_FLAG_OVERWRITE) {
2626		/*
2627		 * Clamp size in snapshot mode. The buffer size is clamped in
2628		 * __auxtrace_mmap__read() for snapshots, so the aux record size doesn't reflect
2629		 * the buffer size.
2630		 */
2631		aux_size = min(aux_event->aux_size, auxtrace_event->size);
2632
2633		/*
2634		 * In this mode, the head also points to the end of the buffer, so aux_offset
2635		 * needs to have the size subtracted to point to the beginning, as in normal mode.
2636		 */
2637		aux_offset = aux_event->aux_offset - aux_size;
2638	} else {
2639		aux_size = aux_event->aux_size;
2640		aux_offset = aux_event->aux_offset;
2641	}
2642
2643	if (aux_offset >= auxtrace_event->offset &&
2644	    aux_offset + aux_size <= auxtrace_event->offset + auxtrace_event->size) {
2645		/*
2646		 * If this AUX event was inside this buffer somewhere, create a new auxtrace event
2647		 * based on the sizes of the aux event, and queue that fragment.
2648		 */
2649		auxtrace_fragment.auxtrace = *auxtrace_event;
2650		auxtrace_fragment.auxtrace.size = aux_size;
2651		auxtrace_fragment.auxtrace.offset = aux_offset;
2652		file_offset += aux_offset - auxtrace_event->offset + auxtrace_event->header.size;
2653
2654		pr_debug3("CS ETM: Queue buffer size: %#"PRI_lx64" offset: %#"PRI_lx64
2655			  " tid: %d cpu: %d\n", aux_size, aux_offset, sample->tid, sample->cpu);
2656		err = auxtrace_queues__add_event(&etm->queues, session, &auxtrace_fragment,
2657						 file_offset, NULL);
2658		if (err)
2659			return err;
2660
2661		idx = auxtrace_event->idx;
2662		formatted = !(aux_event->flags & PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW);
2663		return cs_etm__setup_queue(etm, &etm->queues.queue_array[idx],
2664					   idx, formatted);
2665	}
2666
2667	/* Wasn't inside this buffer, but there were no parse errors. 1 == 'not found' */
2668	return 1;
2669}
2670
2671static int cs_etm__queue_aux_records_cb(struct perf_session *session, union perf_event *event,
2672					u64 offset __maybe_unused, void *data __maybe_unused)
2673{
2674	struct perf_sample sample;
2675	int ret;
2676	struct auxtrace_index_entry *ent;
2677	struct auxtrace_index *auxtrace_index;
2678	struct evsel *evsel;
2679	size_t i;
2680
2681	/* Don't care about any other events, we're only queuing buffers for AUX events */
2682	if (event->header.type != PERF_RECORD_AUX)
2683		return 0;
2684
2685	if (event->header.size < sizeof(struct perf_record_aux))
2686		return -EINVAL;
2687
2688	/* Truncated Aux records can have 0 size and shouldn't result in anything being queued. */
2689	if (!event->aux.aux_size)
2690		return 0;
2691
2692	/*
2693	 * Parse the sample, we need the sample_id_all data that comes after the event so that the
2694	 * CPU or PID can be matched to an AUXTRACE buffer's CPU or PID.
2695	 */
2696	evsel = evlist__event2evsel(session->evlist, event);
2697	if (!evsel)
2698		return -EINVAL;
2699	ret = evsel__parse_sample(evsel, event, &sample);
2700	if (ret)
2701		return ret;
2702
2703	/*
2704	 * Loop through the auxtrace index to find the buffer that matches up with this aux event.
2705	 */
2706	list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
2707		for (i = 0; i < auxtrace_index->nr; i++) {
2708			ent = &auxtrace_index->entries[i];
2709			ret = cs_etm__queue_aux_fragment(session, ent->file_offset,
2710							 ent->sz, &event->aux, &sample);
2711			/*
2712			 * Stop search on error or successful values. Continue search on
2713			 * 1 ('not found')
2714			 */
2715			if (ret != 1)
2716				return ret;
2717		}
2718	}
2719
2720	/*
2721	 * Couldn't find the buffer corresponding to this aux record, something went wrong. Warn but
2722	 * don't exit with an error because it will still be possible to decode other aux records.
2723	 */
2724	pr_err("CS ETM: Couldn't find auxtrace buffer for aux_offset: %#"PRI_lx64
2725	       " tid: %d cpu: %d\n", event->aux.aux_offset, sample.tid, sample.cpu);
2726	return 0;
2727}
2728
2729static int cs_etm__queue_aux_records(struct perf_session *session)
2730{
2731	struct auxtrace_index *index = list_first_entry_or_null(&session->auxtrace_index,
2732								struct auxtrace_index, list);
2733	if (index && index->nr > 0)
2734		return perf_session__peek_events(session, session->header.data_offset,
2735						 session->header.data_size,
2736						 cs_etm__queue_aux_records_cb, NULL);
2737
2738	/*
2739	 * We would get here if there are no entries in the index (either no auxtrace
2740	 * buffers or no index at all). Fail silently as there is the possibility of
2741	 * queueing them in cs_etm__process_auxtrace_event() if etm->data_queued is still
2742	 * false.
2743	 *
2744	 * In that scenario, buffers will not be split by AUX records.
2745	 */
2746	return 0;
2747}
2748
2749int cs_etm__process_auxtrace_info_full(union perf_event *event,
2750				       struct perf_session *session)
2751{
2752	struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
2753	struct cs_etm_auxtrace *etm = NULL;
2754	struct int_node *inode;
2755	int event_header_size = sizeof(struct perf_event_header);
2756	int total_size = auxtrace_info->header.size;
2757	int priv_size = 0;
2758	int num_cpu, trcidr_idx;
2759	int err = 0;
2760	int i, j;
2761	u64 *ptr = NULL;
2762	u64 **metadata = NULL;
2763
2764	/*
2765	 * Create an RB tree for traceID-metadata tuples.  Since the conversion
2766	 * has to be made for each packet that gets decoded, anything faster
2767	 * than a sequential array search is worth the effort.
2768	 */
2769	traceid_list = intlist__new(NULL);
2770	if (!traceid_list)
2771		return -ENOMEM;
2772
2773	/* First the global part */
2774	ptr = (u64 *) auxtrace_info->priv;
2775	num_cpu = ptr[CS_PMU_TYPE_CPUS] & 0xffffffff;
2776	metadata = zalloc(sizeof(*metadata) * num_cpu);
2777	if (!metadata) {
2778		err = -ENOMEM;
2779		goto err_free_traceid_list;
2780	}
2781
2782	/* Start parsing after the common part of the header */
2783	i = CS_HEADER_VERSION_MAX;
2784
2785	/*
2786	 * The metadata is stored in the auxtrace_info section and encodes
2787	 * the configuration of the ARM embedded trace macrocell which is
2788	 * required by the trace decoder to properly decode the trace due
2789	 * to its highly compressed nature.
2790	 */
2791	for (j = 0; j < num_cpu; j++) {
2792		if (ptr[i] == __perf_cs_etmv3_magic) {
2793			metadata[j] =
2794				cs_etm__create_meta_blk(ptr, &i,
2795							CS_ETM_PRIV_MAX,
2796							CS_ETM_NR_TRC_PARAMS_V0);
2797
2798			/* The traceID is our handle */
2799			trcidr_idx = CS_ETM_ETMTRACEIDR;
2800
2801		} else if (ptr[i] == __perf_cs_etmv4_magic) {
2802			metadata[j] =
2803				cs_etm__create_meta_blk(ptr, &i,
2804							CS_ETMV4_PRIV_MAX,
2805							CS_ETMV4_NR_TRC_PARAMS_V0);
2806
2807			/* The traceID is our handle */
2808			trcidr_idx = CS_ETMV4_TRCTRACEIDR;
2809		} else if (ptr[i] == __perf_cs_ete_magic) {
2810			metadata[j] = cs_etm__create_meta_blk(ptr, &i, CS_ETE_PRIV_MAX, -1);
2811
2812			/* ETE shares first part of metadata with ETMv4 */
2813			trcidr_idx = CS_ETMV4_TRCTRACEIDR;
2814		} else {
2815			ui__error("CS ETM Trace: Unrecognised magic number %#"PRIx64". File could be from a newer version of perf.\n",
2816				  ptr[i]);
2817			err = -EINVAL;
2818			goto err_free_metadata;
2819		}
2820
2821		if (!metadata[j]) {
2822			err = -ENOMEM;
2823			goto err_free_metadata;
2824		}
2825
2826		/* Get an RB node for this CPU */
2827		inode = intlist__findnew(traceid_list, metadata[j][trcidr_idx]);
2828
2829		/* Something went wrong, no need to continue */
2830		if (!inode) {
2831			err = -ENOMEM;
2832			goto err_free_metadata;
2833		}
2834
2835		/*
2836		 * The node for that CPU should not already be taken.
2837		 * Back out if it is.
2838		 */
2839		if (inode->priv) {
2840			err = -EINVAL;
2841			goto err_free_metadata;
2842		}
2843		/* All good, associate the traceID with the metadata pointer */
2844		inode->priv = metadata[j];
2845	}
2846
2847	/*
2848	 * Each of CS_HEADER_VERSION_MAX, CS_ETM_PRIV_MAX and
2849	 * CS_ETMV4_PRIV_MAX mark how many double words are in the
2850	 * global metadata, and each cpu's metadata respectively.
2851	 * The following tests if the correct number of double words was
2852	 * present in the auxtrace info section.
2853	 */
2854	priv_size = total_size - event_header_size - INFO_HEADER_SIZE;
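	/*
	 * 'i' counted the u64 words consumed while parsing above, so i * 8
	 * is the number of bytes read from the priv area.
	 */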
2855	if (i * 8 != priv_size) {
2856		err = -EINVAL;
2857		goto err_free_metadata;
2858	}
2859
2860	etm = zalloc(sizeof(*etm));
2861
2862	if (!etm) {
2863		err = -ENOMEM;
2864		goto err_free_metadata;
2865	}
2866
2867	err = auxtrace_queues__init(&etm->queues);
2868	if (err)
2869		goto err_free_etm;
2870
2871	if (session->itrace_synth_opts->set) {
2872		etm->synth_opts = *session->itrace_synth_opts;
2873	} else {
2874		itrace_synth_opts__set_default(&etm->synth_opts,
2875				session->itrace_synth_opts->default_no_sample);
2876		etm->synth_opts.callchain = false;
2877	}
2878
2879	etm->session = session;
2880	etm->machine = &session->machines.host;
2881
2882	etm->num_cpu = num_cpu;
2883	etm->pmu_type = (unsigned int) ((ptr[CS_PMU_TYPE_CPUS] >> 32) & 0xffffffff);
2884	etm->snapshot_mode = (ptr[CS_ETM_SNAPSHOT] != 0);
2885	etm->metadata = metadata;
2886	etm->auxtrace_type = auxtrace_info->type;
2887	etm->timeless_decoding = cs_etm__is_timeless_decoding(etm);
2888
2889	etm->auxtrace.process_event = cs_etm__process_event;
2890	etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
2891	etm->auxtrace.flush_events = cs_etm__flush_events;
2892	etm->auxtrace.free_events = cs_etm__free_events;
2893	etm->auxtrace.free = cs_etm__free;
2894	etm->auxtrace.evsel_is_auxtrace = cs_etm__evsel_is_auxtrace;
2895	session->auxtrace = &etm->auxtrace;
2896
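	/*
	 * Fallback thread for trace data that cannot be attributed to a
	 * known pid/tid; the values are arbitrary out-of-range identifiers.
	 */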
2897	etm->unknown_thread = thread__new(999999999, 999999999);
2898	if (!etm->unknown_thread) {
2899		err = -ENOMEM;
2900		goto err_free_queues;
2901	}
2902
2903	/*
2904	 * Initialize the list node so that thread__zput() does not
2905	 * segfault in list_del_init().
2906	 */
2907	INIT_LIST_HEAD(&etm->unknown_thread->node);
2908
2909	err = thread__set_comm(etm->unknown_thread, "unknown", 0);
2910	if (err)
2911		goto err_delete_thread;
2912
2913	if (thread__init_maps(etm->unknown_thread, etm->machine)) {
2914		err = -ENOMEM;
2915		goto err_delete_thread;
2916	}
2917
2918	err = cs_etm__synth_events(etm, session);
2919	if (err)
2920		goto err_delete_thread;
2921
2922	err = cs_etm__queue_aux_records(session);
2923	if (err)
2924		goto err_delete_thread;
2925
2926	etm->data_queued = etm->queues.populated;
2927	/*
2928	 * Print warning in pipe mode, see cs_etm__process_auxtrace_event() and
2929	 * cs_etm__queue_aux_fragment() for details relating to limitations.
2930	 */
2931	if (!etm->data_queued)
2932		pr_warning("CS ETM warning: Coresight decode and TRBE support requires random file access.\n"
2933			   "Continuing with best effort decoding in piped mode.\n\n");
2934
2935	return 0;
2936
2937err_delete_thread:
2938	thread__zput(etm->unknown_thread);
2939err_free_queues:
2940	auxtrace_queues__free(&etm->queues);
2941	session->auxtrace = NULL;
2942err_free_etm:
2943	zfree(&etm);
2944err_free_metadata:
2945	/* No need to check @metadata[j], free(NULL) is supported */
2946	for (j = 0; j < num_cpu; j++)
2947		zfree(&metadata[j]);
2948	zfree(&metadata);
2949err_free_traceid_list:
2950	intlist__delete(traceid_list);
2951	return err;
2952}