   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright(C) 2015-2018 Linaro Limited.
   4 *
   5 * Author: Tor Jeremiassen <tor@ti.com>
   6 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
   7 */
   8
   9#include <linux/bitops.h>
  10#include <linux/err.h>
  11#include <linux/kernel.h>
  12#include <linux/log2.h>
  13#include <linux/types.h>
  14
  15#include <stdlib.h>
  16
  17#include "auxtrace.h"
  18#include "color.h"
  19#include "cs-etm.h"
  20#include "cs-etm-decoder/cs-etm-decoder.h"
  21#include "debug.h"
  22#include "evlist.h"
  23#include "intlist.h"
  24#include "machine.h"
  25#include "map.h"
  26#include "perf.h"
  27#include "thread.h"
  28#include "thread_map.h"
  29#include "thread-stack.h"
  30#include "util.h"
  31
  32#define MAX_TIMESTAMP (~0ULL)
  33
  34/*
  35 * A64 instructions are always 4 bytes
  36 *
  37 * Only A64 is supported, so this constant can be used for converting between
  38 * addresses and instruction counts, calculating offsets, etc.
  39 */
  40#define A64_INSTR_SIZE 4
  41
  42struct cs_etm_auxtrace {
  43	struct auxtrace auxtrace;
  44	struct auxtrace_queues queues;
  45	struct auxtrace_heap heap;
  46	struct itrace_synth_opts synth_opts;
  47	struct perf_session *session;
  48	struct machine *machine;
  49	struct thread *unknown_thread;
  50
  51	u8 timeless_decoding;
  52	u8 snapshot_mode;
  53	u8 data_queued;
  54	u8 sample_branches;
  55	u8 sample_instructions;
  56
  57	int num_cpu;
  58	u32 auxtrace_type;
  59	u64 branches_sample_type;
  60	u64 branches_id;
  61	u64 instructions_sample_type;
  62	u64 instructions_sample_period;
  63	u64 instructions_id;
  64	u64 **metadata;
  65	u64 kernel_start;
  66	unsigned int pmu_type;
  67};
  68
  69struct cs_etm_queue {
  70	struct cs_etm_auxtrace *etm;
  71	struct thread *thread;
  72	struct cs_etm_decoder *decoder;
  73	struct auxtrace_buffer *buffer;
  74	const struct cs_etm_state *state;
  75	union perf_event *event_buf;
  76	unsigned int queue_nr;
  77	pid_t pid, tid;
  78	int cpu;
  79	u64 time;
  80	u64 timestamp;
  81	u64 offset;
  82	u64 period_instructions;
  83	struct branch_stack *last_branch;
  84	struct branch_stack *last_branch_rb;
  85	size_t last_branch_pos;
  86	struct cs_etm_packet *prev_packet;
  87	struct cs_etm_packet *packet;
  88};
  89
  90static int cs_etm__update_queues(struct cs_etm_auxtrace *etm);
  91static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
  92					   pid_t tid, u64 time_);
  93
  94static void cs_etm__packet_dump(const char *pkt_string)
  95{
  96	const char *color = PERF_COLOR_BLUE;
  97	int len = strlen(pkt_string);
  98
  99	if (len && (pkt_string[len-1] == '\n'))
 100		color_fprintf(stdout, color, "	%s", pkt_string);
 101	else
 102		color_fprintf(stdout, color, "	%s\n", pkt_string);
 103
 104	fflush(stdout);
 105}
 106
 107static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
 108			       struct auxtrace_buffer *buffer)
 109{
 110	int i, ret;
 111	const char *color = PERF_COLOR_BLUE;
 112	struct cs_etm_decoder_params d_params;
 113	struct cs_etm_trace_params *t_params;
 114	struct cs_etm_decoder *decoder;
 115	size_t buffer_used = 0;
 116
 117	fprintf(stdout, "\n");
 118	color_fprintf(stdout, color,
 119		     ". ... CoreSight ETM Trace data: size %zu bytes\n",
 120		     buffer->size);
 121
 122	/* Use metadata to fill in trace parameters for trace decoder */
 123	t_params = zalloc(sizeof(*t_params) * etm->num_cpu);
 124	for (i = 0; i < etm->num_cpu; i++) {
 125		t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
 126		t_params[i].etmv4.reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0];
 127		t_params[i].etmv4.reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1];
 128		t_params[i].etmv4.reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2];
 129		t_params[i].etmv4.reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8];
 130		t_params[i].etmv4.reg_configr =
 131					etm->metadata[i][CS_ETMV4_TRCCONFIGR];
 132		t_params[i].etmv4.reg_traceidr =
 133					etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
 134	}
 135
 136	/* Set decoder parameters to simply print the trace packets */
 137	d_params.packet_printer = cs_etm__packet_dump;
 138	d_params.operation = CS_ETM_OPERATION_PRINT;
 139	d_params.formatted = true;
 140	d_params.fsyncs = false;
 141	d_params.hsyncs = false;
 142	d_params.frame_aligned = true;
 143
 144	decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);
 145
 146	zfree(&t_params);
 147
 148	if (!decoder)
 149		return;
 150	do {
 151		size_t consumed;
 152
 153		ret = cs_etm_decoder__process_data_block(
 154				decoder, buffer->offset,
 155				&((u8 *)buffer->data)[buffer_used],
 156				buffer->size - buffer_used, &consumed);
 157		if (ret)
 158			break;
 159
 160		buffer_used += consumed;
 161	} while (buffer_used < buffer->size);
 162
 163	cs_etm_decoder__free(decoder);
 164}
 165
 166static int cs_etm__flush_events(struct perf_session *session,
 167				struct perf_tool *tool)
 168{
 169	int ret;
 170	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
 171						   struct cs_etm_auxtrace,
 172						   auxtrace);
 173	if (dump_trace)
 174		return 0;
 175
 176	if (!tool->ordered_events)
 177		return -EINVAL;
 178
 179	if (!etm->timeless_decoding)
 180		return -EINVAL;
 181
 182	ret = cs_etm__update_queues(etm);
 183
 184	if (ret < 0)
 185		return ret;
 186
 187	return cs_etm__process_timeless_queues(etm, -1, MAX_TIMESTAMP - 1);
 188}
 189
 190static void cs_etm__free_queue(void *priv)
 191{
 192	struct cs_etm_queue *etmq = priv;
 193
 194	if (!etmq)
 195		return;
 196
 197	thread__zput(etmq->thread);
 198	cs_etm_decoder__free(etmq->decoder);
 199	zfree(&etmq->event_buf);
 200	zfree(&etmq->last_branch);
 201	zfree(&etmq->last_branch_rb);
 202	zfree(&etmq->prev_packet);
 203	zfree(&etmq->packet);
 204	free(etmq);
 205}
 206
 207static void cs_etm__free_events(struct perf_session *session)
 208{
 209	unsigned int i;
 210	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
 211						   struct cs_etm_auxtrace,
 212						   auxtrace);
 213	struct auxtrace_queues *queues = &aux->queues;
 214
 215	for (i = 0; i < queues->nr_queues; i++) {
 216		cs_etm__free_queue(queues->queue_array[i].priv);
 217		queues->queue_array[i].priv = NULL;
 218	}
 219
 220	auxtrace_queues__free(queues);
 221}
 222
 223static void cs_etm__free(struct perf_session *session)
 224{
 225	int i;
 226	struct int_node *inode, *tmp;
 227	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
 228						   struct cs_etm_auxtrace,
 229						   auxtrace);
 230	cs_etm__free_events(session);
 231	session->auxtrace = NULL;
 232
 233	/* First remove all traceID/CPU# nodes from the RB tree */
 234	intlist__for_each_entry_safe(inode, tmp, traceid_list)
 235		intlist__remove(traceid_list, inode);
 236	/* Then the RB tree itself */
 237	intlist__delete(traceid_list);
 238
 239	for (i = 0; i < aux->num_cpu; i++)
 240		zfree(&aux->metadata[i]);
 241
 242	thread__zput(aux->unknown_thread);
 243	zfree(&aux->metadata);
 244	zfree(&aux);
 245}
 246
 247static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address,
 248			      size_t size, u8 *buffer)
 249{
 250	u8  cpumode;
 251	u64 offset;
 252	int len;
 253	struct	 thread *thread;
 254	struct	 machine *machine;
 255	struct	 addr_location al;
 256
 257	if (!etmq)
 258		return -1;
 259
 260	machine = etmq->etm->machine;
 261	if (address >= etmq->etm->kernel_start)
 262		cpumode = PERF_RECORD_MISC_KERNEL;
 263	else
 264		cpumode = PERF_RECORD_MISC_USER;
 265
 266	thread = etmq->thread;
 267	if (!thread) {
 268		if (cpumode != PERF_RECORD_MISC_KERNEL)
 269			return -EINVAL;
 270		thread = etmq->etm->unknown_thread;
 271	}
 272
 273	thread__find_addr_map(thread, cpumode, MAP__FUNCTION, address, &al);
 274
 275	if (!al.map || !al.map->dso)
 276		return 0;
 277
 278	if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
 279	    dso__data_status_seen(al.map->dso, DSO_DATA_STATUS_SEEN_ITRACE))
 280		return 0;
 281
 282	offset = al.map->map_ip(al.map, address);
 283
 284	map__load(al.map);
 285
 286	len = dso__data_read_offset(al.map->dso, machine, offset, buffer, size);
 287
 288	if (len <= 0)
 289		return 0;
 290
 291	return len;
 292}
 293
 294static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
 295						unsigned int queue_nr)
 296{
 297	int i;
 298	struct cs_etm_decoder_params d_params;
 299	struct cs_etm_trace_params  *t_params;
 300	struct cs_etm_queue *etmq;
 301	size_t szp = sizeof(struct cs_etm_packet);
 302
 303	etmq = zalloc(sizeof(*etmq));
 304	if (!etmq)
 305		return NULL;
 306
 307	etmq->packet = zalloc(szp);
 308	if (!etmq->packet)
 309		goto out_free;
 310
 311	if (etm->synth_opts.last_branch || etm->sample_branches) {
 312		etmq->prev_packet = zalloc(szp);
 313		if (!etmq->prev_packet)
 314			goto out_free;
 315	}
 316
 317	if (etm->synth_opts.last_branch) {
 318		size_t sz = sizeof(struct branch_stack);
 319
 320		sz += etm->synth_opts.last_branch_sz *
 321		      sizeof(struct branch_entry);
 322		etmq->last_branch = zalloc(sz);
 323		if (!etmq->last_branch)
 324			goto out_free;
 325		etmq->last_branch_rb = zalloc(sz);
 326		if (!etmq->last_branch_rb)
 327			goto out_free;
 328	}
 329
 330	etmq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
 331	if (!etmq->event_buf)
 332		goto out_free;
 333
 334	etmq->etm = etm;
 335	etmq->queue_nr = queue_nr;
 336	etmq->pid = -1;
 337	etmq->tid = -1;
 338	etmq->cpu = -1;
 339
 340	/* Use metadata to fill in trace parameters for trace decoder */
 341	t_params = zalloc(sizeof(*t_params) * etm->num_cpu);
 342
 343	if (!t_params)
 344		goto out_free;
 345
 346	for (i = 0; i < etm->num_cpu; i++) {
 347		t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
 348		t_params[i].etmv4.reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0];
 349		t_params[i].etmv4.reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1];
 350		t_params[i].etmv4.reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2];
 351		t_params[i].etmv4.reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8];
 352		t_params[i].etmv4.reg_configr =
 353					etm->metadata[i][CS_ETMV4_TRCCONFIGR];
 354		t_params[i].etmv4.reg_traceidr =
 355					etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
 356	}
 357
 358	/* Set decoder parameters to simply print the trace packets */
 359	d_params.packet_printer = cs_etm__packet_dump;
 360	d_params.operation = CS_ETM_OPERATION_DECODE;
 361	d_params.formatted = true;
 362	d_params.fsyncs = false;
 363	d_params.hsyncs = false;
 364	d_params.frame_aligned = true;
 365	d_params.data = etmq;
 366
 367	etmq->decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);
 368
 369	zfree(&t_params);
 370
 371	if (!etmq->decoder)
 372		goto out_free;
 373
 374	/*
 375	 * Register a function to handle all memory accesses required by
 376	 * the trace decoder library.
 377	 */
 378	if (cs_etm_decoder__add_mem_access_cb(etmq->decoder,
 379					      0x0L, ((u64) -1L),
 380					      cs_etm__mem_access))
 381		goto out_free_decoder;
 382
 383	etmq->offset = 0;
 384	etmq->period_instructions = 0;
 385
 386	return etmq;
 387
 388out_free_decoder:
 389	cs_etm_decoder__free(etmq->decoder);
 390out_free:
 391	zfree(&etmq->event_buf);
 392	zfree(&etmq->last_branch);
 393	zfree(&etmq->last_branch_rb);
 394	zfree(&etmq->prev_packet);
 395	zfree(&etmq->packet);
 396	free(etmq);
 397
 398	return NULL;
 399}
 400
 401static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
 402			       struct auxtrace_queue *queue,
 403			       unsigned int queue_nr)
 404{
 405	struct cs_etm_queue *etmq = queue->priv;
 406
 407	if (list_empty(&queue->head) || etmq)
 408		return 0;
 409
 410	etmq = cs_etm__alloc_queue(etm, queue_nr);
 411
 412	if (!etmq)
 413		return -ENOMEM;
 414
 415	queue->priv = etmq;
 416
 417	if (queue->cpu != -1)
 418		etmq->cpu = queue->cpu;
 419
 420	etmq->tid = queue->tid;
 421
 422	return 0;
 423}
 424
 425static int cs_etm__setup_queues(struct cs_etm_auxtrace *etm)
 426{
 427	unsigned int i;
 428	int ret;
 429
 430	for (i = 0; i < etm->queues.nr_queues; i++) {
 431		ret = cs_etm__setup_queue(etm, &etm->queues.queue_array[i], i);
 432		if (ret)
 433			return ret;
 434	}
 435
 436	return 0;
 437}
 438
 439static int cs_etm__update_queues(struct cs_etm_auxtrace *etm)
 440{
 441	if (etm->queues.new_data) {
 442		etm->queues.new_data = false;
 443		return cs_etm__setup_queues(etm);
 444	}
 445
 446	return 0;
 447}
 448
 449static inline void cs_etm__copy_last_branch_rb(struct cs_etm_queue *etmq)
 450{
 451	struct branch_stack *bs_src = etmq->last_branch_rb;
 452	struct branch_stack *bs_dst = etmq->last_branch;
 453	size_t nr = 0;
 454
 455	/*
 456	 * Set the number of records before early exit: ->nr is used to
 457	 * determine how many branches to copy from ->entries.
 458	 */
 459	bs_dst->nr = bs_src->nr;
 460
 461	/*
 462	 * Early exit when there is nothing to copy.
 463	 */
 464	if (!bs_src->nr)
 465		return;
 466
 467	/*
 468	 * As bs_src->entries is a circular buffer, we need to copy from it in
 469	 * two steps.  First, copy the branches from the most recently inserted
 470	 * branch ->last_branch_pos until the end of bs_src->entries buffer.
 471	 */
 472	nr = etmq->etm->synth_opts.last_branch_sz - etmq->last_branch_pos;
 473	memcpy(&bs_dst->entries[0],
 474	       &bs_src->entries[etmq->last_branch_pos],
 475	       sizeof(struct branch_entry) * nr);
 476
 477	/*
 478	 * If we wrapped around at least once, the branches from the beginning
 479	 * of the bs_src->entries buffer and until the ->last_branch_pos element
 480	 * are older valid branches: copy them over.  The total number of
 481	 * branches copied over will be equal to the number of branches asked by
 482	 * the user in last_branch_sz.
 483	 */
 484	if (bs_src->nr >= etmq->etm->synth_opts.last_branch_sz) {
 485		memcpy(&bs_dst->entries[nr],
 486		       &bs_src->entries[0],
 487		       sizeof(struct branch_entry) * etmq->last_branch_pos);
 488	}
 489}
 490
 491static inline void cs_etm__reset_last_branch_rb(struct cs_etm_queue *etmq)
 492{
 493	etmq->last_branch_pos = 0;
 494	etmq->last_branch_rb->nr = 0;
 495}
 496
 497static inline u64 cs_etm__last_executed_instr(struct cs_etm_packet *packet)
 498{
 499	/*
 500	 * The packet records the execution range with an exclusive end address.
 501	 *
 502	 * A64 instructions are constant size, so the last executed
 503	 * instruction is A64_INSTR_SIZE before the end address.
 504	 * Will need to do instruction level decode for T32 instructions as
 505	 * they can be variable size (not yet supported).
 506	 */
 507	return packet->end_addr - A64_INSTR_SIZE;
 508}
 509
 510static inline u64 cs_etm__instr_count(const struct cs_etm_packet *packet)
 511{
 512	/*
 513	 * Only A64 instructions are currently supported, so can get
 514	 * instruction count by dividing.
 515	 * Will need to do instruction level decode for T32 instructions as
 516	 * they can be variable size (not yet supported).
 517	 */
 518	return (packet->end_addr - packet->start_addr) / A64_INSTR_SIZE;
 519}
 520
 521static inline u64 cs_etm__instr_addr(const struct cs_etm_packet *packet,
 522				     u64 offset)
 523{
 524	/*
 525	 * Only A64 instructions are currently supported, so can get
 526	 * instruction address by multiplying.
 527	 * Will need to do instruction level decode for T32 instructions as
 528	 * they can be variable size (not yet supported).
 529	 */
 530	return packet->start_addr + offset * A64_INSTR_SIZE;
 531}
 532
 533static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq)
 534{
 535	struct branch_stack *bs = etmq->last_branch_rb;
 536	struct branch_entry *be;
 537
 538	/*
 539	 * The branches are recorded in a circular buffer in reverse
 540	 * chronological order: we start recording from the last element of the
 541	 * buffer down.  After writing the first element of the stack, move the
 542	 * insert position back to the end of the buffer.
 543	 */
 544	if (!etmq->last_branch_pos)
 545		etmq->last_branch_pos = etmq->etm->synth_opts.last_branch_sz;
 546
 547	etmq->last_branch_pos -= 1;
 548
 549	be       = &bs->entries[etmq->last_branch_pos];
 550	be->from = cs_etm__last_executed_instr(etmq->prev_packet);
 551	be->to	 = etmq->packet->start_addr;
 552	/* No support for mispredict */
 553	be->flags.mispred = 0;
 554	be->flags.predicted = 1;
 555
 556	/*
 557	 * Increment bs->nr until reaching the number of last branches asked by
 558	 * the user on the command line.
 559	 */
 560	if (bs->nr < etmq->etm->synth_opts.last_branch_sz)
 561		bs->nr += 1;
 562}
 563
 564static int cs_etm__inject_event(union perf_event *event,
 565			       struct perf_sample *sample, u64 type)
 566{
 567	event->header.size = perf_event__sample_event_size(sample, type, 0);
 568	return perf_event__synthesize_sample(event, type, 0, sample);
 569}
 570
 571
 572static int
 573cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq)
 574{
 575	struct auxtrace_buffer *aux_buffer = etmq->buffer;
 576	struct auxtrace_buffer *old_buffer = aux_buffer;
 577	struct auxtrace_queue *queue;
 578
 579	queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
 580
 581	aux_buffer = auxtrace_buffer__next(queue, aux_buffer);
 582
 583	/* If no more data, drop the previous auxtrace_buffer and return */
 584	if (!aux_buffer) {
 585		if (old_buffer)
 586			auxtrace_buffer__drop_data(old_buffer);
 587		buff->len = 0;
 588		return 0;
 589	}
 590
 591	etmq->buffer = aux_buffer;
 592
 593	/* If the aux_buffer doesn't have data associated, try to load it */
 594	if (!aux_buffer->data) {
 595		/* get the file desc associated with the perf data file */
 596		int fd = perf_data__fd(etmq->etm->session->data);
 597
 598		aux_buffer->data = auxtrace_buffer__get_data(aux_buffer, fd);
 599		if (!aux_buffer->data)
 600			return -ENOMEM;
 601	}
 602
 603	/* If valid, drop the previous buffer */
 604	if (old_buffer)
 605		auxtrace_buffer__drop_data(old_buffer);
 606
 607	buff->offset = aux_buffer->offset;
 608	buff->len = aux_buffer->size;
 609	buff->buf = aux_buffer->data;
 610
 611	buff->ref_timestamp = aux_buffer->reference;
 612
 613	return buff->len;
 614}
 615
 616static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
 617				    struct auxtrace_queue *queue)
 618{
 619	struct cs_etm_queue *etmq = queue->priv;
 620
 621	/* CPU-wide tracing isn't supported yet */
 622	if (queue->tid == -1)
 623		return;
 624
 625	if ((!etmq->thread) && (etmq->tid != -1))
 626		etmq->thread = machine__find_thread(etm->machine, -1,
 627						    etmq->tid);
 628
 629	if (etmq->thread) {
 630		etmq->pid = etmq->thread->pid_;
 631		if (queue->cpu == -1)
 632			etmq->cpu = etmq->thread->cpu;
 633	}
 634}
 635
 636static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
 637					    u64 addr, u64 period)
 638{
 639	int ret = 0;
 640	struct cs_etm_auxtrace *etm = etmq->etm;
 641	union perf_event *event = etmq->event_buf;
 642	struct perf_sample sample = {.ip = 0,};
 643
 644	event->sample.header.type = PERF_RECORD_SAMPLE;
 645	event->sample.header.misc = PERF_RECORD_MISC_USER;
 646	event->sample.header.size = sizeof(struct perf_event_header);
 647
 648	sample.ip = addr;
 649	sample.pid = etmq->pid;
 650	sample.tid = etmq->tid;
 651	sample.id = etmq->etm->instructions_id;
 652	sample.stream_id = etmq->etm->instructions_id;
 653	sample.period = period;
 654	sample.cpu = etmq->packet->cpu;
 655	sample.flags = 0;
 656	sample.insn_len = 1;
 657	sample.cpumode = event->header.misc;
 658
 659	if (etm->synth_opts.last_branch) {
 660		cs_etm__copy_last_branch_rb(etmq);
 661		sample.branch_stack = etmq->last_branch;
 662	}
 663
 664	if (etm->synth_opts.inject) {
 665		ret = cs_etm__inject_event(event, &sample,
 666					   etm->instructions_sample_type);
 667		if (ret)
 668			return ret;
 669	}
 670
 671	ret = perf_session__deliver_synth_event(etm->session, event, &sample);
 672
 673	if (ret)
 674		pr_err(
 675			"CS ETM Trace: failed to deliver instruction event, error %d\n",
 676			ret);
 677
 678	if (etm->synth_opts.last_branch)
 679		cs_etm__reset_last_branch_rb(etmq);
 680
 681	return ret;
 682}
 683
 684/*
 685 * The cs etm packet encodes an instruction range between a branch target
 686 * and the next taken branch. Generate a sample accordingly.
 687 */
 688static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq)
 689{
 690	int ret = 0;
 691	struct cs_etm_auxtrace *etm = etmq->etm;
 692	struct perf_sample sample = {.ip = 0,};
 693	union perf_event *event = etmq->event_buf;
 694	struct dummy_branch_stack {
 695		u64			nr;
 696		struct branch_entry	entries;
 697	} dummy_bs;
 698
 699	event->sample.header.type = PERF_RECORD_SAMPLE;
 700	event->sample.header.misc = PERF_RECORD_MISC_USER;
 701	event->sample.header.size = sizeof(struct perf_event_header);
 702
 703	sample.ip = cs_etm__last_executed_instr(etmq->prev_packet);
 704	sample.pid = etmq->pid;
 705	sample.tid = etmq->tid;
 706	sample.addr = etmq->packet->start_addr;
 707	sample.id = etmq->etm->branches_id;
 708	sample.stream_id = etmq->etm->branches_id;
 709	sample.period = 1;
 710	sample.cpu = etmq->packet->cpu;
 711	sample.flags = 0;
 712	sample.cpumode = PERF_RECORD_MISC_USER;
 713
 714	/*
 715	 * perf report cannot handle events without a branch stack
 716	 */
 717	if (etm->synth_opts.last_branch) {
 718		dummy_bs = (struct dummy_branch_stack){
 719			.nr = 1,
 720			.entries = {
 721				.from = sample.ip,
 722				.to = sample.addr,
 723			},
 724		};
 725		sample.branch_stack = (struct branch_stack *)&dummy_bs;
 726	}
 727
 728	if (etm->synth_opts.inject) {
 729		ret = cs_etm__inject_event(event, &sample,
 730					   etm->branches_sample_type);
 731		if (ret)
 732			return ret;
 733	}
 734
 735	ret = perf_session__deliver_synth_event(etm->session, event, &sample);
 736
 737	if (ret)
 738		pr_err(
 739		"CS ETM Trace: failed to deliver branch event, error %d\n",
 740		ret);
 741
 742	return ret;
 743}
 744
 745struct cs_etm_synth {
 746	struct perf_tool dummy_tool;
 747	struct perf_session *session;
 748};
 749
 750static int cs_etm__event_synth(struct perf_tool *tool,
 751			       union perf_event *event,
 752			       struct perf_sample *sample __maybe_unused,
 753			       struct machine *machine __maybe_unused)
 754{
 755	struct cs_etm_synth *cs_etm_synth =
 756		      container_of(tool, struct cs_etm_synth, dummy_tool);
 757
 758	return perf_session__deliver_synth_event(cs_etm_synth->session,
 759						 event, NULL);
 760}
 761
 762static int cs_etm__synth_event(struct perf_session *session,
 763			       struct perf_event_attr *attr, u64 id)
 764{
 765	struct cs_etm_synth cs_etm_synth;
 766
 767	memset(&cs_etm_synth, 0, sizeof(struct cs_etm_synth));
 768	cs_etm_synth.session = session;
 769
 770	return perf_event__synthesize_attr(&cs_etm_synth.dummy_tool, attr, 1,
 771					   &id, cs_etm__event_synth);
 772}
 773
 774static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
 775				struct perf_session *session)
 776{
 777	struct perf_evlist *evlist = session->evlist;
 778	struct perf_evsel *evsel;
 779	struct perf_event_attr attr;
 780	bool found = false;
 781	u64 id;
 782	int err;
 783
 784	evlist__for_each_entry(evlist, evsel) {
 785		if (evsel->attr.type == etm->pmu_type) {
 786			found = true;
 787			break;
 788		}
 789	}
 790
 791	if (!found) {
 792		pr_debug("No selected events with CoreSight Trace data\n");
 793		return 0;
 794	}
 795
 796	memset(&attr, 0, sizeof(struct perf_event_attr));
 797	attr.size = sizeof(struct perf_event_attr);
 798	attr.type = PERF_TYPE_HARDWARE;
 799	attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
 800	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 801			    PERF_SAMPLE_PERIOD;
 802	if (etm->timeless_decoding)
 803		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
 804	else
 805		attr.sample_type |= PERF_SAMPLE_TIME;
 806
 807	attr.exclude_user = evsel->attr.exclude_user;
 808	attr.exclude_kernel = evsel->attr.exclude_kernel;
 809	attr.exclude_hv = evsel->attr.exclude_hv;
 810	attr.exclude_host = evsel->attr.exclude_host;
 811	attr.exclude_guest = evsel->attr.exclude_guest;
 812	attr.sample_id_all = evsel->attr.sample_id_all;
 813	attr.read_format = evsel->attr.read_format;
 814
 815	/* create new id val to be a fixed offset from evsel id */
 816	id = evsel->id[0] + 1000000000;
 817
 818	if (!id)
 819		id = 1;
 820
 821	if (etm->synth_opts.branches) {
 822		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
 823		attr.sample_period = 1;
 824		attr.sample_type |= PERF_SAMPLE_ADDR;
 825		err = cs_etm__synth_event(session, &attr, id);
 826		if (err)
 827			return err;
 828		etm->sample_branches = true;
 829		etm->branches_sample_type = attr.sample_type;
 830		etm->branches_id = id;
 831		id += 1;
 832		attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
 833	}
 834
 835	if (etm->synth_opts.last_branch)
 836		attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
 837
 838	if (etm->synth_opts.instructions) {
 839		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
 840		attr.sample_period = etm->synth_opts.period;
 841		etm->instructions_sample_period = attr.sample_period;
 842		err = cs_etm__synth_event(session, &attr, id);
 843		if (err)
 844			return err;
 845		etm->sample_instructions = true;
 846		etm->instructions_sample_type = attr.sample_type;
 847		etm->instructions_id = id;
 848		id += 1;
 849	}
 850
 851	return 0;
 852}
 853
 854static int cs_etm__sample(struct cs_etm_queue *etmq)
 855{
 856	struct cs_etm_auxtrace *etm = etmq->etm;
 857	struct cs_etm_packet *tmp;
 858	int ret;
 859	u64 instrs_executed;
 860
 861	instrs_executed = cs_etm__instr_count(etmq->packet);
 862	etmq->period_instructions += instrs_executed;
 863
 864	/*
 865	 * Record a branch when the last instruction in
 866	 * PREV_PACKET is a branch.
 867	 */
 868	if (etm->synth_opts.last_branch &&
 869	    etmq->prev_packet &&
 870	    etmq->prev_packet->sample_type == CS_ETM_RANGE &&
 871	    etmq->prev_packet->last_instr_taken_branch)
 872		cs_etm__update_last_branch_rb(etmq);
 873
 874	if (etm->sample_instructions &&
 875	    etmq->period_instructions >= etm->instructions_sample_period) {
 876		/*
 877		 * Emit instruction sample periodically
 878		 * TODO: allow period to be defined in cycles and clock time
 879		 */
 880
 881		/* Get number of instructions executed after the sample point */
 882		u64 instrs_over = etmq->period_instructions -
 883			etm->instructions_sample_period;
 884
 885		/*
 886		 * Calculate the address of the sampled instruction (-1 as
 887		 * sample is reported as though instruction has just been
 888		 * executed, but PC has not advanced to next instruction)
 889		 */
 890		u64 offset = (instrs_executed - instrs_over - 1);
 891		u64 addr = cs_etm__instr_addr(etmq->packet, offset);
 892
 893		ret = cs_etm__synth_instruction_sample(
 894			etmq, addr, etm->instructions_sample_period);
 895		if (ret)
 896			return ret;
 897
 898		/* Carry remaining instructions into next sample period */
 899		etmq->period_instructions = instrs_over;
 900	}
 901
 902	if (etm->sample_branches &&
 903	    etmq->prev_packet &&
 904	    etmq->prev_packet->sample_type == CS_ETM_RANGE &&
 905	    etmq->prev_packet->last_instr_taken_branch) {
 906		ret = cs_etm__synth_branch_sample(etmq);
 907		if (ret)
 908			return ret;
 909	}
 910
 911	if (etm->sample_branches || etm->synth_opts.last_branch) {
 912		/*
 913		 * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
 914		 * the next incoming packet.
 915		 */
 916		tmp = etmq->packet;
 917		etmq->packet = etmq->prev_packet;
 918		etmq->prev_packet = tmp;
 919	}
 920
 921	return 0;
 922}
 923
 924static int cs_etm__flush(struct cs_etm_queue *etmq)
 925{
 926	int err = 0;
 927	struct cs_etm_packet *tmp;
 928
 929	if (etmq->etm->synth_opts.last_branch &&
 930	    etmq->prev_packet &&
 931	    etmq->prev_packet->sample_type == CS_ETM_RANGE) {
 932		/*
 933		 * Generate a last branch event for the branches left in the
 934		 * circular buffer at the end of the trace.
 935		 *
 936		 * Use the address of the end of the last reported execution
 937		 * range
 938		 */
 939		u64 addr = cs_etm__last_executed_instr(etmq->prev_packet);
 940
 941		err = cs_etm__synth_instruction_sample(
 942			etmq, addr,
 943			etmq->period_instructions);
 944		etmq->period_instructions = 0;
 945
 946		/*
 947		 * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
 948		 * the next incoming packet.
 949		 */
 950		tmp = etmq->packet;
 951		etmq->packet = etmq->prev_packet;
 952		etmq->prev_packet = tmp;
 953	}
 954
 955	return err;
 956}
 957
 958static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
 959{
 960	struct cs_etm_auxtrace *etm = etmq->etm;
 961	struct cs_etm_buffer buffer;
 962	size_t buffer_used, processed;
 963	int err = 0;
 964
 965	if (!etm->kernel_start)
 966		etm->kernel_start = machine__kernel_start(etm->machine);
 967
 968	/* Go through each buffer in the queue and decode them one by one */
 969	while (1) {
 970		buffer_used = 0;
 971		memset(&buffer, 0, sizeof(buffer));
 972		err = cs_etm__get_trace(&buffer, etmq);
 973		if (err <= 0)
 974			return err;
 975		/*
 976		 * We cannot assume consecutive blocks in the data file are
 977		 * contiguous; reset the decoder to force re-sync.
 978		 */
 979		err = cs_etm_decoder__reset(etmq->decoder);
 980		if (err != 0)
 981			return err;
 982
 983		/* Run trace decoder until buffer consumed or end of trace */
 984		do {
 985			processed = 0;
 986			err = cs_etm_decoder__process_data_block(
 987				etmq->decoder,
 988				etmq->offset,
 989				&buffer.buf[buffer_used],
 990				buffer.len - buffer_used,
 991				&processed);
 992			if (err)
 993				return err;
 994
 995			etmq->offset += processed;
 996			buffer_used += processed;
 997
 998			/* Process each packet in this chunk */
 999			while (1) {
1000				err = cs_etm_decoder__get_packet(etmq->decoder,
1001								 etmq->packet);
1002				if (err <= 0)
1003					/*
1004					 * Stop processing this chunk on
1005					 * end of data or error
1006					 */
1007					break;
1008
1009				switch (etmq->packet->sample_type) {
1010				case CS_ETM_RANGE:
1011					/*
1012					 * If the packet contains an instruction
1013					 * range, generate instruction sequence
1014					 * events.
1015					 */
1016					cs_etm__sample(etmq);
1017					break;
1018				case CS_ETM_TRACE_ON:
1019					/*
1020					 * Discontinuity in trace, flush
1021					 * previous branch stack
1022					 */
1023					cs_etm__flush(etmq);
1024					break;
1025				default:
1026					break;
1027				}
1028			}
1029		} while (buffer.len > buffer_used);
1030
1031		if (err == 0)
1032			/* Flush any remaining branch stack entries */
1033			err = cs_etm__flush(etmq);
1034	}
1035
1036	return err;
1037}
1038
1039static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
1040					   pid_t tid, u64 time_)
1041{
1042	unsigned int i;
1043	struct auxtrace_queues *queues = &etm->queues;
1044
1045	for (i = 0; i < queues->nr_queues; i++) {
1046		struct auxtrace_queue *queue = &etm->queues.queue_array[i];
1047		struct cs_etm_queue *etmq = queue->priv;
1048
1049		if (etmq && ((tid == -1) || (etmq->tid == tid))) {
1050			etmq->time = time_;
1051			cs_etm__set_pid_tid_cpu(etm, queue);
1052			cs_etm__run_decoder(etmq);
1053		}
1054	}
1055
1056	return 0;
1057}
1058
1059static int cs_etm__process_event(struct perf_session *session,
1060				 union perf_event *event,
1061				 struct perf_sample *sample,
1062				 struct perf_tool *tool)
1063{
1064	int err = 0;
1065	u64 timestamp;
1066	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
1067						   struct cs_etm_auxtrace,
1068						   auxtrace);
1069
1070	if (dump_trace)
1071		return 0;
1072
1073	if (!tool->ordered_events) {
1074		pr_err("CoreSight ETM Trace requires ordered events\n");
1075		return -EINVAL;
1076	}
1077
1078	if (!etm->timeless_decoding)
1079		return -EINVAL;
1080
1081	if (sample->time && (sample->time != (u64) -1))
1082		timestamp = sample->time;
1083	else
1084		timestamp = 0;
1085
1086	if (timestamp || etm->timeless_decoding) {
1087		err = cs_etm__update_queues(etm);
1088		if (err)
1089			return err;
1090	}
1091
1092	if (event->header.type == PERF_RECORD_EXIT)
1093		return cs_etm__process_timeless_queues(etm,
1094						       event->fork.tid,
1095						       sample->time);
1096
1097	return 0;
1098}
1099
1100static int cs_etm__process_auxtrace_event(struct perf_session *session,
1101					  union perf_event *event,
1102					  struct perf_tool *tool __maybe_unused)
1103{
1104	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
1105						   struct cs_etm_auxtrace,
1106						   auxtrace);
1107	if (!etm->data_queued) {
1108		struct auxtrace_buffer *buffer;
1109		off_t  data_offset;
1110		int fd = perf_data__fd(session->data);
1111		bool is_pipe = perf_data__is_pipe(session->data);
1112		int err;
1113
1114		if (is_pipe)
1115			data_offset = 0;
1116		else {
1117			data_offset = lseek(fd, 0, SEEK_CUR);
1118			if (data_offset == -1)
1119				return -errno;
1120		}
1121
1122		err = auxtrace_queues__add_event(&etm->queues, session,
1123						 event, data_offset, &buffer);
1124		if (err)
1125			return err;
1126
1127		if (dump_trace)
1128			if (auxtrace_buffer__get_data(buffer, fd)) {
1129				cs_etm__dump_event(etm, buffer);
1130				auxtrace_buffer__put_data(buffer);
1131			}
1132	}
1133
1134	return 0;
1135}
1136
1137static bool cs_etm__is_timeless_decoding(struct cs_etm_auxtrace *etm)
1138{
1139	struct perf_evsel *evsel;
1140	struct perf_evlist *evlist = etm->session->evlist;
1141	bool timeless_decoding = true;
1142
1143	/*
1144	 * Cycle through the list of events: if one of them has the time bit
1145	 * set, timestamps are in use and decoding is not timeless.
1146	 */
1147	evlist__for_each_entry(evlist, evsel) {
1148		if ((evsel->attr.sample_type & PERF_SAMPLE_TIME))
1149			timeless_decoding = false;
1150	}
1151
1152	return timeless_decoding;
1153}
1154
1155static const char * const cs_etm_global_header_fmts[] = {
1156	[CS_HEADER_VERSION_0]	= "	Header version		       %llx\n",
1157	[CS_PMU_TYPE_CPUS]	= "	PMU type/num cpus	       %llx\n",
1158	[CS_ETM_SNAPSHOT]	= "	Snapshot		       %llx\n",
1159};
1160
1161static const char * const cs_etm_priv_fmts[] = {
1162	[CS_ETM_MAGIC]		= "	Magic number		       %llx\n",
1163	[CS_ETM_CPU]		= "	CPU			       %lld\n",
1164	[CS_ETM_ETMCR]		= "	ETMCR			       %llx\n",
1165	[CS_ETM_ETMTRACEIDR]	= "	ETMTRACEIDR		       %llx\n",
1166	[CS_ETM_ETMCCER]	= "	ETMCCER			       %llx\n",
1167	[CS_ETM_ETMIDR]		= "	ETMIDR			       %llx\n",
1168};
1169
1170static const char * const cs_etmv4_priv_fmts[] = {
1171	[CS_ETM_MAGIC]		= "	Magic number		       %llx\n",
1172	[CS_ETM_CPU]		= "	CPU			       %lld\n",
1173	[CS_ETMV4_TRCCONFIGR]	= "	TRCCONFIGR		       %llx\n",
1174	[CS_ETMV4_TRCTRACEIDR]	= "	TRCTRACEIDR		       %llx\n",
1175	[CS_ETMV4_TRCIDR0]	= "	TRCIDR0			       %llx\n",
1176	[CS_ETMV4_TRCIDR1]	= "	TRCIDR1			       %llx\n",
1177	[CS_ETMV4_TRCIDR2]	= "	TRCIDR2			       %llx\n",
1178	[CS_ETMV4_TRCIDR8]	= "	TRCIDR8			       %llx\n",
1179	[CS_ETMV4_TRCAUTHSTATUS] = "	TRCAUTHSTATUS		       %llx\n",
1180};
1181
1182static void cs_etm__print_auxtrace_info(u64 *val, int num)
1183{
1184	int i, j, cpu = 0;
1185
1186	for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
1187		fprintf(stdout, cs_etm_global_header_fmts[i], val[i]);
1188
1189	for (i = CS_HEADER_VERSION_0_MAX; cpu < num; cpu++) {
1190		if (val[i] == __perf_cs_etmv3_magic)
1191			for (j = 0; j < CS_ETM_PRIV_MAX; j++, i++)
1192				fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
1193		else if (val[i] == __perf_cs_etmv4_magic)
1194			for (j = 0; j < CS_ETMV4_PRIV_MAX; j++, i++)
1195				fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
1196		else
1197			/* failure.. return */
1198			return;
1199	}
1200}
1201
1202int cs_etm__process_auxtrace_info(union perf_event *event,
1203				  struct perf_session *session)
1204{
1205	struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
1206	struct cs_etm_auxtrace *etm = NULL;
1207	struct int_node *inode;
1208	unsigned int pmu_type;
1209	int event_header_size = sizeof(struct perf_event_header);
1210	int info_header_size;
1211	int total_size = auxtrace_info->header.size;
1212	int priv_size = 0;
1213	int num_cpu;
1214	int err = 0, idx = -1;
1215	int i, j, k;
1216	u64 *ptr, *hdr = NULL;
1217	u64 **metadata = NULL;
1218
1219	/*
1220	 * sizeof(auxtrace_info_event::type) +
1221	 * sizeof(auxtrace_info_event::reserved) == 8
1222	 */
1223	info_header_size = 8;
1224
1225	if (total_size < (event_header_size + info_header_size))
1226		return -EINVAL;
1227
1228	priv_size = total_size - event_header_size - info_header_size;
1229
1230	/* First the global part */
1231	ptr = (u64 *) auxtrace_info->priv;
1232
1233	/* Look for version '0' of the header */
1234	if (ptr[0] != 0)
1235		return -EINVAL;
1236
1237	hdr = zalloc(sizeof(*hdr) * CS_HEADER_VERSION_0_MAX);
1238	if (!hdr)
1239		return -ENOMEM;
1240
1241	/* Extract header information - see cs-etm.h for format */
1242	for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
1243		hdr[i] = ptr[i];
1244	num_cpu = hdr[CS_PMU_TYPE_CPUS] & 0xffffffff;
1245	pmu_type = (unsigned int) ((hdr[CS_PMU_TYPE_CPUS] >> 32) &
1246				    0xffffffff);
1247
1248	/*
1249	 * Create an RB tree for traceID-CPU# tuples. Since the conversion has
1250	 * to be made for each packet that gets decoded, optimizing access with
1251	 * something faster than a sequential array lookup is worth doing.
1252	 */
1253	traceid_list = intlist__new(NULL);
1254	if (!traceid_list) {
1255		err = -ENOMEM;
1256		goto err_free_hdr;
1257	}
1258
1259	metadata = zalloc(sizeof(*metadata) * num_cpu);
1260	if (!metadata) {
1261		err = -ENOMEM;
1262		goto err_free_traceid_list;
1263	}
1264
1265	/*
1266	 * The metadata is stored in the auxtrace_info section and encodes
1267	 * the configuration of the ARM embedded trace macrocell which is
1268	 * required by the trace decoder to properly decode the trace due
1269	 * to its highly compressed nature.
1270	 */
1271	for (j = 0; j < num_cpu; j++) {
1272		if (ptr[i] == __perf_cs_etmv3_magic) {
1273			metadata[j] = zalloc(sizeof(*metadata[j]) *
1274					     CS_ETM_PRIV_MAX);
1275			if (!metadata[j]) {
1276				err = -ENOMEM;
1277				goto err_free_metadata;
1278			}
1279			for (k = 0; k < CS_ETM_PRIV_MAX; k++)
1280				metadata[j][k] = ptr[i + k];
1281
1282			/* The traceID is our handle */
1283			idx = metadata[j][CS_ETM_ETMTRACEIDR];
1284			i += CS_ETM_PRIV_MAX;
1285		} else if (ptr[i] == __perf_cs_etmv4_magic) {
1286			metadata[j] = zalloc(sizeof(*metadata[j]) *
1287					     CS_ETMV4_PRIV_MAX);
1288			if (!metadata[j]) {
1289				err = -ENOMEM;
1290				goto err_free_metadata;
1291			}
1292			for (k = 0; k < CS_ETMV4_PRIV_MAX; k++)
1293				metadata[j][k] = ptr[i + k];
1294
1295			/* The traceID is our handle */
1296			idx = metadata[j][CS_ETMV4_TRCTRACEIDR];
1297			i += CS_ETMV4_PRIV_MAX;
1298		}
1299
1300		/* Get an RB node for this CPU */
1301		inode = intlist__findnew(traceid_list, idx);
1302
1303		/* Something went wrong, no need to continue */
1304		if (!inode) {
1305			err = PTR_ERR(inode);
1306			goto err_free_metadata;
1307		}
1308
1309		/*
1310		 * The node for that CPU should not be taken.
1311		 * Back out if that's the case.
1312		 */
1313		if (inode->priv) {
1314			err = -EINVAL;
1315			goto err_free_metadata;
1316		}
1317		/* All good, associate the traceID with the CPU# */
1318		inode->priv = &metadata[j][CS_ETM_CPU];
1319	}
1320
1321	/*
1322	 * Each of CS_HEADER_VERSION_0_MAX, CS_ETM_PRIV_MAX and
1323	 * CS_ETMV4_PRIV_MAX mark how many double words are in the
1324	 * global metadata, and each cpu's metadata respectively.
1325	 * The following tests if the correct number of double words was
1326	 * present in the auxtrace info section.
1327	 */
1328	if (i * 8 != priv_size) {
1329		err = -EINVAL;
1330		goto err_free_metadata;
1331	}
1332
1333	etm = zalloc(sizeof(*etm));
1334
1335	if (!etm) {
1336		err = -ENOMEM;
1337		goto err_free_metadata;
1338	}
1339
1340	err = auxtrace_queues__init(&etm->queues);
1341	if (err)
1342		goto err_free_etm;
1343
1344	etm->session = session;
1345	etm->machine = &session->machines.host;
1346
1347	etm->num_cpu = num_cpu;
1348	etm->pmu_type = pmu_type;
1349	etm->snapshot_mode = (hdr[CS_ETM_SNAPSHOT] != 0);
1350	etm->metadata = metadata;
1351	etm->auxtrace_type = auxtrace_info->type;
1352	etm->timeless_decoding = cs_etm__is_timeless_decoding(etm);
1353
1354	etm->auxtrace.process_event = cs_etm__process_event;
1355	etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
1356	etm->auxtrace.flush_events = cs_etm__flush_events;
1357	etm->auxtrace.free_events = cs_etm__free_events;
1358	etm->auxtrace.free = cs_etm__free;
1359	session->auxtrace = &etm->auxtrace;
1360
1361	etm->unknown_thread = thread__new(999999999, 999999999);
1362	if (!etm->unknown_thread)
1363		goto err_free_queues;
1364
1365	/*
1366	 * Initialize the list node so that thread__zput() does not trigger a
1367	 * segmentation fault in list_del_init().
1368	 */
1369	INIT_LIST_HEAD(&etm->unknown_thread->node);
1370
1371	err = thread__set_comm(etm->unknown_thread, "unknown", 0);
1372	if (err)
1373		goto err_delete_thread;
1374
1375	if (thread__init_map_groups(etm->unknown_thread, etm->machine))
1376		goto err_delete_thread;
1377
1378	if (dump_trace) {
1379		cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);
1380		return 0;
1381	}
1382
1383	if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
1384		etm->synth_opts = *session->itrace_synth_opts;
1385	} else {
1386		itrace_synth_opts__set_default(&etm->synth_opts);
1387		etm->synth_opts.callchain = false;
1388	}
1389
1390	err = cs_etm__synth_events(etm, session);
1391	if (err)
1392		goto err_delete_thread;
1393
1394	err = auxtrace_queues__process_index(&etm->queues, session);
1395	if (err)
1396		goto err_delete_thread;
1397
1398	etm->data_queued = etm->queues.populated;
1399
1400	return 0;
1401
1402err_delete_thread:
1403	thread__zput(etm->unknown_thread);
1404err_free_queues:
1405	auxtrace_queues__free(&etm->queues);
1406	session->auxtrace = NULL;
1407err_free_etm:
1408	zfree(&etm);
1409err_free_metadata:
1410	/* No need to check @metadata[j], free(NULL) is supported */
1411	for (j = 0; j < num_cpu; j++)
1412		free(metadata[j]);
1413	zfree(&metadata);
1414err_free_traceid_list:
1415	intlist__delete(traceid_list);
1416err_free_hdr:
1417	zfree(&hdr);
1418
1419	return -EINVAL;
1420}