/*
 * auxtrace.c: AUX area trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <sys/types.h>
#include <sys/mman.h>
#include <stdbool.h>

#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/string.h>

#include <sys/param.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <limits.h>
#include <errno.h>
#include <linux/list.h>

#include "../perf.h"
#include "util.h"
#include "evlist.h"
#include "cpumap.h"
#include "thread_map.h"
#include "asm/bug.h"
#include "auxtrace.h"

#include <linux/hash.h>

#include "event.h"
#include "session.h"
#include "debug.h"
#include <subcmd/parse-options.h>

#include "intel-pt.h"
#include "intel-bts.h"

int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
			struct auxtrace_mmap_params *mp,
			void *userpg, int fd)
{
	struct perf_event_mmap_page *pc = userpg;

	WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");

	mm->userpg = userpg;
	mm->mask = mp->mask;
	mm->len = mp->len;
	mm->prev = 0;
	mm->idx = mp->idx;
	mm->tid = mp->tid;
	mm->cpu = mp->cpu;

	if (!mp->len) {
		mm->base = NULL;
		return 0;
	}

#if BITS_PER_LONG != 64 && !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
	pr_err("Cannot use AUX area tracing mmaps\n");
	return -1;
#endif

	pc->aux_offset = mp->offset;
	pc->aux_size = mp->len;

	mm->base = mmap(NULL, mp->len, mp->prot, MAP_SHARED, fd, mp->offset);
	if (mm->base == MAP_FAILED) {
		pr_debug2("failed to mmap AUX area\n");
		mm->base = NULL;
		return -1;
	}

	return 0;
}

void auxtrace_mmap__munmap(struct auxtrace_mmap *mm)
{
	if (mm->base) {
		munmap(mm->base, mm->len);
		mm->base = NULL;
	}
}

void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
				off_t auxtrace_offset,
				unsigned int auxtrace_pages,
				bool auxtrace_overwrite)
{
	if (auxtrace_pages) {
		mp->offset = auxtrace_offset;
		mp->len = auxtrace_pages * (size_t)page_size;
		mp->mask = is_power_of_2(mp->len) ? mp->len - 1 : 0;
		mp->prot = PROT_READ | (auxtrace_overwrite ? 0 : PROT_WRITE);
		pr_debug2("AUX area mmap length %zu\n", mp->len);
	} else {
		mp->len = 0;
	}
}

void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
				   struct perf_evlist *evlist, int idx,
				   bool per_cpu)
{
	mp->idx = idx;

	if (per_cpu) {
		mp->cpu = evlist->cpus->map[idx];
		if (evlist->threads)
			mp->tid = thread_map__pid(evlist->threads, 0);
		else
			mp->tid = -1;
	} else {
		mp->cpu = -1;
		mp->tid = thread_map__pid(evlist->threads, idx);
	}
}

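/*
 * Usage sketch: a minimal, illustrative caller (not a verbatim copy of the
 * evlist mmap code) initializes the params once, binds them to one mmap'd
 * event, and maps the AUX area via the event fd's user page:
 *
 *	struct auxtrace_mmap_params mp;
 *
 *	auxtrace_mmap_params__init(&mp, auxtrace_offset, auxtrace_pages,
 *				   overwrite);
 *	auxtrace_mmap_params__set_idx(&mp, evlist, idx, per_cpu);
 *	if (auxtrace_mmap__mmap(&mm, &mp, userpg, fd))
 *		return -1;
 *
 * Here 'auxtrace_offset', 'auxtrace_pages', 'overwrite', 'idx', 'per_cpu',
 * 'mm', 'userpg' and 'fd' all stand for the caller's own values.
 */
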
#define AUXTRACE_INIT_NR_QUEUES	32

static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues)
{
	struct auxtrace_queue *queue_array;
	unsigned int max_nr_queues, i;

	max_nr_queues = UINT_MAX / sizeof(struct auxtrace_queue);
	if (nr_queues > max_nr_queues)
		return NULL;

	queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue));
	if (!queue_array)
		return NULL;

	for (i = 0; i < nr_queues; i++) {
		INIT_LIST_HEAD(&queue_array[i].head);
		queue_array[i].priv = NULL;
	}

	return queue_array;
}

int auxtrace_queues__init(struct auxtrace_queues *queues)
{
	queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;
	queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);
	if (!queues->queue_array)
		return -ENOMEM;
	return 0;
}

/*
 * Double the queue array size until it can hold at least new_nr_queues,
 * then splice the existing per-queue buffer lists over to the new array.
 * The check below also rejects a doubling that overflowed to zero.
 */
static int auxtrace_queues__grow(struct auxtrace_queues *queues,
				 unsigned int new_nr_queues)
{
	unsigned int nr_queues = queues->nr_queues;
	struct auxtrace_queue *queue_array;
	unsigned int i;

	if (!nr_queues)
		nr_queues = AUXTRACE_INIT_NR_QUEUES;

	while (nr_queues && nr_queues < new_nr_queues)
		nr_queues <<= 1;

	if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)
		return -EINVAL;

	queue_array = auxtrace_alloc_queue_array(nr_queues);
	if (!queue_array)
		return -ENOMEM;

	for (i = 0; i < queues->nr_queues; i++) {
		list_splice_tail(&queues->queue_array[i].head,
				 &queue_array[i].head);
		queue_array[i].priv = queues->queue_array[i].priv;
	}

	/* The old array is no longer referenced once the lists are spliced */
	free(queues->queue_array);
	queues->nr_queues = nr_queues;
	queues->queue_array = queue_array;

	return 0;
}

static void *auxtrace_copy_data(u64 size, struct perf_session *session)
{
	int fd = perf_data_file__fd(session->file);
	void *p;
	ssize_t ret;

	if (size > SSIZE_MAX)
		return NULL;

	p = malloc(size);
	if (!p)
		return NULL;

	ret = readn(fd, p, size);
	if (ret != (ssize_t)size) {
		free(p);
		return NULL;
	}

	return p;
}

static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
				       unsigned int idx,
				       struct auxtrace_buffer *buffer)
{
	struct auxtrace_queue *queue;
	int err;

	if (idx >= queues->nr_queues) {
		err = auxtrace_queues__grow(queues, idx + 1);
		if (err)
			return err;
	}

	queue = &queues->queue_array[idx];

	if (!queue->set) {
		queue->set = true;
		queue->tid = buffer->tid;
		queue->cpu = buffer->cpu;
	} else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) {
		pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n",
		       queue->cpu, queue->tid, buffer->cpu, buffer->tid);
		return -EINVAL;
	}

	buffer->buffer_nr = queues->next_buffer_nr++;

	list_add_tail(&buffer->list, &queue->head);

	queues->new_data = true;
	queues->populated = true;

	return 0;
}

/* Limit buffers to 32MiB on 32-bit */
#define BUFFER_LIMIT_FOR_32_BIT (32 * 1024 * 1024)

static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
					 unsigned int idx,
					 struct auxtrace_buffer *buffer)
{
	u64 sz = buffer->size;
	bool consecutive = false;
	struct auxtrace_buffer *b;
	int err;

	while (sz > BUFFER_LIMIT_FOR_32_BIT) {
		b = memdup(buffer, sizeof(struct auxtrace_buffer));
		if (!b)
			return -ENOMEM;
		b->size = BUFFER_LIMIT_FOR_32_BIT;
		b->consecutive = consecutive;
		err = auxtrace_queues__add_buffer(queues, idx, b);
		if (err) {
			auxtrace_buffer__free(b);
			return err;
		}
		buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT;
		sz -= BUFFER_LIMIT_FOR_32_BIT;
		consecutive = true;
	}

	buffer->size = sz;
	buffer->consecutive = consecutive;

	return 0;
}

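/*
 * Worked example of the split above (32-bit build): a 70MiB AUX buffer is
 * queued as 32MiB + 32MiB + 6MiB pieces.  The first piece keeps
 * ->consecutive == false, the remaining two get ->consecutive == true, and
 * ->data_offset advances by 32MiB per piece so each piece still maps the
 * correct region of the perf.data file.
 */
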
static int auxtrace_queues__add_event_buffer(struct auxtrace_queues *queues,
					     struct perf_session *session,
					     unsigned int idx,
					     struct auxtrace_buffer *buffer)
{
	if (session->one_mmap) {
		buffer->data = buffer->data_offset - session->one_mmap_offset +
			       session->one_mmap_addr;
	} else if (perf_data_file__is_pipe(session->file)) {
		buffer->data = auxtrace_copy_data(buffer->size, session);
		if (!buffer->data)
			return -ENOMEM;
		buffer->data_needs_freeing = true;
	} else if (BITS_PER_LONG == 32 &&
		   buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
		int err;

		err = auxtrace_queues__split_buffer(queues, idx, buffer);
		if (err)
			return err;
	}

	return auxtrace_queues__add_buffer(queues, idx, buffer);
}

int auxtrace_queues__add_event(struct auxtrace_queues *queues,
			       struct perf_session *session,
			       union perf_event *event, off_t data_offset,
			       struct auxtrace_buffer **buffer_ptr)
{
	struct auxtrace_buffer *buffer;
	unsigned int idx;
	int err;

	buffer = zalloc(sizeof(struct auxtrace_buffer));
	if (!buffer)
		return -ENOMEM;

	buffer->pid = -1;
	buffer->tid = event->auxtrace.tid;
	buffer->cpu = event->auxtrace.cpu;
	buffer->data_offset = data_offset;
	buffer->offset = event->auxtrace.offset;
	buffer->reference = event->auxtrace.reference;
	buffer->size = event->auxtrace.size;
	idx = event->auxtrace.idx;

	err = auxtrace_queues__add_event_buffer(queues, session, idx, buffer);
	if (err)
		goto out_err;

	if (buffer_ptr)
		*buffer_ptr = buffer;

	return 0;

out_err:
	auxtrace_buffer__free(buffer);
	return err;
}

static int auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues,
					      struct perf_session *session,
					      off_t file_offset, size_t sz)
{
	union perf_event *event;
	int err;
	char buf[PERF_SAMPLE_MAX_SIZE];

	err = perf_session__peek_event(session, file_offset, buf,
				       PERF_SAMPLE_MAX_SIZE, &event, NULL);
	if (err)
		return err;

	if (event->header.type == PERF_RECORD_AUXTRACE) {
		if (event->header.size < sizeof(struct auxtrace_event) ||
		    event->header.size != sz) {
			err = -EINVAL;
			goto out;
		}
		file_offset += event->header.size;
		err = auxtrace_queues__add_event(queues, session, event,
						 file_offset, NULL);
	}
out:
	return err;
}

void auxtrace_queues__free(struct auxtrace_queues *queues)
{
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		while (!list_empty(&queues->queue_array[i].head)) {
			struct auxtrace_buffer *buffer;

			buffer = list_entry(queues->queue_array[i].head.next,
					    struct auxtrace_buffer, list);
			list_del(&buffer->list);
			auxtrace_buffer__free(buffer);
		}
	}

	zfree(&queues->queue_array);
	queues->nr_queues = 0;
}

/*
 * Sift a new entry up an implicit binary min-heap keyed by 'ordinal':
 * the parent of the entry at 'pos' lives at (pos - 1) / 2.
 */
static void auxtrace_heapify(struct auxtrace_heap_item *heap_array,
			     unsigned int pos, unsigned int queue_nr,
			     u64 ordinal)
{
	unsigned int parent;

	while (pos) {
		parent = (pos - 1) >> 1;
		if (heap_array[parent].ordinal <= ordinal)
			break;
		heap_array[pos] = heap_array[parent];
		pos = parent;
	}
	heap_array[pos].queue_nr = queue_nr;
	heap_array[pos].ordinal = ordinal;
}

int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
		       u64 ordinal)
{
	struct auxtrace_heap_item *heap_array;

	if (queue_nr >= heap->heap_sz) {
		unsigned int heap_sz = AUXTRACE_INIT_NR_QUEUES;

		while (heap_sz <= queue_nr)
			heap_sz <<= 1;
		heap_array = realloc(heap->heap_array,
				     heap_sz * sizeof(struct auxtrace_heap_item));
		if (!heap_array)
			return -ENOMEM;
		heap->heap_array = heap_array;
		heap->heap_sz = heap_sz;
	}

	auxtrace_heapify(heap->heap_array, heap->heap_cnt++, queue_nr, ordinal);

	return 0;
}

void auxtrace_heap__free(struct auxtrace_heap *heap)
{
	zfree(&heap->heap_array);
	heap->heap_cnt = 0;
	heap->heap_sz = 0;
}

void auxtrace_heap__pop(struct auxtrace_heap *heap)
{
	unsigned int pos, last, heap_cnt = heap->heap_cnt;
	struct auxtrace_heap_item *heap_array;

	if (!heap_cnt)
		return;

	heap->heap_cnt -= 1;

	heap_array = heap->heap_array;

	pos = 0;
	while (1) {
		unsigned int left, right;

		left = (pos << 1) + 1;
		if (left >= heap_cnt)
			break;
		right = left + 1;
		if (right >= heap_cnt) {
			heap_array[pos] = heap_array[left];
			return;
		}
		if (heap_array[left].ordinal < heap_array[right].ordinal) {
			heap_array[pos] = heap_array[left];
			pos = left;
		} else {
			heap_array[pos] = heap_array[right];
			pos = right;
		}
	}

	last = heap_cnt - 1;
	auxtrace_heapify(heap_array, pos, heap_array[last].queue_nr,
			 heap_array[last].ordinal);
}

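/*
 * Example of the min-heap behaviour above: entries form an implicit binary
 * heap ordered by 'ordinal' (typically a timestamp), with entry i having
 * children at 2i+1 and 2i+2.  Adding ordinals 30, 10, 20 yields the array
 * {10, 30, 20}; auxtrace_heap__pop() then removes 10 and re-heapifies to
 * {20, 30}.  Decoders use this to always service the queue with the oldest
 * pending data first.
 */
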
size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
				       struct perf_evlist *evlist)
{
	if (itr)
		return itr->info_priv_size(itr, evlist);
	return 0;
}

static int auxtrace_not_supported(void)
{
	pr_err("AUX area tracing is not supported on this architecture\n");
	return -EINVAL;
}

int auxtrace_record__info_fill(struct auxtrace_record *itr,
			       struct perf_session *session,
			       struct auxtrace_info_event *auxtrace_info,
			       size_t priv_size)
{
	if (itr)
		return itr->info_fill(itr, session, auxtrace_info, priv_size);
	return auxtrace_not_supported();
}

void auxtrace_record__free(struct auxtrace_record *itr)
{
	if (itr)
		itr->free(itr);
}

int auxtrace_record__snapshot_start(struct auxtrace_record *itr)
{
	if (itr && itr->snapshot_start)
		return itr->snapshot_start(itr);
	return 0;
}

int auxtrace_record__snapshot_finish(struct auxtrace_record *itr)
{
	if (itr && itr->snapshot_finish)
		return itr->snapshot_finish(itr);
	return 0;
}

int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
				   struct auxtrace_mmap *mm,
				   unsigned char *data, u64 *head, u64 *old)
{
	if (itr && itr->find_snapshot)
		return itr->find_snapshot(itr, idx, mm, data, head, old);
	return 0;
}

int auxtrace_record__options(struct auxtrace_record *itr,
			     struct perf_evlist *evlist,
			     struct record_opts *opts)
{
	if (itr)
		return itr->recording_options(itr, evlist, opts);
	return 0;
}

u64 auxtrace_record__reference(struct auxtrace_record *itr)
{
	if (itr)
		return itr->reference(itr);
	return 0;
}

int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
				    struct record_opts *opts, const char *str)
{
	if (!str)
		return 0;

	if (itr)
		return itr->parse_snapshot_options(itr, opts, str);

	pr_err("No AUX area tracing to snapshot\n");
	return -EINVAL;
}

struct auxtrace_record *__weak
auxtrace_record__init(struct perf_evlist *evlist __maybe_unused, int *err)
{
	*err = 0;
	return NULL;
}

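/*
 * The __weak definition above is the fallback for architectures without
 * AUX area tracing support.  An architecture that does support it supplies
 * a strong auxtrace_record__init() (e.g. the x86 version probes for the
 * Intel PT / BTS PMUs and returns the matching recorder, setting *err on
 * failure).
 */
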
static int auxtrace_index__alloc(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;

	auxtrace_index = malloc(sizeof(struct auxtrace_index));
	if (!auxtrace_index)
		return -ENOMEM;

	auxtrace_index->nr = 0;
	INIT_LIST_HEAD(&auxtrace_index->list);

	list_add_tail(&auxtrace_index->list, head);

	return 0;
}

void auxtrace_index__free(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index, *n;

	list_for_each_entry_safe(auxtrace_index, n, head, list) {
		list_del(&auxtrace_index->list);
		free(auxtrace_index);
	}
}

static struct auxtrace_index *auxtrace_index__last(struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	int err;

	if (list_empty(head)) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
	}

	auxtrace_index = list_entry(head->prev, struct auxtrace_index, list);

	if (auxtrace_index->nr >= PERF_AUXTRACE_INDEX_ENTRY_COUNT) {
		err = auxtrace_index__alloc(head);
		if (err)
			return NULL;
		auxtrace_index = list_entry(head->prev, struct auxtrace_index,
					    list);
	}

	return auxtrace_index;
}

int auxtrace_index__auxtrace_event(struct list_head *head,
				   union perf_event *event, off_t file_offset)
{
	struct auxtrace_index *auxtrace_index;
	size_t nr;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -ENOMEM;

	nr = auxtrace_index->nr;
	auxtrace_index->entries[nr].file_offset = file_offset;
	auxtrace_index->entries[nr].sz = event->header.size;
	auxtrace_index->nr += 1;

	return 0;
}

static int auxtrace_index__do_write(int fd,
				    struct auxtrace_index *auxtrace_index)
{
	struct auxtrace_index_entry ent;
	size_t i;

	for (i = 0; i < auxtrace_index->nr; i++) {
		ent.file_offset = auxtrace_index->entries[i].file_offset;
		ent.sz = auxtrace_index->entries[i].sz;
		if (writen(fd, &ent, sizeof(ent)) != sizeof(ent))
			return -errno;
	}
	return 0;
}

int auxtrace_index__write(int fd, struct list_head *head)
{
	struct auxtrace_index *auxtrace_index;
	u64 total = 0;
	int err;

	list_for_each_entry(auxtrace_index, head, list)
		total += auxtrace_index->nr;

	if (writen(fd, &total, sizeof(total)) != sizeof(total))
		return -errno;

	list_for_each_entry(auxtrace_index, head, list) {
		err = auxtrace_index__do_write(fd, auxtrace_index);
		if (err)
			return err;
	}

	return 0;
}

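/*
 * On-disk layout produced above: a u64 count of entries followed by that
 * many struct auxtrace_index_entry records, each a pair of u64 values:
 *
 *	u64 total;			-- number of entries
 *	struct auxtrace_index_entry {	-- repeated 'total' times
 *		u64 file_offset;
 *		u64 sz;
 *	};
 *
 * auxtrace_index__process() below reads the same layout back, byte-swapping
 * each u64 when the file comes from a host of opposite endianness.
 */
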
static int auxtrace_index__process_entry(int fd, struct list_head *head,
					 bool needs_swap)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry ent;
	size_t nr;

	if (readn(fd, &ent, sizeof(ent)) != sizeof(ent))
		return -1;

	auxtrace_index = auxtrace_index__last(head);
	if (!auxtrace_index)
		return -1;

	nr = auxtrace_index->nr;
	if (needs_swap) {
		auxtrace_index->entries[nr].file_offset =
						bswap_64(ent.file_offset);
		auxtrace_index->entries[nr].sz = bswap_64(ent.sz);
	} else {
		auxtrace_index->entries[nr].file_offset = ent.file_offset;
		auxtrace_index->entries[nr].sz = ent.sz;
	}

	auxtrace_index->nr = nr + 1;

	return 0;
}

int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
			    bool needs_swap)
{
	struct list_head *head = &session->auxtrace_index;
	u64 nr;

	if (readn(fd, &nr, sizeof(u64)) != sizeof(u64))
		return -1;

	if (needs_swap)
		nr = bswap_64(nr);

	if (sizeof(u64) + nr * sizeof(struct auxtrace_index_entry) > size)
		return -1;

	while (nr--) {
		int err;

		err = auxtrace_index__process_entry(fd, head, needs_swap);
		if (err)
			return -1;
	}

	return 0;
}

static int auxtrace_queues__process_index_entry(struct auxtrace_queues *queues,
						struct perf_session *session,
						struct auxtrace_index_entry *ent)
{
	return auxtrace_queues__add_indexed_event(queues, session,
						  ent->file_offset, ent->sz);
}

int auxtrace_queues__process_index(struct auxtrace_queues *queues,
				   struct perf_session *session)
{
	struct auxtrace_index *auxtrace_index;
	struct auxtrace_index_entry *ent;
	size_t i;
	int err;

	list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
		for (i = 0; i < auxtrace_index->nr; i++) {
			ent = &auxtrace_index->entries[i];
			err = auxtrace_queues__process_index_entry(queues,
								   session,
								   ent);
			if (err)
				return err;
		}
	}
	return 0;
}

struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
					      struct auxtrace_buffer *buffer)
{
	if (buffer) {
		if (list_is_last(&buffer->list, &queue->head))
			return NULL;
		return list_entry(buffer->list.next, struct auxtrace_buffer,
				  list);
	} else {
		if (list_empty(&queue->head))
			return NULL;
		return list_entry(queue->head.next, struct auxtrace_buffer,
				  list);
	}
}

void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
{
	size_t adj = buffer->data_offset & (page_size - 1);
	size_t size = buffer->size + adj;
	off_t file_offset = buffer->data_offset - adj;
	void *addr;

	if (buffer->data)
		return buffer->data;

	addr = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, file_offset);
	if (addr == MAP_FAILED)
		return NULL;

	buffer->mmap_addr = addr;
	buffer->mmap_size = size;

	buffer->data = addr + adj;

	return buffer->data;
}

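/*
 * Page-alignment example for the mapping above, assuming 4KiB pages and a
 * buffer with data_offset = 0x1300 and size = 0x800:
 *
 *	adj         = 0x1300 & 0xfff  = 0x300
 *	file_offset = 0x1300 - 0x300  = 0x1000
 *	mmap size   = 0x800  + 0x300  = 0xb00
 *
 * so the file offset passed to mmap() is page aligned and ->data points
 * 0x300 bytes into the mapping.
 */
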
void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer)
{
	if (!buffer->data || !buffer->mmap_addr)
		return;
	munmap(buffer->mmap_addr, buffer->mmap_size);
	buffer->mmap_addr = NULL;
	buffer->mmap_size = 0;
	buffer->data = NULL;
	buffer->use_data = NULL;
}

void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__put_data(buffer);
	if (buffer->data_needs_freeing) {
		buffer->data_needs_freeing = false;
		zfree(&buffer->data);
		buffer->use_data = NULL;
		buffer->size = 0;
	}
}

void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
{
	auxtrace_buffer__drop_data(buffer);
	free(buffer);
}

void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type,
			  int code, int cpu, pid_t pid, pid_t tid, u64 ip,
			  const char *msg)
{
	size_t size;

	memset(auxtrace_error, 0, sizeof(struct auxtrace_error_event));

	auxtrace_error->header.type = PERF_RECORD_AUXTRACE_ERROR;
	auxtrace_error->type = type;
	auxtrace_error->code = code;
	auxtrace_error->cpu = cpu;
	auxtrace_error->pid = pid;
	auxtrace_error->tid = tid;
	auxtrace_error->ip = ip;
	strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);

	size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
	       strlen(auxtrace_error->msg) + 1;
	auxtrace_error->header.size = PERF_ALIGN(size, sizeof(u64));
}

int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
					 struct perf_tool *tool,
					 struct perf_session *session,
					 perf_event__handler_t process)
{
	union perf_event *ev;
	size_t priv_size;
	int err;

	pr_debug2("Synthesizing auxtrace information\n");
	priv_size = auxtrace_record__info_priv_size(itr, session->evlist);
	ev = zalloc(sizeof(struct auxtrace_info_event) + priv_size);
	if (!ev)
		return -ENOMEM;

	ev->auxtrace_info.header.type = PERF_RECORD_AUXTRACE_INFO;
	ev->auxtrace_info.header.size = sizeof(struct auxtrace_info_event) +
					priv_size;
	err = auxtrace_record__info_fill(itr, session, &ev->auxtrace_info,
					 priv_size);
	if (err)
		goto out_free;

	err = process(tool, ev, NULL, NULL);
out_free:
	free(ev);
	return err;
}

static bool auxtrace__dont_decode(struct perf_session *session)
{
	return !session->itrace_synth_opts ||
	       session->itrace_synth_opts->dont_decode;
}

int perf_event__process_auxtrace_info(struct perf_tool *tool __maybe_unused,
				      union perf_event *event,
				      struct perf_session *session)
{
	enum auxtrace_type type = event->auxtrace_info.type;

	if (dump_trace)
		fprintf(stdout, " type: %u\n", type);

	switch (type) {
	case PERF_AUXTRACE_INTEL_PT:
		return intel_pt_process_auxtrace_info(event, session);
	case PERF_AUXTRACE_INTEL_BTS:
		return intel_bts_process_auxtrace_info(event, session);
	case PERF_AUXTRACE_UNKNOWN:
	default:
		return -EINVAL;
	}
}

s64 perf_event__process_auxtrace(struct perf_tool *tool,
				 union perf_event *event,
				 struct perf_session *session)
{
	s64 err;

	if (dump_trace)
		fprintf(stdout, " size: %#"PRIx64"  offset: %#"PRIx64"  ref: %#"PRIx64"  idx: %u  tid: %d  cpu: %d\n",
			event->auxtrace.size, event->auxtrace.offset,
			event->auxtrace.reference, event->auxtrace.idx,
			event->auxtrace.tid, event->auxtrace.cpu);

	if (auxtrace__dont_decode(session))
		return event->auxtrace.size;

	if (!session->auxtrace || event->header.type != PERF_RECORD_AUXTRACE)
		return -EINVAL;

	err = session->auxtrace->process_auxtrace_event(session, event, tool);
	if (err < 0)
		return err;

	return event->auxtrace.size;
}

#define PERF_ITRACE_DEFAULT_PERIOD_TYPE		PERF_ITRACE_PERIOD_NANOSECS
#define PERF_ITRACE_DEFAULT_PERIOD		100000
#define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ	16
#define PERF_ITRACE_MAX_CALLCHAIN_SZ		1024
#define PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ	64
#define PERF_ITRACE_MAX_LAST_BRANCH_SZ		1024

void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts)
{
	synth_opts->instructions = true;
	synth_opts->branches = true;
	synth_opts->transactions = true;
	synth_opts->errors = true;
	synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
	synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
	synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
}

/*
 * Please check tools/perf/Documentation/perf-script.txt for information
 * about the options parsed here, which are introduced after this cset,
 * when support in 'perf script' for these options is introduced.
 */
int itrace_parse_synth_opts(const struct option *opt, const char *str,
			    int unset)
{
	struct itrace_synth_opts *synth_opts = opt->value;
	const char *p;
	char *endptr;
	bool period_type_set = false;
	bool period_set = false;

	synth_opts->set = true;

	if (unset) {
		synth_opts->dont_decode = true;
		return 0;
	}

	if (!str) {
		itrace_synth_opts__set_default(synth_opts);
		return 0;
	}

	for (p = str; *p;) {
		switch (*p++) {
		case 'i':
			synth_opts->instructions = true;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				synth_opts->period = strtoull(p, &endptr, 10);
				period_set = true;
				p = endptr;
				while (*p == ' ' || *p == ',')
					p += 1;
				switch (*p++) {
				case 'i':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_INSTRUCTIONS;
					period_type_set = true;
					break;
				case 't':
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_TICKS;
					period_type_set = true;
					break;
				case 'm':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'u':
					synth_opts->period *= 1000;
					/* Fall through */
				case 'n':
					if (*p++ != 's')
						goto out_err;
					synth_opts->period_type =
						PERF_ITRACE_PERIOD_NANOSECS;
					period_type_set = true;
					break;
				case '\0':
					goto out;
				default:
					goto out_err;
				}
			}
			break;
		case 'b':
			synth_opts->branches = true;
			break;
		case 'x':
			synth_opts->transactions = true;
			break;
		case 'e':
			synth_opts->errors = true;
			break;
		case 'd':
			synth_opts->log = true;
			break;
		case 'c':
			synth_opts->branches = true;
			synth_opts->calls = true;
			break;
		case 'r':
			synth_opts->branches = true;
			synth_opts->returns = true;
			break;
		case 'g':
			synth_opts->callchain = true;
			synth_opts->callchain_sz =
					PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val || val > PERF_ITRACE_MAX_CALLCHAIN_SZ)
					goto out_err;
				synth_opts->callchain_sz = val;
			}
			break;
		case 'l':
			synth_opts->last_branch = true;
			synth_opts->last_branch_sz =
					PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
			while (*p == ' ' || *p == ',')
				p += 1;
			if (isdigit(*p)) {
				unsigned int val;

				val = strtoul(p, &endptr, 10);
				p = endptr;
				if (!val ||
				    val > PERF_ITRACE_MAX_LAST_BRANCH_SZ)
					goto out_err;
				synth_opts->last_branch_sz = val;
			}
			break;
		case ' ':
		case ',':
			break;
		default:
			goto out_err;
		}
	}
out:
	if (synth_opts->instructions) {
		if (!period_type_set)
			synth_opts->period_type =
					PERF_ITRACE_DEFAULT_PERIOD_TYPE;
		if (!period_set)
			synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
	}

	return 0;

out_err:
	pr_err("Bad Instruction Tracing options '%s'\n", str);
	return -EINVAL;
}

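/*
 * Example option strings accepted above (values are illustrative):
 *
 *	(no value)	the defaults: instructions, branches, transactions
 *			and errors, with a 100000ns instruction period
 *	"i100us"	instructions only, one sample per 100 microseconds
 *			(stored as 100 * 1000 = 100000 ns)
 *	"cg16"		calls (implies branches) plus call chains 16 deep
 *	"e"		synthesize error events only
 */
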
static const char * const auxtrace_error_type_name[] = {
	[PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace",
};

static const char *auxtrace_error_name(int type)
{
	const char *error_type_name = NULL;

	if (type < PERF_AUXTRACE_ERROR_MAX)
		error_type_name = auxtrace_error_type_name[type];
	if (!error_type_name)
		error_type_name = "unknown AUX";
	return error_type_name;
}

size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
{
	struct auxtrace_error_event *e = &event->auxtrace_error;
	int ret;

	ret = fprintf(fp, " %s error type %u",
		      auxtrace_error_name(e->type), e->type);
	ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRIx64" code %u: %s\n",
		       e->cpu, e->pid, e->tid, e->ip, e->code, e->msg);
	return ret;
}

void perf_session__auxtrace_error_inc(struct perf_session *session,
				      union perf_event *event)
{
	struct auxtrace_error_event *e = &event->auxtrace_error;

	if (e->type < PERF_AUXTRACE_ERROR_MAX)
		session->evlist->stats.nr_auxtrace_errors[e->type] += 1;
}

void events_stats__auxtrace_error_warn(const struct events_stats *stats)
{
	int i;

	for (i = 0; i < PERF_AUXTRACE_ERROR_MAX; i++) {
		if (!stats->nr_auxtrace_errors[i])
			continue;
		ui__warning("%u %s errors\n",
			    stats->nr_auxtrace_errors[i],
			    auxtrace_error_name(i));
	}
}

int perf_event__process_auxtrace_error(struct perf_tool *tool __maybe_unused,
				       union perf_event *event,
				       struct perf_session *session)
{
	if (auxtrace__dont_decode(session))
		return 0;

	perf_event__fprintf_auxtrace_error(event, stdout);
	return 0;
}

static int __auxtrace_mmap__read(struct auxtrace_mmap *mm,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 bool snapshot, size_t snapshot_size)
{
	u64 head, old = mm->prev, offset, ref;
	unsigned char *data = mm->base;
	size_t size, head_off, old_off, len1, len2, padding;
	union perf_event ev;
	void *data1, *data2;

	if (snapshot) {
		head = auxtrace_mmap__read_snapshot_head(mm);
		if (auxtrace_record__find_snapshot(itr, mm->idx, mm, data,
						   &head, &old))
			return -1;
	} else {
		head = auxtrace_mmap__read_head(mm);
	}

	if (old == head)
		return 0;

	pr_debug3("auxtrace idx %d old %#"PRIx64" head %#"PRIx64" diff %#"PRIx64"\n",
		  mm->idx, old, head, head - old);

	if (mm->mask) {
		head_off = head & mm->mask;
		old_off = old & mm->mask;
	} else {
		head_off = head % mm->len;
		old_off = old % mm->len;
	}

	if (head_off > old_off)
		size = head_off - old_off;
	else
		size = mm->len - (old_off - head_off);

	if (snapshot && size > snapshot_size)
		size = snapshot_size;

	ref = auxtrace_record__reference(itr);

	if (head > old || size <= head || mm->mask) {
		offset = head - size;
	} else {
		/*
		 * When the buffer size is not a power of 2, 'head' wraps at the
		 * highest multiple of the buffer size, so we have to subtract
		 * the remainder here.
		 */
		u64 rem = (0ULL - mm->len) % mm->len;

		offset = head - size - rem;
	}

	if (size > head_off) {
		len1 = size - head_off;
		data1 = &data[mm->len - len1];
		len2 = head_off;
		data2 = &data[0];
	} else {
		len1 = size;
		data1 = &data[head_off - len1];
		len2 = 0;
		data2 = NULL;
	}

	if (itr->alignment) {
		unsigned int unwanted = len1 % itr->alignment;

		len1 -= unwanted;
		size -= unwanted;
	}

	/* padding must be written by fn() e.g. record__process_auxtrace() */
	padding = size & 7;
	if (padding)
		padding = 8 - padding;

	memset(&ev, 0, sizeof(ev));
	ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
	ev.auxtrace.header.size = sizeof(ev.auxtrace);
	ev.auxtrace.size = size + padding;
	ev.auxtrace.offset = offset;
	ev.auxtrace.reference = ref;
	ev.auxtrace.idx = mm->idx;
	ev.auxtrace.tid = mm->tid;
	ev.auxtrace.cpu = mm->cpu;

	if (fn(tool, &ev, data1, len1, data2, len2))
		return -1;

	mm->prev = head;

	if (!snapshot) {
		auxtrace_mmap__write_tail(mm, head);
		if (itr->read_finish) {
			int err;

			err = itr->read_finish(itr, mm->idx);
			if (err < 0)
				return err;
		}
	}

	return 1;
}

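/*
 * Worked example for the non-power-of-2 wrap handling above: with
 * len = 24KiB, 'head' wraps at the highest multiple of 24KiB that fits in
 * a u64, so rem = (0ULL - 24K) % 24K = 2^64 mod 24K = 16KiB is subtracted
 * to keep 'offset' consistent across the wrap.  For a power-of-2 length
 * (mm->mask non-zero), 2^64 is an exact multiple of the length and the
 * offset is simply head - size.
 */
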
int auxtrace_mmap__read(struct auxtrace_mmap *mm, struct auxtrace_record *itr,
			struct perf_tool *tool, process_auxtrace_t fn)
{
	return __auxtrace_mmap__read(mm, itr, tool, fn, false, 0);
}

int auxtrace_mmap__read_snapshot(struct auxtrace_mmap *mm,
				 struct auxtrace_record *itr,
				 struct perf_tool *tool, process_auxtrace_t fn,
				 size_t snapshot_size)
{
	return __auxtrace_mmap__read(mm, itr, tool, fn, true, snapshot_size);
}

/**
 * struct auxtrace_cache - hash table to implement a cache
 * @hashtable: the hashtable
 * @sz: hashtable size (number of hlists)
 * @entry_size: size of an entry
 * @limit: limit the number of entries to this maximum; when it is reached,
 *         the cache is dropped and caching begins again with an empty cache
 * @cnt: current number of entries
 * @bits: hashtable size (@sz = 2^@bits)
 */
struct auxtrace_cache {
	struct hlist_head *hashtable;
	size_t sz;
	size_t entry_size;
	size_t limit;
	size_t cnt;
	unsigned int bits;
};

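/*
 * Minimal usage sketch (hypothetical caller; 'my_entry', 'key' and the
 * parameter values are illustrative only).  Users embed
 * struct auxtrace_cache_entry first in their own entry type:
 *
 *	struct my_entry {
 *		struct auxtrace_cache_entry	entry;
 *		u64				payload;
 *	};
 *
 *	struct auxtrace_cache *c = auxtrace_cache__new(8, sizeof(struct my_entry), 50);
 *	struct my_entry *e = auxtrace_cache__alloc_entry(c);
 *
 *	e->payload = 42;
 *	auxtrace_cache__add(c, key, &e->entry);
 *	e = auxtrace_cache__lookup(c, key);
 *
 * Intel PT, for instance, caches decoded instructions this way.
 */
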
struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
					   unsigned int limit_percent)
{
	struct auxtrace_cache *c;
	struct hlist_head *ht;
	size_t sz, i;

	c = zalloc(sizeof(struct auxtrace_cache));
	if (!c)
		return NULL;

	sz = 1UL << bits;

	ht = calloc(sz, sizeof(struct hlist_head));
	if (!ht)
		goto out_free;

	for (i = 0; i < sz; i++)
		INIT_HLIST_HEAD(&ht[i]);

	c->hashtable = ht;
	c->sz = sz;
	c->entry_size = entry_size;
	c->limit = (c->sz * limit_percent) / 100;
	c->bits = bits;

	return c;

out_free:
	free(c);
	return NULL;
}

static void auxtrace_cache__drop(struct auxtrace_cache *c)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_node *tmp;
	size_t i;

	if (!c)
		return;

	for (i = 0; i < c->sz; i++) {
		hlist_for_each_entry_safe(entry, tmp, &c->hashtable[i], hash) {
			hlist_del(&entry->hash);
			auxtrace_cache__free_entry(c, entry);
		}
	}

	c->cnt = 0;
}

void auxtrace_cache__free(struct auxtrace_cache *c)
{
	if (!c)
		return;

	auxtrace_cache__drop(c);
	free(c->hashtable);
	free(c);
}

void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c)
{
	return malloc(c->entry_size);
}

void auxtrace_cache__free_entry(struct auxtrace_cache *c __maybe_unused,
				void *entry)
{
	free(entry);
}

int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
			struct auxtrace_cache_entry *entry)
{
	if (c->limit && ++c->cnt > c->limit)
		auxtrace_cache__drop(c);

	entry->key = key;
	hlist_add_head(&entry->hash, &c->hashtable[hash_32(key, c->bits)]);

	return 0;
}

void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key)
{
	struct auxtrace_cache_entry *entry;
	struct hlist_head *hlist;

	if (!c)
		return NULL;

	hlist = &c->hashtable[hash_32(key, c->bits)];
	hlist_for_each_entry(entry, hlist, hash) {
		if (entry->key == key)
			return entry;
	}

	return NULL;
}