   1/*
   2 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
   3 *
   4 * Parts came from builtin-{top,stat,record}.c, see those files for further
   5 * copyright notes.
   6 *
   7 * Released under the GPL v2. (and only v2, not any later version)
   8 */
   9#include "util.h"
  10#include <api/fs/fs.h>
  11#include <poll.h>
  12#include "cpumap.h"
  13#include "thread_map.h"
  14#include "target.h"
  15#include "evlist.h"
  16#include "evsel.h"
  17#include "debug.h"
  18#include <unistd.h>
  19
  20#include "parse-events.h"
  21#include <subcmd/parse-options.h>
  22
  23#include <sys/mman.h>
  24
  25#include <linux/bitops.h>
  26#include <linux/hash.h>
  27#include <linux/log2.h>
  28#include <linux/err.h>
  29
  30static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx);
  31static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx);
  32
  33#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
  34#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
  35
  36void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
  37		       struct thread_map *threads)
  38{
  39	int i;
  40
  41	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
  42		INIT_HLIST_HEAD(&evlist->heads[i]);
  43	INIT_LIST_HEAD(&evlist->entries);
  44	perf_evlist__set_maps(evlist, cpus, threads);
  45	fdarray__init(&evlist->pollfd, 64);
  46	evlist->workload.pid = -1;
  47}
  48
  49struct perf_evlist *perf_evlist__new(void)
  50{
  51	struct perf_evlist *evlist = zalloc(sizeof(*evlist));
  52
  53	if (evlist != NULL)
  54		perf_evlist__init(evlist, NULL, NULL);
  55
  56	return evlist;
  57}
  58
  59struct perf_evlist *perf_evlist__new_default(void)
  60{
  61	struct perf_evlist *evlist = perf_evlist__new();
  62
  63	if (evlist && perf_evlist__add_default(evlist)) {
  64		perf_evlist__delete(evlist);
  65		evlist = NULL;
  66	}
  67
  68	return evlist;
  69}
  70
  71struct perf_evlist *perf_evlist__new_dummy(void)
  72{
  73	struct perf_evlist *evlist = perf_evlist__new();
  74
  75	if (evlist && perf_evlist__add_dummy(evlist)) {
  76		perf_evlist__delete(evlist);
  77		evlist = NULL;
  78	}
  79
  80	return evlist;
  81}
  82
  83/**
  84 * perf_evlist__set_id_pos - set the positions of event ids.
  85 * @evlist: selected event list
  86 *
  87 * Events with compatible sample types all have the same id_pos
  88 * and is_pos.  For convenience, put a copy on evlist.
  89 */
  90void perf_evlist__set_id_pos(struct perf_evlist *evlist)
  91{
  92	struct perf_evsel *first = perf_evlist__first(evlist);
  93
  94	evlist->id_pos = first->id_pos;
  95	evlist->is_pos = first->is_pos;
  96}
  97
  98static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
  99{
 100	struct perf_evsel *evsel;
 101
 102	evlist__for_each(evlist, evsel)
 103		perf_evsel__calc_id_pos(evsel);
 104
 105	perf_evlist__set_id_pos(evlist);
 106}
 107
 108static void perf_evlist__purge(struct perf_evlist *evlist)
 109{
 110	struct perf_evsel *pos, *n;
 111
 112	evlist__for_each_safe(evlist, n, pos) {
 113		list_del_init(&pos->node);
 114		pos->evlist = NULL;
 115		perf_evsel__delete(pos);
 116	}
 117
 118	evlist->nr_entries = 0;
 119}
 120
 121void perf_evlist__exit(struct perf_evlist *evlist)
 122{
 123	zfree(&evlist->mmap);
 124	fdarray__exit(&evlist->pollfd);
 125}
 126
 127void perf_evlist__delete(struct perf_evlist *evlist)
 128{
 129	perf_evlist__munmap(evlist);
 130	perf_evlist__close(evlist);
 131	cpu_map__put(evlist->cpus);
 132	thread_map__put(evlist->threads);
 133	evlist->cpus = NULL;
 134	evlist->threads = NULL;
 135	perf_evlist__purge(evlist);
 136	perf_evlist__exit(evlist);
 137	free(evlist);
 138}
 139
 140static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
 141					  struct perf_evsel *evsel)
 142{
 143	/*
 144	 * We already have cpus for evsel (via PMU sysfs) so
 145	 * keep it, if there's no target cpu list defined.
 146	 */
 147	if (!evsel->own_cpus || evlist->has_user_cpus) {
 148		cpu_map__put(evsel->cpus);
 149		evsel->cpus = cpu_map__get(evlist->cpus);
 150	} else if (evsel->cpus != evsel->own_cpus) {
 151		cpu_map__put(evsel->cpus);
 152		evsel->cpus = cpu_map__get(evsel->own_cpus);
 153	}
 154
 155	thread_map__put(evsel->threads);
 156	evsel->threads = thread_map__get(evlist->threads);
 157}
 158
 159static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
 160{
 161	struct perf_evsel *evsel;
 162
 163	evlist__for_each(evlist, evsel)
 164		__perf_evlist__propagate_maps(evlist, evsel);
 165}
 166
 167void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
 168{
 169	entry->evlist = evlist;
 170	list_add_tail(&entry->node, &evlist->entries);
 171	entry->idx = evlist->nr_entries;
 172	entry->tracking = !entry->idx;
 173
 174	if (!evlist->nr_entries++)
 175		perf_evlist__set_id_pos(evlist);
 176
 177	__perf_evlist__propagate_maps(evlist, entry);
 178}
 179
 180void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel)
 181{
 182	evsel->evlist = NULL;
 183	list_del_init(&evsel->node);
 184	evlist->nr_entries -= 1;
 185}
 186
 187void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
 188				   struct list_head *list)
 189{
 190	struct perf_evsel *evsel, *temp;
 191
 192	__evlist__for_each_safe(list, temp, evsel) {
 193		list_del_init(&evsel->node);
 194		perf_evlist__add(evlist, evsel);
 195	}
 196}
 197
 198void __perf_evlist__set_leader(struct list_head *list)
 199{
 200	struct perf_evsel *evsel, *leader;
 201
 202	leader = list_entry(list->next, struct perf_evsel, node);
 203	evsel = list_entry(list->prev, struct perf_evsel, node);
 204
 205	leader->nr_members = evsel->idx - leader->idx + 1;
 206
 207	__evlist__for_each(list, evsel) {
 208		evsel->leader = leader;
 209	}
 210}
 211
 212void perf_evlist__set_leader(struct perf_evlist *evlist)
 213{
 214	if (evlist->nr_entries) {
 215		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
 216		__perf_evlist__set_leader(&evlist->entries);
 217	}
 218}
 219
 220void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
 221{
 222	attr->precise_ip = 3;
 223
 224	while (attr->precise_ip != 0) {
 225		int fd = sys_perf_event_open(attr, 0, -1, -1, 0);
 226		if (fd != -1) {
 227			close(fd);
 228			break;
 229		}
 230		--attr->precise_ip;
 231	}
 232}
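
/*
 * Editorial example (not part of the original file): a minimal sketch of
 * how a caller probes for the deepest supported precise_ip before opening
 * a counter, mirroring what perf_evlist__add_default() does below.
 */
#if 0
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};

	event_attr_init(&attr);
	perf_event_attr__set_max_precise_ip(&attr);
	/* attr.precise_ip now holds the deepest level the kernel accepted */
#endif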
 233
 234int perf_evlist__add_default(struct perf_evlist *evlist)
 235{
 236	struct perf_event_attr attr = {
 237		.type = PERF_TYPE_HARDWARE,
 238		.config = PERF_COUNT_HW_CPU_CYCLES,
 239	};
 240	struct perf_evsel *evsel;
 241
 242	event_attr_init(&attr);
 243
 244	perf_event_attr__set_max_precise_ip(&attr);
 245
 246	evsel = perf_evsel__new(&attr);
 247	if (evsel == NULL)
 248		goto error;
 249
 250	/* use asprintf() because free(evsel) assumes name is allocated */
 251	if (asprintf(&evsel->name, "cycles%.*s",
 252		     attr.precise_ip ? attr.precise_ip + 1 : 0, ":ppp") < 0)
 253		goto error_free;
 254
 255	perf_evlist__add(evlist, evsel);
 256	return 0;
 257error_free:
 258	perf_evsel__delete(evsel);
 259error:
 260	return -ENOMEM;
 261}
 262
 263int perf_evlist__add_dummy(struct perf_evlist *evlist)
 264{
 265	struct perf_event_attr attr = {
 266		.type	= PERF_TYPE_SOFTWARE,
 267		.config = PERF_COUNT_SW_DUMMY,
 268		.size	= sizeof(attr), /* to capture ABI version */
 269	};
 270	struct perf_evsel *evsel = perf_evsel__new(&attr);
 271
 272	if (evsel == NULL)
 273		return -ENOMEM;
 274
 275	perf_evlist__add(evlist, evsel);
 276	return 0;
 277}
 278
 279static int perf_evlist__add_attrs(struct perf_evlist *evlist,
 280				  struct perf_event_attr *attrs, size_t nr_attrs)
 281{
 282	struct perf_evsel *evsel, *n;
 283	LIST_HEAD(head);
 284	size_t i;
 285
 286	for (i = 0; i < nr_attrs; i++) {
 287		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
 288		if (evsel == NULL)
 289			goto out_delete_partial_list;
 290		list_add_tail(&evsel->node, &head);
 291	}
 292
 293	perf_evlist__splice_list_tail(evlist, &head);
 294
 295	return 0;
 296
 297out_delete_partial_list:
 298	__evlist__for_each_safe(&head, n, evsel)
 299		perf_evsel__delete(evsel);
 300	return -1;
 301}
 302
 303int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
 304				     struct perf_event_attr *attrs, size_t nr_attrs)
 305{
 306	size_t i;
 307
 308	for (i = 0; i < nr_attrs; i++)
 309		event_attr_init(attrs + i);
 310
 311	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
 312}
 313
 314struct perf_evsel *
 315perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
 316{
 317	struct perf_evsel *evsel;
 318
 319	evlist__for_each(evlist, evsel) {
 320		if (evsel->attr.type   == PERF_TYPE_TRACEPOINT &&
 321		    (int)evsel->attr.config == id)
 322			return evsel;
 323	}
 324
 325	return NULL;
 326}
 327
 328struct perf_evsel *
 329perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
 330				     const char *name)
 331{
 332	struct perf_evsel *evsel;
 333
 334	evlist__for_each(evlist, evsel) {
 335		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
 336		    (strcmp(evsel->name, name) == 0))
 337			return evsel;
 338	}
 339
 340	return NULL;
 341}
 342
 343int perf_evlist__add_newtp(struct perf_evlist *evlist,
 344			   const char *sys, const char *name, void *handler)
 345{
 346	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);
 347
 348	if (IS_ERR(evsel))
 349		return -1;
 350
 351	evsel->handler = handler;
 352	perf_evlist__add(evlist, evsel);
 353	return 0;
 354}
 355
 356static int perf_evlist__nr_threads(struct perf_evlist *evlist,
 357				   struct perf_evsel *evsel)
 358{
 359	if (evsel->system_wide)
 360		return 1;
 361	else
 362		return thread_map__nr(evlist->threads);
 363}
 364
 365void perf_evlist__disable(struct perf_evlist *evlist)
 366{
 367	struct perf_evsel *pos;
 368
 369	evlist__for_each(evlist, pos) {
 370		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
 371			continue;
 372		perf_evsel__disable(pos);
 373	}
 374
 375	evlist->enabled = false;
 376}
 377
 378void perf_evlist__enable(struct perf_evlist *evlist)
 379{
 380	struct perf_evsel *pos;
 381
 382	evlist__for_each(evlist, pos) {
 383		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
 384			continue;
 385		perf_evsel__enable(pos);
 386	}
 387
 388	evlist->enabled = true;
 389}
 390
 391void perf_evlist__toggle_enable(struct perf_evlist *evlist)
 392{
 393	(evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist);
 394}
 395
 396static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
 397					 struct perf_evsel *evsel, int cpu)
 398{
 399	int thread, err;
 400	int nr_threads = perf_evlist__nr_threads(evlist, evsel);
 401
 402	if (!evsel->fd)
 403		return -EINVAL;
 404
 405	for (thread = 0; thread < nr_threads; thread++) {
 406		err = ioctl(FD(evsel, cpu, thread),
 407			    PERF_EVENT_IOC_ENABLE, 0);
 408		if (err)
 409			return err;
 410	}
 411	return 0;
 412}
 413
 414static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
 415					    struct perf_evsel *evsel,
 416					    int thread)
 417{
 418	int cpu, err;
 419	int nr_cpus = cpu_map__nr(evlist->cpus);
 420
 421	if (!evsel->fd)
 422		return -EINVAL;
 423
 424	for (cpu = 0; cpu < nr_cpus; cpu++) {
 425		err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
 426		if (err)
 427			return err;
 428	}
 429	return 0;
 430}
 431
 432int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
 433				  struct perf_evsel *evsel, int idx)
 434{
 435	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);
 436
 437	if (per_cpu_mmaps)
 438		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
 439	else
 440		return perf_evlist__enable_event_thread(evlist, evsel, idx);
 441}
 442
 443int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
 444{
 445	int nr_cpus = cpu_map__nr(evlist->cpus);
 446	int nr_threads = thread_map__nr(evlist->threads);
 447	int nfds = 0;
 448	struct perf_evsel *evsel;
 449
 450	evlist__for_each(evlist, evsel) {
 451		if (evsel->system_wide)
 452			nfds += nr_cpus;
 453		else
 454			nfds += nr_cpus * nr_threads;
 455	}
 456
 457	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
 458	    fdarray__grow(&evlist->pollfd, nfds) < 0)
 459		return -ENOMEM;
 460
 461	return 0;
 462}
 463
 464static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx)
 465{
 466	int pos = fdarray__add(&evlist->pollfd, fd, POLLIN | POLLERR | POLLHUP);
 467	/*
 468	 * Save the idx so that when we filter out fds POLLHUP'ed we can
 469	 * close the associated evlist->mmap[] entry.
 470	 */
 471	if (pos >= 0) {
 472		evlist->pollfd.priv[pos].idx = idx;
 473
 474		fcntl(fd, F_SETFL, O_NONBLOCK);
 475	}
 476
 477	return pos;
 478}
 479
 480int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
 481{
 482	return __perf_evlist__add_pollfd(evlist, fd, -1);
 483}
 484
 485static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd)
 486{
 487	struct perf_evlist *evlist = container_of(fda, struct perf_evlist, pollfd);
 488
 489	perf_evlist__mmap_put(evlist, fda->priv[fd].idx);
 490}
 491
 492int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
 493{
 494	return fdarray__filter(&evlist->pollfd, revents_and_mask,
 495			       perf_evlist__munmap_filtered);
 496}
 497
 498int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
 499{
 500	return fdarray__poll(&evlist->pollfd, timeout);
 501}
 502
 503static void perf_evlist__id_hash(struct perf_evlist *evlist,
 504				 struct perf_evsel *evsel,
 505				 int cpu, int thread, u64 id)
 506{
 507	int hash;
 508	struct perf_sample_id *sid = SID(evsel, cpu, thread);
 509
 510	sid->id = id;
 511	sid->evsel = evsel;
 512	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
 513	hlist_add_head(&sid->node, &evlist->heads[hash]);
 514}
 515
 516void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
 517			 int cpu, int thread, u64 id)
 518{
 519	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
 520	evsel->id[evsel->ids++] = id;
 521}
 522
 523int perf_evlist__id_add_fd(struct perf_evlist *evlist,
 524			   struct perf_evsel *evsel,
 525			   int cpu, int thread, int fd)
 526{
 527	u64 read_data[4] = { 0, };
 528	int id_idx = 1; /* The first entry is the counter value */
 529	u64 id;
 530	int ret;
 531
 532	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
 533	if (!ret)
 534		goto add;
 535
 536	if (errno != ENOTTY)
 537		return -1;
 538
 539	/* Legacy way to get the event id. All hail old kernels! */
 540
 541	/*
 542	 * This way does not work with group format read, so bail
 543	 * out in that case.
 544	 */
 545	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
 546		return -1;
 547
 548	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
 549	    read(fd, &read_data, sizeof(read_data)) == -1)
 550		return -1;
 551
 552	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
 553		++id_idx;
 554	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
 555		++id_idx;
 556
 557	id = read_data[id_idx];
 558
 559 add:
 560	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
 561	return 0;
 562}
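
/*
 * Editorial note: layout of read_data[] in the legacy path above, assuming
 * attr.read_format has both time fields and PERF_FORMAT_ID set:
 *
 *	read_data[0]	counter value
 *	read_data[1]	PERF_FORMAT_TOTAL_TIME_ENABLED
 *	read_data[2]	PERF_FORMAT_TOTAL_TIME_RUNNING
 *	read_data[3]	PERF_FORMAT_ID		<- id_idx lands here
 */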
 563
 564static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
 565				     struct perf_evsel *evsel, int idx, int cpu,
 566				     int thread)
 567{
 568	struct perf_sample_id *sid = SID(evsel, cpu, thread);
 569	sid->idx = idx;
 570	if (evlist->cpus && cpu >= 0)
 571		sid->cpu = evlist->cpus->map[cpu];
 572	else
 573		sid->cpu = -1;
 574	if (!evsel->system_wide && evlist->threads && thread >= 0)
 575		sid->tid = thread_map__pid(evlist->threads, thread);
 576	else
 577		sid->tid = -1;
 578}
 579
 580struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
 581{
 582	struct hlist_head *head;
 583	struct perf_sample_id *sid;
 584	int hash;
 585
 586	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
 587	head = &evlist->heads[hash];
 588
 589	hlist_for_each_entry(sid, head, node)
 590		if (sid->id == id)
 591			return sid;
 592
 593	return NULL;
 594}
 595
 596struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
 597{
 598	struct perf_sample_id *sid;
 599
 600	if (evlist->nr_entries == 1 || !id)
 601		return perf_evlist__first(evlist);
 602
 603	sid = perf_evlist__id2sid(evlist, id);
 604	if (sid)
 605		return sid->evsel;
 606
 607	if (!perf_evlist__sample_id_all(evlist))
 608		return perf_evlist__first(evlist);
 609
 610	return NULL;
 611}
 612
 613struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
 614						u64 id)
 615{
 616	struct perf_sample_id *sid;
 617
 618	if (!id)
 619		return NULL;
 620
 621	sid = perf_evlist__id2sid(evlist, id);
 622	if (sid)
 623		return sid->evsel;
 624
 625	return NULL;
 626}
 627
 628static int perf_evlist__event2id(struct perf_evlist *evlist,
 629				 union perf_event *event, u64 *id)
 630{
 631	const u64 *array = event->sample.array;
 632	ssize_t n;
 633
 634	n = (event->header.size - sizeof(event->header)) >> 3;
 635
 636	if (event->header.type == PERF_RECORD_SAMPLE) {
 637		if (evlist->id_pos >= n)
 638			return -1;
 639		*id = array[evlist->id_pos];
 640	} else {
 641		if (evlist->is_pos > n)
 642			return -1;
 643		n -= evlist->is_pos;
 644		*id = array[n];
 645	}
 646	return 0;
 647}
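
/*
 * Editorial note: in the function above, a PERF_RECORD_SAMPLE carries its
 * identifier at a fixed offset from the start of the array (id_pos), while
 * every other record type gets a sample_id trailer appended by the kernel,
 * so is_pos counts u64 slots back from the end of the event.
 */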
 648
 649static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
 650						   union perf_event *event)
 651{
 652	struct perf_evsel *first = perf_evlist__first(evlist);
 653	struct hlist_head *head;
 654	struct perf_sample_id *sid;
 655	int hash;
 656	u64 id;
 657
 658	if (evlist->nr_entries == 1)
 659		return first;
 660
 661	if (!first->attr.sample_id_all &&
 662	    event->header.type != PERF_RECORD_SAMPLE)
 663		return first;
 664
 665	if (perf_evlist__event2id(evlist, event, &id))
 666		return NULL;
 667
 668	/* Synthesized events have an id of zero */
 669	if (!id)
 670		return first;
 671
 672	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
 673	head = &evlist->heads[hash];
 674
 675	hlist_for_each_entry(sid, head, node) {
 676		if (sid->id == id)
 677			return sid->evsel;
 678	}
 679	return NULL;
 680}
 681
 682union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
 683{
 684	struct perf_mmap *md = &evlist->mmap[idx];
 685	u64 head;
 686	u64 old = md->prev;
 687	unsigned char *data = md->base + page_size;
 688	union perf_event *event = NULL;
 689
 690	/*
 691	 * Check if event was unmapped due to a POLLHUP/POLLERR.
 692	 */
 693	if (!atomic_read(&md->refcnt))
 694		return NULL;
 695
 696	head = perf_mmap__read_head(md);
 697	if (evlist->overwrite) {
 698		/*
 699		 * If we're further behind than half the buffer, there's a chance
 700		 * the writer will bite our tail and mess up the samples under us.
 701		 *
 702		 * If we somehow ended up ahead of the head, we got messed up.
 703		 *
 704		 * In either case, truncate and restart at head.
 705		 */
 706		int diff = head - old;
 707		if (diff > md->mask / 2 || diff < 0) {
 708			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
 709
 710			/*
 711			 * head points to a known good entry, start there.
 712			 */
 713			old = head;
 714		}
 715	}
 716
 717	if (old != head) {
 718		size_t size;
 719
 720		event = (union perf_event *)&data[old & md->mask];
 721		size = event->header.size;
 722
 723		/*
 724		 * Event straddles the mmap boundary -- header should always
 725		 * be inside due to u64 alignment of output.
 726		 */
 727		if ((old & md->mask) + size != ((old + size) & md->mask)) {
 728			unsigned int offset = old;
 729			unsigned int len = min(sizeof(*event), size), cpy;
 730			void *dst = md->event_copy;
 731
 732			do {
 733				cpy = min(md->mask + 1 - (offset & md->mask), len);
 734				memcpy(dst, &data[offset & md->mask], cpy);
 735				offset += cpy;
 736				dst += cpy;
 737				len -= cpy;
 738			} while (len);
 739
 740			event = (union perf_event *) md->event_copy;
 741		}
 742
 743		old += size;
 744	}
 745
 746	md->prev = old;
 747
 748	return event;
 749}
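
/*
 * Editorial sketch: the usual consumption loop built on the function
 * above, paired with perf_evlist__mmap_consume() (defined below) so the
 * tail pointer is advanced for the kernel when not in overwrite mode.
 */
#if 0
	union perf_event *event;

	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
		/* decode, e.g. with perf_evlist__parse_sample() */
		perf_evlist__mmap_consume(evlist, idx);
	}
#endif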
 750
 751static bool perf_mmap__empty(struct perf_mmap *md)
 752{
 753	return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
 754}
 755
 756static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
 757{
 758	atomic_inc(&evlist->mmap[idx].refcnt);
 759}
 760
 761static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx)
 762{
 763	BUG_ON(atomic_read(&evlist->mmap[idx].refcnt) == 0);
 764
 765	if (atomic_dec_and_test(&evlist->mmap[idx].refcnt))
 766		__perf_evlist__munmap(evlist, idx);
 767}
 768
 769void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
 770{
 771	struct perf_mmap *md = &evlist->mmap[idx];
 772
 773	if (!evlist->overwrite) {
 774		u64 old = md->prev;
 775
 776		perf_mmap__write_tail(md, old);
 777	}
 778
 779	if (atomic_read(&md->refcnt) == 1 && perf_mmap__empty(md))
 780		perf_evlist__mmap_put(evlist, idx);
 781}
 782
 783int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
 784			       struct auxtrace_mmap_params *mp __maybe_unused,
 785			       void *userpg __maybe_unused,
 786			       int fd __maybe_unused)
 787{
 788	return 0;
 789}
 790
 791void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
 792{
 793}
 794
 795void __weak auxtrace_mmap_params__init(
 796			struct auxtrace_mmap_params *mp __maybe_unused,
 797			off_t auxtrace_offset __maybe_unused,
 798			unsigned int auxtrace_pages __maybe_unused,
 799			bool auxtrace_overwrite __maybe_unused)
 800{
 801}
 802
 803void __weak auxtrace_mmap_params__set_idx(
 804			struct auxtrace_mmap_params *mp __maybe_unused,
 805			struct perf_evlist *evlist __maybe_unused,
 806			int idx __maybe_unused,
 807			bool per_cpu __maybe_unused)
 808{
 809}
 810
 811static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
 812{
 813	if (evlist->mmap[idx].base != NULL) {
 814		munmap(evlist->mmap[idx].base, evlist->mmap_len);
 815		evlist->mmap[idx].base = NULL;
 816		atomic_set(&evlist->mmap[idx].refcnt, 0);
 817	}
 818	auxtrace_mmap__munmap(&evlist->mmap[idx].auxtrace_mmap);
 819}
 820
 821void perf_evlist__munmap(struct perf_evlist *evlist)
 822{
 823	int i;
 824
 825	if (evlist->mmap == NULL)
 826		return;
 827
 828	for (i = 0; i < evlist->nr_mmaps; i++)
 829		__perf_evlist__munmap(evlist, i);
 830
 831	zfree(&evlist->mmap);
 832}
 833
 834static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
 835{
 836	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
 837	if (cpu_map__empty(evlist->cpus))
 838		evlist->nr_mmaps = thread_map__nr(evlist->threads);
 839	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
 840	return evlist->mmap != NULL ? 0 : -ENOMEM;
 841}
 842
 843struct mmap_params {
 844	int prot;
 845	int mask;
 846	struct auxtrace_mmap_params auxtrace_mp;
 847};
 848
 849static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
 850			       struct mmap_params *mp, int fd)
 851{
 852	/*
 853	 * The last one will be done at perf_evlist__mmap_consume(), so that we
 854	 * make sure we don't prevent tools from consuming every last event in
 855	 * the ring buffer.
 856	 *
 857	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
 858	 * anymore, but the last events for it are still in the ring buffer,
 859	 * waiting to be consumed.
 860	 *
 861	 * Tools can choose to ignore this at their own discretion, but the
 862	 * evlist layer can't just drop it when filtering events in
 863	 * perf_evlist__filter_pollfd().
 864	 */
 865	atomic_set(&evlist->mmap[idx].refcnt, 2);
 866	evlist->mmap[idx].prev = 0;
 867	evlist->mmap[idx].mask = mp->mask;
 868	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot,
 869				      MAP_SHARED, fd, 0);
 870	if (evlist->mmap[idx].base == MAP_FAILED) {
 871		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
 872			  errno);
 873		evlist->mmap[idx].base = NULL;
 874		return -1;
 875	}
 876
 877	if (auxtrace_mmap__mmap(&evlist->mmap[idx].auxtrace_mmap,
 878				&mp->auxtrace_mp, evlist->mmap[idx].base, fd))
 879		return -1;
 880
 881	return 0;
 882}
 883
 884static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
 885				       struct mmap_params *mp, int cpu,
 886				       int thread, int *output)
 887{
 888	struct perf_evsel *evsel;
 889
 890	evlist__for_each(evlist, evsel) {
 891		int fd;
 892
 893		if (evsel->system_wide && thread)
 894			continue;
 895
 896		fd = FD(evsel, cpu, thread);
 897
 898		if (*output == -1) {
 899			*output = fd;
 900			if (__perf_evlist__mmap(evlist, idx, mp, *output) < 0)
 901				return -1;
 902		} else {
 903			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
 904				return -1;
 905
 906			perf_evlist__mmap_get(evlist, idx);
 907		}
 908
 909		/*
 910		 * The system_wide flag causes a selected event to be opened
 911		 * always without a pid.  Consequently it will never get a
 912		 * POLLHUP, but it is used for tracking in combination with
 913		 * other events, so it should not need to be polled anyway.
 914		 * Therefore don't add it for polling.
 915		 */
 916		if (!evsel->system_wide &&
 917		    __perf_evlist__add_pollfd(evlist, fd, idx) < 0) {
 918			perf_evlist__mmap_put(evlist, idx);
 919			return -1;
 920		}
 921
 922		if (evsel->attr.read_format & PERF_FORMAT_ID) {
 923			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
 924						   fd) < 0)
 925				return -1;
 926			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
 927						 thread);
 928		}
 929	}
 930
 931	return 0;
 932}
 933
 934static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
 935				     struct mmap_params *mp)
 936{
 937	int cpu, thread;
 938	int nr_cpus = cpu_map__nr(evlist->cpus);
 939	int nr_threads = thread_map__nr(evlist->threads);
 940
 941	pr_debug2("perf event ring buffer mmapped per cpu\n");
 942	for (cpu = 0; cpu < nr_cpus; cpu++) {
 943		int output = -1;
 944
 945		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
 946					      true);
 947
 948		for (thread = 0; thread < nr_threads; thread++) {
 949			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
 950							thread, &output))
 951				goto out_unmap;
 952		}
 953	}
 954
 955	return 0;
 956
 957out_unmap:
 958	for (cpu = 0; cpu < nr_cpus; cpu++)
 959		__perf_evlist__munmap(evlist, cpu);
 960	return -1;
 961}
 962
 963static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
 964					struct mmap_params *mp)
 965{
 966	int thread;
 967	int nr_threads = thread_map__nr(evlist->threads);
 968
 969	pr_debug2("perf event ring buffer mmapped per thread\n");
 970	for (thread = 0; thread < nr_threads; thread++) {
 971		int output = -1;
 972
 973		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
 974					      false);
 975
 976		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
 977						&output))
 978			goto out_unmap;
 979	}
 980
 981	return 0;
 982
 983out_unmap:
 984	for (thread = 0; thread < nr_threads; thread++)
 985		__perf_evlist__munmap(evlist, thread);
 986	return -1;
 987}
 988
 989static size_t perf_evlist__mmap_size(unsigned long pages)
 990{
 991	if (pages == UINT_MAX) {
 992		int max;
 993
 994		if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
 995			/*
 996			 * Pick a value that was once considered good: things look
 997			 * strange since we can't read the sysctl value, but let's
 998			 * not die yet...
 999			 */
1000			max = 512;
1001		} else {
1002			max -= (page_size / 1024);
1003		}
1004
1005		pages = (max * 1024) / page_size;
1006		if (!is_power_of_2(pages))
1007			pages = rounddown_pow_of_two(pages);
1008	} else if (!is_power_of_2(pages))
1009		return 0;
1010
1011	return (pages + 1) * page_size;
1012}
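
/*
 * Editorial worked example for the sizing above, assuming 4 KiB pages and
 * kernel/perf_event_mlock_kb = 516:
 *
 *	max   = 516 - (4096 / 1024)	= 512 kB
 *	pages = (512 * 1024) / 4096	= 128 (already a power of 2)
 *	size  = (128 + 1) * 4096	= 528384 bytes
 *
 * The "+ 1" is the perf_event_mmap_page control page that precedes the
 * data area.
 */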
1013
1014static long parse_pages_arg(const char *str, unsigned long min,
1015			    unsigned long max)
1016{
1017	unsigned long pages, val;
1018	static struct parse_tag tags[] = {
1019		{ .tag  = 'B', .mult = 1       },
1020		{ .tag  = 'K', .mult = 1 << 10 },
1021		{ .tag  = 'M', .mult = 1 << 20 },
1022		{ .tag  = 'G', .mult = 1 << 30 },
1023		{ .tag  = 0 },
1024	};
1025
1026	if (str == NULL)
1027		return -EINVAL;
1028
1029	val = parse_tag_value(str, tags);
1030	if (val != (unsigned long) -1) {
1031		/* we got file size value */
1032		pages = PERF_ALIGN(val, page_size) / page_size;
1033	} else {
1034		/* we got pages count value */
1035		char *eptr;
1036		pages = strtoul(str, &eptr, 10);
1037		if (*eptr != '\0')
1038			return -EINVAL;
1039	}
1040
1041	if (pages == 0 && min == 0) {
1042		/* leave number of pages at 0 */
1043	} else if (!is_power_of_2(pages)) {
1044		/* round pages up to next power of 2 */
1045		pages = roundup_pow_of_two(pages);
1046		if (!pages)
1047			return -EINVAL;
1048		pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
1049			pages * page_size, pages);
1050	}
1051
1052	if (pages > max)
1053		return -EINVAL;
1054
1055	return pages;
1056}
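
/*
 * Editorial examples for the parser above (4 KiB pages assumed):
 *
 *	"128"	-> 128 pages, already a power of 2
 *	"100"	-> rounded up to 128 pages, with a notice printed
 *	"512K"	-> taken as a size: PERF_ALIGN(524288, 4096) / 4096 = 128 pages
 */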
1057
1058int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
1059{
1060	unsigned long max = UINT_MAX;
1061	long pages;
1062
1063	if (max > SIZE_MAX / page_size)
1064		max = SIZE_MAX / page_size;
1065
1066	pages = parse_pages_arg(str, 1, max);
1067	if (pages < 0) {
1068		pr_err("Invalid argument for --mmap_pages/-m\n");
1069		return -1;
1070	}
1071
1072	*mmap_pages = pages;
1073	return 0;
1074}
1075
1076int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
1077				  int unset __maybe_unused)
1078{
1079	return __perf_evlist__parse_mmap_pages(opt->value, str);
1080}
1081
1082/**
1083 * perf_evlist__mmap_ex - Create mmaps to receive events.
1084 * @evlist: list of events
1085 * @pages: map length in pages
1086 * @overwrite: overwrite older events?
1087 * @auxtrace_pages: auxtrace map length in pages
1088 * @auxtrace_overwrite: overwrite older auxtrace data?
1089 *
1090 * If @overwrite is %false the user needs to signal event consumption using
1091 * perf_mmap__write_tail().  Using perf_evlist__mmap_read() does this
1092 * automatically.
1093 *
1094 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
1095 * consumption using auxtrace_mmap__write_tail().
1096 *
1097 * Return: %0 on success, negative error code otherwise.
1098 */
1099int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
1100			 bool overwrite, unsigned int auxtrace_pages,
1101			 bool auxtrace_overwrite)
1102{
1103	struct perf_evsel *evsel;
1104	const struct cpu_map *cpus = evlist->cpus;
1105	const struct thread_map *threads = evlist->threads;
1106	struct mmap_params mp = {
1107		.prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
1108	};
1109
1110	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
1111		return -ENOMEM;
1112
1113	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
1114		return -ENOMEM;
1115
1116	evlist->overwrite = overwrite;
1117	evlist->mmap_len = perf_evlist__mmap_size(pages);
1118	pr_debug("mmap size %zuB\n", evlist->mmap_len);
1119	mp.mask = evlist->mmap_len - page_size - 1;
1120
1121	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
1122				   auxtrace_pages, auxtrace_overwrite);
1123
1124	evlist__for_each(evlist, evsel) {
1125		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
1126		    evsel->sample_id == NULL &&
1127		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
1128			return -ENOMEM;
1129	}
1130
1131	if (cpu_map__empty(cpus))
1132		return perf_evlist__mmap_per_thread(evlist, &mp);
1133
1134	return perf_evlist__mmap_per_cpu(evlist, &mp);
1135}
1136
1137int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
1138		      bool overwrite)
1139{
1140	return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
1141}
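
/*
 * Editorial sketch: the typical lifecycle these helpers combine into,
 * assuming a caller-initialized struct target.  Error handling is reduced
 * to the bare minimum.
 */
#if 0
	struct perf_evlist *evlist = perf_evlist__new_default();

	if (evlist == NULL)
		return -ENOMEM;

	if (perf_evlist__create_maps(evlist, &target) < 0 ||
	    perf_evlist__open(evlist) < 0 ||
	    perf_evlist__mmap(evlist, UINT_MAX, false) < 0)
		goto out_delete;

	perf_evlist__enable(evlist);
	/* ... poll and mmap_read loop ... */
out_delete:
	perf_evlist__delete(evlist);
#endif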
1142
1143int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
1144{
1145	struct cpu_map *cpus;
1146	struct thread_map *threads;
1147
1148	threads = thread_map__new_str(target->pid, target->tid, target->uid);
1149
1150	if (!threads)
1151		return -1;
1152
1153	if (target__uses_dummy_map(target))
1154		cpus = cpu_map__dummy_new();
1155	else
1156		cpus = cpu_map__new(target->cpu_list);
1157
1158	if (!cpus)
1159		goto out_delete_threads;
1160
1161	evlist->has_user_cpus = !!target->cpu_list;
1162
1163	perf_evlist__set_maps(evlist, cpus, threads);
1164
1165	return 0;
1166
1167out_delete_threads:
1168	thread_map__put(threads);
1169	return -1;
1170}
1171
1172void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
1173			   struct thread_map *threads)
1174{
1175	/*
1176	 * Allow for the possibility that one or another of the maps isn't being
1177	 * changed i.e. don't put it.  Note we are assuming the maps that are
1178	 * being applied are brand new and evlist is taking ownership of the
1179	 * original reference count of 1.  If that is not the case it is up to
1180	 * the caller to increase the reference count.
1181	 */
1182	if (cpus != evlist->cpus) {
1183		cpu_map__put(evlist->cpus);
1184		evlist->cpus = cpu_map__get(cpus);
1185	}
1186
1187	if (threads != evlist->threads) {
1188		thread_map__put(evlist->threads);
1189		evlist->threads = thread_map__get(threads);
1190	}
1191
1192	perf_evlist__propagate_maps(evlist);
1193}
1194
1195int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
1196{
1197	struct perf_evsel *evsel;
1198	int err = 0;
1199	const int ncpus = cpu_map__nr(evlist->cpus),
1200		  nthreads = thread_map__nr(evlist->threads);
1201
1202	evlist__for_each(evlist, evsel) {
1203		if (evsel->filter == NULL)
1204			continue;
1205
1206		/*
1207		 * filters only work for tracepoint events, which don't have a
1208		 * cpu limit, so the evlist and evsel maps should always match.
1209		 */
1210		err = perf_evsel__apply_filter(evsel, ncpus, nthreads, evsel->filter);
1211		if (err) {
1212			*err_evsel = evsel;
1213			break;
1214		}
1215	}
1216
1217	return err;
1218}
1219
1220int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
1221{
1222	struct perf_evsel *evsel;
1223	int err = 0;
1224
1225	evlist__for_each(evlist, evsel) {
1226		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
1227			continue;
1228
1229		err = perf_evsel__set_filter(evsel, filter);
1230		if (err)
1231			break;
1232	}
1233
1234	return err;
1235}
1236
1237int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids)
1238{
1239	char *filter = NULL;
1240	int ret = -1;
1241	size_t i;
1242
1243	for (i = 0; i < npids; ++i) {
1244		if (i == 0) {
1245			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
1246				return -1;
1247		} else {
1248			char *tmp;
1249
1250			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
1251				goto out_free;
1252
1253			free(filter);
1254			filter = tmp;
1255		}
1256	}
1257
1258	ret = perf_evlist__set_filter(evlist, filter);
1259out_free:
1260	free(filter);
1261	return ret;
1262}
1263
1264int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid)
1265{
1266	return perf_evlist__set_filter_pids(evlist, 1, &pid);
1267}
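
/*
 * Editorial note: a tracer typically excludes its own pid so it does not
 * sample itself, e.g.:
 *
 *	perf_evlist__set_filter_pid(evlist, getpid());
 *
 * which installs the tracepoint filter "common_pid != <pid>" on every
 * tracepoint evsel in the list.
 */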
1268
1269bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
1270{
1271	struct perf_evsel *pos;
1272
1273	if (evlist->nr_entries == 1)
1274		return true;
1275
1276	if (evlist->id_pos < 0 || evlist->is_pos < 0)
1277		return false;
1278
1279	evlist__for_each(evlist, pos) {
1280		if (pos->id_pos != evlist->id_pos ||
1281		    pos->is_pos != evlist->is_pos)
1282			return false;
1283	}
1284
1285	return true;
1286}
1287
1288u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
1289{
1290	struct perf_evsel *evsel;
1291
1292	if (evlist->combined_sample_type)
1293		return evlist->combined_sample_type;
1294
1295	evlist__for_each(evlist, evsel)
1296		evlist->combined_sample_type |= evsel->attr.sample_type;
1297
1298	return evlist->combined_sample_type;
1299}
1300
1301u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
1302{
1303	evlist->combined_sample_type = 0;
1304	return __perf_evlist__combined_sample_type(evlist);
1305}
1306
1307u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist)
1308{
1309	struct perf_evsel *evsel;
1310	u64 branch_type = 0;
1311
1312	evlist__for_each(evlist, evsel)
1313		branch_type |= evsel->attr.branch_sample_type;
1314	return branch_type;
1315}
1316
1317bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
1318{
1319	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
1320	u64 read_format = first->attr.read_format;
1321	u64 sample_type = first->attr.sample_type;
1322
1323	evlist__for_each(evlist, pos) {
1324		if (read_format != pos->attr.read_format)
1325			return false;
1326	}
1327
1328	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
1329	if ((sample_type & PERF_SAMPLE_READ) &&
1330	    !(read_format & PERF_FORMAT_ID)) {
1331		return false;
1332	}
1333
1334	return true;
1335}
1336
1337u64 perf_evlist__read_format(struct perf_evlist *evlist)
1338{
1339	struct perf_evsel *first = perf_evlist__first(evlist);
1340	return first->attr.read_format;
1341}
1342
1343u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
1344{
1345	struct perf_evsel *first = perf_evlist__first(evlist);
1346	struct perf_sample *data;
1347	u64 sample_type;
1348	u16 size = 0;
1349
1350	if (!first->attr.sample_id_all)
1351		goto out;
1352
1353	sample_type = first->attr.sample_type;
1354
1355	if (sample_type & PERF_SAMPLE_TID)
1356		size += sizeof(data->tid) * 2;
1357
1358	if (sample_type & PERF_SAMPLE_TIME)
1359		size += sizeof(data->time);
1360
1361	if (sample_type & PERF_SAMPLE_ID)
1362		size += sizeof(data->id);
1363
1364	if (sample_type & PERF_SAMPLE_STREAM_ID)
1365		size += sizeof(data->stream_id);
1366
1367	if (sample_type & PERF_SAMPLE_CPU)
1368		size += sizeof(data->cpu) * 2;
1369
1370	if (sample_type & PERF_SAMPLE_IDENTIFIER)
1371		size += sizeof(data->id);
1372out:
1373	return size;
1374}
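
/*
 * Editorial example: with sample_id_all set and sample_type containing
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_IDENTIFIER, the
 * trailer is 8 (pid + tid) + 8 (time) + 8 (identifier) = 24 bytes.
 */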
1375
1376bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
1377{
1378	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
1379
1380	evlist__for_each_continue(evlist, pos) {
1381		if (first->attr.sample_id_all != pos->attr.sample_id_all)
1382			return false;
1383	}
1384
1385	return true;
1386}
1387
1388bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
1389{
1390	struct perf_evsel *first = perf_evlist__first(evlist);
1391	return first->attr.sample_id_all;
1392}
1393
1394void perf_evlist__set_selected(struct perf_evlist *evlist,
1395			       struct perf_evsel *evsel)
1396{
1397	evlist->selected = evsel;
1398}
1399
1400void perf_evlist__close(struct perf_evlist *evlist)
1401{
1402	struct perf_evsel *evsel;
1403	int ncpus = cpu_map__nr(evlist->cpus);
1404	int nthreads = thread_map__nr(evlist->threads);
1405	int n;
1406
1407	evlist__for_each_reverse(evlist, evsel) {
1408		n = evsel->cpus ? evsel->cpus->nr : ncpus;
1409		perf_evsel__close(evsel, n, nthreads);
1410	}
1411}
1412
1413static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
1414{
1415	struct cpu_map	  *cpus;
1416	struct thread_map *threads;
1417	int err = -ENOMEM;
1418
1419	/*
1420	 * Try reading /sys/devices/system/cpu/online to get
1421	 * an all cpus map.
1422	 *
1423	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
1424	 * code needs an overhaul to properly forward the
1425	 * error, and we may not want to do that fallback to a
1426	 * default cpu identity map :-\
1427	 */
1428	cpus = cpu_map__new(NULL);
1429	if (!cpus)
1430		goto out;
1431
1432	threads = thread_map__new_dummy();
1433	if (!threads)
1434		goto out_put;
1435
1436	perf_evlist__set_maps(evlist, cpus, threads);
	err = 0; /* editorial fix: success was falling through with -ENOMEM */
1437out:
1438	return err;
1439out_put:
1440	cpu_map__put(cpus);
1441	goto out;
1442}
1443
1444int perf_evlist__open(struct perf_evlist *evlist)
1445{
1446	struct perf_evsel *evsel;
1447	int err;
1448
1449	/*
1450	 * Default: one fd per CPU, all threads, aka systemwide
1451	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
1452	 */
1453	if (evlist->threads == NULL && evlist->cpus == NULL) {
1454		err = perf_evlist__create_syswide_maps(evlist);
1455		if (err < 0)
1456			goto out_err;
1457	}
1458
1459	perf_evlist__update_id_pos(evlist);
1460
1461	evlist__for_each(evlist, evsel) {
1462		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
1463		if (err < 0)
1464			goto out_err;
1465	}
1466
1467	return 0;
1468out_err:
1469	perf_evlist__close(evlist);
1470	errno = -err;
1471	return err;
1472}
1473
1474int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
1475				  const char *argv[], bool pipe_output,
1476				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
1477{
1478	int child_ready_pipe[2], go_pipe[2];
1479	char bf;
1480
1481	if (pipe(child_ready_pipe) < 0) {
1482		perror("failed to create 'ready' pipe");
1483		return -1;
1484	}
1485
1486	if (pipe(go_pipe) < 0) {
1487		perror("failed to create 'go' pipe");
1488		goto out_close_ready_pipe;
1489	}
1490
1491	evlist->workload.pid = fork();
1492	if (evlist->workload.pid < 0) {
1493		perror("failed to fork");
1494		goto out_close_pipes;
1495	}
1496
1497	if (!evlist->workload.pid) {
1498		int ret;
1499
1500		if (pipe_output)
1501			dup2(2, 1);
1502
1503		signal(SIGTERM, SIG_DFL);
1504
1505		close(child_ready_pipe[0]);
1506		close(go_pipe[1]);
1507		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
1508
1509		/*
1510		 * Tell the parent we're ready to go
1511		 */
1512		close(child_ready_pipe[1]);
1513
1514		/*
1515		 * Wait until the parent tells us to go.
1516		 */
1517		ret = read(go_pipe[0], &bf, 1);
1518		/*
1519		 * The parent will ask for the execvp() to be performed by
1520		 * writing exactly one byte, in workload.cork_fd, usually via
1521		 * perf_evlist__start_workload().
1522		 *
1523		 * For cancelling the workload without actually running it,
1524		 * the parent will just close workload.cork_fd, without writing
1525		 * anything, i.e. read will return zero and we just exit()
1526		 * here.
1527		 */
1528		if (ret != 1) {
1529			if (ret == -1)
1530				perror("unable to read pipe");
1531			exit(ret);
1532		}
1533
1534		execvp(argv[0], (char **)argv);
1535
1536		if (exec_error) {
1537			union sigval val;
1538
1539			val.sival_int = errno;
1540			if (sigqueue(getppid(), SIGUSR1, val))
1541				perror(argv[0]);
1542		} else
1543			perror(argv[0]);
1544		exit(-1);
1545	}
1546
1547	if (exec_error) {
1548		struct sigaction act = {
1549			.sa_flags     = SA_SIGINFO,
1550			.sa_sigaction = exec_error,
1551		};
1552		sigaction(SIGUSR1, &act, NULL);
1553	}
1554
1555	if (target__none(target)) {
1556		if (evlist->threads == NULL) {
1557			fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
1558				__func__, __LINE__);
1559			goto out_close_pipes;
1560		}
1561		thread_map__set_pid(evlist->threads, 0, evlist->workload.pid);
1562	}
1563
1564	close(child_ready_pipe[1]);
1565	close(go_pipe[0]);
1566	/*
1567	 * wait for child to settle
1568	 */
1569	if (read(child_ready_pipe[0], &bf, 1) == -1) {
1570		perror("unable to read pipe");
1571		goto out_close_pipes;
1572	}
1573
1574	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
1575	evlist->workload.cork_fd = go_pipe[1];
1576	close(child_ready_pipe[0]);
1577	return 0;
1578
1579out_close_pipes:
1580	close(go_pipe[0]);
1581	close(go_pipe[1]);
1582out_close_ready_pipe:
1583	close(child_ready_pipe[0]);
1584	close(child_ready_pipe[1]);
1585	return -1;
1586}
1587
1588int perf_evlist__start_workload(struct perf_evlist *evlist)
1589{
1590	if (evlist->workload.cork_fd > 0) {
1591		char bf = 0;
1592		int ret;
1593		/*
1594		 * Remove the cork, let it rip!
1595		 */
1596		ret = write(evlist->workload.cork_fd, &bf, 1);
1597		if (ret < 0)
1598			perror("unable to write to pipe");
1599
1600		close(evlist->workload.cork_fd);
1601		return ret;
1602	}
1603
1604	return 0;
1605}
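
/*
 * Editorial sketch: the fork + cork protocol implemented above, as driven
 * by tools like record/stat.  The workload is forked early so its pid can
 * go into the thread map before the counters are opened; exec proceeds
 * only once the single cork byte is written.
 */
#if 0
	perf_evlist__prepare_workload(evlist, &target, argv, false, NULL);
	/* ... open counters, mmap, enable ... */
	perf_evlist__start_workload(evlist);	/* writes the cork byte */
	/* ... consume events until the child exits ... */
#endif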
1606
1607int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
1608			      struct perf_sample *sample)
1609{
1610	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
1611
1612	if (!evsel)
1613		return -EFAULT;
1614	return perf_evsel__parse_sample(evsel, event, sample);
1615}
1616
1617size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
1618{
1619	struct perf_evsel *evsel;
1620	size_t printed = 0;
1621
1622	evlist__for_each(evlist, evsel) {
1623		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
1624				   perf_evsel__name(evsel));
1625	}
1626
1627	return printed + fprintf(fp, "\n");
1628}
1629
1630int perf_evlist__strerror_open(struct perf_evlist *evlist,
1631			       int err, char *buf, size_t size)
1632{
1633	int printed, value;
1634	char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
1635
1636	switch (err) {
1637	case EACCES:
1638	case EPERM:
1639		printed = scnprintf(buf, size,
1640				    "Error:\t%s.\n"
1641				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
1642
1643		value = perf_event_paranoid();
1644
1645		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
1646
1647		if (value >= 2) {
1648			printed += scnprintf(buf + printed, size - printed,
1649					     "For your workloads it needs to be <= 1\nHint:\t");
1650		}
1651		printed += scnprintf(buf + printed, size - printed,
1652				     "For system wide tracing it needs to be set to -1.\n");
1653
1654		printed += scnprintf(buf + printed, size - printed,
1655				    "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
1656				    "Hint:\tThe current value is %d.", value);
1657		break;
1658	case EINVAL: {
1659		struct perf_evsel *first = perf_evlist__first(evlist);
1660		int max_freq;
1661
1662		if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
1663			goto out_default;
1664
1665		if (first->attr.sample_freq < (u64)max_freq)
1666			goto out_default;
1667
1668		printed = scnprintf(buf, size,
1669				    "Error:\t%s.\n"
1670				    "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
1671				    "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
1672				    emsg, max_freq, first->attr.sample_freq);
1673		break;
1674	}
1675	default:
1676out_default:
1677		scnprintf(buf, size, "%s", emsg);
1678		break;
1679	}
1680
1681	return 0;
1682}
1683
1684int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size)
1685{
1686	char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
1687	int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0;
1688
1689	switch (err) {
1690	case EPERM:
1691		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
1692		printed += scnprintf(buf + printed, size - printed,
1693				     "Error:\t%s.\n"
1694				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
1695				     "Hint:\tTried using %zd kB.\n",
1696				     emsg, pages_max_per_user, pages_attempted);
1697
1698		if (pages_attempted >= pages_max_per_user) {
1699			printed += scnprintf(buf + printed, size - printed,
1700					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
1701					     pages_max_per_user + pages_attempted);
1702		}
1703
1704		printed += scnprintf(buf + printed, size - printed,
1705				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
1706		break;
1707	default:
1708		scnprintf(buf, size, "%s", emsg);
1709		break;
1710	}
1711
1712	return 0;
1713}
1714
1715void perf_evlist__to_front(struct perf_evlist *evlist,
1716			   struct perf_evsel *move_evsel)
1717{
1718	struct perf_evsel *evsel, *n;
1719	LIST_HEAD(move);
1720
1721	if (move_evsel == perf_evlist__first(evlist))
1722		return;
1723
1724	evlist__for_each_safe(evlist, n, evsel) {
1725		if (evsel->leader == move_evsel->leader)
1726			list_move_tail(&evsel->node, &move);
1727	}
1728
1729	list_splice(&move, &evlist->entries);
1730}
1731
1732void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
1733				     struct perf_evsel *tracking_evsel)
1734{
1735	struct perf_evsel *evsel;
1736
1737	if (tracking_evsel->tracking)
1738		return;
1739
1740	evlist__for_each(evlist, evsel) {
1741		if (evsel != tracking_evsel)
1742			evsel->tracking = false;
1743	}
1744
1745	tracking_evsel->tracking = true;
1746}
1747
1748struct perf_evsel *
1749perf_evlist__find_evsel_by_str(struct perf_evlist *evlist,
1750			       const char *str)
1751{
1752	struct perf_evsel *evsel;
1753
1754	evlist__for_each(evlist, evsel) {
1755		if (!evsel->name)
1756			continue;
1757		if (strcmp(str, evsel->name) == 0)
1758			return evsel;
1759	}
1760
1761	return NULL;
1762}
   1/*
   2 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
   3 *
   4 * Parts came from builtin-{top,stat,record}.c, see those files for further
   5 * copyright notes.
   6 *
   7 * Released under the GPL v2. (and only v2, not any later version)
   8 */
   9#include "util.h"
  10#include <api/fs/fs.h>
  11#include <poll.h>
  12#include "cpumap.h"
  13#include "thread_map.h"
  14#include "target.h"
  15#include "evlist.h"
  16#include "evsel.h"
  17#include "debug.h"
  18#include "asm/bug.h"
  19#include <unistd.h>
  20
  21#include "parse-events.h"
  22#include <subcmd/parse-options.h>
  23
  24#include <sys/mman.h>
  25
  26#include <linux/bitops.h>
  27#include <linux/hash.h>
  28#include <linux/log2.h>
  29#include <linux/err.h>
  30
  31static void perf_mmap__munmap(struct perf_mmap *map);
  32static void perf_mmap__put(struct perf_mmap *map);
  33
  34#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
  35#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
  36
  37void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
  38		       struct thread_map *threads)
  39{
  40	int i;
  41
  42	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
  43		INIT_HLIST_HEAD(&evlist->heads[i]);
  44	INIT_LIST_HEAD(&evlist->entries);
  45	perf_evlist__set_maps(evlist, cpus, threads);
  46	fdarray__init(&evlist->pollfd, 64);
  47	evlist->workload.pid = -1;
  48	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
  49}
  50
  51struct perf_evlist *perf_evlist__new(void)
  52{
  53	struct perf_evlist *evlist = zalloc(sizeof(*evlist));
  54
  55	if (evlist != NULL)
  56		perf_evlist__init(evlist, NULL, NULL);
  57
  58	return evlist;
  59}
  60
  61struct perf_evlist *perf_evlist__new_default(void)
  62{
  63	struct perf_evlist *evlist = perf_evlist__new();
  64
  65	if (evlist && perf_evlist__add_default(evlist)) {
  66		perf_evlist__delete(evlist);
  67		evlist = NULL;
  68	}
  69
  70	return evlist;
  71}
  72
  73struct perf_evlist *perf_evlist__new_dummy(void)
  74{
  75	struct perf_evlist *evlist = perf_evlist__new();
  76
  77	if (evlist && perf_evlist__add_dummy(evlist)) {
  78		perf_evlist__delete(evlist);
  79		evlist = NULL;
  80	}
  81
  82	return evlist;
  83}
  84
  85/**
  86 * perf_evlist__set_id_pos - set the positions of event ids.
  87 * @evlist: selected event list
  88 *
  89 * Events with compatible sample types all have the same id_pos
  90 * and is_pos.  For convenience, put a copy on evlist.
  91 */
  92void perf_evlist__set_id_pos(struct perf_evlist *evlist)
  93{
  94	struct perf_evsel *first = perf_evlist__first(evlist);
  95
  96	evlist->id_pos = first->id_pos;
  97	evlist->is_pos = first->is_pos;
  98}
  99
 100static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
 101{
 102	struct perf_evsel *evsel;
 103
 104	evlist__for_each_entry(evlist, evsel)
 105		perf_evsel__calc_id_pos(evsel);
 106
 107	perf_evlist__set_id_pos(evlist);
 108}
 109
 110static void perf_evlist__purge(struct perf_evlist *evlist)
 111{
 112	struct perf_evsel *pos, *n;
 113
 114	evlist__for_each_entry_safe(evlist, n, pos) {
 115		list_del_init(&pos->node);
 116		pos->evlist = NULL;
 117		perf_evsel__delete(pos);
 118	}
 119
 120	evlist->nr_entries = 0;
 121}
 122
 123void perf_evlist__exit(struct perf_evlist *evlist)
 124{
 125	zfree(&evlist->mmap);
 126	zfree(&evlist->backward_mmap);
 127	fdarray__exit(&evlist->pollfd);
 128}
 129
 130void perf_evlist__delete(struct perf_evlist *evlist)
 131{
 132	if (evlist == NULL)
 133		return;
 134
 135	perf_evlist__munmap(evlist);
 136	perf_evlist__close(evlist);
 137	cpu_map__put(evlist->cpus);
 138	thread_map__put(evlist->threads);
 139	evlist->cpus = NULL;
 140	evlist->threads = NULL;
 141	perf_evlist__purge(evlist);
 142	perf_evlist__exit(evlist);
 143	free(evlist);
 144}
 145
 146static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
 147					  struct perf_evsel *evsel)
 148{
 149	/*
 150	 * We already have cpus for evsel (via PMU sysfs) so
 151	 * keep it, if there's no target cpu list defined.
 152	 */
 153	if (!evsel->own_cpus || evlist->has_user_cpus) {
 154		cpu_map__put(evsel->cpus);
 155		evsel->cpus = cpu_map__get(evlist->cpus);
 156	} else if (evsel->cpus != evsel->own_cpus) {
 157		cpu_map__put(evsel->cpus);
 158		evsel->cpus = cpu_map__get(evsel->own_cpus);
 159	}
 160
 161	thread_map__put(evsel->threads);
 162	evsel->threads = thread_map__get(evlist->threads);
 163}
 164
 165static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
 166{
 167	struct perf_evsel *evsel;
 168
 169	evlist__for_each_entry(evlist, evsel)
 170		__perf_evlist__propagate_maps(evlist, evsel);
 171}
 172
 173void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
 174{
 175	entry->evlist = evlist;
 176	list_add_tail(&entry->node, &evlist->entries);
 177	entry->idx = evlist->nr_entries;
 178	entry->tracking = !entry->idx;
 179
 180	if (!evlist->nr_entries++)
 181		perf_evlist__set_id_pos(evlist);
 182
 183	__perf_evlist__propagate_maps(evlist, entry);
 184}
 185
 186void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel)
 187{
 188	evsel->evlist = NULL;
 189	list_del_init(&evsel->node);
 190	evlist->nr_entries -= 1;
 191}
 192
 193void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
 194				   struct list_head *list)
 195{
 196	struct perf_evsel *evsel, *temp;
 197
 198	__evlist__for_each_entry_safe(list, temp, evsel) {
 199		list_del_init(&evsel->node);
 200		perf_evlist__add(evlist, evsel);
 201	}
 202}
 203
 204void __perf_evlist__set_leader(struct list_head *list)
 205{
 206	struct perf_evsel *evsel, *leader;
 207
 208	leader = list_entry(list->next, struct perf_evsel, node);
 209	evsel = list_entry(list->prev, struct perf_evsel, node);
 210
 211	leader->nr_members = evsel->idx - leader->idx + 1;
 212
 213	__evlist__for_each_entry(list, evsel) {
 214		evsel->leader = leader;
 215	}
 216}
 217
 218void perf_evlist__set_leader(struct perf_evlist *evlist)
 219{
 220	if (evlist->nr_entries) {
 221		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
 222		__perf_evlist__set_leader(&evlist->entries);
 223	}
 224}
 225
 226void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
 227{
 228	attr->precise_ip = 3;
 229
 230	while (attr->precise_ip != 0) {
 231		int fd = sys_perf_event_open(attr, 0, -1, -1, 0);
 232		if (fd != -1) {
 233			close(fd);
 234			break;
 235		}
 236		--attr->precise_ip;
 237	}
 238}
 239
 240int perf_evlist__add_default(struct perf_evlist *evlist)
 241{
 242	struct perf_evsel *evsel = perf_evsel__new_cycles();
 
 
 
 
 
 
 243
 
 
 
 244	if (evsel == NULL)
 245		return -ENOMEM;
 
 
 
 
 
 246
 247	perf_evlist__add(evlist, evsel);
 248	return 0;
 
 
 
 
 249}
 250
 251int perf_evlist__add_dummy(struct perf_evlist *evlist)
 252{
 253	struct perf_event_attr attr = {
 254		.type	= PERF_TYPE_SOFTWARE,
 255		.config = PERF_COUNT_SW_DUMMY,
 256		.size	= sizeof(attr), /* to capture ABI version */
 257	};
 258	struct perf_evsel *evsel = perf_evsel__new(&attr);
 259
 260	if (evsel == NULL)
 261		return -ENOMEM;
 262
 263	perf_evlist__add(evlist, evsel);
 264	return 0;
 265}
 266
 267static int perf_evlist__add_attrs(struct perf_evlist *evlist,
 268				  struct perf_event_attr *attrs, size_t nr_attrs)
 269{
 270	struct perf_evsel *evsel, *n;
 271	LIST_HEAD(head);
 272	size_t i;
 273
 274	for (i = 0; i < nr_attrs; i++) {
 275		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
 276		if (evsel == NULL)
 277			goto out_delete_partial_list;
 278		list_add_tail(&evsel->node, &head);
 279	}
 280
 281	perf_evlist__splice_list_tail(evlist, &head);
 282
 283	return 0;
 284
 285out_delete_partial_list:
 286	__evlist__for_each_entry_safe(&head, n, evsel)
 287		perf_evsel__delete(evsel);
 288	return -1;
 289}
 290
 291int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
 292				     struct perf_event_attr *attrs, size_t nr_attrs)
 293{
 294	size_t i;
 295
 296	for (i = 0; i < nr_attrs; i++)
 297		event_attr_init(attrs + i);
 298
 299	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
 300}
 301
 302struct perf_evsel *
 303perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
 304{
 305	struct perf_evsel *evsel;
 306
 307	evlist__for_each_entry(evlist, evsel) {
 308		if (evsel->attr.type   == PERF_TYPE_TRACEPOINT &&
 309		    (int)evsel->attr.config == id)
 310			return evsel;
 311	}
 312
 313	return NULL;
 314}
 315
 316struct perf_evsel *
 317perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
 318				     const char *name)
 319{
 320	struct perf_evsel *evsel;
 321
 322	evlist__for_each_entry(evlist, evsel) {
 323		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
 324		    (strcmp(evsel->name, name) == 0))
 325			return evsel;
 326	}
 327
 328	return NULL;
 329}
 330
 331int perf_evlist__add_newtp(struct perf_evlist *evlist,
 332			   const char *sys, const char *name, void *handler)
 333{
 334	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);
 335
 336	if (IS_ERR(evsel))
 337		return -1;
 338
 339	evsel->handler = handler;
 340	perf_evlist__add(evlist, evsel);
 341	return 0;
 342}
 343
 344static int perf_evlist__nr_threads(struct perf_evlist *evlist,
 345				   struct perf_evsel *evsel)
 346{
 347	if (evsel->system_wide)
 348		return 1;
 349	else
 350		return thread_map__nr(evlist->threads);
 351}
 352
 353void perf_evlist__disable(struct perf_evlist *evlist)
 354{
 355	struct perf_evsel *pos;
 356
 357	evlist__for_each_entry(evlist, pos) {
 358		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
 359			continue;
 360		perf_evsel__disable(pos);
 361	}
 362
 363	evlist->enabled = false;
 364}
 365
 366void perf_evlist__enable(struct perf_evlist *evlist)
 367{
 368	struct perf_evsel *pos;
 369
 370	evlist__for_each_entry(evlist, pos) {
 371		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
 372			continue;
 373		perf_evsel__enable(pos);
 374	}
 375
 376	evlist->enabled = true;
 377}
 378
 379void perf_evlist__toggle_enable(struct perf_evlist *evlist)
 380{
 381	(evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist);
 382}
 383
 384static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
 385					 struct perf_evsel *evsel, int cpu)
 386{
 387	int thread;
 388	int nr_threads = perf_evlist__nr_threads(evlist, evsel);
 389
 390	if (!evsel->fd)
 391		return -EINVAL;
 392
 393	for (thread = 0; thread < nr_threads; thread++) {
 394		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
 395		if (err)
 396			return err;
 397	}
 398	return 0;
 399}
 400
 401static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
 402					    struct perf_evsel *evsel,
 403					    int thread)
 404{
 405	int cpu;
 406	int nr_cpus = cpu_map__nr(evlist->cpus);
 407
 408	if (!evsel->fd)
 409		return -EINVAL;
 410
 411	for (cpu = 0; cpu < nr_cpus; cpu++) {
 412		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
 413		if (err)
 414			return err;
 415	}
 416	return 0;
 417}
 418
 419int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
 420				  struct perf_evsel *evsel, int idx)
 421{
 422	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);
 423
 424	if (per_cpu_mmaps)
 425		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
 426	else
 427		return perf_evlist__enable_event_thread(evlist, evsel, idx);
 428}
 429
 430int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
 431{
 432	int nr_cpus = cpu_map__nr(evlist->cpus);
 433	int nr_threads = thread_map__nr(evlist->threads);
 434	int nfds = 0;
 435	struct perf_evsel *evsel;
 436
 437	evlist__for_each_entry(evlist, evsel) {
 438		if (evsel->system_wide)
 439			nfds += nr_cpus;
 440		else
 441			nfds += nr_cpus * nr_threads;
 442	}
 443
 444	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
 445	    fdarray__grow(&evlist->pollfd, nfds) < 0)
 446		return -ENOMEM;
 447
 448	return 0;
 449}
 450
 451static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
 452				     struct perf_mmap *map, short revent)
 453{
 454	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
 455	/*
 456	 * Save the mmap pointer so that when we filter out POLLHUP'ed fds
 457	 * we can put the associated evlist->mmap[] entry.
 458	 */
 459	if (pos >= 0) {
 460		evlist->pollfd.priv[pos].ptr = map;
 461
 462		fcntl(fd, F_SETFL, O_NONBLOCK);
 463	}
 464
 465	return pos;
 466}
 467
 468int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
 469{
 470	return __perf_evlist__add_pollfd(evlist, fd, NULL, POLLIN);
 471}
 472
 473static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
 474					 void *arg __maybe_unused)
 475{
 476	struct perf_mmap *map = fda->priv[fd].ptr;
 477
 478	if (map)
 479		perf_mmap__put(map);
 480}
 481
 482int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
 483{
 484	return fdarray__filter(&evlist->pollfd, revents_and_mask,
 485			       perf_evlist__munmap_filtered, NULL);
 486}
 487
 488int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
 489{
 490	return fdarray__poll(&evlist->pollfd, timeout);
 491}
 492
 493static void perf_evlist__id_hash(struct perf_evlist *evlist,
 494				 struct perf_evsel *evsel,
 495				 int cpu, int thread, u64 id)
 496{
 497	int hash;
 498	struct perf_sample_id *sid = SID(evsel, cpu, thread);
 499
 500	sid->id = id;
 501	sid->evsel = evsel;
 502	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
 503	hlist_add_head(&sid->node, &evlist->heads[hash]);
 504}
 505
 506void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
 507			 int cpu, int thread, u64 id)
 508{
 509	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
 510	evsel->id[evsel->ids++] = id;
 511}
 512
 513int perf_evlist__id_add_fd(struct perf_evlist *evlist,
 514			   struct perf_evsel *evsel,
 515			   int cpu, int thread, int fd)
 516{
 517	u64 read_data[4] = { 0, };
 518	int id_idx = 1; /* The first entry is the counter value */
 519	u64 id;
 520	int ret;
 521
 522	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
 523	if (!ret)
 524		goto add;
 525
 526	if (errno != ENOTTY)
 527		return -1;
 528
 529	/* Legacy way to get the event id. All hail to old kernels! */
 530
 531	/*
 532	 * This way does not work with group format read, so bail
 533	 * out in that case.
 534	 */
 535	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
 536		return -1;
 537
 538	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
 539	    read(fd, &read_data, sizeof(read_data)) == -1)
 540		return -1;
 541
 542	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
 543		++id_idx;
 544	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
 545		++id_idx;
 546
 547	id = read_data[id_idx];
 548
 549 add:
 550	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
 551	return 0;
 552}
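/*
 * Editorial example, not in the original source: the legacy read()
 * layout decoded above, assuming read_format = PERF_FORMAT_ID |
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING
 * (and no PERF_FORMAT_GROUP):
 *
 *	read_data[0] = counter value
 *	read_data[1] = time_enabled	(id_idx becomes 2)
 *	read_data[2] = time_running	(id_idx becomes 3)
 *	read_data[3] = id		<- read_data[id_idx]
 */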
 553
 554static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
 555				     struct perf_evsel *evsel, int idx, int cpu,
 556				     int thread)
 557{
 558	struct perf_sample_id *sid = SID(evsel, cpu, thread);
 559	sid->idx = idx;
 560	if (evlist->cpus && cpu >= 0)
 561		sid->cpu = evlist->cpus->map[cpu];
 562	else
 563		sid->cpu = -1;
 564	if (!evsel->system_wide && evlist->threads && thread >= 0)
 565		sid->tid = thread_map__pid(evlist->threads, thread);
 566	else
 567		sid->tid = -1;
 568}
 569
 570struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
 571{
 572	struct hlist_head *head;
 573	struct perf_sample_id *sid;
 574	int hash;
 575
 576	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
 577	head = &evlist->heads[hash];
 578
 579	hlist_for_each_entry(sid, head, node)
 580		if (sid->id == id)
 581			return sid;
 582
 583	return NULL;
 584}
 585
 586struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
 587{
 588	struct perf_sample_id *sid;
 589
 590	if (evlist->nr_entries == 1 || !id)
 591		return perf_evlist__first(evlist);
 592
 593	sid = perf_evlist__id2sid(evlist, id);
 594	if (sid)
 595		return sid->evsel;
 596
 597	if (!perf_evlist__sample_id_all(evlist))
 598		return perf_evlist__first(evlist);
 599
 600	return NULL;
 601}
 602
 603struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
 604						u64 id)
 605{
 606	struct perf_sample_id *sid;
 607
 608	if (!id)
 609		return NULL;
 610
 611	sid = perf_evlist__id2sid(evlist, id);
 612	if (sid)
 613		return sid->evsel;
 614
 615	return NULL;
 616}
 617
 618static int perf_evlist__event2id(struct perf_evlist *evlist,
 619				 union perf_event *event, u64 *id)
 620{
 621	const u64 *array = event->sample.array;
 622	ssize_t n;
 623
 624	n = (event->header.size - sizeof(event->header)) >> 3;
 625
 626	if (event->header.type == PERF_RECORD_SAMPLE) {
 627		if (evlist->id_pos >= n)
 628			return -1;
 629		*id = array[evlist->id_pos];
 630	} else {
 631		if (evlist->is_pos > n)
 632			return -1;
 633		n -= evlist->is_pos;
 634		*id = array[n];
 635	}
 636	return 0;
 637}
 638
 639struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
 640					    union perf_event *event)
 641{
 642	struct perf_evsel *first = perf_evlist__first(evlist);
 643	struct hlist_head *head;
 644	struct perf_sample_id *sid;
 645	int hash;
 646	u64 id;
 647
 648	if (evlist->nr_entries == 1)
 649		return first;
 650
 651	if (!first->attr.sample_id_all &&
 652	    event->header.type != PERF_RECORD_SAMPLE)
 653		return first;
 654
 655	if (perf_evlist__event2id(evlist, event, &id))
 656		return NULL;
 657
 658	/* Synthesized events have an id of zero */
 659	if (!id)
 660		return first;
 661
 662	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
 663	head = &evlist->heads[hash];
 664
 665	hlist_for_each_entry(sid, head, node) {
 666		if (sid->id == id)
 667			return sid->evsel;
 668	}
 669	return NULL;
 670}
 671
 672static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
 673{
 674	int i;
 675
 676	if (!evlist->backward_mmap)
 677		return 0;
 678
 679	for (i = 0; i < evlist->nr_mmaps; i++) {
 680		int fd = evlist->backward_mmap[i].fd;
 681		int err;
 682
 683		if (fd < 0)
 684			continue;
 685		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
 686		if (err)
 687			return err;
 688	}
 689	return 0;
 690}
 691
 692static int perf_evlist__pause(struct perf_evlist *evlist)
 693{
 694	return perf_evlist__set_paused(evlist, true);
 695}
 696
 697static int perf_evlist__resume(struct perf_evlist *evlist)
 698{
 699	return perf_evlist__set_paused(evlist, false);
 700}
 701
 702	/* When check_messup is true, 'end' must point to a good entry */
 703static union perf_event *
 704perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start,
 705		u64 end, u64 *prev)
 706{
 707	unsigned char *data = md->base + page_size;
 708	union perf_event *event = NULL;
 709	int diff = end - start;
 710
 711	if (check_messup) {
 712		/*
 713		 * If we're further behind than half the buffer, there's a chance
 714		 * the writer will bite our tail and mess up the samples under us.
 715		 *
 716		 * If we somehow ended up ahead of the 'end', we got messed up.
 717		 *
 718		 * In either case, truncate and restart at 'end'.
 719		 */
 720		if (diff > md->mask / 2 || diff < 0) {
 721			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
 722
 723			/*
 724			 * 'end' points to a known good entry, start there.
 725			 */
 726			start = end;
 727			diff = 0;
 728		}
 729	}
 730
 731	if (diff >= (int)sizeof(event->header)) {
 732		size_t size;
 733
 734		event = (union perf_event *)&data[start & md->mask];
 735		size = event->header.size;
 736
 737		if (size < sizeof(event->header) || diff < (int)size) {
 738			event = NULL;
 739			goto broken_event;
 740		}
 741
 742		/*
 743		 * Event straddles the mmap boundary -- header should always
 744		 * be inside due to u64 alignment of output.
 745		 */
 746		if ((start & md->mask) + size != ((start + size) & md->mask)) {
 747			unsigned int offset = start;
 748			unsigned int len = min(sizeof(*event), size), cpy;
 749			void *dst = md->event_copy;
 750
 751			do {
 752				cpy = min(md->mask + 1 - (offset & md->mask), len);
 753				memcpy(dst, &data[offset & md->mask], cpy);
 754				offset += cpy;
 755				dst += cpy;
 756				len -= cpy;
 757			} while (len);
 758
 759			event = (union perf_event *) md->event_copy;
 760		}
 761
 762		start += size;
 763	}
 764
 765broken_event:
 766	if (prev)
 767		*prev = start;
 768
 769	return event;
 770}
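/*
 * Editorial example, not in the original source: the straddle case
 * handled above. With mask == 0xffff (a 64KiB data area), start ==
 * 0xfff8 and an event of size 0x20, the event occupies [0xfff8, 0x10018):
 * 8 bytes at the end of the buffer plus 0x18 bytes wrapped around to the
 * beginning, so the copy loop gathers it piecewise into md->event_copy
 * before returning it.
 */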
 771
 772union perf_event *perf_mmap__read_forward(struct perf_mmap *md, bool check_messup)
 773{
 774	u64 head;
 775	u64 old = md->prev;
 776
 777	/*
 778	 * Check if event was unmapped due to a POLLHUP/POLLERR.
 779	 */
 780	if (!atomic_read(&md->refcnt))
 781		return NULL;
 782
 783	head = perf_mmap__read_head(md);
 784
 785	return perf_mmap__read(md, check_messup, old, head, &md->prev);
 786}
 787
 788union perf_event *
 789perf_mmap__read_backward(struct perf_mmap *md)
 790{
 791	u64 head, end;
 792	u64 start = md->prev;
 793
 794	/*
 795	 * Check if event was unmapped due to a POLLHUP/POLLERR.
 796	 */
 797	if (!atomic_read(&md->refcnt))
 798		return NULL;
 799
 800	head = perf_mmap__read_head(md);
 801	if (!head)
 802		return NULL;
 803
 804	/*
 805	 * The 'head' pointer starts at 0 and the kernel subtracts
 806	 * sizeof(record) from it on each write, so 'head' is in fact
 807	 * negative. The 'end' pointer is built manually by adding the size
 808	 * of the ring buffer to 'head', meaning the valid data we can read
 809	 * spans the whole ring buffer. If 'end' is positive, the ring
 810	 * buffer has not completely filled, so we must clamp 'end' to 0.
 811	 *
 812	 * However, since both 'head' and 'end' are unsigned, we can't
 813	 * simply compare 'end' against 0. Instead we compare '-head' with
 814	 * the size of the ring buffer, where -head is the number of bytes
 815	 * the kernel has written to the ring buffer.
 816	 */
 817	if (-head < (u64)(md->mask + 1))
 818		end = 0;
 819	else
 820		end = head + md->mask + 1;
 821
 822	return perf_mmap__read(md, false, start, end, &md->prev);
 823}
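/*
 * Editorial example, not in the original source: the unsigned arithmetic
 * above for a 64KiB buffer (md->mask + 1 == 0x10000). After the kernel
 * has written 0x3000 bytes, head == (u64)-0x3000, so -head == 0x3000 <
 * 0x10000 and 'end' is clamped to 0: the buffer has not wrapped and
 * [head, 0) is all valid. Once -head >= 0x10000 the buffer has wrapped
 * and only one buffer's worth of data, [head, head + 0x10000), can be
 * trusted.
 */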
 824
 825union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
 826{
 827	struct perf_mmap *md = &evlist->mmap[idx];
 828
 829	/*
 830	 * Checking for messup is required for a forward overwritable ring
 831	 * buffer: the memory pointed to by md->prev can be overwritten in
 832	 * that case. It is not needed for a read-write ring buffer: the
 833	 * kernel stops outputting when it hits md->prev (perf_mmap__consume()).
 834	 */
 835	return perf_mmap__read_forward(md, evlist->overwrite);
 836}
 837
 838union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
 839{
 840	struct perf_mmap *md = &evlist->mmap[idx];
 841
 842	/*
 843	 * No need to check for messup in a backward ring buffer:
 844	 * we can always read arbitrarily long data from a backward
 845	 * ring buffer, provided we pause it before reading.
 846	 */
 847	return perf_mmap__read_backward(md);
 848}
 849
 850union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
 851{
 852	return perf_evlist__mmap_read_forward(evlist, idx);
 853}
 854
 855void perf_mmap__read_catchup(struct perf_mmap *md)
 856{
 857	u64 head;
 858
 859	if (!atomic_read(&md->refcnt))
 860		return;
 861
 862	head = perf_mmap__read_head(md);
 863	md->prev = head;
 864}
 865
 866void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
 867{
 868	perf_mmap__read_catchup(&evlist->mmap[idx]);
 869}
 870
 871static bool perf_mmap__empty(struct perf_mmap *md)
 872{
 873	return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
 874}
 875
 876static void perf_mmap__get(struct perf_mmap *map)
 877{
 878	atomic_inc(&map->refcnt);
 879}
 880
 881static void perf_mmap__put(struct perf_mmap *md)
 882{
 883	BUG_ON(md->base && atomic_read(&md->refcnt) == 0);
 884
 885	if (atomic_dec_and_test(&md->refcnt))
 886		perf_mmap__munmap(md);
 887}
 888
 889void perf_mmap__consume(struct perf_mmap *md, bool overwrite)
 890{
 891	if (!overwrite) {
 892		u64 old = md->prev;
 893
 894		perf_mmap__write_tail(md, old);
 895	}
 896
 897	if (atomic_read(&md->refcnt) == 1 && perf_mmap__empty(md))
 898		perf_mmap__put(md);
 899}
 900
 901void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
 902{
 903	perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite);
 904}
 905
 906int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
 907			       struct auxtrace_mmap_params *mp __maybe_unused,
 908			       void *userpg __maybe_unused,
 909			       int fd __maybe_unused)
 910{
 911	return 0;
 912}
 913
 914void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
 915{
 916}
 917
 918void __weak auxtrace_mmap_params__init(
 919			struct auxtrace_mmap_params *mp __maybe_unused,
 920			off_t auxtrace_offset __maybe_unused,
 921			unsigned int auxtrace_pages __maybe_unused,
 922			bool auxtrace_overwrite __maybe_unused)
 923{
 924}
 925
 926void __weak auxtrace_mmap_params__set_idx(
 927			struct auxtrace_mmap_params *mp __maybe_unused,
 928			struct perf_evlist *evlist __maybe_unused,
 929			int idx __maybe_unused,
 930			bool per_cpu __maybe_unused)
 931{
 932}
 933
 934static void perf_mmap__munmap(struct perf_mmap *map)
 935{
 936	if (map->base != NULL) {
 937		munmap(map->base, perf_mmap__mmap_len(map));
 938		map->base = NULL;
 939		map->fd = -1;
 940		atomic_set(&map->refcnt, 0);
 941	}
 942	auxtrace_mmap__munmap(&map->auxtrace_mmap);
 943}
 944
 945static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
 946{
 947	int i;
 948
 949	if (evlist->mmap)
 950		for (i = 0; i < evlist->nr_mmaps; i++)
 951			perf_mmap__munmap(&evlist->mmap[i]);
 952
 953	if (evlist->backward_mmap)
 954		for (i = 0; i < evlist->nr_mmaps; i++)
 955			perf_mmap__munmap(&evlist->backward_mmap[i]);
 956}
 957
 958void perf_evlist__munmap(struct perf_evlist *evlist)
 959{
 960	perf_evlist__munmap_nofree(evlist);
 961	zfree(&evlist->mmap);
 962	zfree(&evlist->backward_mmap);
 963}
 964
 965static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
 966{
 967	int i;
 968	struct perf_mmap *map;
 969
 970	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
 971	if (cpu_map__empty(evlist->cpus))
 972		evlist->nr_mmaps = thread_map__nr(evlist->threads);
 973	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
 974	if (!map)
 975		return NULL;
 976
 977	for (i = 0; i < evlist->nr_mmaps; i++)
 978		map[i].fd = -1;
 979	return map;
 980}
 981
 982struct mmap_params {
 983	int prot;
 984	int mask;
 985	struct auxtrace_mmap_params auxtrace_mp;
 986};
 987
 988static int perf_mmap__mmap(struct perf_mmap *map,
 989			   struct mmap_params *mp, int fd)
 990{
 991	/*
 992	 * The last one will be done at perf_evlist__mmap_consume(), so that we
 993	 * make sure we don't prevent tools from consuming every last event in
 994	 * the ring buffer.
 995	 *
 996	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
 997	 * anymore, but the last events for it are still in the ring buffer,
 998	 * waiting to be consumed.
 999	 *
 1000	 * Tools can choose to ignore this at their own discretion, but the
1001	 * evlist layer can't just drop it when filtering events in
1002	 * perf_evlist__filter_pollfd().
1003	 */
1004	atomic_set(&map->refcnt, 2);
1005	map->prev = 0;
1006	map->mask = mp->mask;
1007	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
1008			 MAP_SHARED, fd, 0);
1009	if (map->base == MAP_FAILED) {
1010		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
1011			  errno);
1012		map->base = NULL;
1013		return -1;
1014	}
1015	map->fd = fd;
1016
1017	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
1018				&mp->auxtrace_mp, map->base, fd))
1019		return -1;
1020
1021	return 0;
1022}
1023
1024static bool
1025perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
1026			 struct perf_evsel *evsel)
1027{
1028	if (evsel->attr.write_backward)
1029		return false;
1030	return true;
1031}
1032
1033static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
1034				       struct mmap_params *mp, int cpu_idx,
1035				       int thread, int *_output, int *_output_backward)
1036{
1037	struct perf_evsel *evsel;
1038	int revent;
1039	int evlist_cpu = cpu_map__cpu(evlist->cpus, cpu_idx);
1040
1041	evlist__for_each_entry(evlist, evsel) {
1042		struct perf_mmap *maps = evlist->mmap;
1043		int *output = _output;
1044		int fd;
1045		int cpu;
1046
1047		if (evsel->attr.write_backward) {
1048			output = _output_backward;
1049			maps = evlist->backward_mmap;
1050
1051			if (!maps) {
1052				maps = perf_evlist__alloc_mmap(evlist);
1053				if (!maps)
1054					return -1;
1055				evlist->backward_mmap = maps;
1056				if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
1057					perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
1058			}
1059		}
1060
1061		if (evsel->system_wide && thread)
1062			continue;
1063
1064		cpu = cpu_map__idx(evsel->cpus, evlist_cpu);
1065		if (cpu == -1)
1066			continue;
1067
1068		fd = FD(evsel, cpu, thread);
1069
1070		if (*output == -1) {
1071			*output = fd;
1072
 1073			if (perf_mmap__mmap(&maps[idx], mp, *output) < 0)
1074				return -1;
1075		} else {
1076			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
1077				return -1;
1078
1079			perf_mmap__get(&maps[idx]);
1080		}
1081
1082		revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;
1083
1084		/*
1085		 * The system_wide flag causes a selected event to be opened
1086		 * always without a pid.  Consequently it will never get a
1087		 * POLLHUP, but it is used for tracking in combination with
1088		 * other events, so it should not need to be polled anyway.
1089		 * Therefore don't add it for polling.
1090		 */
1091		if (!evsel->system_wide &&
1092		    __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) {
1093			perf_mmap__put(&maps[idx]);
1094			return -1;
1095		}
1096
1097		if (evsel->attr.read_format & PERF_FORMAT_ID) {
1098			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
1099						   fd) < 0)
1100				return -1;
1101			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
1102						 thread);
1103		}
1104	}
1105
1106	return 0;
1107}
1108
1109static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
1110				     struct mmap_params *mp)
1111{
1112	int cpu, thread;
1113	int nr_cpus = cpu_map__nr(evlist->cpus);
1114	int nr_threads = thread_map__nr(evlist->threads);
1115
1116	pr_debug2("perf event ring buffer mmapped per cpu\n");
1117	for (cpu = 0; cpu < nr_cpus; cpu++) {
1118		int output = -1;
1119		int output_backward = -1;
1120
1121		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
1122					      true);
1123
1124		for (thread = 0; thread < nr_threads; thread++) {
1125			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
1126							thread, &output, &output_backward))
1127				goto out_unmap;
1128		}
1129	}
1130
1131	return 0;
1132
1133out_unmap:
1134	perf_evlist__munmap_nofree(evlist);
1135	return -1;
1136}
1137
1138static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
1139					struct mmap_params *mp)
1140{
1141	int thread;
1142	int nr_threads = thread_map__nr(evlist->threads);
1143
1144	pr_debug2("perf event ring buffer mmapped per thread\n");
1145	for (thread = 0; thread < nr_threads; thread++) {
1146		int output = -1;
1147		int output_backward = -1;
1148
1149		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
1150					      false);
1151
1152		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
1153						&output, &output_backward))
1154			goto out_unmap;
1155	}
1156
1157	return 0;
1158
1159out_unmap:
1160	perf_evlist__munmap_nofree(evlist);
1161	return -1;
1162}
1163
1164unsigned long perf_event_mlock_kb_in_pages(void)
1165{
1166	unsigned long pages;
1167	int max;
1168
1169	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
1170		/*
 1171		 * Pick a value that was considered good once upon a time,
 1172		 * i.e. things will look strange since we can't read the
 1173		 * sysctl value, but let's not die just yet...
1174		 */
1175		max = 512;
1176	} else {
1177		max -= (page_size / 1024);
1178	}
1179
1180	pages = (max * 1024) / page_size;
1181	if (!is_power_of_2(pages))
1182		pages = rounddown_pow_of_two(pages);
1183
1184	return pages;
1185}
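/*
 * Editorial example, not in the original source: with the kernel default
 * perf_event_mlock_kb of 516 and 4KiB pages, max becomes 516 - 4 = 512KiB,
 * so pages == (512 * 1024) / 4096 == 128, already a power of two, and 128
 * is returned.
 */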
1186
1187static size_t perf_evlist__mmap_size(unsigned long pages)
1188{
1189	if (pages == UINT_MAX)
1190		pages = perf_event_mlock_kb_in_pages();
1191	else if (!is_power_of_2(pages))
1192		return 0;
1193
1194	return (pages + 1) * page_size;
1195}
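/*
 * Editorial example, not in the original source: for pages == 128 and
 * 4KiB pages, perf_evlist__mmap_size() returns (128 + 1) * 4096 ==
 * 528384 bytes; the extra page is the control page holding
 * struct perf_event_mmap_page.
 */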
1196
1197static long parse_pages_arg(const char *str, unsigned long min,
1198			    unsigned long max)
1199{
1200	unsigned long pages, val;
1201	static struct parse_tag tags[] = {
1202		{ .tag  = 'B', .mult = 1       },
1203		{ .tag  = 'K', .mult = 1 << 10 },
1204		{ .tag  = 'M', .mult = 1 << 20 },
1205		{ .tag  = 'G', .mult = 1 << 30 },
1206		{ .tag  = 0 },
1207	};
1208
1209	if (str == NULL)
1210		return -EINVAL;
1211
1212	val = parse_tag_value(str, tags);
1213	if (val != (unsigned long) -1) {
1214		/* we got file size value */
1215		pages = PERF_ALIGN(val, page_size) / page_size;
1216	} else {
1217		/* we got pages count value */
1218		char *eptr;
1219		pages = strtoul(str, &eptr, 10);
1220		if (*eptr != '\0')
1221			return -EINVAL;
1222	}
1223
1224	if (pages == 0 && min == 0) {
1225		/* leave number of pages at 0 */
1226	} else if (!is_power_of_2(pages)) {
1227		/* round pages up to next power of 2 */
1228		pages = roundup_pow_of_two(pages);
1229		if (!pages)
1230			return -EINVAL;
1231		pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
1232			pages * page_size, pages);
1233	}
1234
1235	if (pages > max)
1236		return -EINVAL;
1237
1238	return pages;
1239}
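/*
 * Editorial examples, not in the original source, of parse_pages_arg()
 * with 4KiB pages:
 *
 *	"512K" -> size value: 524288 / 4096 = 128 pages
 *	"100"  -> page count: rounded up to the next power of two, 128
 *	"0"    -> accepted as-is only when min == 0
 */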
1240
1241int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
1242{
1243	unsigned long max = UINT_MAX;
1244	long pages;
1245
1246	if (max > SIZE_MAX / page_size)
1247		max = SIZE_MAX / page_size;
1248
1249	pages = parse_pages_arg(str, 1, max);
1250	if (pages < 0) {
1251		pr_err("Invalid argument for --mmap_pages/-m\n");
1252		return -1;
1253	}
1254
1255	*mmap_pages = pages;
1256	return 0;
1257}
1258
1259int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
1260				  int unset __maybe_unused)
1261{
1262	return __perf_evlist__parse_mmap_pages(opt->value, str);
1263}
1264
1265/**
1266 * perf_evlist__mmap_ex - Create mmaps to receive events.
1267 * @evlist: list of events
1268 * @pages: map length in pages
1269 * @overwrite: overwrite older events?
 1270 * @auxtrace_pages: auxtrace map length in pages
 1271 * @auxtrace_overwrite: overwrite older auxtrace data?
1272 *
1273 * If @overwrite is %false the user needs to signal event consumption using
1274 * perf_mmap__write_tail().  Using perf_evlist__mmap_read() does this
1275 * automatically.
1276 *
1277 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
1278 * consumption using auxtrace_mmap__write_tail().
1279 *
1280 * Return: %0 on success, negative error code otherwise.
1281 */
1282int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
1283			 bool overwrite, unsigned int auxtrace_pages,
1284			 bool auxtrace_overwrite)
1285{
1286	struct perf_evsel *evsel;
1287	const struct cpu_map *cpus = evlist->cpus;
1288	const struct thread_map *threads = evlist->threads;
1289	struct mmap_params mp = {
1290		.prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
1291	};
1292
1293	if (!evlist->mmap)
1294		evlist->mmap = perf_evlist__alloc_mmap(evlist);
1295	if (!evlist->mmap)
1296		return -ENOMEM;
1297
1298	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
1299		return -ENOMEM;
1300
1301	evlist->overwrite = overwrite;
1302	evlist->mmap_len = perf_evlist__mmap_size(pages);
1303	pr_debug("mmap size %zuB\n", evlist->mmap_len);
1304	mp.mask = evlist->mmap_len - page_size - 1;
1305
1306	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
1307				   auxtrace_pages, auxtrace_overwrite);
1308
1309	evlist__for_each_entry(evlist, evsel) {
1310		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
1311		    evsel->sample_id == NULL &&
1312		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
1313			return -ENOMEM;
1314	}
1315
1316	if (cpu_map__empty(cpus))
1317		return perf_evlist__mmap_per_thread(evlist, &mp);
1318
1319	return perf_evlist__mmap_per_cpu(evlist, &mp);
1320}
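/*
 * Editorial example, not in the original source: the mask computation
 * above. With mmap_len == 129 * 4096 (128 data pages plus the control
 * page), mp.mask == 128 * 4096 - 1 == 0x7ffff, which perf_mmap__read()
 * applies as 'offset & mask' to index the data area starting one page
 * past map->base.
 */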
1321
1322int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
1323		      bool overwrite)
1324{
1325	return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
1326}
1327
1328int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
1329{
1330	struct cpu_map *cpus;
1331	struct thread_map *threads;
1332
1333	threads = thread_map__new_str(target->pid, target->tid, target->uid);
1334
1335	if (!threads)
1336		return -1;
1337
1338	if (target__uses_dummy_map(target))
1339		cpus = cpu_map__dummy_new();
1340	else
1341		cpus = cpu_map__new(target->cpu_list);
1342
1343	if (!cpus)
1344		goto out_delete_threads;
1345
1346	evlist->has_user_cpus = !!target->cpu_list;
1347
1348	perf_evlist__set_maps(evlist, cpus, threads);
1349
1350	return 0;
1351
1352out_delete_threads:
1353	thread_map__put(threads);
1354	return -1;
1355}
1356
1357void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
1358			   struct thread_map *threads)
1359{
1360	/*
1361	 * Allow for the possibility that one or another of the maps isn't being
1362	 * changed i.e. don't put it.  Note we are assuming the maps that are
1363	 * being applied are brand new and evlist is taking ownership of the
1364	 * original reference count of 1.  If that is not the case it is up to
1365	 * the caller to increase the reference count.
1366	 */
1367	if (cpus != evlist->cpus) {
1368		cpu_map__put(evlist->cpus);
1369		evlist->cpus = cpu_map__get(cpus);
1370	}
1371
1372	if (threads != evlist->threads) {
1373		thread_map__put(evlist->threads);
1374		evlist->threads = thread_map__get(threads);
1375	}
1376
1377	perf_evlist__propagate_maps(evlist);
1378}
1379
1380void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
1381				   enum perf_event_sample_format bit)
1382{
1383	struct perf_evsel *evsel;
1384
1385	evlist__for_each_entry(evlist, evsel)
1386		__perf_evsel__set_sample_bit(evsel, bit);
1387}
1388
1389void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
1390				     enum perf_event_sample_format bit)
1391{
1392	struct perf_evsel *evsel;
1393
1394	evlist__for_each_entry(evlist, evsel)
1395		__perf_evsel__reset_sample_bit(evsel, bit);
1396}
1397
1398int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
1399{
1400	struct perf_evsel *evsel;
1401	int err = 0;
1402	const int ncpus = cpu_map__nr(evlist->cpus),
1403		  nthreads = thread_map__nr(evlist->threads);
1404
1405	evlist__for_each_entry(evlist, evsel) {
1406		if (evsel->filter == NULL)
1407			continue;
1408
1409		/*
 1410	 * filters only work for tracepoint events, which don't have a cpu
 1411	 * limit, so the evlist and evsel maps should always be the same.
1412		 */
1413		err = perf_evsel__apply_filter(evsel, ncpus, nthreads, evsel->filter);
1414		if (err) {
1415			*err_evsel = evsel;
1416			break;
1417		}
1418	}
1419
1420	return err;
1421}
1422
1423int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
1424{
1425	struct perf_evsel *evsel;
1426	int err = 0;
1427
1428	evlist__for_each_entry(evlist, evsel) {
1429		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
1430			continue;
1431
1432		err = perf_evsel__set_filter(evsel, filter);
1433		if (err)
1434			break;
1435	}
1436
1437	return err;
1438}
1439
1440int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids)
1441{
 1442	char *filter = NULL;
1443	int ret = -1;
1444	size_t i;
1445
1446	for (i = 0; i < npids; ++i) {
1447		if (i == 0) {
1448			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
1449				return -1;
1450		} else {
1451			char *tmp;
1452
1453			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
1454				goto out_free;
1455
1456			free(filter);
1457			filter = tmp;
1458		}
1459	}
1460
1461	ret = perf_evlist__set_filter(evlist, filter);
1462out_free:
1463	free(filter);
1464	return ret;
1465}
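/*
 * Editorial example, not in the original source: for npids == 2 and
 * pids == {1, 2}, the string built above is
 *
 *	"common_pid != 1 && common_pid != 2"
 *
 * which perf_evlist__set_filter() then applies to every tracepoint evsel.
 */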
1466
1467int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid)
1468{
1469	return perf_evlist__set_filter_pids(evlist, 1, &pid);
1470}
1471
1472bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
1473{
1474	struct perf_evsel *pos;
1475
1476	if (evlist->nr_entries == 1)
1477		return true;
1478
1479	if (evlist->id_pos < 0 || evlist->is_pos < 0)
1480		return false;
1481
1482	evlist__for_each_entry(evlist, pos) {
1483		if (pos->id_pos != evlist->id_pos ||
1484		    pos->is_pos != evlist->is_pos)
1485			return false;
1486	}
1487
1488	return true;
1489}
1490
1491u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
1492{
1493	struct perf_evsel *evsel;
1494
1495	if (evlist->combined_sample_type)
1496		return evlist->combined_sample_type;
1497
1498	evlist__for_each_entry(evlist, evsel)
1499		evlist->combined_sample_type |= evsel->attr.sample_type;
1500
1501	return evlist->combined_sample_type;
1502}
1503
1504u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
1505{
1506	evlist->combined_sample_type = 0;
1507	return __perf_evlist__combined_sample_type(evlist);
1508}
1509
1510u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist)
1511{
1512	struct perf_evsel *evsel;
1513	u64 branch_type = 0;
1514
1515	evlist__for_each_entry(evlist, evsel)
1516		branch_type |= evsel->attr.branch_sample_type;
1517	return branch_type;
1518}
1519
1520bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
1521{
1522	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
1523	u64 read_format = first->attr.read_format;
1524	u64 sample_type = first->attr.sample_type;
1525
1526	evlist__for_each_entry(evlist, pos) {
1527		if (read_format != pos->attr.read_format)
1528			return false;
1529	}
1530
 1531	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
1532	if ((sample_type & PERF_SAMPLE_READ) &&
1533	    !(read_format & PERF_FORMAT_ID)) {
1534		return false;
1535	}
1536
1537	return true;
1538}
1539
1540u64 perf_evlist__read_format(struct perf_evlist *evlist)
1541{
1542	struct perf_evsel *first = perf_evlist__first(evlist);
1543	return first->attr.read_format;
1544}
1545
1546u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
1547{
1548	struct perf_evsel *first = perf_evlist__first(evlist);
1549	struct perf_sample *data;
1550	u64 sample_type;
1551	u16 size = 0;
1552
1553	if (!first->attr.sample_id_all)
1554		goto out;
1555
1556	sample_type = first->attr.sample_type;
1557
1558	if (sample_type & PERF_SAMPLE_TID)
1559		size += sizeof(data->tid) * 2;
1560
 1561	if (sample_type & PERF_SAMPLE_TIME)
1562		size += sizeof(data->time);
1563
1564	if (sample_type & PERF_SAMPLE_ID)
1565		size += sizeof(data->id);
1566
1567	if (sample_type & PERF_SAMPLE_STREAM_ID)
1568		size += sizeof(data->stream_id);
1569
1570	if (sample_type & PERF_SAMPLE_CPU)
1571		size += sizeof(data->cpu) * 2;
1572
1573	if (sample_type & PERF_SAMPLE_IDENTIFIER)
1574		size += sizeof(data->id);
1575out:
1576	return size;
1577}
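/*
 * Editorial example, not in the original source: with sample_id_all set
 * and sample_type == PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID,
 * the id header appended to non-sample events is
 * 2 * 4 (pid/tid) + 8 (time) + 8 (id) == 24 bytes.
 */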
1578
1579bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
1580{
1581	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
1582
1583	evlist__for_each_entry_continue(evlist, pos) {
1584		if (first->attr.sample_id_all != pos->attr.sample_id_all)
1585			return false;
1586	}
1587
1588	return true;
1589}
1590
1591bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
1592{
1593	struct perf_evsel *first = perf_evlist__first(evlist);
1594	return first->attr.sample_id_all;
1595}
1596
1597void perf_evlist__set_selected(struct perf_evlist *evlist,
1598			       struct perf_evsel *evsel)
1599{
1600	evlist->selected = evsel;
1601}
1602
1603void perf_evlist__close(struct perf_evlist *evlist)
1604{
1605	struct perf_evsel *evsel;
1606	int ncpus = cpu_map__nr(evlist->cpus);
1607	int nthreads = thread_map__nr(evlist->threads);
1608
1609	evlist__for_each_entry_reverse(evlist, evsel) {
1610		int n = evsel->cpus ? evsel->cpus->nr : ncpus;
1611		perf_evsel__close(evsel, n, nthreads);
1612	}
1613}
1614
1615static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
1616{
1617	struct cpu_map	  *cpus;
1618	struct thread_map *threads;
1619	int err = -ENOMEM;
1620
1621	/*
1622	 * Try reading /sys/devices/system/cpu/online to get
1623	 * an all cpus map.
1624	 *
1625	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
1626	 * code needs an overhaul to properly forward the
1627	 * error, and we may not want to do that fallback to a
1628	 * default cpu identity map :-\
1629	 */
1630	cpus = cpu_map__new(NULL);
1631	if (!cpus)
1632		goto out;
1633
1634	threads = thread_map__new_dummy();
1635	if (!threads)
1636		goto out_put;
1637
 1638	perf_evlist__set_maps(evlist, cpus, threads);
	err = 0;	/* without this the function always returned -ENOMEM */
1639out:
1640	return err;
1641out_put:
1642	cpu_map__put(cpus);
1643	goto out;
1644}
1645
1646int perf_evlist__open(struct perf_evlist *evlist)
1647{
1648	struct perf_evsel *evsel;
1649	int err;
1650
1651	/*
1652	 * Default: one fd per CPU, all threads, aka systemwide
1653	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
1654	 */
1655	if (evlist->threads == NULL && evlist->cpus == NULL) {
1656		err = perf_evlist__create_syswide_maps(evlist);
1657		if (err < 0)
1658			goto out_err;
1659	}
1660
1661	perf_evlist__update_id_pos(evlist);
1662
1663	evlist__for_each_entry(evlist, evsel) {
1664		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
1665		if (err < 0)
1666			goto out_err;
1667	}
1668
1669	return 0;
1670out_err:
1671	perf_evlist__close(evlist);
1672	errno = -err;
1673	return err;
1674}
1675
1676int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
1677				  const char *argv[], bool pipe_output,
1678				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
1679{
1680	int child_ready_pipe[2], go_pipe[2];
1681	char bf;
1682
1683	if (pipe(child_ready_pipe) < 0) {
1684		perror("failed to create 'ready' pipe");
1685		return -1;
1686	}
1687
1688	if (pipe(go_pipe) < 0) {
1689		perror("failed to create 'go' pipe");
1690		goto out_close_ready_pipe;
1691	}
1692
1693	evlist->workload.pid = fork();
1694	if (evlist->workload.pid < 0) {
1695		perror("failed to fork");
1696		goto out_close_pipes;
1697	}
1698
1699	if (!evlist->workload.pid) {
1700		int ret;
1701
1702		if (pipe_output)
1703			dup2(2, 1);
1704
1705		signal(SIGTERM, SIG_DFL);
1706
1707		close(child_ready_pipe[0]);
1708		close(go_pipe[1]);
1709		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
1710
1711		/*
1712		 * Tell the parent we're ready to go
1713		 */
1714		close(child_ready_pipe[1]);
1715
1716		/*
1717		 * Wait until the parent tells us to go.
1718		 */
1719		ret = read(go_pipe[0], &bf, 1);
1720		/*
1721		 * The parent will ask for the execvp() to be performed by
1722		 * writing exactly one byte, in workload.cork_fd, usually via
1723		 * perf_evlist__start_workload().
1724		 *
1725		 * For cancelling the workload without actually running it,
1726		 * the parent will just close workload.cork_fd, without writing
1727		 * anything, i.e. read will return zero and we just exit()
1728		 * here.
1729		 */
1730		if (ret != 1) {
1731			if (ret == -1)
1732				perror("unable to read pipe");
1733			exit(ret);
1734		}
1735
1736		execvp(argv[0], (char **)argv);
1737
1738		if (exec_error) {
1739			union sigval val;
1740
1741			val.sival_int = errno;
1742			if (sigqueue(getppid(), SIGUSR1, val))
1743				perror(argv[0]);
1744		} else
1745			perror(argv[0]);
1746		exit(-1);
1747	}
1748
1749	if (exec_error) {
1750		struct sigaction act = {
1751			.sa_flags     = SA_SIGINFO,
1752			.sa_sigaction = exec_error,
1753		};
1754		sigaction(SIGUSR1, &act, NULL);
1755	}
1756
1757	if (target__none(target)) {
1758		if (evlist->threads == NULL) {
1759			fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
1760				__func__, __LINE__);
1761			goto out_close_pipes;
1762		}
1763		thread_map__set_pid(evlist->threads, 0, evlist->workload.pid);
1764	}
1765
1766	close(child_ready_pipe[1]);
1767	close(go_pipe[0]);
1768	/*
1769	 * wait for child to settle
1770	 */
1771	if (read(child_ready_pipe[0], &bf, 1) == -1) {
1772		perror("unable to read pipe");
1773		goto out_close_pipes;
1774	}
1775
1776	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
1777	evlist->workload.cork_fd = go_pipe[1];
1778	close(child_ready_pipe[0]);
1779	return 0;
1780
1781out_close_pipes:
1782	close(go_pipe[0]);
1783	close(go_pipe[1]);
1784out_close_ready_pipe:
1785	close(child_ready_pipe[0]);
1786	close(child_ready_pipe[1]);
1787	return -1;
1788}
1789
1790int perf_evlist__start_workload(struct perf_evlist *evlist)
1791{
1792	if (evlist->workload.cork_fd > 0) {
1793		char bf = 0;
1794		int ret;
1795		/*
1796		 * Remove the cork, let it rip!
1797		 */
1798		ret = write(evlist->workload.cork_fd, &bf, 1);
1799		if (ret < 0)
 1800			perror("unable to write to pipe");
1801
1802		close(evlist->workload.cork_fd);
1803		return ret;
1804	}
1805
1806	return 0;
1807}
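/*
 * Editorial usage sketch, not in the original source (error handling
 * elided): the record-style sequence pairing the two workload helpers
 * above. 'opts' stands in for whatever structure carries the tool's
 * settings.
 *
 *	perf_evlist__prepare_workload(evlist, &opts->target, argv, false, NULL);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, opts->mmap_pages, false);
 *	perf_evlist__enable(evlist);
 *	perf_evlist__start_workload(evlist);	// uncorks the forked child
 */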
1808
1809int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
1810			      struct perf_sample *sample)
1811{
1812	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
1813
1814	if (!evsel)
1815		return -EFAULT;
1816	return perf_evsel__parse_sample(evsel, event, sample);
1817}
1818
1819size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
1820{
1821	struct perf_evsel *evsel;
1822	size_t printed = 0;
1823
1824	evlist__for_each_entry(evlist, evsel) {
1825		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
1826				   perf_evsel__name(evsel));
1827	}
1828
1829	return printed + fprintf(fp, "\n");
1830}
1831
1832int perf_evlist__strerror_open(struct perf_evlist *evlist,
1833			       int err, char *buf, size_t size)
1834{
1835	int printed, value;
1836	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
1837
1838	switch (err) {
1839	case EACCES:
1840	case EPERM:
1841		printed = scnprintf(buf, size,
1842				    "Error:\t%s.\n"
1843				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
1844
1845		value = perf_event_paranoid();
1846
1847		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
1848
1849		if (value >= 2) {
1850			printed += scnprintf(buf + printed, size - printed,
1851					     "For your workloads it needs to be <= 1\nHint:\t");
1852		}
1853		printed += scnprintf(buf + printed, size - printed,
1854				     "For system wide tracing it needs to be set to -1.\n");
1855
1856		printed += scnprintf(buf + printed, size - printed,
1857				    "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
1858				    "Hint:\tThe current value is %d.", value);
1859		break;
1860	case EINVAL: {
1861		struct perf_evsel *first = perf_evlist__first(evlist);
1862		int max_freq;
1863
1864		if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
1865			goto out_default;
1866
1867		if (first->attr.sample_freq < (u64)max_freq)
1868			goto out_default;
1869
1870		printed = scnprintf(buf, size,
1871				    "Error:\t%s.\n"
1872				    "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
1873				    "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
1874				    emsg, max_freq, first->attr.sample_freq);
1875		break;
1876	}
1877	default:
1878out_default:
1879		scnprintf(buf, size, "%s", emsg);
1880		break;
1881	}
1882
1883	return 0;
1884}
1885
1886int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size)
1887{
1888	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
1889	int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0;
1890
1891	switch (err) {
1892	case EPERM:
1893		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
1894		printed += scnprintf(buf + printed, size - printed,
1895				     "Error:\t%s.\n"
1896				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
1897				     "Hint:\tTried using %zd kB.\n",
1898				     emsg, pages_max_per_user, pages_attempted);
1899
1900		if (pages_attempted >= pages_max_per_user) {
1901			printed += scnprintf(buf + printed, size - printed,
1902					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
1903					     pages_max_per_user + pages_attempted);
1904		}
1905
1906		printed += scnprintf(buf + printed, size - printed,
1907				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
1908		break;
1909	default:
1910		scnprintf(buf, size, "%s", emsg);
1911		break;
1912	}
1913
1914	return 0;
1915}
1916
1917void perf_evlist__to_front(struct perf_evlist *evlist,
1918			   struct perf_evsel *move_evsel)
1919{
1920	struct perf_evsel *evsel, *n;
1921	LIST_HEAD(move);
1922
1923	if (move_evsel == perf_evlist__first(evlist))
1924		return;
1925
1926	evlist__for_each_entry_safe(evlist, n, evsel) {
1927		if (evsel->leader == move_evsel->leader)
1928			list_move_tail(&evsel->node, &move);
1929	}
1930
1931	list_splice(&move, &evlist->entries);
1932}
1933
1934void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
1935				     struct perf_evsel *tracking_evsel)
1936{
1937	struct perf_evsel *evsel;
1938
1939	if (tracking_evsel->tracking)
1940		return;
1941
1942	evlist__for_each_entry(evlist, evsel) {
1943		if (evsel != tracking_evsel)
1944			evsel->tracking = false;
1945	}
1946
1947	tracking_evsel->tracking = true;
1948}
1949
1950struct perf_evsel *
1951perf_evlist__find_evsel_by_str(struct perf_evlist *evlist,
1952			       const char *str)
1953{
1954	struct perf_evsel *evsel;
1955
1956	evlist__for_each_entry(evlist, evsel) {
1957		if (!evsel->name)
1958			continue;
1959		if (strcmp(str, evsel->name) == 0)
1960			return evsel;
1961	}
1962
1963	return NULL;
1964}
1965
1966void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
1967				  enum bkw_mmap_state state)
1968{
1969	enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
1970	enum action {
1971		NONE,
1972		PAUSE,
1973		RESUME,
1974	} action = NONE;
1975
1976	if (!evlist->backward_mmap)
1977		return;
1978
1979	switch (old_state) {
1980	case BKW_MMAP_NOTREADY: {
1981		if (state != BKW_MMAP_RUNNING)
 1982			goto state_err;
1983		break;
1984	}
1985	case BKW_MMAP_RUNNING: {
1986		if (state != BKW_MMAP_DATA_PENDING)
1987			goto state_err;
1988		action = PAUSE;
1989		break;
1990	}
1991	case BKW_MMAP_DATA_PENDING: {
1992		if (state != BKW_MMAP_EMPTY)
1993			goto state_err;
1994		break;
1995	}
1996	case BKW_MMAP_EMPTY: {
1997		if (state != BKW_MMAP_RUNNING)
1998			goto state_err;
1999		action = RESUME;
2000		break;
2001	}
2002	default:
 2003		WARN_ONCE(1, "Shouldn't get here\n");
2004	}
2005
2006	evlist->bkw_mmap_state = state;
2007
2008	switch (action) {
2009	case PAUSE:
2010		perf_evlist__pause(evlist);
2011		break;
2012	case RESUME:
2013		perf_evlist__resume(evlist);
2014		break;
2015	case NONE:
2016	default:
2017		break;
2018	}
2019
2020state_err:
2021	return;
2022}
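/*
 * Editorial summary, not in the original source, of the transitions the
 * state machine above accepts:
 *
 *	NOTREADY     -> RUNNING       (no ioctl)
 *	RUNNING      -> DATA_PENDING  (pause output)
 *	DATA_PENDING -> EMPTY         (no ioctl)
 *	EMPTY        -> RUNNING       (resume output)
 *
 * Any other request jumps to state_err and leaves the state unchanged.
 */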