v4.6
   1/*
   2 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
   3 *
   4 * Parts came from builtin-{top,stat,record}.c, see those files for further
   5 * copyright notes.
   6 *
   7 * Released under the GPL v2. (and only v2, not any later version)
   8 */
   9#include "util.h"
  10#include <api/fs/fs.h>
  11#include <poll.h>
  12#include "cpumap.h"
  13#include "thread_map.h"
  14#include "target.h"
  15#include "evlist.h"
  16#include "evsel.h"
  17#include "debug.h"
  18#include <unistd.h>
  19
  20#include "parse-events.h"
  21#include <subcmd/parse-options.h>
  22
  23#include <sys/mman.h>
  24
  25#include <linux/bitops.h>
  26#include <linux/hash.h>
  27#include <linux/log2.h>
  28#include <linux/err.h>
  29
  30static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx);
  31static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx);
  32
  33#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
  34#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
  35
  36void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
  37		       struct thread_map *threads)
  38{
  39	int i;
  40
  41	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
  42		INIT_HLIST_HEAD(&evlist->heads[i]);
  43	INIT_LIST_HEAD(&evlist->entries);
  44	perf_evlist__set_maps(evlist, cpus, threads);
  45	fdarray__init(&evlist->pollfd, 64);
  46	evlist->workload.pid = -1;
  47}
  48
  49struct perf_evlist *perf_evlist__new(void)
  50{
  51	struct perf_evlist *evlist = zalloc(sizeof(*evlist));
  52
  53	if (evlist != NULL)
  54		perf_evlist__init(evlist, NULL, NULL);
  55
  56	return evlist;
  57}
  58
  59struct perf_evlist *perf_evlist__new_default(void)
  60{
  61	struct perf_evlist *evlist = perf_evlist__new();
  62
  63	if (evlist && perf_evlist__add_default(evlist)) {
  64		perf_evlist__delete(evlist);
  65		evlist = NULL;
  66	}
  67
  68	return evlist;
  69}
  70
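/*
 * Editor's note (not part of the original file): a minimal usage
 * sketch for the constructors above, assuming it is compiled inside
 * tools/perf where evlist.h is in scope.  perf_evlist__open(),
 * perf_evlist__mmap() and perf_evlist__delete() are all defined
 * further down in this file.
 */
static int example__count_cycles(void)
{
	struct perf_evlist *evlist = perf_evlist__new_default();
	int err = -ENOMEM;

	if (evlist == NULL)
		return err;

	err = perf_evlist__open(evlist);	/* opens the "cycles" evsel */
	if (!err)
		err = perf_evlist__mmap(evlist, UINT_MAX, false);

	perf_evlist__delete(evlist);		/* also closes and munmaps */
	return err;
}
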
  71struct perf_evlist *perf_evlist__new_dummy(void)
  72{
  73	struct perf_evlist *evlist = perf_evlist__new();
  74
  75	if (evlist && perf_evlist__add_dummy(evlist)) {
  76		perf_evlist__delete(evlist);
  77		evlist = NULL;
  78	}
  79
  80	return evlist;
  81}
  82
  83/**
  84 * perf_evlist__set_id_pos - set the positions of event ids.
  85 * @evlist: selected event list
  86 *
  87 * Events with compatible sample types all have the same id_pos
  88 * and is_pos.  For convenience, put a copy on evlist.
  89 */
  90void perf_evlist__set_id_pos(struct perf_evlist *evlist)
  91{
  92	struct perf_evsel *first = perf_evlist__first(evlist);
  93
  94	evlist->id_pos = first->id_pos;
  95	evlist->is_pos = first->is_pos;
  96}
  97
  98static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
  99{
 100	struct perf_evsel *evsel;
 101
 102	evlist__for_each(evlist, evsel)
 103		perf_evsel__calc_id_pos(evsel);
 104
 105	perf_evlist__set_id_pos(evlist);
 106}
 107
 108static void perf_evlist__purge(struct perf_evlist *evlist)
 109{
 110	struct perf_evsel *pos, *n;
 111
 112	evlist__for_each_safe(evlist, n, pos) {
 113		list_del_init(&pos->node);
 114		pos->evlist = NULL;
 115		perf_evsel__delete(pos);
 116	}
 117
 118	evlist->nr_entries = 0;
 119}
 120
 121void perf_evlist__exit(struct perf_evlist *evlist)
 122{
 123	zfree(&evlist->mmap);
 124	fdarray__exit(&evlist->pollfd);
 125}
 126
 127void perf_evlist__delete(struct perf_evlist *evlist)
 128{
 129	perf_evlist__munmap(evlist);
 130	perf_evlist__close(evlist);
 131	cpu_map__put(evlist->cpus);
 132	thread_map__put(evlist->threads);
 133	evlist->cpus = NULL;
 134	evlist->threads = NULL;
 135	perf_evlist__purge(evlist);
 136	perf_evlist__exit(evlist);
 137	free(evlist);
 138}
 139
 140static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
 141					  struct perf_evsel *evsel)
 142{
 143	/*
 144	 * We already have cpus for evsel (via PMU sysfs) so
 145	 * keep it, if there's no target cpu list defined.
 146	 */
 147	if (!evsel->own_cpus || evlist->has_user_cpus) {
 148		cpu_map__put(evsel->cpus);
 149		evsel->cpus = cpu_map__get(evlist->cpus);
 150	} else if (evsel->cpus != evsel->own_cpus) {
 151		cpu_map__put(evsel->cpus);
 152		evsel->cpus = cpu_map__get(evsel->own_cpus);
 153	}
 154
 155	thread_map__put(evsel->threads);
 156	evsel->threads = thread_map__get(evlist->threads);
 157}
 158
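/*
 * Editor's note (illustration, not in the original file): the cpu map
 * precedence implemented above, in table form:
 *
 *	user supplied a cpu list (has_user_cpus) -> use evlist->cpus
 *	else evsel has a PMU sysfs cpumask       -> use evsel->own_cpus
 *	else                                     -> use evlist->cpus
 *
 * Threads are simpler: evsels always inherit evlist->threads.
 */
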
 159static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
 160{
 161	struct perf_evsel *evsel;
 162
 163	evlist__for_each(evlist, evsel)
 164		__perf_evlist__propagate_maps(evlist, evsel);
 165}
 166
 167void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
 168{
 169	entry->evlist = evlist;
 170	list_add_tail(&entry->node, &evlist->entries);
 171	entry->idx = evlist->nr_entries;
 172	entry->tracking = !entry->idx;
 173
 174	if (!evlist->nr_entries++)
 175		perf_evlist__set_id_pos(evlist);
 176
 177	__perf_evlist__propagate_maps(evlist, entry);
 178}
 179
 180void perf_evlist__remove(struct perf_evlist *evlist, struct perf_evsel *evsel)
 181{
 182	evsel->evlist = NULL;
 183	list_del_init(&evsel->node);
 184	evlist->nr_entries -= 1;
 185}
 186
 187void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
 188				   struct list_head *list)
 189{
 190	struct perf_evsel *evsel, *temp;
 191
 192	__evlist__for_each_safe(list, temp, evsel) {
 193		list_del_init(&evsel->node);
 194		perf_evlist__add(evlist, evsel);
 195	}
 196}
 197
 198void __perf_evlist__set_leader(struct list_head *list)
 199{
 200	struct perf_evsel *evsel, *leader;
 201
 202	leader = list_entry(list->next, struct perf_evsel, node);
 203	evsel = list_entry(list->prev, struct perf_evsel, node);
 204
 205	leader->nr_members = evsel->idx - leader->idx + 1;
 206
 207	__evlist__for_each(list, evsel) {
 208		evsel->leader = leader;
 209	}
 210}
 211
 212void perf_evlist__set_leader(struct perf_evlist *evlist)
 213{
 214	if (evlist->nr_entries) {
 215		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
 216		__perf_evlist__set_leader(&evlist->entries);
 217	}
 218}
 219
 220void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr)
 221{
 222	attr->precise_ip = 3;
 223
 224	while (attr->precise_ip != 0) {
 225		int fd = sys_perf_event_open(attr, 0, -1, -1, 0);
 226		if (fd != -1) {
 227			close(fd);
 228			break;
 229		}
 230		--attr->precise_ip;
 231	}
 232}
 233
 234int perf_evlist__add_default(struct perf_evlist *evlist)
 235{
 236	struct perf_event_attr attr = {
 237		.type = PERF_TYPE_HARDWARE,
 238		.config = PERF_COUNT_HW_CPU_CYCLES,
 239	};
 240	struct perf_evsel *evsel;
 241
 242	event_attr_init(&attr);
 243
 244	perf_event_attr__set_max_precise_ip(&attr);
 245
 246	evsel = perf_evsel__new(&attr);
 247	if (evsel == NULL)
 248		goto error;
 249
 250	/* use asprintf() because free(evsel) assumes name is allocated */
 251	if (asprintf(&evsel->name, "cycles%.*s",
 252		     attr.precise_ip ? attr.precise_ip + 1 : 0, ":ppp") < 0)
 253		goto error_free;
 254
 255	perf_evlist__add(evlist, evsel);
 256	return 0;
 257error_free:
 258	perf_evsel__delete(evsel);
 259error:
 260	return -ENOMEM;
 261}
 262
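/*
 * Editor's note (standalone illustration, not in the original file):
 * the "%.*s" above prints at most precise_ip + 1 characters of the
 * constant ":ppp", so the probed precision picks the event modifier.
 */
#include <stdio.h>

int main(void)
{
	int precise_ip = 2;	/* as probed by set_max_precise_ip() */

	/* prints "cycles:pp"; with precise_ip == 0, plain "cycles" */
	printf("cycles%.*s\n", precise_ip ? precise_ip + 1 : 0, ":ppp");
	return 0;
}
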
 263int perf_evlist__add_dummy(struct perf_evlist *evlist)
 264{
 265	struct perf_event_attr attr = {
 266		.type	= PERF_TYPE_SOFTWARE,
 267		.config = PERF_COUNT_SW_DUMMY,
 268		.size	= sizeof(attr), /* to capture ABI version */
 269	};
 270	struct perf_evsel *evsel = perf_evsel__new(&attr);
 271
 272	if (evsel == NULL)
 273		return -ENOMEM;
 274
 275	perf_evlist__add(evlist, evsel);
 276	return 0;
 277}
 278
 279static int perf_evlist__add_attrs(struct perf_evlist *evlist,
 280				  struct perf_event_attr *attrs, size_t nr_attrs)
 281{
 282	struct perf_evsel *evsel, *n;
 283	LIST_HEAD(head);
 284	size_t i;
 285
 286	for (i = 0; i < nr_attrs; i++) {
 287		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
 288		if (evsel == NULL)
 289			goto out_delete_partial_list;
 290		list_add_tail(&evsel->node, &head);
 291	}
 292
 293	perf_evlist__splice_list_tail(evlist, &head);
 294
 295	return 0;
 296
 297out_delete_partial_list:
 298	__evlist__for_each_safe(&head, n, evsel)
 299		perf_evsel__delete(evsel);
 300	return -1;
 301}
 302
 303int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
 304				     struct perf_event_attr *attrs, size_t nr_attrs)
 305{
 306	size_t i;
 307
 308	for (i = 0; i < nr_attrs; i++)
 309		event_attr_init(attrs + i);
 310
 311	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
 312}
 313
 314struct perf_evsel *
 315perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
 316{
 317	struct perf_evsel *evsel;
 318
 319	evlist__for_each(evlist, evsel) {
 320		if (evsel->attr.type   == PERF_TYPE_TRACEPOINT &&
 321		    (int)evsel->attr.config == id)
 322			return evsel;
 323	}
 324
 325	return NULL;
 326}
 327
 328struct perf_evsel *
 329perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
 330				     const char *name)
 331{
 332	struct perf_evsel *evsel;
 333
 334	evlist__for_each(evlist, evsel) {
 335		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
 336		    (strcmp(evsel->name, name) == 0))
 337			return evsel;
 338	}
 339
 340	return NULL;
 341}
 342
 343int perf_evlist__add_newtp(struct perf_evlist *evlist,
 344			   const char *sys, const char *name, void *handler)
 345{
 346	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);
 347
 348	if (IS_ERR(evsel))
 349		return -1;
 350
 351	evsel->handler = handler;
 352	perf_evlist__add(evlist, evsel);
 353	return 0;
 354}
 355
 356static int perf_evlist__nr_threads(struct perf_evlist *evlist,
 357				   struct perf_evsel *evsel)
 358{
 359	if (evsel->system_wide)
 360		return 1;
 361	else
 362		return thread_map__nr(evlist->threads);
 363}
 364
 365void perf_evlist__disable(struct perf_evlist *evlist)
 366{
 367	struct perf_evsel *pos;
 368
 369	evlist__for_each(evlist, pos) {
 370		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
 371			continue;
 372		perf_evsel__disable(pos);
 373	}
 374
 375	evlist->enabled = false;
 376}
 377
 378void perf_evlist__enable(struct perf_evlist *evlist)
 379{
 380	struct perf_evsel *pos;
 381
 382	evlist__for_each(evlist, pos) {
 383		if (!perf_evsel__is_group_leader(pos) || !pos->fd)
 384			continue;
 385		perf_evsel__enable(pos);
 386	}
 387
 388	evlist->enabled = true;
 389}
 390
 391void perf_evlist__toggle_enable(struct perf_evlist *evlist)
 392{
 393	(evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist);
 394}
 395
 396static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
 397					 struct perf_evsel *evsel, int cpu)
 398{
 399	int thread, err;
 400	int nr_threads = perf_evlist__nr_threads(evlist, evsel);
 401
 402	if (!evsel->fd)
 403		return -EINVAL;
 404
 405	for (thread = 0; thread < nr_threads; thread++) {
 406		err = ioctl(FD(evsel, cpu, thread),
 407			    PERF_EVENT_IOC_ENABLE, 0);
 408		if (err)
 409			return err;
 410	}
 411	return 0;
 412}
 413
 414static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
 415					    struct perf_evsel *evsel,
 416					    int thread)
 417{
 418	int cpu, err;
 419	int nr_cpus = cpu_map__nr(evlist->cpus);
 420
 421	if (!evsel->fd)
 422		return -EINVAL;
 423
 424	for (cpu = 0; cpu < nr_cpus; cpu++) {
 425		err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
 426		if (err)
 427			return err;
 428	}
 429	return 0;
 430}
 431
 432int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
 433				  struct perf_evsel *evsel, int idx)
 434{
 435	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);
 436
 437	if (per_cpu_mmaps)
 438		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
 439	else
 440		return perf_evlist__enable_event_thread(evlist, evsel, idx);
 441}
 442
 443int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
 444{
 445	int nr_cpus = cpu_map__nr(evlist->cpus);
 446	int nr_threads = thread_map__nr(evlist->threads);
 447	int nfds = 0;
 448	struct perf_evsel *evsel;
 449
 450	evlist__for_each(evlist, evsel) {
 451		if (evsel->system_wide)
 452			nfds += nr_cpus;
 453		else
 454			nfds += nr_cpus * nr_threads;
 455	}
 456
 457	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
 458	    fdarray__grow(&evlist->pollfd, nfds) < 0)
 459		return -ENOMEM;
 460
 461	return 0;
 462}
 463
 464static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx)
 465{
 466	int pos = fdarray__add(&evlist->pollfd, fd, POLLIN | POLLERR | POLLHUP);
 467	/*
 468	 * Save the idx so that when we filter out fds POLLHUP'ed we can
 469	 * close the associated evlist->mmap[] entry.
 470	 */
 471	if (pos >= 0) {
 472		evlist->pollfd.priv[pos].idx = idx;
 473
 474		fcntl(fd, F_SETFL, O_NONBLOCK);
 475	}
 476
 477	return pos;
 478}
 479
 480int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
 481{
 482	return __perf_evlist__add_pollfd(evlist, fd, -1);
 483}
 484
 485static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd)
 486{
 487	struct perf_evlist *evlist = container_of(fda, struct perf_evlist, pollfd);
 488
 489	perf_evlist__mmap_put(evlist, fda->priv[fd].idx);
 490}
 491
 492int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
 493{
 494	return fdarray__filter(&evlist->pollfd, revents_and_mask,
 495			       perf_evlist__munmap_filtered);
 496}
 497
 498int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
 499{
 500	return fdarray__poll(&evlist->pollfd, timeout);
 501}
 502
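/*
 * Editor's note (hedged sketch, not in the original file): the
 * poll-then-filter pattern these helpers enable, modeled on perf
 * record's main loop; "done" and process_events() are hypothetical.
 */
	while (!done) {
		process_events(evlist);

		if (perf_evlist__poll(evlist, 100) < 0)
			break;

		/* drop POLLHUP'ed fds and put their evlist->mmap[] entries */
		perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP);
	}
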
 503static void perf_evlist__id_hash(struct perf_evlist *evlist,
 504				 struct perf_evsel *evsel,
 505				 int cpu, int thread, u64 id)
 506{
 507	int hash;
 508	struct perf_sample_id *sid = SID(evsel, cpu, thread);
 509
 510	sid->id = id;
 511	sid->evsel = evsel;
 512	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
 513	hlist_add_head(&sid->node, &evlist->heads[hash]);
 514}
 515
 516void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
 517			 int cpu, int thread, u64 id)
 518{
 519	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
 520	evsel->id[evsel->ids++] = id;
 521}
 522
 523int perf_evlist__id_add_fd(struct perf_evlist *evlist,
 524			   struct perf_evsel *evsel,
 525			   int cpu, int thread, int fd)
 526{
 527	u64 read_data[4] = { 0, };
 528	int id_idx = 1; /* The first entry is the counter value */
 529	u64 id;
 530	int ret;
 531
 532	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
 533	if (!ret)
 534		goto add;
 535
 536	if (errno != ENOTTY)
 537		return -1;
 538
 539	/* Legacy way to get the event id. All hail to old kernels! */
 540
 541	/*
 542	 * This way does not work with group format read, so bail
 543	 * out in that case.
 544	 */
 545	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
 546		return -1;
 547
 548	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
 549	    read(fd, &read_data, sizeof(read_data)) == -1)
 550		return -1;
 551
 552	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
 553		++id_idx;
 554	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
 555		++id_idx;
 556
 557	id = read_data[id_idx];
 558
 559 add:
 560	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
 561	return 0;
 562}
 563
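/*
 * Editor's note (illustration, not in the original file): the layout
 * behind id_idx above.  For a non-group read with all three format
 * bits set, read() fills read_data[] in this order:
 */
struct read_format_layout {	/* hypothetical name */
	u64 value;		/* read_data[0], always present         */
	u64 time_enabled;	/* if PERF_FORMAT_TOTAL_TIME_ENABLED    */
	u64 time_running;	/* if PERF_FORMAT_TOTAL_TIME_RUNNING    */
	u64 id;			/* if PERF_FORMAT_ID, read_data[id_idx] */
};
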
 564static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
 565				     struct perf_evsel *evsel, int idx, int cpu,
 566				     int thread)
 567{
 568	struct perf_sample_id *sid = SID(evsel, cpu, thread);
 569	sid->idx = idx;
 570	if (evlist->cpus && cpu >= 0)
 571		sid->cpu = evlist->cpus->map[cpu];
 572	else
 573		sid->cpu = -1;
 574	if (!evsel->system_wide && evlist->threads && thread >= 0)
 575		sid->tid = thread_map__pid(evlist->threads, thread);
 576	else
 577		sid->tid = -1;
 578}
 579
 580struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
 581{
 582	struct hlist_head *head;
 583	struct perf_sample_id *sid;
 584	int hash;
 585
 586	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
 587	head = &evlist->heads[hash];
 588
 589	hlist_for_each_entry(sid, head, node)
 590		if (sid->id == id)
 591			return sid;
 592
 593	return NULL;
 594}
 595
 596struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
 597{
 598	struct perf_sample_id *sid;
 599
 600	if (evlist->nr_entries == 1 || !id)
 601		return perf_evlist__first(evlist);
 602
 603	sid = perf_evlist__id2sid(evlist, id);
 604	if (sid)
 605		return sid->evsel;
 606
 607	if (!perf_evlist__sample_id_all(evlist))
 608		return perf_evlist__first(evlist);
 609
 610	return NULL;
 611}
 612
 613struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
 614						u64 id)
 615{
 616	struct perf_sample_id *sid;
 617
 618	if (!id)
 619		return NULL;
 620
 621	sid = perf_evlist__id2sid(evlist, id);
 622	if (sid)
 623		return sid->evsel;
 624
 625	return NULL;
 626}
 627
 628static int perf_evlist__event2id(struct perf_evlist *evlist,
 629				 union perf_event *event, u64 *id)
 630{
 631	const u64 *array = event->sample.array;
 632	ssize_t n;
 633
 634	n = (event->header.size - sizeof(event->header)) >> 3;
 635
 636	if (event->header.type == PERF_RECORD_SAMPLE) {
 637		if (evlist->id_pos >= n)
 638			return -1;
 639		*id = array[evlist->id_pos];
 640	} else {
 641		if (evlist->is_pos > n)
 642			return -1;
 643		n -= evlist->is_pos;
 644		*id = array[n];
 645	}
 646	return 0;
 647}
 648
 649static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
 650						   union perf_event *event)
 651{
 652	struct perf_evsel *first = perf_evlist__first(evlist);
 653	struct hlist_head *head;
 654	struct perf_sample_id *sid;
 655	int hash;
 656	u64 id;
 657
 658	if (evlist->nr_entries == 1)
 659		return first;
 660
 661	if (!first->attr.sample_id_all &&
 662	    event->header.type != PERF_RECORD_SAMPLE)
 663		return first;
 664
 665	if (perf_evlist__event2id(evlist, event, &id))
 666		return NULL;
 667
 668	/* Synthesized events have an id of zero */
 669	if (!id)
 670		return first;
 671
 672	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
 673	head = &evlist->heads[hash];
 674
 675	hlist_for_each_entry(sid, head, node) {
 676		if (sid->id == id)
 677			return sid->evsel;
 678	}
 679	return NULL;
 680}
 681
 682union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
 683{
 684	struct perf_mmap *md = &evlist->mmap[idx];
 685	u64 head;
 686	u64 old = md->prev;
 687	unsigned char *data = md->base + page_size;
 688	union perf_event *event = NULL;
 689
 690	/*
 691	 * Check if event was unmapped due to a POLLHUP/POLLERR.
 692	 */
 693	if (!atomic_read(&md->refcnt))
 694		return NULL;
 695
 696	head = perf_mmap__read_head(md);
 697	if (evlist->overwrite) {
 698		/*
 699		 * If we're further behind than half the buffer, there's a chance
 700		 * the writer will bite our tail and mess up the samples under us.
 701		 *
 702		 * If we somehow ended up ahead of the head, we got messed up.
 703		 *
 704		 * In either case, truncate and restart at head.
 705		 */
 706		int diff = head - old;
 707		if (diff > md->mask / 2 || diff < 0) {
 708			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
 709
 710			/*
 711			 * head points to a known good entry, start there.
 712			 */
 713			old = head;
 714		}
 715	}
 716
 717	if (old != head) {
 718		size_t size;
 719
 720		event = (union perf_event *)&data[old & md->mask];
 721		size = event->header.size;
 722
 723		/*
 724		 * Event straddles the mmap boundary -- header should always
 725		 * be inside due to u64 alignment of output.
 726		 */
 727		if ((old & md->mask) + size != ((old + size) & md->mask)) {
 728			unsigned int offset = old;
 729			unsigned int len = min(sizeof(*event), size), cpy;
 730			void *dst = md->event_copy;
 731
 732			do {
 733				cpy = min(md->mask + 1 - (offset & md->mask), len);
 734				memcpy(dst, &data[offset & md->mask], cpy);
 735				offset += cpy;
 736				dst += cpy;
 737				len -= cpy;
 738			} while (len);
 739
 740			event = (union perf_event *) md->event_copy;
 741		}
 742
 743		old += size;
 744	}
 745
 746	md->prev = old;
 747
 748	return event;
 749}
 750
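/*
 * Editor's note (hedged sketch, not in the original file): the
 * canonical consumer loop for the reader above; process_event() is
 * hypothetical, the rest is this file's API.
 */
	union perf_event *event;
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
			process_event(event);
			/* move the tail so the kernel can reuse the space */
			perf_evlist__mmap_consume(evlist, i);
		}
	}
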
 751static bool perf_mmap__empty(struct perf_mmap *md)
 752{
 753	return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
 754}
 755
 756static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
 757{
 758	atomic_inc(&evlist->mmap[idx].refcnt);
 759}
 760
 761static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx)
 762{
 763	BUG_ON(atomic_read(&evlist->mmap[idx].refcnt) == 0);
 764
 765	if (atomic_dec_and_test(&evlist->mmap[idx].refcnt))
 766		__perf_evlist__munmap(evlist, idx);
 767}
 768
 769void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
 770{
 771	struct perf_mmap *md = &evlist->mmap[idx];
 772
 773	if (!evlist->overwrite) {
 774		u64 old = md->prev;
 775
 776		perf_mmap__write_tail(md, old);
 777	}
 778
 779	if (atomic_read(&md->refcnt) == 1 && perf_mmap__empty(md))
 780		perf_evlist__mmap_put(evlist, idx);
 781}
 782
 783int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
 784			       struct auxtrace_mmap_params *mp __maybe_unused,
 785			       void *userpg __maybe_unused,
 786			       int fd __maybe_unused)
 787{
 788	return 0;
 789}
 790
 791void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
 792{
 793}
 794
 795void __weak auxtrace_mmap_params__init(
 796			struct auxtrace_mmap_params *mp __maybe_unused,
 797			off_t auxtrace_offset __maybe_unused,
 798			unsigned int auxtrace_pages __maybe_unused,
 799			bool auxtrace_overwrite __maybe_unused)
 800{
 801}
 802
 803void __weak auxtrace_mmap_params__set_idx(
 804			struct auxtrace_mmap_params *mp __maybe_unused,
 805			struct perf_evlist *evlist __maybe_unused,
 806			int idx __maybe_unused,
 807			bool per_cpu __maybe_unused)
 808{
 809}
 810
 811static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
 812{
 813	if (evlist->mmap[idx].base != NULL) {
 814		munmap(evlist->mmap[idx].base, evlist->mmap_len);
 815		evlist->mmap[idx].base = NULL;
 816		atomic_set(&evlist->mmap[idx].refcnt, 0);
 817	}
 818	auxtrace_mmap__munmap(&evlist->mmap[idx].auxtrace_mmap);
 819}
 820
 821void perf_evlist__munmap(struct perf_evlist *evlist)
 822{
 823	int i;
 824
 825	if (evlist->mmap == NULL)
 826		return;
 827
 828	for (i = 0; i < evlist->nr_mmaps; i++)
 829		__perf_evlist__munmap(evlist, i);
 830
 831	zfree(&evlist->mmap);
 832}
 833
 834static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
 835{
 836	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
 837	if (cpu_map__empty(evlist->cpus))
 838		evlist->nr_mmaps = thread_map__nr(evlist->threads);
 839	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
 840	return evlist->mmap != NULL ? 0 : -ENOMEM;
 841}
 842
 843struct mmap_params {
 844	int prot;
 845	int mask;
 846	struct auxtrace_mmap_params auxtrace_mp;
 847};
 848
 849static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
 850			       struct mmap_params *mp, int fd)
 851{
 852	/*
 853	 * The last one will be done at perf_evlist__mmap_consume(), so that we
 854	 * make sure we don't prevent tools from consuming every last event in
 855	 * the ring buffer.
 856	 *
 857	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
 858	 * anymore, but the last events for it are still in the ring buffer,
 859	 * waiting to be consumed.
 860	 *
 861	 * Tools can choose to ignore this at their own discretion, but the
 862	 * evlist layer can't just drop it when filtering events in
 863	 * perf_evlist__filter_pollfd().
 864	 */
 865	atomic_set(&evlist->mmap[idx].refcnt, 2);
 866	evlist->mmap[idx].prev = 0;
 867	evlist->mmap[idx].mask = mp->mask;
 868	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot,
 869				      MAP_SHARED, fd, 0);
 870	if (evlist->mmap[idx].base == MAP_FAILED) {
 871		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
 872			  errno);
 873		evlist->mmap[idx].base = NULL;
 874		return -1;
 875	}
 876
 877	if (auxtrace_mmap__mmap(&evlist->mmap[idx].auxtrace_mmap,
 878				&mp->auxtrace_mp, evlist->mmap[idx].base, fd))
 879		return -1;
 880
 881	return 0;
 882}
 883
 884static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
 885				       struct mmap_params *mp, int cpu,
 886				       int thread, int *output)
 887{
 888	struct perf_evsel *evsel;
 889
 890	evlist__for_each(evlist, evsel) {
 891		int fd;
 892
 893		if (evsel->system_wide && thread)
 894			continue;
 895
 896		fd = FD(evsel, cpu, thread);
 897
 898		if (*output == -1) {
 899			*output = fd;
 900			if (__perf_evlist__mmap(evlist, idx, mp, *output) < 0)
 901				return -1;
 902		} else {
 903			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
 904				return -1;
 905
 906			perf_evlist__mmap_get(evlist, idx);
 907		}
 908
 909		/*
 910		 * The system_wide flag causes a selected event to be opened
 911		 * always without a pid.  Consequently it will never get a
 912		 * POLLHUP, but it is used for tracking in combination with
 913		 * other events, so it should not need to be polled anyway.
 914		 * Therefore don't add it for polling.
 915		 */
 916		if (!evsel->system_wide &&
 917		    __perf_evlist__add_pollfd(evlist, fd, idx) < 0) {
 918			perf_evlist__mmap_put(evlist, idx);
 919			return -1;
 920		}
 921
 922		if (evsel->attr.read_format & PERF_FORMAT_ID) {
 923			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
 924						   fd) < 0)
 925				return -1;
 926			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
 927						 thread);
 928		}
 929	}
 930
 931	return 0;
 932}
 933
 934static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
 935				     struct mmap_params *mp)
 936{
 937	int cpu, thread;
 938	int nr_cpus = cpu_map__nr(evlist->cpus);
 939	int nr_threads = thread_map__nr(evlist->threads);
 940
 941	pr_debug2("perf event ring buffer mmapped per cpu\n");
 942	for (cpu = 0; cpu < nr_cpus; cpu++) {
 943		int output = -1;
 944
 945		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
 946					      true);
 947
 948		for (thread = 0; thread < nr_threads; thread++) {
 949			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
 950							thread, &output))
 951				goto out_unmap;
 952		}
 953	}
 954
 955	return 0;
 956
 957out_unmap:
 958	for (cpu = 0; cpu < nr_cpus; cpu++)
 959		__perf_evlist__munmap(evlist, cpu);
 960	return -1;
 961}
 962
 963static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
 964					struct mmap_params *mp)
 965{
 966	int thread;
 967	int nr_threads = thread_map__nr(evlist->threads);
 968
 969	pr_debug2("perf event ring buffer mmapped per thread\n");
 970	for (thread = 0; thread < nr_threads; thread++) {
 971		int output = -1;
 972
 973		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
 974					      false);
 975
 976		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
 977						&output))
 978			goto out_unmap;
 979	}
 980
 981	return 0;
 982
 983out_unmap:
 984	for (thread = 0; thread < nr_threads; thread++)
 985		__perf_evlist__munmap(evlist, thread);
 986	return -1;
 987}
 988
 989static size_t perf_evlist__mmap_size(unsigned long pages)
 990{
 991	if (pages == UINT_MAX) {
 992		int max;
 993
 994		if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
 995			/*
 996			 * Pick a value that was once considered good.  Things
 997			 * look strange since we can't read the sysctl value,
 998			 * but let's not die just yet...
 999			 */
1000			max = 512;
1001		} else {
1002			max -= (page_size / 1024);
1003		}
1004
1005		pages = (max * 1024) / page_size;
1006		if (!is_power_of_2(pages))
1007			pages = rounddown_pow_of_two(pages);
1008	} else if (!is_power_of_2(pages))
1009		return 0;
1010
1011	return (pages + 1) * page_size;
1012}
1013
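/*
 * Editor's note (standalone worked example, not in the original
 * file): the sizing above for pages == UINT_MAX, assuming 4 KiB
 * pages and the common perf_event_mlock_kb default of 516.
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;
	int max = 516 - (int)(page_size / 1024);		/* 512 KiB */
	unsigned long pages = (max * 1024UL) / page_size;	/* 128 = 2^7 */

	/* the +1 is the control page (struct perf_event_mmap_page) */
	printf("%lu bytes\n", (pages + 1) * page_size);		/* 528384 */
	return 0;
}
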
1014static long parse_pages_arg(const char *str, unsigned long min,
1015			    unsigned long max)
1016{
1017	unsigned long pages, val;
1018	static struct parse_tag tags[] = {
1019		{ .tag  = 'B', .mult = 1       },
1020		{ .tag  = 'K', .mult = 1 << 10 },
1021		{ .tag  = 'M', .mult = 1 << 20 },
1022		{ .tag  = 'G', .mult = 1 << 30 },
1023		{ .tag  = 0 },
1024	};
1025
1026	if (str == NULL)
1027		return -EINVAL;
1028
1029	val = parse_tag_value(str, tags);
1030	if (val != (unsigned long) -1) {
1031		/* we got file size value */
1032		pages = PERF_ALIGN(val, page_size) / page_size;
1033	} else {
1034		/* we got pages count value */
1035		char *eptr;
1036		pages = strtoul(str, &eptr, 10);
1037		if (*eptr != '\0')
1038			return -EINVAL;
1039	}
1040
1041	if (pages == 0 && min == 0) {
1042		/* leave number of pages at 0 */
1043	} else if (!is_power_of_2(pages)) {
1044		/* round pages up to next power of 2 */
1045		pages = roundup_pow_of_two(pages);
1046		if (!pages)
1047			return -EINVAL;
1048		pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
1049			pages * page_size, pages);
1050	}
1051
1052	if (pages > max)
1053		return -EINVAL;
1054
1055	return pages;
1056}
1057
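/*
 * Editor's note (illustration, not in the original file): what the
 * parser above accepts, assuming 4 KiB pages:
 *
 *	"16"	-> 16 pages (a power of two, taken as-is)
 *	"64K"	-> 65536 bytes -> 16 pages
 *	"100"	-> rounded up to 128 pages, with a pr_info() notice
 *	"0"	-> rejected with -EINVAL whenever the caller passes
 *		   min == 1, as __perf_evlist__parse_mmap_pages() does
 */
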
1058int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
1059{
1060	unsigned long max = UINT_MAX;
1061	long pages;
1062
1063	if (max > SIZE_MAX / page_size)
1064		max = SIZE_MAX / page_size;
1065
1066	pages = parse_pages_arg(str, 1, max);
1067	if (pages < 0) {
1068		pr_err("Invalid argument for --mmap-pages/-m\n");
1069		return -1;
1070	}
1071
1072	*mmap_pages = pages;
1073	return 0;
1074}
1075
1076int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
1077				  int unset __maybe_unused)
1078{
1079	return __perf_evlist__parse_mmap_pages(opt->value, str);
1080}
1081
1082/**
1083 * perf_evlist__mmap_ex - Create mmaps to receive events.
1084 * @evlist: list of events
1085 * @pages: map length in pages
1086 * @overwrite: overwrite older events?
1087 * @auxtrace_pages - auxtrace map length in pages
1088 * @auxtrace_overwrite - overwrite older auxtrace data?
1089 *
1090 * If @overwrite is %false the user needs to signal event consumption using
1091 * perf_mmap__write_tail().  Using perf_evlist__mmap_read() does this
1092 * automatically.
1093 *
1094 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
1095 * consumption using auxtrace_mmap__write_tail().
1096 *
1097 * Return: %0 on success, negative error code otherwise.
1098 */
1099int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
1100			 bool overwrite, unsigned int auxtrace_pages,
1101			 bool auxtrace_overwrite)
1102{
1103	struct perf_evsel *evsel;
1104	const struct cpu_map *cpus = evlist->cpus;
1105	const struct thread_map *threads = evlist->threads;
1106	struct mmap_params mp = {
1107		.prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
1108	};
1109
1110	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
1111		return -ENOMEM;
1112
1113	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
1114		return -ENOMEM;
1115
1116	evlist->overwrite = overwrite;
1117	evlist->mmap_len = perf_evlist__mmap_size(pages);
1118	pr_debug("mmap size %zuB\n", evlist->mmap_len);
1119	mp.mask = evlist->mmap_len - page_size - 1;
1120
1121	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
1122				   auxtrace_pages, auxtrace_overwrite);
1123
1124	evlist__for_each(evlist, evsel) {
1125		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
1126		    evsel->sample_id == NULL &&
1127		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
1128			return -ENOMEM;
1129	}
1130
1131	if (cpu_map__empty(cpus))
1132		return perf_evlist__mmap_per_thread(evlist, &mp);
1133
1134	return perf_evlist__mmap_per_cpu(evlist, &mp);
1135}
1136
1137int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
1138		      bool overwrite)
1139{
1140	return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
1141}
1142
1143int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
1144{
1145	struct cpu_map *cpus;
1146	struct thread_map *threads;
1147
1148	threads = thread_map__new_str(target->pid, target->tid, target->uid);
1149
1150	if (!threads)
1151		return -1;
1152
1153	if (target__uses_dummy_map(target))
1154		cpus = cpu_map__dummy_new();
1155	else
1156		cpus = cpu_map__new(target->cpu_list);
1157
1158	if (!cpus)
1159		goto out_delete_threads;
1160
1161	evlist->has_user_cpus = !!target->cpu_list;
1162
1163	perf_evlist__set_maps(evlist, cpus, threads);
1164
1165	return 0;
1166
1167out_delete_threads:
1168	thread_map__put(threads);
1169	return -1;
1170}
1171
1172void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
1173			   struct thread_map *threads)
1174{
1175	/*
1176	 * Allow for the possibility that one or the other of the maps isn't being
1177	 * changed, i.e. don't put it.  Note we are assuming the maps that are
1178	 * being applied are brand new and evlist is taking ownership of the
1179	 * original reference count of 1.  If that is not the case it is up to
1180	 * the caller to increase the reference count.
1181	 */
1182	if (cpus != evlist->cpus) {
1183		cpu_map__put(evlist->cpus);
1184		evlist->cpus = cpu_map__get(cpus);
1185	}
1186
1187	if (threads != evlist->threads) {
1188		thread_map__put(evlist->threads);
1189		evlist->threads = thread_map__get(threads);
1190	}
1191
1192	perf_evlist__propagate_maps(evlist);
1193}
1194
1195int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
1196{
1197	struct perf_evsel *evsel;
1198	int err = 0;
1199	const int ncpus = cpu_map__nr(evlist->cpus),
1200		  nthreads = thread_map__nr(evlist->threads);
1201
1202	evlist__for_each(evlist, evsel) {
1203		if (evsel->filter == NULL)
1204			continue;
1205
1206		/*
1207		 * Filters only work for tracepoint events, which have no cpu
1208		 * limit, so the evlist and evsel cpu maps should always match.
1209		 */
1210		err = perf_evsel__apply_filter(evsel, ncpus, nthreads, evsel->filter);
1211		if (err) {
1212			*err_evsel = evsel;
1213			break;
1214		}
1215	}
1216
1217	return err;
1218}
1219
1220int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
1221{
1222	struct perf_evsel *evsel;
1223	int err = 0;
1224
1225	evlist__for_each(evlist, evsel) {
1226		if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
1227			continue;
1228
1229		err = perf_evsel__set_filter(evsel, filter);
1230		if (err)
1231			break;
1232	}
1233
1234	return err;
1235}
1236
1237int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids)
1238{
1239	char *filter;
1240	int ret = -1;
1241	size_t i;
1242
1243	for (i = 0; i < npids; ++i) {
1244		if (i == 0) {
1245			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
1246				return -1;
1247		} else {
1248			char *tmp;
1249
1250			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
1251				goto out_free;
1252
1253			free(filter);
1254			filter = tmp;
1255		}
1256	}
1257
1258	ret = perf_evlist__set_filter(evlist, filter);
1259out_free:
1260	free(filter);
1261	return ret;
1262}
1263
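/*
 * Editor's note (illustration, not in the original file): for pids
 * {1, 2} the loop above builds the tracepoint filter
 *
 *	"common_pid != 1 && common_pid != 2"
 *
 * which is then applied to every tracepoint evsel, typically so that
 * perf does not trace its own threads.
 */
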
1264int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid)
1265{
1266	return perf_evlist__set_filter_pids(evlist, 1, &pid);
1267}
1268
1269bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
1270{
1271	struct perf_evsel *pos;
1272
1273	if (evlist->nr_entries == 1)
1274		return true;
1275
1276	if (evlist->id_pos < 0 || evlist->is_pos < 0)
1277		return false;
1278
1279	evlist__for_each(evlist, pos) {
1280		if (pos->id_pos != evlist->id_pos ||
1281		    pos->is_pos != evlist->is_pos)
1282			return false;
1283	}
1284
1285	return true;
1286}
1287
1288u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
1289{
1290	struct perf_evsel *evsel;
1291
1292	if (evlist->combined_sample_type)
1293		return evlist->combined_sample_type;
1294
1295	evlist__for_each(evlist, evsel)
1296		evlist->combined_sample_type |= evsel->attr.sample_type;
1297
1298	return evlist->combined_sample_type;
1299}
1300
1301u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
1302{
1303	evlist->combined_sample_type = 0;
1304	return __perf_evlist__combined_sample_type(evlist);
1305}
1306
1307u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist)
1308{
1309	struct perf_evsel *evsel;
1310	u64 branch_type = 0;
1311
1312	evlist__for_each(evlist, evsel)
1313		branch_type |= evsel->attr.branch_sample_type;
1314	return branch_type;
1315}
1316
1317bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
1318{
1319	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
1320	u64 read_format = first->attr.read_format;
1321	u64 sample_type = first->attr.sample_type;
1322
1323	evlist__for_each(evlist, pos) {
1324		if (read_format != pos->attr.read_format)
1325			return false;
1326	}
1327
1328	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
1329	if ((sample_type & PERF_SAMPLE_READ) &&
1330	    !(read_format & PERF_FORMAT_ID)) {
1331		return false;
1332	}
1333
1334	return true;
1335}
1336
1337u64 perf_evlist__read_format(struct perf_evlist *evlist)
1338{
1339	struct perf_evsel *first = perf_evlist__first(evlist);
1340	return first->attr.read_format;
1341}
1342
1343u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
1344{
1345	struct perf_evsel *first = perf_evlist__first(evlist);
1346	struct perf_sample *data;
1347	u64 sample_type;
1348	u16 size = 0;
1349
1350	if (!first->attr.sample_id_all)
1351		goto out;
1352
1353	sample_type = first->attr.sample_type;
1354
1355	if (sample_type & PERF_SAMPLE_TID)
1356		size += sizeof(data->tid) * 2;
1357
1358	if (sample_type & PERF_SAMPLE_TIME)
1359		size += sizeof(data->time);
1360
1361	if (sample_type & PERF_SAMPLE_ID)
1362		size += sizeof(data->id);
1363
1364	if (sample_type & PERF_SAMPLE_STREAM_ID)
1365		size += sizeof(data->stream_id);
1366
1367	if (sample_type & PERF_SAMPLE_CPU)
1368		size += sizeof(data->cpu) * 2;
1369
1370	if (sample_type & PERF_SAMPLE_IDENTIFIER)
1371		size += sizeof(data->id);
1372out:
1373	return size;
1374}
1375
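/*
 * Editor's note (illustration, not in the original file): the sizes
 * summed above describe the sample_id block that the perf ABI appends
 * to non-sample records when sample_id_all is set:
 *
 *	{ u32 pid, tid;  }	if PERF_SAMPLE_TID
 *	{ u64 time;      }	if PERF_SAMPLE_TIME
 *	{ u64 id;        }	if PERF_SAMPLE_ID
 *	{ u64 stream_id; }	if PERF_SAMPLE_STREAM_ID
 *	{ u32 cpu, res;  }	if PERF_SAMPLE_CPU
 *	{ u64 id;        }	if PERF_SAMPLE_IDENTIFIER
 */
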
1376bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
1377{
1378	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
1379
1380	evlist__for_each_continue(evlist, pos) {
1381		if (first->attr.sample_id_all != pos->attr.sample_id_all)
1382			return false;
1383	}
1384
1385	return true;
1386}
1387
1388bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
1389{
1390	struct perf_evsel *first = perf_evlist__first(evlist);
1391	return first->attr.sample_id_all;
1392}
1393
1394void perf_evlist__set_selected(struct perf_evlist *evlist,
1395			       struct perf_evsel *evsel)
1396{
1397	evlist->selected = evsel;
1398}
1399
1400void perf_evlist__close(struct perf_evlist *evlist)
1401{
1402	struct perf_evsel *evsel;
1403	int ncpus = cpu_map__nr(evlist->cpus);
1404	int nthreads = thread_map__nr(evlist->threads);
1405	int n;
1406
1407	evlist__for_each_reverse(evlist, evsel) {
1408		n = evsel->cpus ? evsel->cpus->nr : ncpus;
1409		perf_evsel__close(evsel, n, nthreads);
1410	}
1411}
1412
1413static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
1414{
1415	struct cpu_map	  *cpus;
1416	struct thread_map *threads;
1417	int err = -ENOMEM;
1418
1419	/*
1420	 * Try reading /sys/devices/system/cpu/online to get
1421	 * an all cpus map.
1422	 *
1423	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
1424	 * code needs an overhaul to properly forward the
1425	 * error, and we may not want to do that fallback to a
1426	 * default cpu identity map :-\
1427	 */
1428	cpus = cpu_map__new(NULL);
1429	if (!cpus)
1430		goto out;
1431
1432	threads = thread_map__new_dummy();
1433	if (!threads)
1434		goto out_put;
1435
1436	perf_evlist__set_maps(evlist, cpus, threads);
1437out:
1438	return err;
1439out_put:
1440	cpu_map__put(cpus);
1441	goto out;
1442}
1443
1444int perf_evlist__open(struct perf_evlist *evlist)
1445{
1446	struct perf_evsel *evsel;
1447	int err;
1448
1449	/*
1450	 * Default: one fd per CPU, all threads, aka systemwide
1451	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
1452	 */
1453	if (evlist->threads == NULL && evlist->cpus == NULL) {
1454		err = perf_evlist__create_syswide_maps(evlist);
1455		if (err < 0)
1456			goto out_err;
1457	}
1458
1459	perf_evlist__update_id_pos(evlist);
1460
1461	evlist__for_each(evlist, evsel) {
1462		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
1463		if (err < 0)
1464			goto out_err;
1465	}
1466
1467	return 0;
1468out_err:
1469	perf_evlist__close(evlist);
1470	errno = -err;
1471	return err;
1472}
1473
1474int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
1475				  const char *argv[], bool pipe_output,
1476				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
1477{
1478	int child_ready_pipe[2], go_pipe[2];
1479	char bf;
1480
1481	if (pipe(child_ready_pipe) < 0) {
1482		perror("failed to create 'ready' pipe");
1483		return -1;
1484	}
1485
1486	if (pipe(go_pipe) < 0) {
1487		perror("failed to create 'go' pipe");
1488		goto out_close_ready_pipe;
1489	}
1490
1491	evlist->workload.pid = fork();
1492	if (evlist->workload.pid < 0) {
1493		perror("failed to fork");
1494		goto out_close_pipes;
1495	}
1496
1497	if (!evlist->workload.pid) {
1498		int ret;
1499
1500		if (pipe_output)
1501			dup2(2, 1);
1502
1503		signal(SIGTERM, SIG_DFL);
1504
1505		close(child_ready_pipe[0]);
1506		close(go_pipe[1]);
1507		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
1508
1509		/*
1510		 * Tell the parent we're ready to go
1511		 */
1512		close(child_ready_pipe[1]);
1513
1514		/*
1515		 * Wait until the parent tells us to go.
1516		 */
1517		ret = read(go_pipe[0], &bf, 1);
1518		/*
1519		 * The parent will ask for the execvp() to be performed by
1520		 * writing exactly one byte, in workload.cork_fd, usually via
1521		 * perf_evlist__start_workload().
1522		 *
1523		 * For cancelling the workload without actually running it,
1524		 * the parent will just close workload.cork_fd, without writing
1525		 * anything, i.e. read will return zero and we just exit()
1526		 * here.
1527		 */
1528		if (ret != 1) {
1529			if (ret == -1)
1530				perror("unable to read pipe");
1531			exit(ret);
1532		}
1533
1534		execvp(argv[0], (char **)argv);
1535
1536		if (exec_error) {
1537			union sigval val;
1538
1539			val.sival_int = errno;
1540			if (sigqueue(getppid(), SIGUSR1, val))
1541				perror(argv[0]);
1542		} else
1543			perror(argv[0]);
1544		exit(-1);
1545	}
1546
1547	if (exec_error) {
1548		struct sigaction act = {
1549			.sa_flags     = SA_SIGINFO,
1550			.sa_sigaction = exec_error,
1551		};
1552		sigaction(SIGUSR1, &act, NULL);
1553	}
1554
1555	if (target__none(target)) {
1556		if (evlist->threads == NULL) {
1557			fprintf(stderr, "FATAL: evlist->threads needs to be set at this point (%s:%d).\n",
1558				__func__, __LINE__);
1559			goto out_close_pipes;
1560		}
1561		thread_map__set_pid(evlist->threads, 0, evlist->workload.pid);
1562	}
1563
1564	close(child_ready_pipe[1]);
1565	close(go_pipe[0]);
1566	/*
1567	 * wait for child to settle
1568	 */
1569	if (read(child_ready_pipe[0], &bf, 1) == -1) {
1570		perror("unable to read pipe");
1571		goto out_close_pipes;
1572	}
1573
1574	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
1575	evlist->workload.cork_fd = go_pipe[1];
1576	close(child_ready_pipe[0]);
1577	return 0;
1578
1579out_close_pipes:
1580	close(go_pipe[0]);
1581	close(go_pipe[1]);
1582out_close_ready_pipe:
1583	close(child_ready_pipe[0]);
1584	close(child_ready_pipe[1]);
1585	return -1;
1586}
1587
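/*
 * Editor's note (hedged sketch, not in the original file): the
 * intended call sequence around the cork_fd protocol above, as in
 * perf record; argv is the traced command, e.g. { "sleep", "1", NULL }.
 */
	if (perf_evlist__prepare_workload(evlist, &target, argv,
					  false, NULL) < 0)
		return -1;

	/* ...open counters and mmap while the child waits, corked... */

	perf_evlist__start_workload(evlist);	/* write 1 byte: exec now */
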
1588int perf_evlist__start_workload(struct perf_evlist *evlist)
1589{
1590	if (evlist->workload.cork_fd > 0) {
1591		char bf = 0;
1592		int ret;
1593		/*
1594		 * Remove the cork, let it rip!
1595		 */
1596		ret = write(evlist->workload.cork_fd, &bf, 1);
1597		if (ret < 0)
1598			perror("unable to write to pipe");
1599
1600		close(evlist->workload.cork_fd);
1601		return ret;
1602	}
1603
1604	return 0;
1605}
1606
1607int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
1608			      struct perf_sample *sample)
1609{
1610	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
1611
1612	if (!evsel)
1613		return -EFAULT;
1614	return perf_evsel__parse_sample(evsel, event, sample);
1615}
1616
1617size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
1618{
1619	struct perf_evsel *evsel;
1620	size_t printed = 0;
1621
1622	evlist__for_each(evlist, evsel) {
1623		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
1624				   perf_evsel__name(evsel));
1625	}
1626
1627	return printed + fprintf(fp, "\n");
1628}
1629
1630int perf_evlist__strerror_open(struct perf_evlist *evlist,
1631			       int err, char *buf, size_t size)
1632{
1633	int printed, value;
1634	char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
1635
1636	switch (err) {
1637	case EACCES:
1638	case EPERM:
1639		printed = scnprintf(buf, size,
1640				    "Error:\t%s.\n"
1641				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
1642
1643		value = perf_event_paranoid();
1644
1645		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
1646
1647		if (value >= 2) {
1648			printed += scnprintf(buf + printed, size - printed,
1649					     "For your workloads it needs to be <= 1\nHint:\t");
1650		}
1651		printed += scnprintf(buf + printed, size - printed,
1652				     "For system wide tracing it needs to be set to -1.\n");
1653
1654		printed += scnprintf(buf + printed, size - printed,
1655				    "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
1656				    "Hint:\tThe current value is %d.", value);
1657		break;
1658	case EINVAL: {
1659		struct perf_evsel *first = perf_evlist__first(evlist);
1660		int max_freq;
1661
1662		if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
1663			goto out_default;
1664
1665		if (first->attr.sample_freq < (u64)max_freq)
1666			goto out_default;
1667
1668		printed = scnprintf(buf, size,
1669				    "Error:\t%s.\n"
1670				    "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
1671				    "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
1672				    emsg, max_freq, first->attr.sample_freq);
1673		break;
1674	}
1675	default:
1676out_default:
1677		scnprintf(buf, size, "%s", emsg);
1678		break;
1679	}
1680
1681	return 0;
1682}
1683
1684int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size)
1685{
1686	char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
1687	int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0;
1688
1689	switch (err) {
1690	case EPERM:
1691		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
1692		printed += scnprintf(buf + printed, size - printed,
1693				     "Error:\t%s.\n"
1694				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
1695				     "Hint:\tTried using %zd kB.\n",
1696				     emsg, pages_max_per_user, pages_attempted);
1697
1698		if (pages_attempted >= pages_max_per_user) {
1699			printed += scnprintf(buf + printed, size - printed,
1700					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
1701					     pages_max_per_user + pages_attempted);
1702		}
1703
1704		printed += scnprintf(buf + printed, size - printed,
1705				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
1706		break;
1707	default:
1708		scnprintf(buf, size, "%s", emsg);
1709		break;
1710	}
1711
1712	return 0;
1713}
1714
1715void perf_evlist__to_front(struct perf_evlist *evlist,
1716			   struct perf_evsel *move_evsel)
1717{
1718	struct perf_evsel *evsel, *n;
1719	LIST_HEAD(move);
1720
1721	if (move_evsel == perf_evlist__first(evlist))
1722		return;
1723
1724	evlist__for_each_safe(evlist, n, evsel) {
1725		if (evsel->leader == move_evsel->leader)
1726			list_move_tail(&evsel->node, &move);
1727	}
1728
1729	list_splice(&move, &evlist->entries);
1730}
1731
1732void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
1733				     struct perf_evsel *tracking_evsel)
1734{
1735	struct perf_evsel *evsel;
1736
1737	if (tracking_evsel->tracking)
1738		return;
1739
1740	evlist__for_each(evlist, evsel) {
1741		if (evsel != tracking_evsel)
1742			evsel->tracking = false;
1743	}
1744
1745	tracking_evsel->tracking = true;
1746}
1747
1748struct perf_evsel *
1749perf_evlist__find_evsel_by_str(struct perf_evlist *evlist,
1750			       const char *str)
1751{
1752	struct perf_evsel *evsel;
1753
1754	evlist__for_each(evlist, evsel) {
1755		if (!evsel->name)
1756			continue;
1757		if (strcmp(str, evsel->name) == 0)
1758			return evsel;
1759	}
1760
1761	return NULL;
1762}
v3.15
   1/*
   2 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
   3 *
   4 * Parts came from builtin-{top,stat,record}.c, see those files for further
   5 * copyright notes.
   6 *
   7 * Released under the GPL v2. (and only v2, not any later version)
   8 */
   9#include "util.h"
  10#include <api/fs/debugfs.h>
  11#include <poll.h>
  12#include "cpumap.h"
  13#include "thread_map.h"
  14#include "target.h"
  15#include "evlist.h"
  16#include "evsel.h"
  17#include "debug.h"
  18#include <unistd.h>
  19
  20#include "parse-events.h"
  21#include "parse-options.h"
  22
  23#include <sys/mman.h>
  24
  25#include <linux/bitops.h>
  26#include <linux/hash.h>
 
 
 
 
 
  27
  28#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
  29#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
  30
  31void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
  32		       struct thread_map *threads)
  33{
  34	int i;
  35
  36	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
  37		INIT_HLIST_HEAD(&evlist->heads[i]);
  38	INIT_LIST_HEAD(&evlist->entries);
  39	perf_evlist__set_maps(evlist, cpus, threads);
 
  40	evlist->workload.pid = -1;
  41}
  42
  43struct perf_evlist *perf_evlist__new(void)
  44{
  45	struct perf_evlist *evlist = zalloc(sizeof(*evlist));
  46
  47	if (evlist != NULL)
  48		perf_evlist__init(evlist, NULL, NULL);
  49
  50	return evlist;
  51}
  52
  53struct perf_evlist *perf_evlist__new_default(void)
  54{
  55	struct perf_evlist *evlist = perf_evlist__new();
  56
  57	if (evlist && perf_evlist__add_default(evlist)) {
  58		perf_evlist__delete(evlist);
  59		evlist = NULL;
  60	}
  61
  62	return evlist;
  63}
  64
 
 
 
 
 
 
 
 
 
 
 
 
  65/**
  66 * perf_evlist__set_id_pos - set the positions of event ids.
  67 * @evlist: selected event list
  68 *
  69 * Events with compatible sample types all have the same id_pos
  70 * and is_pos.  For convenience, put a copy on evlist.
  71 */
  72void perf_evlist__set_id_pos(struct perf_evlist *evlist)
  73{
  74	struct perf_evsel *first = perf_evlist__first(evlist);
  75
  76	evlist->id_pos = first->id_pos;
  77	evlist->is_pos = first->is_pos;
  78}
  79
  80static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
  81{
  82	struct perf_evsel *evsel;
  83
  84	evlist__for_each(evlist, evsel)
  85		perf_evsel__calc_id_pos(evsel);
  86
  87	perf_evlist__set_id_pos(evlist);
  88}
  89
  90static void perf_evlist__purge(struct perf_evlist *evlist)
  91{
  92	struct perf_evsel *pos, *n;
  93
  94	evlist__for_each_safe(evlist, n, pos) {
  95		list_del_init(&pos->node);
 
  96		perf_evsel__delete(pos);
  97	}
  98
  99	evlist->nr_entries = 0;
 100}
 101
 102void perf_evlist__exit(struct perf_evlist *evlist)
 103{
 104	zfree(&evlist->mmap);
 105	zfree(&evlist->pollfd);
 106}
 107
 108void perf_evlist__delete(struct perf_evlist *evlist)
 109{
 110	perf_evlist__munmap(evlist);
 111	perf_evlist__close(evlist);
 112	cpu_map__delete(evlist->cpus);
 113	thread_map__delete(evlist->threads);
 114	evlist->cpus = NULL;
 115	evlist->threads = NULL;
 116	perf_evlist__purge(evlist);
 117	perf_evlist__exit(evlist);
 118	free(evlist);
 119}
 120
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 121void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
 122{
 
 123	list_add_tail(&entry->node, &evlist->entries);
 124	entry->idx = evlist->nr_entries;
 
 125
 126	if (!evlist->nr_entries++)
 127		perf_evlist__set_id_pos(evlist);
 
 
 
 
 
 
 
 
 
 128}
 129
 130void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
 131				   struct list_head *list,
 132				   int nr_entries)
 133{
 134	bool set_id_pos = !evlist->nr_entries;
 135
 136	list_splice_tail(list, &evlist->entries);
 137	evlist->nr_entries += nr_entries;
 138	if (set_id_pos)
 139		perf_evlist__set_id_pos(evlist);
 140}
 141
 142void __perf_evlist__set_leader(struct list_head *list)
 143{
 144	struct perf_evsel *evsel, *leader;
 145
 146	leader = list_entry(list->next, struct perf_evsel, node);
 147	evsel = list_entry(list->prev, struct perf_evsel, node);
 148
 149	leader->nr_members = evsel->idx - leader->idx + 1;
 150
 151	__evlist__for_each(list, evsel) {
 152		evsel->leader = leader;
 153	}
 154}
 155
 156void perf_evlist__set_leader(struct perf_evlist *evlist)
 157{
 158	if (evlist->nr_entries) {
 159		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
 160		__perf_evlist__set_leader(&evlist->entries);
 161	}
 162}
 163
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 164int perf_evlist__add_default(struct perf_evlist *evlist)
 165{
 166	struct perf_event_attr attr = {
 167		.type = PERF_TYPE_HARDWARE,
 168		.config = PERF_COUNT_HW_CPU_CYCLES,
 169	};
 170	struct perf_evsel *evsel;
 171
 172	event_attr_init(&attr);
 173
 
 
 174	evsel = perf_evsel__new(&attr);
 175	if (evsel == NULL)
 176		goto error;
 177
 178	/* use strdup() because free(evsel) assumes name is allocated */
 179	evsel->name = strdup("cycles");
 180	if (!evsel->name)
 181		goto error_free;
 182
 183	perf_evlist__add(evlist, evsel);
 184	return 0;
 185error_free:
 186	perf_evsel__delete(evsel);
 187error:
 188	return -ENOMEM;
 189}
 190
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 191static int perf_evlist__add_attrs(struct perf_evlist *evlist,
 192				  struct perf_event_attr *attrs, size_t nr_attrs)
 193{
 194	struct perf_evsel *evsel, *n;
 195	LIST_HEAD(head);
 196	size_t i;
 197
 198	for (i = 0; i < nr_attrs; i++) {
 199		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
 200		if (evsel == NULL)
 201			goto out_delete_partial_list;
 202		list_add_tail(&evsel->node, &head);
 203	}
 204
 205	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);
 206
 207	return 0;
 208
 209out_delete_partial_list:
 210	__evlist__for_each_safe(&head, n, evsel)
 211		perf_evsel__delete(evsel);
 212	return -1;
 213}
 214
 215int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
 216				     struct perf_event_attr *attrs, size_t nr_attrs)
 217{
 218	size_t i;
 219
 220	for (i = 0; i < nr_attrs; i++)
 221		event_attr_init(attrs + i);
 222
 223	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
 224}
 225
 226struct perf_evsel *
 227perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
 228{
 229	struct perf_evsel *evsel;
 230
 231	evlist__for_each(evlist, evsel) {
 232		if (evsel->attr.type   == PERF_TYPE_TRACEPOINT &&
 233		    (int)evsel->attr.config == id)
 234			return evsel;
 235	}
 236
 237	return NULL;
 238}
 239
 240struct perf_evsel *
 241perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
 242				     const char *name)
 243{
 244	struct perf_evsel *evsel;
 245
 246	evlist__for_each(evlist, evsel) {
 247		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
 248		    (strcmp(evsel->name, name) == 0))
 249			return evsel;
 250	}
 251
 252	return NULL;
 253}
 254
 255int perf_evlist__add_newtp(struct perf_evlist *evlist,
 256			   const char *sys, const char *name, void *handler)
 257{
 258	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);
 259
 260	if (evsel == NULL)
 261		return -1;
 262
 263	evsel->handler = handler;
 264	perf_evlist__add(evlist, evsel);
 265	return 0;
 266}
 267
 
 
 
 
 
 
 
 
 
 268void perf_evlist__disable(struct perf_evlist *evlist)
 269{
 270	int cpu, thread;
 271	struct perf_evsel *pos;
 272	int nr_cpus = cpu_map__nr(evlist->cpus);
 273	int nr_threads = thread_map__nr(evlist->threads);
 274
 275	for (cpu = 0; cpu < nr_cpus; cpu++) {
 276		evlist__for_each(evlist, pos) {
 277			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
 278				continue;
 279			for (thread = 0; thread < nr_threads; thread++)
 280				ioctl(FD(pos, cpu, thread),
 281				      PERF_EVENT_IOC_DISABLE, 0);
 282		}
 283	}
 
 
 284}
 285
 286void perf_evlist__enable(struct perf_evlist *evlist)
 287{
 288	int cpu, thread;
 289	struct perf_evsel *pos;
 290	int nr_cpus = cpu_map__nr(evlist->cpus);
 291	int nr_threads = thread_map__nr(evlist->threads);
 292
 293	for (cpu = 0; cpu < nr_cpus; cpu++) {
 294		evlist__for_each(evlist, pos) {
 295			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
 296				continue;
 297			for (thread = 0; thread < nr_threads; thread++)
 298				ioctl(FD(pos, cpu, thread),
 299				      PERF_EVENT_IOC_ENABLE, 0);
 300		}
 301	}
 302}
 303
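/*
 * Note on the two helpers above: only group leaders are poked because a
 * counter group is scheduled as a unit with its leader, so stopping the
 * leader stops the whole group.  A typical record loop brackets the
 * measured region with them (sketch):
 *
 *	perf_evlist__enable(evlist);
 *	... run the workload ...
 *	perf_evlist__disable(evlist);
 */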
 304int perf_evlist__disable_event(struct perf_evlist *evlist,
 305			       struct perf_evsel *evsel)
 306{
 307	int cpu, thread, err;
 308
 309	if (!evsel->fd)
 310		return 0;
 311
 312	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
 313		for (thread = 0; thread < evlist->threads->nr; thread++) {
 314			err = ioctl(FD(evsel, cpu, thread),
 315				    PERF_EVENT_IOC_DISABLE, 0);
 316			if (err)
 317				return err;
 318		}
 319	}
 320	return 0;
 321}
 322
 323int perf_evlist__enable_event(struct perf_evlist *evlist,
 324			      struct perf_evsel *evsel)
 325{
 326	int cpu, thread, err;
 327
 328	if (!evsel->fd)
 329		return -EINVAL;
 330
 331	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
 332		for (thread = 0; thread < evlist->threads->nr; thread++) {
 333			err = ioctl(FD(evsel, cpu, thread),
 334				    PERF_EVENT_IOC_ENABLE, 0);
 335			if (err)
 336				return err;
 337		}
 338	}
 339	return 0;
 340}
 341
 342static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
 343{
 344	int nr_cpus = cpu_map__nr(evlist->cpus);
 345	int nr_threads = thread_map__nr(evlist->threads);
 346	int nfds = nr_cpus * nr_threads * evlist->nr_entries;
 347	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
 348	return evlist->pollfd != NULL ? 0 : -ENOMEM;
 349}
 350
 351void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
 352{
 353	fcntl(fd, F_SETFL, O_NONBLOCK);
 354	evlist->pollfd[evlist->nr_fds].fd = fd;
 355	evlist->pollfd[evlist->nr_fds].events = POLLIN;
 356	evlist->nr_fds++;
 357}
 358
 359static void perf_evlist__id_hash(struct perf_evlist *evlist,
 360				 struct perf_evsel *evsel,
 361				 int cpu, int thread, u64 id)
 362{
 363	int hash;
 364	struct perf_sample_id *sid = SID(evsel, cpu, thread);
 365
 366	sid->id = id;
 367	sid->evsel = evsel;
 368	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
 369	hlist_add_head(&sid->node, &evlist->heads[hash]);
 370}
 371
 372void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
 373			 int cpu, int thread, u64 id)
 374{
 375	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
 376	evsel->id[evsel->ids++] = id;
 377}
 378
 379static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
 380				  struct perf_evsel *evsel,
 381				  int cpu, int thread, int fd)
 382{
 383	u64 read_data[4] = { 0, };
 384	int id_idx = 1; /* The first entry is the counter value */
 385	u64 id;
 386	int ret;
 387
 388	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
 389	if (!ret)
 390		goto add;
 391
 392	if (errno != ENOTTY)
 393		return -1;
 394
 395	/* Legacy way to get event id. All hail to old kernels! */
 396
 397	/*
 398	 * This way does not work with group format read, so bail
 399	 * out in that case.
 400	 */
 401	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
 402		return -1;
 403
 404	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
 405	    read(fd, &read_data, sizeof(read_data)) == -1)
 406		return -1;
 407
 408	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
 409		++id_idx;
 410	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
 411		++id_idx;
 412
 413	id = read_data[id_idx];
 414
 415 add:
 416	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
 417	return 0;
 418}
 419
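/*
 * Worked example for the legacy path above, assuming read_format =
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING |
 * PERF_FORMAT_ID (and no PERF_FORMAT_GROUP): read() fills
 *
 *	read_data[0] = counter value
 *	read_data[1] = time_enabled	(id_idx bumped to 2)
 *	read_data[2] = time_running	(id_idx bumped to 3)
 *	read_data[3] = id
 *
 * which is why read_data[] above has four u64 slots.
 */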
 420struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
 421{
 422	struct hlist_head *head;
 423	struct perf_sample_id *sid;
 424	int hash;
 425
 426	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
 427	head = &evlist->heads[hash];
 428
 429	hlist_for_each_entry(sid, head, node)
 430		if (sid->id == id)
 431			return sid;
 432
 433	return NULL;
 434}
 435
 436struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
 437{
 438	struct perf_sample_id *sid;
 439
 440	if (evlist->nr_entries == 1)
 441		return perf_evlist__first(evlist);
 442
 443	sid = perf_evlist__id2sid(evlist, id);
 444	if (sid)
 445		return sid->evsel;
 446
 447	if (!perf_evlist__sample_id_all(evlist))
 448		return perf_evlist__first(evlist);
 449
 450	return NULL;
 451}
 452
 453static int perf_evlist__event2id(struct perf_evlist *evlist,
 454				 union perf_event *event, u64 *id)
 455{
 456	const u64 *array = event->sample.array;
 457	ssize_t n;
 458
 459	n = (event->header.size - sizeof(event->header)) >> 3;
 460
 461	if (event->header.type == PERF_RECORD_SAMPLE) {
 462		if (evlist->id_pos >= n)
 463			return -1;
 464		*id = array[evlist->id_pos];
 465	} else {
 466		if (evlist->is_pos > n)
 467			return -1;
 468		n -= evlist->is_pos;
 469		*id = array[n];
 470	}
 471	return 0;
 472}
 473
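/*
 * Worked example (illustrative): with sample_type = PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME | PERF_SAMPLE_ID, a PERF_RECORD_SAMPLE payload starts
 * { pid/tid, time, id, ... }, so id_pos == 2 and the id is read from the
 * front.  Any other record type carries the sample_id_all trailer
 * { ..., pid/tid, time, id } at its tail, so with is_pos == 1 the id is
 * array[n - 1], i.e. indexed from the end.
 */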
 474static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
 475						   union perf_event *event)
 476{
 477	struct perf_evsel *first = perf_evlist__first(evlist);
 478	struct hlist_head *head;
 479	struct perf_sample_id *sid;
 480	int hash;
 481	u64 id;
 482
 483	if (evlist->nr_entries == 1)
 484		return first;
 485
 486	if (!first->attr.sample_id_all &&
 487	    event->header.type != PERF_RECORD_SAMPLE)
 488		return first;
 489
 490	if (perf_evlist__event2id(evlist, event, &id))
 491		return NULL;
 492
 493	/* Synthesized events have an id of zero */
 494	if (!id)
 495		return first;
 496
 497	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
 498	head = &evlist->heads[hash];
 499
 500	hlist_for_each_entry(sid, head, node) {
 501		if (sid->id == id)
 502			return sid->evsel;
 503	}
 504	return NULL;
 505}
 506
 507union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
 508{
 509	struct perf_mmap *md = &evlist->mmap[idx];
 510	unsigned int head = perf_mmap__read_head(md);
 511	unsigned int old = md->prev;
 512	unsigned char *data = md->base + page_size;
 513	union perf_event *event = NULL;
 514
 515	if (evlist->overwrite) {
 516		/*
 517		 * If we're further behind than half the buffer, there's a chance
 518		 * the writer will bite our tail and mess up the samples under us.
 519		 *
 520		 * If we somehow ended up ahead of the head, we got messed up.
 521		 *
 522		 * In either case, truncate and restart at head.
 523		 */
 524		int diff = head - old;
 525		if (diff > md->mask / 2 || diff < 0) {
 526			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
 527
 528			/*
 529			 * head points to a known good entry, start there.
 530			 */
 531			old = head;
 532		}
 533	}
 534
 535	if (old != head) {
 536		size_t size;
 537
 538		event = (union perf_event *)&data[old & md->mask];
 539		size = event->header.size;
 540
 541		/*
 542		 * Event straddles the mmap boundary -- header should always
 543		 * be inside due to u64 alignment of output.
 544		 */
 545		if ((old & md->mask) + size != ((old + size) & md->mask)) {
 546			unsigned int offset = old;
 547			unsigned int len = min(sizeof(*event), size), cpy;
 548			void *dst = md->event_copy;
 549
 550			do {
 551				cpy = min(md->mask + 1 - (offset & md->mask), len);
 552				memcpy(dst, &data[offset & md->mask], cpy);
 553				offset += cpy;
 554				dst += cpy;
 555				len -= cpy;
 556			} while (len);
 557
 558			event = (union perf_event *) md->event_copy;
 559		}
 560
 561		old += size;
 562	}
 563
 564	md->prev = old;
 565
 566	return event;
 567}
 568
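/*
 * Example (sketch): the usual consumer loop pairs the read above with
 * perf_evlist__mmap_consume() below, so the kernel sees the tail move:
 *
 *	union perf_event *event;
 *
 *	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
 *		... process event ...
 *		perf_evlist__mmap_consume(evlist, idx);
 *	}
 */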
 569void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
 570{
 571	if (!evlist->overwrite) {
 572		struct perf_mmap *md = &evlist->mmap[idx];
 573		unsigned int old = md->prev;
 574
 575		perf_mmap__write_tail(md, old);
 576	}
 577}
 578
 579static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
 580{
 581	if (evlist->mmap[idx].base != NULL) {
 582		munmap(evlist->mmap[idx].base, evlist->mmap_len);
 583		evlist->mmap[idx].base = NULL;
 584	}
 585}
 586
 587void perf_evlist__munmap(struct perf_evlist *evlist)
 588{
 589	int i;
 590
 591	if (evlist->mmap == NULL)
 592		return;
 593
 594	for (i = 0; i < evlist->nr_mmaps; i++)
 595		__perf_evlist__munmap(evlist, i);
 596
 597	zfree(&evlist->mmap);
 598}
 599
 600static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
 601{
 602	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
 603	if (cpu_map__empty(evlist->cpus))
 604		evlist->nr_mmaps = thread_map__nr(evlist->threads);
 605	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
 606	return evlist->mmap != NULL ? 0 : -ENOMEM;
 607}
 608
 609static int __perf_evlist__mmap(struct perf_evlist *evlist,
 610			       int idx, int prot, int mask, int fd)
 611{
 612	evlist->mmap[idx].prev = 0;
 613	evlist->mmap[idx].mask = mask;
 614	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
 615				      MAP_SHARED, fd, 0);
 616	if (evlist->mmap[idx].base == MAP_FAILED) {
 617		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
 618			  errno);
 619		evlist->mmap[idx].base = NULL;
 620		return -1;
 621	}
 622
 623	perf_evlist__add_pollfd(evlist, fd);
 624	return 0;
 625}
 626
 627static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
 628				       int prot, int mask, int cpu, int thread,
 629				       int *output)
 630{
 631	struct perf_evsel *evsel;
 632
 633	evlist__for_each(evlist, evsel) {
 634		int fd = FD(evsel, cpu, thread);
 635
 636		if (*output == -1) {
 637			*output = fd;
 638			if (__perf_evlist__mmap(evlist, idx, prot, mask,
 639						*output) < 0)
 640				return -1;
 641		} else {
 642			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
 643				return -1;
 644		}
 645
 646		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
 647		    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
 648			return -1;
 649	}
 650
 651	return 0;
 652}
 653
 654static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot,
 655				     int mask)
 656{
 657	int cpu, thread;
 658	int nr_cpus = cpu_map__nr(evlist->cpus);
 659	int nr_threads = thread_map__nr(evlist->threads);
 660
 661	pr_debug2("perf event ring buffer mmapped per cpu\n");
 662	for (cpu = 0; cpu < nr_cpus; cpu++) {
 663		int output = -1;
 664
 665		for (thread = 0; thread < nr_threads; thread++) {
 666			if (perf_evlist__mmap_per_evsel(evlist, cpu, prot, mask,
 667							cpu, thread, &output))
 668				goto out_unmap;
 669		}
 670	}
 671
 672	return 0;
 673
 674out_unmap:
 675	for (cpu = 0; cpu < nr_cpus; cpu++)
 676		__perf_evlist__munmap(evlist, cpu);
 677	return -1;
 678}
 679
 680static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot,
 681					int mask)
 682{
 683	int thread;
 684	int nr_threads = thread_map__nr(evlist->threads);
 685
 686	pr_debug2("perf event ring buffer mmapped per thread\n");
 687	for (thread = 0; thread < nr_threads; thread++) {
 688		int output = -1;
 689
 690		if (perf_evlist__mmap_per_evsel(evlist, thread, prot, mask, 0,
 691						thread, &output))
 692			goto out_unmap;
 693	}
 694
 695	return 0;
 696
 697out_unmap:
 698	for (thread = 0; thread < nr_threads; thread++)
 699		__perf_evlist__munmap(evlist, thread);
 700	return -1;
 701}
 702
 703static size_t perf_evlist__mmap_size(unsigned long pages)
 704{
 705	/* 512 KiB: default amount of unprivileged mlocked memory */
 706	if (pages == UINT_MAX)
 707		pages = (512 * 1024) / page_size;
 708	else if (!is_power_of_2(pages))
 709		return 0;
 710
 711	return (pages + 1) * page_size;
 712}
 713
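/*
 * Worked example: with 4 KiB pages, the UINT_MAX default gives
 * (512 * 1024) / 4096 = 128 data pages, and the mapping length is
 * (128 + 1) * 4096 = 516 KiB -- the extra page is the control page
 * holding struct perf_event_mmap_page.
 */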
 714static long parse_pages_arg(const char *str, unsigned long min,
 715			    unsigned long max)
 716{
 717	unsigned long pages, val;
 718	static struct parse_tag tags[] = {
 719		{ .tag  = 'B', .mult = 1       },
 720		{ .tag  = 'K', .mult = 1 << 10 },
 721		{ .tag  = 'M', .mult = 1 << 20 },
 722		{ .tag  = 'G', .mult = 1 << 30 },
 723		{ .tag  = 0 },
 724	};
 725
 726	if (str == NULL)
 727		return -EINVAL;
 728
 729	val = parse_tag_value(str, tags);
 730	if (val != (unsigned long) -1) {
 731		/* we got file size value */
 732		pages = PERF_ALIGN(val, page_size) / page_size;
 733	} else {
 734		/* we got pages count value */
 735		char *eptr;
 736		pages = strtoul(str, &eptr, 10);
 737		if (*eptr != '\0')
 738			return -EINVAL;
 739	}
 740
 741	if (pages == 0 && min == 0) {
 742		/* leave number of pages at 0 */
 743	} else if (!is_power_of_2(pages)) {
 744		/* round pages up to next power of 2 */
 745		pages = next_pow2_l(pages);
 746		if (!pages)
 747			return -EINVAL;
 748		pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
 749			pages * page_size, pages);
 750	}
 751
 752	if (pages > max)
 753		return -EINVAL;
 754
 755	return pages;
 756}
 757
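/*
 * Worked examples for parse_pages_arg(), assuming 4 KiB pages:
 *
 *	"16"	-> 16 pages (already a power of 2)
 *	"100"	-> rounded up to 128 pages, with a message printed
 *	"64K"	-> size tag: 65536 bytes == 16 pages
 *	"1G"	-> 262144 pages, rejected when that exceeds @max
 */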
 758int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
 759				  int unset __maybe_unused)
 760{
 761	unsigned int *mmap_pages = opt->value;
 762	unsigned long max = UINT_MAX;
 763	long pages;
 764
 765	if (max > SIZE_MAX / page_size)
 766		max = SIZE_MAX / page_size;
 767
 768	pages = parse_pages_arg(str, 1, max);
 769	if (pages < 0) {
 770		pr_err("Invalid argument for --mmap_pages/-m\n");
 771		return -1;
 772	}
 773
 774	*mmap_pages = pages;
 775	return 0;
 776}
 777
 778/**
 779 * perf_evlist__mmap - Create mmaps to receive events.
 780 * @evlist: list of events
 781 * @pages: map length in pages
 782 * @overwrite: overwrite older events?
 783 *
 784 * If @overwrite is %false the user needs to signal event consumption using
 785 * perf_mmap__write_tail().  Using perf_evlist__mmap_read() does this
 786 * automatically.
 787 *
 788 * Return: %0 on success, negative error code otherwise.
 789 */
 790int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
 791		      bool overwrite)
 792{
 793	struct perf_evsel *evsel;
 794	const struct cpu_map *cpus = evlist->cpus;
 795	const struct thread_map *threads = evlist->threads;
 796	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;
 797
 798	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
 799		return -ENOMEM;
 800
 801	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
 802		return -ENOMEM;
 803
 804	evlist->overwrite = overwrite;
 805	evlist->mmap_len = perf_evlist__mmap_size(pages);
 806	pr_debug("mmap size %zuB\n", evlist->mmap_len);
 807	mask = evlist->mmap_len - page_size - 1;
 808
 809	evlist__for_each(evlist, evsel) {
 810		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
 811		    evsel->sample_id == NULL &&
 812		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
 813			return -ENOMEM;
 814	}
 815
 816	if (cpu_map__empty(cpus))
 817		return perf_evlist__mmap_per_thread(evlist, prot, mask);
 818
 819	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
 820}
 821
 822int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
 823{
 824	evlist->threads = thread_map__new_str(target->pid, target->tid,
 825					      target->uid);
 826
 827	if (evlist->threads == NULL)
 828		return -1;
 829
 830	if (target__uses_dummy_map(target))
 831		evlist->cpus = cpu_map__dummy_new();
 832	else
 833		evlist->cpus = cpu_map__new(target->cpu_list);
 834
 835	if (evlist->cpus == NULL)
 836		goto out_delete_threads;
 837
 838	return 0;
 839
 840out_delete_threads:
 841	thread_map__delete(evlist->threads);
 842	return -1;
 843}
 844
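/*
 * Example (sketch; "opts->mmap_pages" stands for whatever page count the
 * tool parsed): a record-style caller strings the pieces together as
 *
 *	if (perf_evlist__create_maps(evlist, &target) < 0)
 *		goto out;
 *	if (perf_evlist__open(evlist) < 0)
 *		goto out;
 *	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0)
 *		goto out;
 *
 * i.e. resolve the cpu/thread maps first, then open the counters, then
 * mmap the ring buffers.
 */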
 845int perf_evlist__apply_filters(struct perf_evlist *evlist)
 846{
 847	struct perf_evsel *evsel;
 848	int err = 0;
 849	const int ncpus = cpu_map__nr(evlist->cpus),
 850		  nthreads = thread_map__nr(evlist->threads);
 851
 852	evlist__for_each(evlist, evsel) {
 853		if (evsel->filter == NULL)
 854			continue;
 855
 856		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
 857		if (err)
 858			break;
 859	}
 860
 861	return err;
 862}
 863
 864int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
 865{
 866	struct perf_evsel *evsel;
 867	int err = 0;
 868	const int ncpus = cpu_map__nr(evlist->cpus),
 869		  nthreads = thread_map__nr(evlist->threads);
 870
 871	evlist__for_each(evlist, evsel) {
 872		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
 873		if (err)
 874			break;
 875	}
 876
 877	return err;
 878}
 879
 880bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
 881{
 882	struct perf_evsel *pos;
 883
 884	if (evlist->nr_entries == 1)
 885		return true;
 886
 887	if (evlist->id_pos < 0 || evlist->is_pos < 0)
 888		return false;
 889
 890	evlist__for_each(evlist, pos) {
 891		if (pos->id_pos != evlist->id_pos ||
 892		    pos->is_pos != evlist->is_pos)
 893			return false;
 894	}
 895
 896	return true;
 897}
 898
 899u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
 900{
 901	struct perf_evsel *evsel;
 902
 903	if (evlist->combined_sample_type)
 904		return evlist->combined_sample_type;
 905
 906	evlist__for_each(evlist, evsel)
 907		evlist->combined_sample_type |= evsel->attr.sample_type;
 908
 909	return evlist->combined_sample_type;
 910}
 911
 912u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
 913{
 914	evlist->combined_sample_type = 0;
 915	return __perf_evlist__combined_sample_type(evlist);
 916}
 917
 918bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
 919{
 920	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
 921	u64 read_format = first->attr.read_format;
 922	u64 sample_type = first->attr.sample_type;
 923
 924	evlist__for_each(evlist, pos) {
 925		if (read_format != pos->attr.read_format)
 926			return false;
 927	}
 928
 929	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
 930	if ((sample_type & PERF_SAMPLE_READ) &&
 931	    !(read_format & PERF_FORMAT_ID)) {
 932		return false;
 933	}
 934
 935	return true;
 936}
 937
 938u64 perf_evlist__read_format(struct perf_evlist *evlist)
 939{
 940	struct perf_evsel *first = perf_evlist__first(evlist);
 941	return first->attr.read_format;
 942}
 943
 944u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
 945{
 946	struct perf_evsel *first = perf_evlist__first(evlist);
 947	struct perf_sample *data;
 948	u64 sample_type;
 949	u16 size = 0;
 950
 951	if (!first->attr.sample_id_all)
 952		goto out;
 953
 954	sample_type = first->attr.sample_type;
 955
 956	if (sample_type & PERF_SAMPLE_TID)
 957		size += sizeof(data->tid) * 2;
 958
 959	if (sample_type & PERF_SAMPLE_TIME)
 960		size += sizeof(data->time);
 961
 962	if (sample_type & PERF_SAMPLE_ID)
 963		size += sizeof(data->id);
 964
 965	if (sample_type & PERF_SAMPLE_STREAM_ID)
 966		size += sizeof(data->stream_id);
 967
 968	if (sample_type & PERF_SAMPLE_CPU)
 969		size += sizeof(data->cpu) * 2;
 970
 971	if (sample_type & PERF_SAMPLE_IDENTIFIER)
 972		size += sizeof(data->id);
 973out:
 974	return size;
 975}
 976
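/*
 * Worked example: with sample_id_all set and sample_type =
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU, the trailer is
 * 2 * 4 (pid/tid) + 8 (time) + 2 * 4 (cpu/res) = 24 bytes.
 */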
 977bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
 978{
 979	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
 980
 981	evlist__for_each_continue(evlist, pos) {
 982		if (first->attr.sample_id_all != pos->attr.sample_id_all)
 983			return false;
 984	}
 985
 986	return true;
 987}
 988
 989bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
 990{
 991	struct perf_evsel *first = perf_evlist__first(evlist);
 992	return first->attr.sample_id_all;
 993}
 994
 995void perf_evlist__set_selected(struct perf_evlist *evlist,
 996			       struct perf_evsel *evsel)
 997{
 998	evlist->selected = evsel;
 999}
1000
1001void perf_evlist__close(struct perf_evlist *evlist)
1002{
1003	struct perf_evsel *evsel;
1004	int ncpus = cpu_map__nr(evlist->cpus);
1005	int nthreads = thread_map__nr(evlist->threads);
1006	int n;
1007
1008	evlist__for_each_reverse(evlist, evsel) {
1009		n = evsel->cpus ? evsel->cpus->nr : ncpus;
1010		perf_evsel__close(evsel, n, nthreads);
1011	}
1012}
1013
1014int perf_evlist__open(struct perf_evlist *evlist)
1015{
1016	struct perf_evsel *evsel;
1017	int err;
1018
1019	perf_evlist__update_id_pos(evlist);
1020
1021	evlist__for_each(evlist, evsel) {
1022		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
1023		if (err < 0)
1024			goto out_err;
1025	}
1026
1027	return 0;
1028out_err:
1029	perf_evlist__close(evlist);
1030	errno = -err;
1031	return err;
1032}
1033
1034int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
1035				  const char *argv[], bool pipe_output,
1036				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
1037{
1038	int child_ready_pipe[2], go_pipe[2];
1039	char bf;
1040
1041	if (pipe(child_ready_pipe) < 0) {
1042		perror("failed to create 'ready' pipe");
1043		return -1;
1044	}
1045
1046	if (pipe(go_pipe) < 0) {
1047		perror("failed to create 'go' pipe");
1048		goto out_close_ready_pipe;
1049	}
1050
1051	evlist->workload.pid = fork();
1052	if (evlist->workload.pid < 0) {
1053		perror("failed to fork");
1054		goto out_close_pipes;
1055	}
1056
1057	if (!evlist->workload.pid) {
1058		if (pipe_output)
1059			dup2(2, 1);
1060
1061		signal(SIGTERM, SIG_DFL);
1062
1063		close(child_ready_pipe[0]);
1064		close(go_pipe[1]);
1065		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
1066
1067		/*
1068		 * Tell the parent we're ready to go
1069		 */
1070		close(child_ready_pipe[1]);
1071
1072		/*
1073		 * Wait until the parent tells us to go.
1074		 */
1075		if (read(go_pipe[0], &bf, 1) == -1)
1076			perror("unable to read pipe");
1077
1078		execvp(argv[0], (char **)argv);
1079
1080		if (exec_error) {
1081			union sigval val;
1082
1083			val.sival_int = errno;
1084			if (sigqueue(getppid(), SIGUSR1, val))
1085				perror(argv[0]);
1086		} else
1087			perror(argv[0]);
1088		exit(-1);
1089	}
1090
1091	if (exec_error) {
1092		struct sigaction act = {
1093			.sa_flags     = SA_SIGINFO,
1094			.sa_sigaction = exec_error,
1095		};
1096		sigaction(SIGUSR1, &act, NULL);
1097	}
1098
1099	if (target__none(target))
1100		evlist->threads->map[0] = evlist->workload.pid;
1101
1102	close(child_ready_pipe[1]);
1103	close(go_pipe[0]);
1104	/*
1105	 * wait for child to settle
1106	 */
1107	if (read(child_ready_pipe[0], &bf, 1) == -1) {
1108		perror("unable to read pipe");
1109		goto out_close_pipes;
1110	}
1111
1112	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
1113	evlist->workload.cork_fd = go_pipe[1];
1114	close(child_ready_pipe[0]);
1115	return 0;
1116
1117out_close_pipes:
1118	close(go_pipe[0]);
1119	close(go_pipe[1]);
1120out_close_ready_pipe:
1121	close(child_ready_pipe[0]);
1122	close(child_ready_pipe[1]);
1123	return -1;
1124}
1125
1126int perf_evlist__start_workload(struct perf_evlist *evlist)
1127{
1128	if (evlist->workload.cork_fd > 0) {
1129		char bf = 0;
1130		int ret;
1131		/*
1132		 * Remove the cork, let it rip!
1133		 */
1134		ret = write(evlist->workload.cork_fd, &bf, 1);
1135		if (ret < 0)
1136			perror("unable to write to pipe");
1137
1138		close(evlist->workload.cork_fd);
1139		return ret;
1140	}
1141
1142	return 0;
1143}
1144
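/*
 * Example (sketch; "status" is an illustrative local): the prepare/start
 * split lets the caller enable counters before the workload execs:
 *
 *	if (perf_evlist__prepare_workload(evlist, &target, argv,
 *					  false, NULL) < 0)
 *		return -1;
 *	perf_evlist__enable(evlist);
 *	perf_evlist__start_workload(evlist);
 *	waitpid(evlist->workload.pid, &status, 0);
 */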
1145int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
1146			      struct perf_sample *sample)
1147{
1148	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
1149
1150	if (!evsel)
1151		return -EFAULT;
1152	return perf_evsel__parse_sample(evsel, event, sample);
1153}
1154
1155size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
1156{
1157	struct perf_evsel *evsel;
1158	size_t printed = 0;
1159
1160	evlist__for_each(evlist, evsel) {
1161		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
1162				   perf_evsel__name(evsel));
1163	}
1164
1165	return printed + fprintf(fp, "\n");
1166}
1167
1168int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused,
1169			     int err, char *buf, size_t size)
1170{
1171	char sbuf[128];
1172
1173	switch (err) {
1174	case ENOENT:
1175		scnprintf(buf, size, "%s",
1176			  "Error:\tUnable to find debugfs\n"
1177			  "Hint:\tWas your kernel compiled with debugfs support?\n"
1178			  "Hint:\tIs the debugfs filesystem mounted?\n"
1179			  "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
1180		break;
1181	case EACCES:
1182		scnprintf(buf, size,
1183			  "Error:\tNo permissions to read %s/tracing/events/raw_syscalls\n"
1184			  "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
1185			  debugfs_mountpoint, debugfs_mountpoint);
1186		break;
1187	default:
1188		scnprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
1189		break;
1190	}
1191
1192	return 0;
1193}
1194
1195int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
1196			       int err, char *buf, size_t size)
1197{
1198	int printed, value;
1199	char sbuf[128], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
1200
1201	switch (err) {
1202	case EACCES:
1203	case EPERM:
1204		printed = scnprintf(buf, size,
1205				    "Error:\t%s.\n"
1206				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
1207
1208		value = perf_event_paranoid();
1209
1210		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
1211
1212		if (value >= 2) {
1213			printed += scnprintf(buf + printed, size - printed,
1214					     "For your workloads it needs to be <= 1\nHint:\t");
1215		}
1216		printed += scnprintf(buf + printed, size - printed,
1217				     "For system wide tracing it needs to be set to -1");
1218
1219		printed += scnprintf(buf + printed, size - printed,
1220				    ".\nHint:\tThe current value is %d.", value);
1221		break;
1222	default:
1223		scnprintf(buf, size, "%s", emsg);
1224		break;
1225	}
1226
1227	return 0;
1228}
1229
1230void perf_evlist__to_front(struct perf_evlist *evlist,
1231			   struct perf_evsel *move_evsel)
1232{
1233	struct perf_evsel *evsel, *n;
1234	LIST_HEAD(move);
1235
1236	if (move_evsel == perf_evlist__first(evlist))
1237		return;
1238
1239	evlist__for_each_safe(evlist, n, evsel) {
1240		if (evsel->leader == move_evsel->leader)
1241			list_move_tail(&evsel->node, &move);
1242	}
1243
1244	list_splice(&move, &evlist->entries);
1245}