v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
   4 *
   5 * Parts came from builtin-{top,stat,record}.c, see those files for further
   6 * copyright notes.
   7 */
   8#include <api/fs/fs.h>
   9#include <errno.h>
  10#include <inttypes.h>
  11#include <poll.h>
  12#include "cpumap.h"
  13#include "util/mmap.h"
  14#include "thread_map.h"
  15#include "target.h"
  16#include "evlist.h"
  17#include "evsel.h"
  18#include "debug.h"
  19#include "units.h"
  20#include <internal/lib.h> // page_size
  21#include "../perf.h"
  22#include "asm/bug.h"
  23#include "bpf-event.h"
  24#include <signal.h>
  25#include <unistd.h>
  26#include <sched.h>
  27#include <stdlib.h>
  28
  29#include "parse-events.h"
  30#include <subcmd/parse-options.h>
  31
  32#include <fcntl.h>
  33#include <sys/ioctl.h>
  34#include <sys/mman.h>
  35
  36#include <linux/bitops.h>
  37#include <linux/hash.h>
  38#include <linux/log2.h>
  39#include <linux/err.h>
  40#include <linux/string.h>
  41#include <linux/zalloc.h>
  42#include <perf/evlist.h>
  43#include <perf/evsel.h>
  44#include <perf/cpumap.h>
  45
  46#include <internal/xyarray.h>
  47
  48#ifdef LACKS_SIGQUEUE_PROTOTYPE
  49int sigqueue(pid_t pid, int sig, const union sigval value);
  50#endif
  51
  52#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
  53#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
  54
  55void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
  56		  struct perf_thread_map *threads)
  57{
  58	perf_evlist__init(&evlist->core);
  59	perf_evlist__set_maps(&evlist->core, cpus, threads);
  60	fdarray__init(&evlist->core.pollfd, 64);
  61	evlist->workload.pid = -1;
  62	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
  63}
  64
  65struct evlist *evlist__new(void)
  66{
  67	struct evlist *evlist = zalloc(sizeof(*evlist));
  68
  69	if (evlist != NULL)
  70		evlist__init(evlist, NULL, NULL);
  71
  72	return evlist;
  73}
  74
  75struct evlist *perf_evlist__new_default(void)
  76{
  77	struct evlist *evlist = evlist__new();
  78
  79	if (evlist && perf_evlist__add_default(evlist)) {
  80		evlist__delete(evlist);
  81		evlist = NULL;
  82	}
  83
  84	return evlist;
  85}
  86
  87struct evlist *perf_evlist__new_dummy(void)
  88{
  89	struct evlist *evlist = evlist__new();
  90
  91	if (evlist && perf_evlist__add_dummy(evlist)) {
  92		evlist__delete(evlist);
  93		evlist = NULL;
  94	}
  95
  96	return evlist;
  97}
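/*
 * A minimal lifecycle sketch for the constructors above, with error
 * handling elided:
 *
 *	struct evlist *evlist = perf_evlist__new_default();
 *
 *	evlist__open(evlist);
 *	evlist__enable(evlist);
 *	(read events, see evlist__mmap() below)
 *	evlist__disable(evlist);
 *	evlist__delete(evlist);
 */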
  98
  99/**
 100 * perf_evlist__set_id_pos - set the positions of event ids.
 101 * @evlist: selected event list
 102 *
 103 * Events with compatible sample types all have the same id_pos
 104 * and is_pos.  For convenience, put a copy on evlist.
 105 */
 106void perf_evlist__set_id_pos(struct evlist *evlist)
 107{
 108	struct evsel *first = evlist__first(evlist);
 109
 110	evlist->id_pos = first->id_pos;
 111	evlist->is_pos = first->is_pos;
 112}
 113
 114static void perf_evlist__update_id_pos(struct evlist *evlist)
 115{
 116	struct evsel *evsel;
 117
 118	evlist__for_each_entry(evlist, evsel)
 119		perf_evsel__calc_id_pos(evsel);
 120
 121	perf_evlist__set_id_pos(evlist);
 122}
 123
 124static void evlist__purge(struct evlist *evlist)
 125{
 126	struct evsel *pos, *n;
 127
 128	evlist__for_each_entry_safe(evlist, n, pos) {
 129		list_del_init(&pos->core.node);
 130		pos->evlist = NULL;
 131		evsel__delete(pos);
 132	}
 133
 134	evlist->core.nr_entries = 0;
 135}
 136
 137void evlist__exit(struct evlist *evlist)
 138{
 139	zfree(&evlist->mmap);
 140	zfree(&evlist->overwrite_mmap);
 141	fdarray__exit(&evlist->core.pollfd);
 142}
 143
 144void evlist__delete(struct evlist *evlist)
 145{
 146	if (evlist == NULL)
 147		return;
 148
 149	evlist__munmap(evlist);
 150	evlist__close(evlist);
 151	perf_cpu_map__put(evlist->core.cpus);
 152	perf_thread_map__put(evlist->core.threads);
 153	evlist->core.cpus = NULL;
 154	evlist->core.threads = NULL;
 155	evlist__purge(evlist);
 156	evlist__exit(evlist);
 157	free(evlist);
 158}
 159
 160void evlist__add(struct evlist *evlist, struct evsel *entry)
 161{
 162	entry->evlist = evlist;
 163	entry->idx = evlist->core.nr_entries;
 164	entry->tracking = !entry->idx;
 165
 166	perf_evlist__add(&evlist->core, &entry->core);
 167
 168	if (evlist->core.nr_entries == 1)
 169		perf_evlist__set_id_pos(evlist);
 170}
 171
 172void evlist__remove(struct evlist *evlist, struct evsel *evsel)
 173{
 174	evsel->evlist = NULL;
 175	perf_evlist__remove(&evlist->core, &evsel->core);
 176}
 177
 178void perf_evlist__splice_list_tail(struct evlist *evlist,
 179				   struct list_head *list)
 180{
 181	struct evsel *evsel, *temp;
 182
 183	__evlist__for_each_entry_safe(list, temp, evsel) {
 184		list_del_init(&evsel->core.node);
 185		evlist__add(evlist, evsel);
 186	}
 187}
 188
 189void __perf_evlist__set_leader(struct list_head *list)
 190{
 191	struct evsel *evsel, *leader;
 192
 193	leader = list_entry(list->next, struct evsel, core.node);
 194	evsel = list_entry(list->prev, struct evsel, core.node);
 195
 196	leader->core.nr_members = evsel->idx - leader->idx + 1;
 197
 198	__evlist__for_each_entry(list, evsel) {
 199		evsel->leader = leader;
 200	}
 201}
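/*
 * Worked example for __perf_evlist__set_leader(): for a sub-list holding
 * evsels with idx 2, 3 and 4, the leader is the first entry (idx 2) and
 * the last entry has idx 4, so nr_members = 4 - 2 + 1 = 3, and all three
 * evsels point their ->leader at the first one.
 */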
 202
 203void perf_evlist__set_leader(struct evlist *evlist)
 204{
 205	if (evlist->core.nr_entries) {
 206		evlist->nr_groups = evlist->core.nr_entries > 1 ? 1 : 0;
 207		__perf_evlist__set_leader(&evlist->core.entries);
 208	}
 209}
 210
 211int __perf_evlist__add_default(struct evlist *evlist, bool precise)
 212{
 213	struct evsel *evsel = perf_evsel__new_cycles(precise);
 214
 215	if (evsel == NULL)
 216		return -ENOMEM;
 217
 218	evlist__add(evlist, evsel);
 219	return 0;
 220}
 221
 222int perf_evlist__add_dummy(struct evlist *evlist)
 223{
 224	struct perf_event_attr attr = {
 225		.type	= PERF_TYPE_SOFTWARE,
 226		.config = PERF_COUNT_SW_DUMMY,
 227		.size	= sizeof(attr), /* to capture ABI version */
 228	};
 229	struct evsel *evsel = perf_evsel__new_idx(&attr, evlist->core.nr_entries);
 230
 231	if (evsel == NULL)
 232		return -ENOMEM;
 233
 234	evlist__add(evlist, evsel);
 235	return 0;
 236}
 237
 238static int evlist__add_attrs(struct evlist *evlist,
 239				  struct perf_event_attr *attrs, size_t nr_attrs)
 240{
 241	struct evsel *evsel, *n;
 242	LIST_HEAD(head);
 243	size_t i;
 244
 245	for (i = 0; i < nr_attrs; i++) {
 246		evsel = perf_evsel__new_idx(attrs + i, evlist->core.nr_entries + i);
 247		if (evsel == NULL)
 248			goto out_delete_partial_list;
 249		list_add_tail(&evsel->core.node, &head);
 250	}
 251
 252	perf_evlist__splice_list_tail(evlist, &head);
 253
 254	return 0;
 255
 256out_delete_partial_list:
 257	__evlist__for_each_entry_safe(&head, n, evsel)
 258		evsel__delete(evsel);
 259	return -1;
 260}
 261
 262int __perf_evlist__add_default_attrs(struct evlist *evlist,
 263				     struct perf_event_attr *attrs, size_t nr_attrs)
 264{
 265	size_t i;
 266
 267	for (i = 0; i < nr_attrs; i++)
 268		event_attr_init(attrs + i);
 269
 270	return evlist__add_attrs(evlist, attrs, nr_attrs);
 271}
 272
 273struct evsel *
 274perf_evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
 275{
 276	struct evsel *evsel;
 277
 278	evlist__for_each_entry(evlist, evsel) {
 279		if (evsel->core.attr.type   == PERF_TYPE_TRACEPOINT &&
 280		    (int)evsel->core.attr.config == id)
 281			return evsel;
 282	}
 283
 284	return NULL;
 285}
 286
 287struct evsel *
 288perf_evlist__find_tracepoint_by_name(struct evlist *evlist,
 289				     const char *name)
 290{
 291	struct evsel *evsel;
 292
 293	evlist__for_each_entry(evlist, evsel) {
 294		if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) &&
 295		    (strcmp(evsel->name, name) == 0))
 296			return evsel;
 297	}
 298
 299	return NULL;
 300}
 301
 302int perf_evlist__add_newtp(struct evlist *evlist,
 303			   const char *sys, const char *name, void *handler)
 304{
 305	struct evsel *evsel = perf_evsel__newtp(sys, name);
 306
 307	if (IS_ERR(evsel))
 308		return -1;
 309
 310	evsel->handler = handler;
 311	evlist__add(evlist, evsel);
 312	return 0;
 313}
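/*
 * A minimal usage sketch for perf_evlist__add_newtp(); the handler is an
 * opaque pointer whose signature is up to the caller's dispatch code, and
 * process_sched_switch here is a hypothetical example:
 *
 *	if (perf_evlist__add_newtp(evlist, "sched", "sched_switch",
 *				   process_sched_switch))
 *		return -1;
 */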
 314
 315static int perf_evlist__nr_threads(struct evlist *evlist,
 316				   struct evsel *evsel)
 317{
 318	if (evsel->core.system_wide)
 319		return 1;
 320	else
 321		return perf_thread_map__nr(evlist->core.threads);
 322}
 323
 324void evlist__disable(struct evlist *evlist)
 325{
 326	struct evsel *pos;
 327
 328	evlist__for_each_entry(evlist, pos) {
 329		if (pos->disabled || !perf_evsel__is_group_leader(pos) || !pos->core.fd)
 330			continue;
 331		evsel__disable(pos);
 332	}
 333
 334	evlist->enabled = false;
 335}
 336
 337void evlist__enable(struct evlist *evlist)
 338{
 339	struct evsel *pos;
 340
 341	evlist__for_each_entry(evlist, pos) {
 342		if (!perf_evsel__is_group_leader(pos) || !pos->core.fd)
 343			continue;
 344		evsel__enable(pos);
 345	}
 346
 347	evlist->enabled = true;
 348}
 349
 350void perf_evlist__toggle_enable(struct evlist *evlist)
 351{
 352	(evlist->enabled ? evlist__disable : evlist__enable)(evlist);
 353}
 354
 355static int perf_evlist__enable_event_cpu(struct evlist *evlist,
 356					 struct evsel *evsel, int cpu)
 357{
 358	int thread;
 359	int nr_threads = perf_evlist__nr_threads(evlist, evsel);
 360
 361	if (!evsel->core.fd)
 362		return -EINVAL;
 363
 364	for (thread = 0; thread < nr_threads; thread++) {
 365		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
 366		if (err)
 367			return err;
 368	}
 369	return 0;
 370}
 371
 372static int perf_evlist__enable_event_thread(struct evlist *evlist,
 373					    struct evsel *evsel,
 374					    int thread)
 375{
 376	int cpu;
 377	int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
 378
 379	if (!evsel->core.fd)
 380		return -EINVAL;
 381
 382	for (cpu = 0; cpu < nr_cpus; cpu++) {
 383		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
 384		if (err)
 385			return err;
 386	}
 387	return 0;
 388}
 389
 390int perf_evlist__enable_event_idx(struct evlist *evlist,
 391				  struct evsel *evsel, int idx)
 392{
 393	bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.cpus);
 394
 395	if (per_cpu_mmaps)
 396		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
 397	else
 398		return perf_evlist__enable_event_thread(evlist, evsel, idx);
 399}
 400
 401int evlist__add_pollfd(struct evlist *evlist, int fd)
 402{
 403	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN);
 404}
 405
 406static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
 407					 void *arg __maybe_unused)
 408{
 409	struct mmap *map = fda->priv[fd].ptr;
 410
 411	if (map)
 412		perf_mmap__put(map);
 413}
 414
 415int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
 416{
 417	return fdarray__filter(&evlist->core.pollfd, revents_and_mask,
 418			       perf_evlist__munmap_filtered, NULL);
 419}
 420
 421int evlist__poll(struct evlist *evlist, int timeout)
 422{
 423	return perf_evlist__poll(&evlist->core, timeout);
 424}
 425
 426static void perf_evlist__set_sid_idx(struct evlist *evlist,
 427				     struct evsel *evsel, int idx, int cpu,
 428				     int thread)
 429{
 430	struct perf_sample_id *sid = SID(evsel, cpu, thread);
 431	sid->idx = idx;
 432	if (evlist->core.cpus && cpu >= 0)
 433		sid->cpu = evlist->core.cpus->map[cpu];
 434	else
 435		sid->cpu = -1;
 436	if (!evsel->core.system_wide && evlist->core.threads && thread >= 0)
 437		sid->tid = perf_thread_map__pid(evlist->core.threads, thread);
 438	else
 439		sid->tid = -1;
 440}
 441
 442struct perf_sample_id *perf_evlist__id2sid(struct evlist *evlist, u64 id)
 443{
 444	struct hlist_head *head;
 445	struct perf_sample_id *sid;
 446	int hash;
 447
 448	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
 449	head = &evlist->core.heads[hash];
 450
 451	hlist_for_each_entry(sid, head, node)
 452		if (sid->id == id)
 453			return sid;
 454
 455	return NULL;
 456}
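/*
 * Sample ids are hashed over PERF_EVLIST__HLIST_BITS buckets in
 * evlist->core.heads, so perf_evlist__id2sid() is an O(1) average lookup:
 * each perf_sample_id carries the id, the owning evsel and, once
 * perf_evlist__set_sid_idx() has run, the mmap idx/cpu/tid it was seen on.
 */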
 457
 458struct evsel *perf_evlist__id2evsel(struct evlist *evlist, u64 id)
 459{
 460	struct perf_sample_id *sid;
 461
 462	if (evlist->core.nr_entries == 1 || !id)
 463		return evlist__first(evlist);
 464
 465	sid = perf_evlist__id2sid(evlist, id);
 466	if (sid)
 467		return container_of(sid->evsel, struct evsel, core);
 468
 469	if (!perf_evlist__sample_id_all(evlist))
 470		return evlist__first(evlist);
 471
 472	return NULL;
 473}
 474
 475struct evsel *perf_evlist__id2evsel_strict(struct evlist *evlist,
 476						u64 id)
 477{
 478	struct perf_sample_id *sid;
 479
 480	if (!id)
 481		return NULL;
 482
 483	sid = perf_evlist__id2sid(evlist, id);
 484	if (sid)
 485		return container_of(sid->evsel, struct evsel, core);
 486
 487	return NULL;
 488}
 489
 490static int perf_evlist__event2id(struct evlist *evlist,
 491				 union perf_event *event, u64 *id)
 492{
 493	const __u64 *array = event->sample.array;
 494	ssize_t n;
 495
 496	n = (event->header.size - sizeof(event->header)) >> 3;
 497
 498	if (event->header.type == PERF_RECORD_SAMPLE) {
 499		if (evlist->id_pos >= n)
 500			return -1;
 501		*id = array[evlist->id_pos];
 502	} else {
 503		if (evlist->is_pos > n)
 504			return -1;
 505		n -= evlist->is_pos;
 506		*id = array[n];
 507	}
 508	return 0;
 509}
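/*
 * Worked example for perf_evlist__event2id(): n is the number of u64
 * words following the header.  For PERF_RECORD_SAMPLE the id sits at a
 * fixed offset from the front, array[id_pos]; for every other record type
 * the sample_id trailer sits at the end, so the id is read is_pos words
 * back from the tail, array[n - is_pos].
 */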
 510
 511struct evsel *perf_evlist__event2evsel(struct evlist *evlist,
 512					    union perf_event *event)
 513{
 514	struct evsel *first = evlist__first(evlist);
 515	struct hlist_head *head;
 516	struct perf_sample_id *sid;
 517	int hash;
 518	u64 id;
 519
 520	if (evlist->core.nr_entries == 1)
 521		return first;
 522
 523	if (!first->core.attr.sample_id_all &&
 524	    event->header.type != PERF_RECORD_SAMPLE)
 525		return first;
 526
 527	if (perf_evlist__event2id(evlist, event, &id))
 528		return NULL;
 529
 530	/* Synthesized events have an id of zero */
 531	if (!id)
 532		return first;
 533
 534	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
 535	head = &evlist->core.heads[hash];
 536
 537	hlist_for_each_entry(sid, head, node) {
 538		if (sid->id == id)
 539			return container_of(sid->evsel, struct evsel, core);
 540	}
 541	return NULL;
 542}
 543
 544static int perf_evlist__set_paused(struct evlist *evlist, bool value)
 545{
 546	int i;
 547
 548	if (!evlist->overwrite_mmap)
 549		return 0;
 550
 551	for (i = 0; i < evlist->core.nr_mmaps; i++) {
 552		int fd = evlist->overwrite_mmap[i].core.fd;
 553		int err;
 554
 555		if (fd < 0)
 556			continue;
 557		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
 558		if (err)
 559			return err;
 560	}
 561	return 0;
 562}
 563
 564static int perf_evlist__pause(struct evlist *evlist)
 565{
 566	return perf_evlist__set_paused(evlist, true);
 567}
 568
 569static int perf_evlist__resume(struct evlist *evlist)
 570{
 571	return perf_evlist__set_paused(evlist, false);
 572}
 573
 574static void evlist__munmap_nofree(struct evlist *evlist)
 575{
 576	int i;
 577
 578	if (evlist->mmap)
 579		for (i = 0; i < evlist->core.nr_mmaps; i++)
 580			perf_mmap__munmap(&evlist->mmap[i]);
 581
 582	if (evlist->overwrite_mmap)
 583		for (i = 0; i < evlist->core.nr_mmaps; i++)
 584			perf_mmap__munmap(&evlist->overwrite_mmap[i]);
 585}
 586
 587void evlist__munmap(struct evlist *evlist)
 588{
 589	evlist__munmap_nofree(evlist);
 590	zfree(&evlist->mmap);
 591	zfree(&evlist->overwrite_mmap);
 592}
 593
 594static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
 595				       bool overwrite)
 596{
 597	int i;
 598	struct mmap *map;
 599
 600	evlist->core.nr_mmaps = perf_cpu_map__nr(evlist->core.cpus);
 601	if (perf_cpu_map__empty(evlist->core.cpus))
 602		evlist->core.nr_mmaps = perf_thread_map__nr(evlist->core.threads);
 603	map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
 604	if (!map)
 605		return NULL;
 606
 607	for (i = 0; i < evlist->core.nr_mmaps; i++) {
 608		map[i].core.fd = -1;
 609		map[i].core.overwrite = overwrite;
 610		/*
 611		 * When the perf_mmap() call is made we grab one refcount, plus
 612		 * one extra to let perf_mmap__consume() get the last
 613		 * events after all real references (perf_mmap__get()) are
 614		 * dropped.
 615		 *
 616		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
 617		 * thus does perf_mmap__get() on it.
 618		 */
 619		refcount_set(&map[i].core.refcnt, 0);
 620	}
 621	return map;
 622}
 623
 624static bool
 625perf_evlist__should_poll(struct evlist *evlist __maybe_unused,
 626			 struct evsel *evsel)
 627{
 628	if (evsel->core.attr.write_backward)
 629		return false;
 630	return true;
 631}
 632
 633static int evlist__mmap_per_evsel(struct evlist *evlist, int idx,
 634				       struct mmap_params *mp, int cpu_idx,
 635				       int thread, int *_output, int *_output_overwrite)
 636{
 637	struct evsel *evsel;
 638	int revent;
 639	int evlist_cpu = cpu_map__cpu(evlist->core.cpus, cpu_idx);
 640
 641	evlist__for_each_entry(evlist, evsel) {
 642		struct mmap *maps = evlist->mmap;
 643		int *output = _output;
 644		int fd;
 645		int cpu;
 646
 647		mp->prot = PROT_READ | PROT_WRITE;
 648		if (evsel->core.attr.write_backward) {
 649			output = _output_overwrite;
 650			maps = evlist->overwrite_mmap;
 651
 652			if (!maps) {
 653				maps = evlist__alloc_mmap(evlist, true);
 654				if (!maps)
 655					return -1;
 656				evlist->overwrite_mmap = maps;
 657				if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
 658					perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
 659			}
 660			mp->prot &= ~PROT_WRITE;
 661		}
 662
 663		if (evsel->core.system_wide && thread)
 664			continue;
 665
 666		cpu = perf_cpu_map__idx(evsel->core.cpus, evlist_cpu);
 667		if (cpu == -1)
 668			continue;
 669
 670		fd = FD(evsel, cpu, thread);
 671
 672		if (*output == -1) {
 673			*output = fd;
 674
 675			if (perf_mmap__mmap(&maps[idx], mp, *output, evlist_cpu) < 0)
 676				return -1;
 677		} else {
 678			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
 679				return -1;
 680
 681			perf_mmap__get(&maps[idx]);
 682		}
 683
 684		revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;
 685
 686		/*
  687		 * The system_wide flag causes a selected event to always be
  688		 * opened without a pid.  Consequently it will never get a
 689		 * POLLHUP, but it is used for tracking in combination with
 690		 * other events, so it should not need to be polled anyway.
 691		 * Therefore don't add it for polling.
 692		 */
 693		if (!evsel->core.system_wide &&
 694		     perf_evlist__add_pollfd(&evlist->core, fd, &maps[idx], revent) < 0) {
 695			perf_mmap__put(&maps[idx]);
 696			return -1;
 697		}
 698
 699		if (evsel->core.attr.read_format & PERF_FORMAT_ID) {
 700			if (perf_evlist__id_add_fd(&evlist->core, &evsel->core, cpu, thread,
 701						   fd) < 0)
 702				return -1;
 703			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
 704						 thread);
 705		}
 706	}
 707
 708	return 0;
 709}
 710
 711static int evlist__mmap_per_cpu(struct evlist *evlist,
 712				     struct mmap_params *mp)
 713{
 714	int cpu, thread;
 715	int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
 716	int nr_threads = perf_thread_map__nr(evlist->core.threads);
 717
 718	pr_debug2("perf event ring buffer mmapped per cpu\n");
 719	for (cpu = 0; cpu < nr_cpus; cpu++) {
 720		int output = -1;
 721		int output_overwrite = -1;
 722
 723		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
 724					      true);
 725
 726		for (thread = 0; thread < nr_threads; thread++) {
 727			if (evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
 728							thread, &output, &output_overwrite))
 729				goto out_unmap;
 730		}
 731	}
 732
 733	return 0;
 734
 735out_unmap:
 736	evlist__munmap_nofree(evlist);
 737	return -1;
 738}
 739
 740static int evlist__mmap_per_thread(struct evlist *evlist,
 741					struct mmap_params *mp)
 742{
 743	int thread;
 744	int nr_threads = perf_thread_map__nr(evlist->core.threads);
 745
 746	pr_debug2("perf event ring buffer mmapped per thread\n");
 747	for (thread = 0; thread < nr_threads; thread++) {
 748		int output = -1;
 749		int output_overwrite = -1;
 750
 751		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
 752					      false);
 753
 754		if (evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
 755						&output, &output_overwrite))
 756			goto out_unmap;
 757	}
 758
 759	return 0;
 760
 761out_unmap:
 762	evlist__munmap_nofree(evlist);
 763	return -1;
 764}
 765
 766unsigned long perf_event_mlock_kb_in_pages(void)
 767{
 768	unsigned long pages;
 769	int max;
 770
 771	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
 772		/*
 773		 * Pick a once upon a time good value, i.e. things look
 774		 * strange since we can't read a sysctl value, but let's not
 775		 * die yet...
 776		 */
 777		max = 512;
 778	} else {
 779		max -= (page_size / 1024);
 780	}
 781
 782	pages = (max * 1024) / page_size;
 783	if (!is_power_of_2(pages))
 784		pages = rounddown_pow_of_two(pages);
 785
 786	return pages;
 787}
 788
 789size_t evlist__mmap_size(unsigned long pages)
 790{
 791	if (pages == UINT_MAX)
 792		pages = perf_event_mlock_kb_in_pages();
 793	else if (!is_power_of_2(pages))
 794		return 0;
 795
 796	return (pages + 1) * page_size;
 797}
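/*
 * Worked example, assuming 4 KiB pages and the common 516 KiB default of
 * kernel/perf_event_mlock_kb: max = 516 - 4 = 512 KiB, so
 * perf_event_mlock_kb_in_pages() returns 512 * 1024 / 4096 = 128 pages
 * (already a power of two), and evlist__mmap_size(UINT_MAX) comes out as
 * (128 + 1) * 4096 bytes: 128 data pages plus one control page.
 */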
 798
 799static long parse_pages_arg(const char *str, unsigned long min,
 800			    unsigned long max)
 801{
 802	unsigned long pages, val;
 803	static struct parse_tag tags[] = {
 804		{ .tag  = 'B', .mult = 1       },
 805		{ .tag  = 'K', .mult = 1 << 10 },
 806		{ .tag  = 'M', .mult = 1 << 20 },
 807		{ .tag  = 'G', .mult = 1 << 30 },
 808		{ .tag  = 0 },
 809	};
 810
 811	if (str == NULL)
 812		return -EINVAL;
 813
 814	val = parse_tag_value(str, tags);
 815	if (val != (unsigned long) -1) {
 816		/* we got a file size value */
 817		pages = PERF_ALIGN(val, page_size) / page_size;
 818	} else {
 819		/* we got a page count value */
 820		char *eptr;
 821		pages = strtoul(str, &eptr, 10);
 822		if (*eptr != '\0')
 823			return -EINVAL;
 824	}
 825
 826	if (pages == 0 && min == 0) {
 827		/* leave number of pages at 0 */
 828	} else if (!is_power_of_2(pages)) {
 829		char buf[100];
 830
 831		/* round pages up to next power of 2 */
 832		pages = roundup_pow_of_two(pages);
 833		if (!pages)
 834			return -EINVAL;
 835
 836		unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
 837		pr_info("rounding mmap pages size to %s (%lu pages)\n",
 838			buf, pages);
 839	}
 840
 841	if (pages > max)
 842		return -EINVAL;
 843
 844	return pages;
 845}
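/*
 * Examples for parse_pages_arg(), assuming 4 KiB pages: "512K" is parsed
 * via the size tags as 524288 bytes and becomes 128 pages, while a bare
 * "129" is taken as a page count and rounded up to the next power of two,
 * 256, with a message printed about the rounding.
 */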
 846
 847int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
 848{
 849	unsigned long max = UINT_MAX;
 850	long pages;
 851
 852	if (max > SIZE_MAX / page_size)
 853		max = SIZE_MAX / page_size;
 854
 855	pages = parse_pages_arg(str, 1, max);
 856	if (pages < 0) {
 857		pr_err("Invalid argument for --mmap_pages/-m\n");
 858		return -1;
 859	}
 860
 861	*mmap_pages = pages;
 862	return 0;
 863}
 864
 865int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
 866				  int unset __maybe_unused)
 867{
 868	return __perf_evlist__parse_mmap_pages(opt->value, str);
 869}
 870
 871/**
 872 * evlist__mmap_ex - Create mmaps to receive events.
 873 * @evlist: list of events
 874 * @pages: map length in pages
 875 * @overwrite: overwrite older events?
 876 * @auxtrace_pages: auxtrace map length in pages
 877 * @auxtrace_overwrite: overwrite older auxtrace data?
 878 *
 879 * If @overwrite is %false the user needs to signal event consumption using
 880 * perf_mmap__write_tail().  Using evlist__mmap_read() does this
 881 * automatically.
 882 *
 883 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
 884 * consumption using auxtrace_mmap__write_tail().
 885 *
 886 * Return: %0 on success, negative error code otherwise.
 887 */
 888int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
 889			 unsigned int auxtrace_pages,
 890			 bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
 891			 int comp_level)
 892{
 893	struct evsel *evsel;
 894	const struct perf_cpu_map *cpus = evlist->core.cpus;
 895	const struct perf_thread_map *threads = evlist->core.threads;
 896	/*
 897	 * Delay setting mp.prot: set it right before calling perf_mmap__mmap,
 898	 * since its value depends on each evsel's write_backward setting.
 899	 * So &mp must not be passed as a const pointer.
 900	 */
 901	struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = affinity, .flush = flush,
 902				  .comp_level = comp_level };
 903
 904	if (!evlist->mmap)
 905		evlist->mmap = evlist__alloc_mmap(evlist, false);
 906	if (!evlist->mmap)
 907		return -ENOMEM;
 908
 909	if (evlist->core.pollfd.entries == NULL && perf_evlist__alloc_pollfd(&evlist->core) < 0)
 910		return -ENOMEM;
 911
 912	evlist->core.mmap_len = evlist__mmap_size(pages);
 913	pr_debug("mmap size %zuB\n", evlist->core.mmap_len);
 914	mp.mask = evlist->core.mmap_len - page_size - 1;
 915
 916	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
 917				   auxtrace_pages, auxtrace_overwrite);
 918
 919	evlist__for_each_entry(evlist, evsel) {
 920		if ((evsel->core.attr.read_format & PERF_FORMAT_ID) &&
 921		    evsel->core.sample_id == NULL &&
 922		    perf_evsel__alloc_id(&evsel->core, perf_cpu_map__nr(cpus), threads->nr) < 0)
 923			return -ENOMEM;
 924	}
 925
 926	if (perf_cpu_map__empty(cpus))
 927		return evlist__mmap_per_thread(evlist, &mp);
 928
 929	return evlist__mmap_per_cpu(evlist, &mp);
 930}
 931
 932int evlist__mmap(struct evlist *evlist, unsigned int pages)
 933{
 934	return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
 935}
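/*
 * A minimal consume-loop sketch on top of evlist__mmap(), mirroring what
 * perf_evlist__poll_thread() below does, with error handling elided:
 *
 *	evlist__mmap(evlist, UINT_MAX);
 *	evlist__poll(evlist, 1000);
 *	for (i = 0; i < evlist->core.nr_mmaps; i++) {
 *		struct mmap *map = &evlist->mmap[i];
 *		union perf_event *event;
 *
 *		if (perf_mmap__read_init(map))
 *			continue;
 *		while ((event = perf_mmap__read_event(map)) != NULL)
 *			perf_mmap__consume(map);
 *		perf_mmap__read_done(map);
 *	}
 */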
 936
 937int perf_evlist__create_maps(struct evlist *evlist, struct target *target)
 938{
 939	bool all_threads = (target->per_thread && target->system_wide);
 940	struct perf_cpu_map *cpus;
 941	struct perf_thread_map *threads;
 942
 943	/*
 944	 * If specify '-a' and '--per-thread' to perf record, perf record
 945	 * will override '--per-thread'. target->per_thread = false and
 946	 * target->system_wide = true.
 947	 *
 948	 * If specify '--per-thread' only to perf record,
 949	 * target->per_thread = true and target->system_wide = false.
 950	 *
 951	 * So target->per_thread && target->system_wide is false.
 952	 * For perf record, thread_map__new_str doesn't call
 953	 * thread_map__new_all_cpus. That will keep perf record's
 954	 * current behavior.
 955	 *
 956	 * For perf stat, it allows the case that target->per_thread and
 957	 * target->system_wide are all true. It means to collect system-wide
 958	 * per-thread data. thread_map__new_str will call
 959	 * thread_map__new_all_cpus to enumerate all threads.
 960	 */
 961	threads = thread_map__new_str(target->pid, target->tid, target->uid,
 962				      all_threads);
 963
 964	if (!threads)
 965		return -1;
 966
 967	if (target__uses_dummy_map(target))
 968		cpus = perf_cpu_map__dummy_new();
 969	else
 970		cpus = perf_cpu_map__new(target->cpu_list);
 971
 972	if (!cpus)
 973		goto out_delete_threads;
 974
 975	evlist->core.has_user_cpus = !!target->cpu_list;
 976
 977	perf_evlist__set_maps(&evlist->core, cpus, threads);
 978
 979	return 0;
 980
 981out_delete_threads:
 982	perf_thread_map__put(threads);
 983	return -1;
 984}
 985
 986void __perf_evlist__set_sample_bit(struct evlist *evlist,
 987				   enum perf_event_sample_format bit)
 988{
 989	struct evsel *evsel;
 990
 991	evlist__for_each_entry(evlist, evsel)
 992		__perf_evsel__set_sample_bit(evsel, bit);
 993}
 994
 995void __perf_evlist__reset_sample_bit(struct evlist *evlist,
 996				     enum perf_event_sample_format bit)
 997{
 998	struct evsel *evsel;
 999
1000	evlist__for_each_entry(evlist, evsel)
1001		__perf_evsel__reset_sample_bit(evsel, bit);
1002}
1003
1004int perf_evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
1005{
1006	struct evsel *evsel;
1007	int err = 0;
1008
1009	evlist__for_each_entry(evlist, evsel) {
1010		if (evsel->filter == NULL)
1011			continue;
1012
1013		/*
1014		 * Filters only work for tracepoint events, which don't have a cpu
1015		 * limit, so evlist and evsel should always be the same.
1016		 */
1017		err = perf_evsel__apply_filter(&evsel->core, evsel->filter);
1018		if (err) {
1019			*err_evsel = evsel;
1020			break;
1021		}
1022	}
1023
1024	return err;
1025}
1026
1027int perf_evlist__set_tp_filter(struct evlist *evlist, const char *filter)
1028{
1029	struct evsel *evsel;
1030	int err = 0;
1031
1032	evlist__for_each_entry(evlist, evsel) {
1033		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
1034			continue;
1035
1036		err = perf_evsel__set_filter(evsel, filter);
1037		if (err)
1038			break;
1039	}
1040
1041	return err;
1042}
1043
1044int perf_evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
1045{
1046	char *filter = NULL; /* so the out_free path is safe if npids == 0 */
1047	int ret = -1;
1048	size_t i;
1049
1050	for (i = 0; i < npids; ++i) {
1051		if (i == 0) {
1052			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
1053				return -1;
1054		} else {
1055			char *tmp;
1056
1057			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
1058				goto out_free;
1059
1060			free(filter);
1061			filter = tmp;
1062		}
1063	}
1064
1065	ret = perf_evlist__set_tp_filter(evlist, filter);
1066out_free:
1067	free(filter);
1068	return ret;
1069}
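/*
 * For pids {1, 2} the loop above builds the filter string
 * "common_pid != 1 && common_pid != 2", which is then applied to every
 * PERF_TYPE_TRACEPOINT evsel in the list.
 */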
1070
1071int perf_evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid)
1072{
1073	return perf_evlist__set_tp_filter_pids(evlist, 1, &pid);
1074}
1075
1076bool perf_evlist__valid_sample_type(struct evlist *evlist)
1077{
1078	struct evsel *pos;
1079
1080	if (evlist->core.nr_entries == 1)
1081		return true;
1082
1083	if (evlist->id_pos < 0 || evlist->is_pos < 0)
1084		return false;
1085
1086	evlist__for_each_entry(evlist, pos) {
1087		if (pos->id_pos != evlist->id_pos ||
1088		    pos->is_pos != evlist->is_pos)
1089			return false;
1090	}
1091
1092	return true;
1093}
1094
1095u64 __perf_evlist__combined_sample_type(struct evlist *evlist)
1096{
1097	struct evsel *evsel;
1098
1099	if (evlist->combined_sample_type)
1100		return evlist->combined_sample_type;
1101
1102	evlist__for_each_entry(evlist, evsel)
1103		evlist->combined_sample_type |= evsel->core.attr.sample_type;
1104
1105	return evlist->combined_sample_type;
1106}
1107
1108u64 perf_evlist__combined_sample_type(struct evlist *evlist)
1109{
1110	evlist->combined_sample_type = 0;
1111	return __perf_evlist__combined_sample_type(evlist);
1112}
1113
1114u64 perf_evlist__combined_branch_type(struct evlist *evlist)
1115{
1116	struct evsel *evsel;
1117	u64 branch_type = 0;
1118
1119	evlist__for_each_entry(evlist, evsel)
1120		branch_type |= evsel->core.attr.branch_sample_type;
1121	return branch_type;
1122}
1123
1124bool perf_evlist__valid_read_format(struct evlist *evlist)
1125{
1126	struct evsel *first = evlist__first(evlist), *pos = first;
1127	u64 read_format = first->core.attr.read_format;
1128	u64 sample_type = first->core.attr.sample_type;
1129
1130	evlist__for_each_entry(evlist, pos) {
1131		if (read_format != pos->core.attr.read_format)
1132			return false;
1133	}
1134
1135	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
1136	if ((sample_type & PERF_SAMPLE_READ) &&
1137	    !(read_format & PERF_FORMAT_ID)) {
1138		return false;
1139	}
1140
1141	return true;
1142}
1143
1144u16 perf_evlist__id_hdr_size(struct evlist *evlist)
1145{
1146	struct evsel *first = evlist__first(evlist);
1147	struct perf_sample *data;
1148	u64 sample_type;
1149	u16 size = 0;
1150
1151	if (!first->core.attr.sample_id_all)
1152		goto out;
1153
1154	sample_type = first->core.attr.sample_type;
1155
1156	if (sample_type & PERF_SAMPLE_TID)
1157		size += sizeof(data->tid) * 2;
1158
1159	if (sample_type & PERF_SAMPLE_TIME)
1160		size += sizeof(data->time);
1161
1162	if (sample_type & PERF_SAMPLE_ID)
1163		size += sizeof(data->id);
1164
1165	if (sample_type & PERF_SAMPLE_STREAM_ID)
1166		size += sizeof(data->stream_id);
1167
1168	if (sample_type & PERF_SAMPLE_CPU)
1169		size += sizeof(data->cpu) * 2;
1170
1171	if (sample_type & PERF_SAMPLE_IDENTIFIER)
1172		size += sizeof(data->id);
1173out:
1174	return size;
1175}
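/*
 * Worked example: with sample_id_all set and a sample_type of
 * PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_ID, the trailer added
 * to each non-sample record is sizeof(u32) * 2 + 8 + 8 = 24 bytes.
 */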
1176
1177bool perf_evlist__valid_sample_id_all(struct evlist *evlist)
1178{
1179	struct evsel *first = evlist__first(evlist), *pos = first;
1180
1181	evlist__for_each_entry_continue(evlist, pos) {
1182		if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all)
1183			return false;
1184	}
1185
1186	return true;
1187}
1188
1189bool perf_evlist__sample_id_all(struct evlist *evlist)
1190{
1191	struct evsel *first = evlist__first(evlist);
1192	return first->core.attr.sample_id_all;
1193}
1194
1195void perf_evlist__set_selected(struct evlist *evlist,
1196			       struct evsel *evsel)
1197{
1198	evlist->selected = evsel;
1199}
1200
1201void evlist__close(struct evlist *evlist)
1202{
1203	struct evsel *evsel;
1204
1205	evlist__for_each_entry_reverse(evlist, evsel)
1206		evsel__close(evsel);
1207}
1208
1209static int perf_evlist__create_syswide_maps(struct evlist *evlist)
1210{
1211	struct perf_cpu_map *cpus;
1212	struct perf_thread_map *threads;
1213	int err = -ENOMEM;
1214
1215	/*
1216	 * Try reading /sys/devices/system/cpu/online to get
1217	 * an all cpus map.
1218	 *
1219	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
1220	 * code needs an overhaul to properly forward the
1221	 * error, and we may not want to do that fallback to a
1222	 * default cpu identity map :-\
1223	 */
1224	cpus = perf_cpu_map__new(NULL);
1225	if (!cpus)
1226		goto out;
1227
1228	threads = perf_thread_map__new_dummy();
1229	if (!threads)
1230		goto out_put;
1231
1232	perf_evlist__set_maps(&evlist->core, cpus, threads);
1233	return 0;
1234out_put:
1235	perf_cpu_map__put(cpus);
1236out:
1237	return err;
1238}
1239
1240int evlist__open(struct evlist *evlist)
1241{
1242	struct evsel *evsel;
1243	int err;
1244
1245	/*
1246	 * Default: one fd per CPU, all threads, aka systemwide,
1247	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL.
1248	 */
1249	if (evlist->core.threads == NULL && evlist->core.cpus == NULL) {
1250		err = perf_evlist__create_syswide_maps(evlist);
1251		if (err < 0)
1252			goto out_err;
1253	}
1254
1255	perf_evlist__update_id_pos(evlist);
1256
1257	evlist__for_each_entry(evlist, evsel) {
1258		err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads);
1259		if (err < 0)
1260			goto out_err;
1261	}
1262
1263	return 0;
1264out_err:
1265	evlist__close(evlist);
1266	errno = -err;
1267	return err;
1268}
1269
1270int perf_evlist__prepare_workload(struct evlist *evlist, struct target *target,
1271				  const char *argv[], bool pipe_output,
1272				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
1273{
1274	int child_ready_pipe[2], go_pipe[2];
1275	char bf;
1276
1277	if (pipe(child_ready_pipe) < 0) {
1278		perror("failed to create 'ready' pipe");
1279		return -1;
1280	}
1281
1282	if (pipe(go_pipe) < 0) {
1283		perror("failed to create 'go' pipe");
1284		goto out_close_ready_pipe;
1285	}
1286
1287	evlist->workload.pid = fork();
1288	if (evlist->workload.pid < 0) {
1289		perror("failed to fork");
1290		goto out_close_pipes;
1291	}
1292
1293	if (!evlist->workload.pid) {
1294		int ret;
1295
1296		if (pipe_output)
1297			dup2(2, 1);
1298
1299		signal(SIGTERM, SIG_DFL);
1300
1301		close(child_ready_pipe[0]);
1302		close(go_pipe[1]);
1303		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
1304
1305		/*
1306		 * Tell the parent we're ready to go
1307		 */
1308		close(child_ready_pipe[1]);
1309
1310		/*
1311		 * Wait until the parent tells us to go.
1312		 */
1313		ret = read(go_pipe[0], &bf, 1);
1314		/*
1315		 * The parent will ask for the execvp() to be performed by
1316		 * writing exactly one byte to workload.cork_fd, usually via
1317		 * perf_evlist__start_workload().
1318		 *
1319		 * For cancelling the workload without actually running it,
1320		 * the parent will just close workload.cork_fd, without writing
1321		 * anything, i.e. read will return zero and we just exit()
1322		 * here.
1323		 */
1324		if (ret != 1) {
1325			if (ret == -1)
1326				perror("unable to read pipe");
1327			exit(ret);
1328		}
1329
1330		execvp(argv[0], (char **)argv);
1331
1332		if (exec_error) {
1333			union sigval val;
1334
1335			val.sival_int = errno;
1336			if (sigqueue(getppid(), SIGUSR1, val))
1337				perror(argv[0]);
1338		} else
1339			perror(argv[0]);
1340		exit(-1);
1341	}
1342
1343	if (exec_error) {
1344		struct sigaction act = {
1345			.sa_flags     = SA_SIGINFO,
1346			.sa_sigaction = exec_error,
1347		};
1348		sigaction(SIGUSR1, &act, NULL);
1349	}
1350
1351	if (target__none(target)) {
1352		if (evlist->core.threads == NULL) {
1353			fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
1354				__func__, __LINE__);
1355			goto out_close_pipes;
1356		}
1357		perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);
1358	}
1359
1360	close(child_ready_pipe[1]);
1361	close(go_pipe[0]);
1362	/*
1363	 * wait for child to settle
1364	 */
1365	if (read(child_ready_pipe[0], &bf, 1) == -1) {
1366		perror("unable to read pipe");
1367		goto out_close_pipes;
1368	}
1369
1370	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
1371	evlist->workload.cork_fd = go_pipe[1];
1372	close(child_ready_pipe[0]);
1373	return 0;
1374
1375out_close_pipes:
1376	close(go_pipe[0]);
1377	close(go_pipe[1]);
1378out_close_ready_pipe:
1379	close(child_ready_pipe[0]);
1380	close(child_ready_pipe[1]);
1381	return -1;
1382}
1383
1384int perf_evlist__start_workload(struct evlist *evlist)
1385{
1386	if (evlist->workload.cork_fd > 0) {
1387		char bf = 0;
1388		int ret;
1389		/*
1390		 * Remove the cork, let it rip!
1391		 */
1392		ret = write(evlist->workload.cork_fd, &bf, 1);
1393		if (ret < 0)
1394			perror("unable to write to pipe");
1395
1396		close(evlist->workload.cork_fd);
1397		return ret;
1398	}
1399
1400	return 0;
1401}
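/*
 * A minimal cork/uncork sketch, with error handling elided: the workload
 * is forked early but blocks on the go_pipe read until everything is set
 * up, then a single byte lets the execvp() rip.
 *
 *	perf_evlist__prepare_workload(evlist, &target, argv, false, NULL);
 *	evlist__open(evlist);
 *	evlist__mmap(evlist, UINT_MAX);
 *	evlist__enable(evlist);
 *	perf_evlist__start_workload(evlist);
 */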
1402
1403int perf_evlist__parse_sample(struct evlist *evlist, union perf_event *event,
1404			      struct perf_sample *sample)
1405{
1406	struct evsel *evsel = perf_evlist__event2evsel(evlist, event);
1407
1408	if (!evsel)
1409		return -EFAULT;
1410	return perf_evsel__parse_sample(evsel, event, sample);
1411}
1412
1413int perf_evlist__parse_sample_timestamp(struct evlist *evlist,
1414					union perf_event *event,
1415					u64 *timestamp)
1416{
1417	struct evsel *evsel = perf_evlist__event2evsel(evlist, event);
1418
1419	if (!evsel)
1420		return -EFAULT;
1421	return perf_evsel__parse_sample_timestamp(evsel, event, timestamp);
1422}
1423
1424int perf_evlist__strerror_open(struct evlist *evlist,
1425			       int err, char *buf, size_t size)
1426{
1427	int printed, value;
1428	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
1429
1430	switch (err) {
1431	case EACCES:
1432	case EPERM:
1433		printed = scnprintf(buf, size,
1434				    "Error:\t%s.\n"
1435				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
1436
1437		value = perf_event_paranoid();
1438
1439		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
1440
1441		if (value >= 2) {
1442			printed += scnprintf(buf + printed, size - printed,
1443					     "For your workloads it needs to be <= 1\nHint:\t");
1444		}
1445		printed += scnprintf(buf + printed, size - printed,
1446				     "For system wide tracing it needs to be set to -1.\n");
1447
1448		printed += scnprintf(buf + printed, size - printed,
1449				    "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
1450				    "Hint:\tThe current value is %d.", value);
1451		break;
1452	case EINVAL: {
1453		struct evsel *first = evlist__first(evlist);
1454		int max_freq;
1455
1456		if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
1457			goto out_default;
1458
1459		if (first->core.attr.sample_freq < (u64)max_freq)
1460			goto out_default;
1461
1462		printed = scnprintf(buf, size,
1463				    "Error:\t%s.\n"
1464				    "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
1465				    "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
1466				    emsg, max_freq, first->core.attr.sample_freq);
1467		break;
1468	}
1469	default:
1470out_default:
1471		scnprintf(buf, size, "%s", emsg);
1472		break;
1473	}
1474
1475	return 0;
1476}
1477
1478int perf_evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size)
1479{
1480	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
1481	int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0;
1482
1483	switch (err) {
1484	case EPERM:
1485		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
1486		printed += scnprintf(buf + printed, size - printed,
1487				     "Error:\t%s.\n"
1488				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
1489				     "Hint:\tTried using %zd kB.\n",
1490				     emsg, pages_max_per_user, pages_attempted);
1491
1492		if (pages_attempted >= pages_max_per_user) {
1493			printed += scnprintf(buf + printed, size - printed,
1494					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
1495					     pages_max_per_user + pages_attempted);
1496		}
1497
1498		printed += scnprintf(buf + printed, size - printed,
1499				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
1500		break;
1501	default:
1502		scnprintf(buf, size, "%s", emsg);
1503		break;
1504	}
1505
1506	return 0;
1507}
1508
1509void perf_evlist__to_front(struct evlist *evlist,
1510			   struct evsel *move_evsel)
1511{
1512	struct evsel *evsel, *n;
1513	LIST_HEAD(move);
1514
1515	if (move_evsel == evlist__first(evlist))
1516		return;
1517
1518	evlist__for_each_entry_safe(evlist, n, evsel) {
1519		if (evsel->leader == move_evsel->leader)
1520			list_move_tail(&evsel->core.node, &move);
1521	}
1522
1523	list_splice(&move, &evlist->core.entries);
1524}
1525
1526void perf_evlist__set_tracking_event(struct evlist *evlist,
1527				     struct evsel *tracking_evsel)
1528{
1529	struct evsel *evsel;
1530
1531	if (tracking_evsel->tracking)
1532		return;
1533
1534	evlist__for_each_entry(evlist, evsel) {
1535		if (evsel != tracking_evsel)
1536			evsel->tracking = false;
1537	}
1538
1539	tracking_evsel->tracking = true;
1540}
1541
1542struct evsel *
1543perf_evlist__find_evsel_by_str(struct evlist *evlist,
1544			       const char *str)
1545{
1546	struct evsel *evsel;
1547
1548	evlist__for_each_entry(evlist, evsel) {
1549		if (!evsel->name)
1550			continue;
1551		if (strcmp(str, evsel->name) == 0)
1552			return evsel;
1553	}
1554
1555	return NULL;
1556}
1557
1558void perf_evlist__toggle_bkw_mmap(struct evlist *evlist,
1559				  enum bkw_mmap_state state)
1560{
1561	enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
1562	enum action {
1563		NONE,
1564		PAUSE,
1565		RESUME,
1566	} action = NONE;
1567
1568	if (!evlist->overwrite_mmap)
1569		return;
1570
1571	switch (old_state) {
1572	case BKW_MMAP_NOTREADY: {
1573		if (state != BKW_MMAP_RUNNING)
1574			goto state_err;
1575		break;
1576	}
1577	case BKW_MMAP_RUNNING: {
1578		if (state != BKW_MMAP_DATA_PENDING)
1579			goto state_err;
1580		action = PAUSE;
1581		break;
1582	}
1583	case BKW_MMAP_DATA_PENDING: {
1584		if (state != BKW_MMAP_EMPTY)
1585			goto state_err;
1586		break;
1587	}
1588	case BKW_MMAP_EMPTY: {
1589		if (state != BKW_MMAP_RUNNING)
1590			goto state_err;
1591		action = RESUME;
1592		break;
1593	}
1594	default:
1595		WARN_ONCE(1, "Shouldn't get here\n");
1596	}
1597
1598	evlist->bkw_mmap_state = state;
1599
1600	switch (action) {
1601	case PAUSE:
1602		perf_evlist__pause(evlist);
1603		break;
1604	case RESUME:
1605		perf_evlist__resume(evlist);
1606		break;
1607	case NONE:
1608	default:
1609		break;
1610	}
1611
1612state_err:
1613	return;
1614}
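/*
 * Typical state walk for the overwrite (backward) ring buffers: NOTREADY
 * -> RUNNING when the maps are created, RUNNING -> DATA_PENDING (output
 * paused via PERF_EVENT_IOC_PAUSE_OUTPUT) when a snapshot is wanted,
 * DATA_PENDING -> EMPTY once the data has been collected, and EMPTY ->
 * RUNNING (output resumed) to carry on with the session.
 */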
1615
1616bool perf_evlist__exclude_kernel(struct evlist *evlist)
1617{
1618	struct evsel *evsel;
1619
1620	evlist__for_each_entry(evlist, evsel) {
1621		if (!evsel->core.attr.exclude_kernel)
1622			return false;
1623	}
1624
1625	return true;
1626}
1627
1628/*
1629 * Events in the data file are not collected in groups, but we still want
1630 * the group display. Set the artificial group and set the leader's
1631 * forced_leader flag to notify the display code.
1632 */
1633void perf_evlist__force_leader(struct evlist *evlist)
1634{
1635	if (!evlist->nr_groups) {
1636		struct evsel *leader = evlist__first(evlist);
1637
1638		perf_evlist__set_leader(evlist);
1639		leader->forced_leader = true;
1640	}
1641}
1642
1643struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list,
1644						 struct evsel *evsel)
1645{
1646	struct evsel *c2, *leader;
1647	bool is_open = true;
1648
1649	leader = evsel->leader;
1650	pr_debug("Weak group for %s/%d failed\n",
1651			leader->name, leader->core.nr_members);
1652
1653	/*
1654	 * for_each_group_member doesn't work here because it doesn't
1655	 * include the first entry.
1656	 */
1657	evlist__for_each_entry(evsel_list, c2) {
1658		if (c2 == evsel)
1659			is_open = false;
1660		if (c2->leader == leader) {
1661			if (is_open)
1662				perf_evsel__close(&c2->core);
1663			c2->leader = c2;
1664			c2->core.nr_members = 0;
1665		}
1666	}
1667	return leader;
1668}
1669
1670int perf_evlist__add_sb_event(struct evlist **evlist,
1671			      struct perf_event_attr *attr,
1672			      perf_evsel__sb_cb_t cb,
1673			      void *data)
1674{
1675	struct evsel *evsel;
1676	bool new_evlist = (*evlist) == NULL;
1677
1678	if (*evlist == NULL)
1679		*evlist = evlist__new();
1680	if (*evlist == NULL)
1681		return -1;
1682
1683	if (!attr->sample_id_all) {
1684		pr_warning("enabling sample_id_all for all side band events\n");
1685		attr->sample_id_all = 1;
1686	}
1687
1688	evsel = perf_evsel__new_idx(attr, (*evlist)->core.nr_entries);
1689	if (!evsel)
1690		goto out_err;
1691
1692	evsel->side_band.cb = cb;
1693	evsel->side_band.data = data;
1694	evlist__add(*evlist, evsel);
1695	return 0;
1696
1697out_err:
1698	if (new_evlist) {
1699		evlist__delete(*evlist);
1700		*evlist = NULL;
1701	}
1702	return -1;
1703}
1704
1705static void *perf_evlist__poll_thread(void *arg)
1706{
1707	struct evlist *evlist = arg;
1708	bool draining = false;
1709	int i, done = 0;
1710	/*
1711	 * In order to read symbols from other namespaces, perf needs to call
1712	 * setns(2).  This isn't permitted if the struct_fs has multiple users.
1713	 * unshare(2) the fs so that we may continue to setns into namespaces
1714	 * that we're observing when, for instance, reading the build-ids at
1715	 * the end of a 'perf record' session.
1716	 */
1717	unshare(CLONE_FS);
1718
1719	while (!done) {
1720		bool got_data = false;
1721
1722		if (evlist->thread.done)
1723			draining = true;
1724
1725		if (!draining)
1726			evlist__poll(evlist, 1000);
1727
1728		for (i = 0; i < evlist->core.nr_mmaps; i++) {
1729			struct mmap *map = &evlist->mmap[i];
1730			union perf_event *event;
1731
1732			if (perf_mmap__read_init(map))
1733				continue;
1734			while ((event = perf_mmap__read_event(map)) != NULL) {
1735				struct evsel *evsel = perf_evlist__event2evsel(evlist, event);
1736
1737				if (evsel && evsel->side_band.cb)
1738					evsel->side_band.cb(event, evsel->side_band.data);
1739				else
1740					pr_warning("cannot locate proper evsel for the side band event\n");
1741
1742				perf_mmap__consume(map);
1743				got_data = true;
1744			}
1745			perf_mmap__read_done(map);
1746		}
1747
1748		if (draining && !got_data)
1749			break;
1750	}
1751	return NULL;
1752}
1753
1754int perf_evlist__start_sb_thread(struct evlist *evlist,
1755				 struct target *target)
1756{
1757	struct evsel *counter;
1758
1759	if (!evlist)
1760		return 0;
1761
1762	if (perf_evlist__create_maps(evlist, target))
1763		goto out_delete_evlist;
1764
1765	evlist__for_each_entry(evlist, counter) {
1766		if (evsel__open(counter, evlist->core.cpus,
1767				     evlist->core.threads) < 0)
1768			goto out_delete_evlist;
1769	}
1770
1771	if (evlist__mmap(evlist, UINT_MAX))
1772		goto out_delete_evlist;
1773
1774	evlist__for_each_entry(evlist, counter) {
1775		if (evsel__enable(counter))
1776			goto out_delete_evlist;
1777	}
1778
1779	evlist->thread.done = 0;
1780	if (pthread_create(&evlist->thread.th, NULL, perf_evlist__poll_thread, evlist))
1781		goto out_delete_evlist;
1782
1783	return 0;
1784
1785out_delete_evlist:
1786	evlist__delete(evlist);
1787	evlist = NULL;
1788	return -1;
1789}
1790
1791void perf_evlist__stop_sb_thread(struct evlist *evlist)
1792{
1793	if (!evlist)
1794		return;
1795	evlist->thread.done = 1;
1796	pthread_join(evlist->thread.th, NULL);
1797	evlist__delete(evlist);
1798}
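/*
 * A minimal side-band sketch, assuming attr, cb and data were prepared by
 * the caller (cb matching perf_evsel__sb_cb_t), with error handling
 * elided:
 *
 *	struct evlist *sb_evlist = NULL;
 *
 *	perf_evlist__add_sb_event(&sb_evlist, &attr, cb, data);
 *	perf_evlist__start_sb_thread(sb_evlist, &target);
 *	(main session runs; cb is invoked from the poll thread)
 *	perf_evlist__stop_sb_thread(sb_evlist);
 */
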
v3.5.6
  1/*
  2 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
  3 *
  4 * Parts came from builtin-{top,stat,record}.c, see those files for further
  5 * copyright notes.
  6 *
  7 * Released under the GPL v2. (and only v2, not any later version)
  8 */
  9#include "util.h"
 10#include "debugfs.h"
 11#include <poll.h>
 12#include "cpumap.h"
 13#include "thread_map.h"
 14#include "target.h"
 15#include "evlist.h"
 16#include "evsel.h"
 17#include <unistd.h>
 18
 19#include "parse-events.h"
 20
 21#include <sys/mman.h>
 22
 23#include <linux/bitops.h>
 24#include <linux/hash.h>
 25
 26#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
 27#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
 28
 29void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
 30		       struct thread_map *threads)
 31{
 32	int i;
 33
 34	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
 35		INIT_HLIST_HEAD(&evlist->heads[i]);
 36	INIT_LIST_HEAD(&evlist->entries);
 37	perf_evlist__set_maps(evlist, cpus, threads);
 38	evlist->workload.pid = -1;
 39}
 40
 41struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
 42				     struct thread_map *threads)
 43{
 44	struct perf_evlist *evlist = zalloc(sizeof(*evlist));
 45
 46	if (evlist != NULL)
 47		perf_evlist__init(evlist, cpus, threads);
 48
 49	return evlist;
 50}
 51
 52void perf_evlist__config_attrs(struct perf_evlist *evlist,
 53			       struct perf_record_opts *opts)
 54{
 55	struct perf_evsel *evsel, *first;
 56
 57	if (evlist->cpus->map[0] < 0)
 58		opts->no_inherit = true;
 59
 60	first = list_entry(evlist->entries.next, struct perf_evsel, node);
 61
 62	list_for_each_entry(evsel, &evlist->entries, node) {
 63		perf_evsel__config(evsel, opts, first);
 64
 65		if (evlist->nr_entries > 1)
 66			evsel->attr.sample_type |= PERF_SAMPLE_ID;
 67	}
 68}
 69
 70static void perf_evlist__purge(struct perf_evlist *evlist)
 71{
 72	struct perf_evsel *pos, *n;
 73
 74	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
 75		list_del_init(&pos->node);
 76		perf_evsel__delete(pos);
 77	}
 78
 79	evlist->nr_entries = 0;
 80}
 81
 82void perf_evlist__exit(struct perf_evlist *evlist)
 83{
 84	free(evlist->mmap);
 85	free(evlist->pollfd);
 86	evlist->mmap = NULL;
 87	evlist->pollfd = NULL;
 88}
 89
 90void perf_evlist__delete(struct perf_evlist *evlist)
 91{
 92	perf_evlist__purge(evlist);
 93	perf_evlist__exit(evlist);
 94	free(evlist);
 95}
 96
 97void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
 98{
 99	list_add_tail(&entry->node, &evlist->entries);
100	++evlist->nr_entries;
101}
102
103void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
104				   struct list_head *list,
105				   int nr_entries)
106{
107	list_splice_tail(list, &evlist->entries);
108	evlist->nr_entries += nr_entries;
109}
110
111int perf_evlist__add_default(struct perf_evlist *evlist)
112{
113	struct perf_event_attr attr = {
114		.type = PERF_TYPE_HARDWARE,
115		.config = PERF_COUNT_HW_CPU_CYCLES,
116	};
117	struct perf_evsel *evsel;
118
119	event_attr_init(&attr);
120
121	evsel = perf_evsel__new(&attr, 0);
122	if (evsel == NULL)
123		goto error;
124
125	/* use strdup() because free(evsel) assumes name is allocated */
126	evsel->name = strdup("cycles");
127	if (!evsel->name)
128		goto error_free;
129
130	perf_evlist__add(evlist, evsel);
131	return 0;
132error_free:
133	perf_evsel__delete(evsel);
134error:
135	return -ENOMEM;
136}
137
138int perf_evlist__add_attrs(struct perf_evlist *evlist,
139			   struct perf_event_attr *attrs, size_t nr_attrs)
140{
141	struct perf_evsel *evsel, *n;
142	LIST_HEAD(head);
143	size_t i;
144
145	for (i = 0; i < nr_attrs; i++) {
146		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
147		if (evsel == NULL)
148			goto out_delete_partial_list;
149		list_add_tail(&evsel->node, &head);
150	}
151
152	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);
153
154	return 0;
155
156out_delete_partial_list:
157	list_for_each_entry_safe(evsel, n, &head, node)
158		perf_evsel__delete(evsel);
159	return -1;
160}
161
162int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
163				     struct perf_event_attr *attrs, size_t nr_attrs)
164{
165	size_t i;
166
167	for (i = 0; i < nr_attrs; i++)
168		event_attr_init(attrs + i);
169
170	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
171}
172
173static int trace_event__id(const char *evname)
174{
175	char *filename, *colon;
176	int err = -1, fd;
177
178	if (asprintf(&filename, "%s/%s/id", tracing_events_path, evname) < 0)
179		return -1;
180
181	colon = strrchr(filename, ':');
182	if (colon != NULL)
183		*colon = '/';
184
185	fd = open(filename, O_RDONLY);
186	if (fd >= 0) {
187		char id[16];
188		if (read(fd, id, sizeof(id)) > 0)
189			err = atoi(id);
190		close(fd);
191	}
192
193	free(filename);
194	return err;
195}
196
197int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
198				 const char *tracepoints[],
199				 size_t nr_tracepoints)
200{
201	int err;
202	size_t i;
203	struct perf_event_attr *attrs = zalloc(nr_tracepoints * sizeof(*attrs));
204
205	if (attrs == NULL)
206		return -1;
207
208	for (i = 0; i < nr_tracepoints; i++) {
209		err = trace_event__id(tracepoints[i]);
210
211		if (err < 0)
212			goto out_free_attrs;
213
214		attrs[i].type	       = PERF_TYPE_TRACEPOINT;
215		attrs[i].config	       = err;
216		attrs[i].sample_type   = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
217					  PERF_SAMPLE_CPU);
218		attrs[i].sample_period = 1;
219	}
220
221	err = perf_evlist__add_attrs(evlist, attrs, nr_tracepoints);
222out_free_attrs:
223	free(attrs);
224	return err;
225}
226
227static struct perf_evsel *
228	perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
229{
230	struct perf_evsel *evsel;
231
232	list_for_each_entry(evsel, &evlist->entries, node) {
233		if (evsel->attr.type   == PERF_TYPE_TRACEPOINT &&
234		    (int)evsel->attr.config == id)
235			return evsel;
236	}
237
238	return NULL;
239}
240
241int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
242					  const struct perf_evsel_str_handler *assocs,
243					  size_t nr_assocs)
244{
245	struct perf_evsel *evsel;
246	int err;
247	size_t i;
248
249	for (i = 0; i < nr_assocs; i++) {
250		err = trace_event__id(assocs[i].name);
251		if (err < 0)
252			goto out;
253
254		evsel = perf_evlist__find_tracepoint_by_id(evlist, err);
255		if (evsel == NULL)
256			continue;
257
258		err = -EEXIST;
259		if (evsel->handler.func != NULL)
260			goto out;
261		evsel->handler.func = assocs[i].handler;
262	}
263
264	err = 0;
265out:
266	return err;
267}
268
269void perf_evlist__disable(struct perf_evlist *evlist)
270{
271	int cpu, thread;
272	struct perf_evsel *pos;
273
274	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
275		list_for_each_entry(pos, &evlist->entries, node) {
276			for (thread = 0; thread < evlist->threads->nr; thread++)
277				ioctl(FD(pos, cpu, thread),
278				      PERF_EVENT_IOC_DISABLE, 0);
279		}
280	}
281}
282
283void perf_evlist__enable(struct perf_evlist *evlist)
284{
285	int cpu, thread;
286	struct perf_evsel *pos;
287
288	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
289		list_for_each_entry(pos, &evlist->entries, node) {
290			for (thread = 0; thread < evlist->threads->nr; thread++)
291				ioctl(FD(pos, cpu, thread),
292				      PERF_EVENT_IOC_ENABLE, 0);
293		}
294	}
295}
296
297static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
298{
299	int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
300	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
301	return evlist->pollfd != NULL ? 0 : -ENOMEM;
302}
303
304void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
305{
306	fcntl(fd, F_SETFL, O_NONBLOCK);
307	evlist->pollfd[evlist->nr_fds].fd = fd;
308	evlist->pollfd[evlist->nr_fds].events = POLLIN;
309	evlist->nr_fds++;
310}
311
312static void perf_evlist__id_hash(struct perf_evlist *evlist,
313				 struct perf_evsel *evsel,
314				 int cpu, int thread, u64 id)
315{
316	int hash;
317	struct perf_sample_id *sid = SID(evsel, cpu, thread);
318
319	sid->id = id;
320	sid->evsel = evsel;
321	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
322	hlist_add_head(&sid->node, &evlist->heads[hash]);
323}
324
325void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
326			 int cpu, int thread, u64 id)
327{
328	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
329	evsel->id[evsel->ids++] = id;
330}
331
332static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
333				  struct perf_evsel *evsel,
334				  int cpu, int thread, int fd)
335{
336	u64 read_data[4] = { 0, };
337	int id_idx = 1; /* The first entry is the counter value */
338
339	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
340	    read(fd, &read_data, sizeof(read_data)) == -1)
341		return -1;
342
343	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
344		++id_idx;
345	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
346		++id_idx;
347
348	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
349	return 0;
350}
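/*
 * Editorial worked example for the id_idx computation above.  With
 * read_format = PERF_FORMAT_ID | PERF_FORMAT_TOTAL_TIME_ENABLED |
 * PERF_FORMAT_TOTAL_TIME_RUNNING, a read() on the event fd returns:
 *
 *	read_data[0] = counter value
 *	read_data[1] = time_enabled
 *	read_data[2] = time_running
 *	read_data[3] = id
 *
 * so id_idx starts at 1 and is bumped once per TOTAL_TIME_* flag,
 * landing on index 3.  With PERF_FORMAT_ID alone the id sits at
 * read_data[1].
 */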
351
352struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
353{
354	struct hlist_head *head;
355	struct hlist_node *pos;
356	struct perf_sample_id *sid;
357	int hash;
358
359	if (evlist->nr_entries == 1)
360		return list_entry(evlist->entries.next, struct perf_evsel, node);
361
362	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
363	head = &evlist->heads[hash];
364
365	hlist_for_each_entry(sid, pos, head, node)
366		if (sid->id == id)
367			return sid->evsel;
368
369	if (!perf_evlist__sample_id_all(evlist))
370		return list_entry(evlist->entries.next, struct perf_evsel, node);
371
372	return NULL;
373}
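/*
 * Editorial sketch: demultiplexing a sample back to its evsel, assuming
 * ids were hashed via perf_evlist__id_add_fd() at mmap time:
 *
 *	struct perf_evsel *evsel = perf_evlist__id2evsel(evlist, sample_id);
 *	if (evsel == NULL)
 *		pr_debug("no evsel for id %" PRIu64 "\n", sample_id);
 */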
374
375union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
376{
377	/* XXX Move this to perf.c, making it generally available */
378	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
379	struct perf_mmap *md = &evlist->mmap[idx];
380	unsigned int head = perf_mmap__read_head(md);
381	unsigned int old = md->prev;
382	unsigned char *data = md->base + page_size;
383	union perf_event *event = NULL;
384
385	if (evlist->overwrite) {
386		/*
387		 * If we're further behind than half the buffer, there's a chance
388		 * the writer will bite our tail and mess up the samples under us.
389		 *
390		 * If we somehow ended up ahead of the head, we got messed up.
391		 *
392		 * In either case, truncate and restart at head.
393		 */
394		int diff = head - old;
395		if (diff > md->mask / 2 || diff < 0) {
396			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");
397
398			/*
399			 * head points to a known good entry, start there.
400			 */
401			old = head;
402		}
403	}
404
405	if (old != head) {
406		size_t size;
407
408		event = (union perf_event *)&data[old & md->mask];
409		size = event->header.size;
410
411		/*
412		 * Event straddles the mmap boundary -- header should always
413		 * be inside due to u64 alignment of output.
414		 */
415		if ((old & md->mask) + size != ((old + size) & md->mask)) {
416			unsigned int offset = old;
417			unsigned int len = min(sizeof(*event), size), cpy;
418			void *dst = &evlist->event_copy;
419
420			do {
421				cpy = min(md->mask + 1 - (offset & md->mask), len);
422				memcpy(dst, &data[offset & md->mask], cpy);
423				offset += cpy;
424				dst += cpy;
425				len -= cpy;
426			} while (len);
427
428			event = &evlist->event_copy;
429		}
430
431		old += size;
432	}
433
434	md->prev = old;
435
436	if (!evlist->overwrite)
437		perf_mmap__write_tail(md, old);
438
439	return event;
440}
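/*
 * Editorial sketch: a minimal consumer loop draining every mmap'ed ring
 * buffer.  process_event() is a hypothetical callback; nr_mmaps is set
 * up by perf_evlist__alloc_mmap() below:
 *
 *	union perf_event *event;
 *	int i;
 *
 *	for (i = 0; i < evlist->nr_mmaps; i++) {
 *		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL)
 *			process_event(event);
 *	}
 *
 * In non-overwrite mode each call also advances the tail for the kernel
 * (perf_mmap__write_tail() above), so the consumer need not do it.
 */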
441
442void perf_evlist__munmap(struct perf_evlist *evlist)
443{
444	int i;
445
446	for (i = 0; i < evlist->nr_mmaps; i++) {
447		if (evlist->mmap[i].base != NULL) {
448			munmap(evlist->mmap[i].base, evlist->mmap_len);
449			evlist->mmap[i].base = NULL;
450		}
451	}
452
453	free(evlist->mmap);
454	evlist->mmap = NULL;
455}
456
457static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
458{
459	evlist->nr_mmaps = evlist->cpus->nr;
460	if (evlist->cpus->map[0] == -1)
461		evlist->nr_mmaps = evlist->threads->nr;
462	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
463	return evlist->mmap != NULL ? 0 : -ENOMEM;
464}
465
466static int __perf_evlist__mmap(struct perf_evlist *evlist,
467			       int idx, int prot, int mask, int fd)
468{
469	evlist->mmap[idx].prev = 0;
470	evlist->mmap[idx].mask = mask;
471	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
472				      MAP_SHARED, fd, 0);
473	if (evlist->mmap[idx].base == MAP_FAILED) {
474		evlist->mmap[idx].base = NULL;
475		return -1;
476	}
477
478	perf_evlist__add_pollfd(evlist, fd);
479	return 0;
480}
481
482static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
483{
484	struct perf_evsel *evsel;
485	int cpu, thread;
486
487	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
488		int output = -1;
489
490		for (thread = 0; thread < evlist->threads->nr; thread++) {
491			list_for_each_entry(evsel, &evlist->entries, node) {
492				int fd = FD(evsel, cpu, thread);
493
494				if (output == -1) {
495					output = fd;
496					if (__perf_evlist__mmap(evlist, cpu,
497								prot, mask, output) < 0)
498						goto out_unmap;
499				} else {
500					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
501						goto out_unmap;
502				}
503
504				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
505				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
506					goto out_unmap;
507			}
508		}
509	}
510
511	return 0;
512
513out_unmap:
514	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
515		if (evlist->mmap[cpu].base != NULL) {
516			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
517			evlist->mmap[cpu].base = NULL;
518		}
519	}
520	return -1;
521}
522
523static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
524{
525	struct perf_evsel *evsel;
526	int thread;
527
528	for (thread = 0; thread < evlist->threads->nr; thread++) {
529		int output = -1;
530
531		list_for_each_entry(evsel, &evlist->entries, node) {
532			int fd = FD(evsel, 0, thread);
533
534			if (output == -1) {
535				output = fd;
536				if (__perf_evlist__mmap(evlist, thread,
537							prot, mask, output) < 0)
538					goto out_unmap;
539			} else {
540				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
541					goto out_unmap;
542			}
543
544			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
545			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
546				goto out_unmap;
547		}
548	}
549
550	return 0;
551
552out_unmap:
553	for (thread = 0; thread < evlist->threads->nr; thread++) {
554		if (evlist->mmap[thread].base != NULL) {
555			munmap(evlist->mmap[thread].base, evlist->mmap_len);
556			evlist->mmap[thread].base = NULL;
557		}
558	}
559	return -1;
560}
561
562/** perf_evlist__mmap - Create per cpu maps to receive events
563 *
564 * @evlist: list of events
565 * @pages: map length in pages
566 * @overwrite: overwrite older events?
567 *
568 * If overwrite is false the user needs to signal event consumption using:
569 *
570 *	struct perf_mmap *m = &evlist->mmap[cpu];
571 *	unsigned int head = perf_mmap__read_head(m);
572 *
573 *	perf_mmap__write_tail(m, head)
574 *
575 * Using perf_evlist__mmap_read() does this automatically.
576 */
577int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
578		      bool overwrite)
579{
580	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
581	struct perf_evsel *evsel;
582	const struct cpu_map *cpus = evlist->cpus;
583	const struct thread_map *threads = evlist->threads;
584	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;
585
586	/* 512 kiB: default amount of unprivileged mlocked memory */
587	if (pages == UINT_MAX)
588		pages = (512 * 1024) / page_size;
589	else if (!is_power_of_2(pages))
590		return -EINVAL;
591
592	mask = pages * page_size - 1;
593
594	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
595		return -ENOMEM;
596
597	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
598		return -ENOMEM;
599
600	evlist->overwrite = overwrite;
601	evlist->mmap_len = (pages + 1) * page_size;
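	/*
	 * Editorial note: e.g. with pages = 128 and a 4 KiB page size the
	 * data area is 512 KiB and mask is 0x7ffff; mmap_len adds one
	 * extra page for the kernel's control header, which is why the
	 * reader above starts at md->base + page_size.
	 */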
602
603	list_for_each_entry(evsel, &evlist->entries, node) {
604		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
605		    evsel->sample_id == NULL &&
606		    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
607			return -ENOMEM;
608	}
609
610	if (evlist->cpus->map[0] == -1)
611		return perf_evlist__mmap_per_thread(evlist, prot, mask);
612
613	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
614}
615
616int perf_evlist__create_maps(struct perf_evlist *evlist,
617			     struct perf_target *target)
618{
619	evlist->threads = thread_map__new_str(target->pid, target->tid,
620					      target->uid);
621
622	if (evlist->threads == NULL)
623		return -1;
624
625	if (perf_target__has_task(target))
626		evlist->cpus = cpu_map__dummy_new();
627	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
628		evlist->cpus = cpu_map__dummy_new();
629	else
630		evlist->cpus = cpu_map__new(target->cpu_list);
631
632	if (evlist->cpus == NULL)
633		goto out_delete_threads;
634
635	return 0;
636
637out_delete_threads:
638	thread_map__delete(evlist->threads);
639	return -1;
640}
641
642void perf_evlist__delete_maps(struct perf_evlist *evlist)
643{
644	cpu_map__delete(evlist->cpus);
645	thread_map__delete(evlist->threads);
646	evlist->cpus	= NULL;
647	evlist->threads = NULL;
648}
649
650int perf_evlist__set_filters(struct perf_evlist *evlist)
651{
652	const struct thread_map *threads = evlist->threads;
653	const struct cpu_map *cpus = evlist->cpus;
654	struct perf_evsel *evsel;
655	char *filter;
656	int thread;
657	int cpu;
658	int err;
659	int fd;
660
661	list_for_each_entry(evsel, &evlist->entries, node) {
662		filter = evsel->filter;
663		if (!filter)
664			continue;
665		for (cpu = 0; cpu < cpus->nr; cpu++) {
666			for (thread = 0; thread < threads->nr; thread++) {
667				fd = FD(evsel, cpu, thread);
668				err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
669				if (err)
670					return err;
671			}
672		}
673	}
674
675	return 0;
676}
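/*
 * Editorial sketch: evsel->filter holds an ordinary tracepoint filter
 * string as accepted by PERF_EVENT_IOC_SET_FILTER; the expression is
 * hypothetical:
 *
 *	evsel->filter = strdup("prev_pid == 0");
 *	...
 *	err = perf_evlist__set_filters(evlist);
 *
 * The first failing ioctl() aborts the walk and its error is returned.
 */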
677
678bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist)
679{
680	struct perf_evsel *pos, *first;
681
682	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);
683
684	list_for_each_entry_continue(pos, &evlist->entries, node) {
685		if (first->attr.sample_type != pos->attr.sample_type)
686			return false;
687	}
688
689	return true;
690}
691
692u64 perf_evlist__sample_type(const struct perf_evlist *evlist)
693{
694	struct perf_evsel *first;
695
696	first = list_entry(evlist->entries.next, struct perf_evsel, node);
697	return first->attr.sample_type;
698}
699
700u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist)
701{
702	struct perf_evsel *first;
703	struct perf_sample *data;
704	u64 sample_type;
705	u16 size = 0;
706
707	first = list_entry(evlist->entries.next, struct perf_evsel, node);
708
709	if (!first->attr.sample_id_all)
710		goto out;
711
712	sample_type = first->attr.sample_type;
713
714	if (sample_type & PERF_SAMPLE_TID)
715		size += sizeof(data->tid) * 2;
716
717	if (sample_type & PERF_SAMPLE_TIME)
718		size += sizeof(data->time);
719
720	if (sample_type & PERF_SAMPLE_ID)
721		size += sizeof(data->id);
722
723	if (sample_type & PERF_SAMPLE_STREAM_ID)
724		size += sizeof(data->stream_id);
725
726	if (sample_type & PERF_SAMPLE_CPU)
727		size += sizeof(data->cpu) * 2;
728out:
729	return size;
730}
731
732bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
733{
734	struct perf_evsel *pos, *first;
735
736	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);
737
738	list_for_each_entry_continue(pos, &evlist->entries, node) {
739		if (first->attr.sample_id_all != pos->attr.sample_id_all)
740			return false;
741	}
742
743	return true;
744}
745
746bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
747{
748	struct perf_evsel *first;
749
750	first = list_entry(evlist->entries.next, struct perf_evsel, node);
751	return first->attr.sample_id_all;
752}
753
754void perf_evlist__set_selected(struct perf_evlist *evlist,
755			       struct perf_evsel *evsel)
756{
757	evlist->selected = evsel;
758}
759
760int perf_evlist__open(struct perf_evlist *evlist, bool group)
761{
762	struct perf_evsel *evsel, *first;
763	int err, ncpus, nthreads;
764
765	first = list_entry(evlist->entries.next, struct perf_evsel, node);
766
767	list_for_each_entry(evsel, &evlist->entries, node) {
768		struct xyarray *group_fd = NULL;
769
770		if (group && evsel != first)
771			group_fd = first->fd;
772
773		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads,
774				       group, group_fd);
775		if (err < 0)
776			goto out_err;
777	}
778
779	return 0;
780out_err:
781	ncpus = evlist->cpus ? evlist->cpus->nr : 1;
782	nthreads = evlist->threads ? evlist->threads->nr : 1;
783
784	list_for_each_entry_reverse(evsel, &evlist->entries, node)
785		perf_evsel__close(evsel, ncpus, nthreads);
786
787	errno = -err;
788	return err;
789}
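/*
 * Editorial sketch: opening every event, optionally as one group led by
 * the first evsel (whose fd array is passed as group_fd above):
 *
 *	if (perf_evlist__open(evlist, true) < 0) {
 *		pr_err("failed to open events: %s\n", strerror(errno));
 *		goto out;
 *	}
 *
 * On failure all evsels opened so far are closed in reverse order and
 * errno is set from the negative return value.
 */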
790
791int perf_evlist__prepare_workload(struct perf_evlist *evlist,
792				  struct perf_record_opts *opts,
793				  const char *argv[])
794{
795	int child_ready_pipe[2], go_pipe[2];
796	char bf;
797
798	if (pipe(child_ready_pipe) < 0) {
799		perror("failed to create 'ready' pipe");
800		return -1;
801	}
802
803	if (pipe(go_pipe) < 0) {
804		perror("failed to create 'go' pipe");
805		goto out_close_ready_pipe;
806	}
807
808	evlist->workload.pid = fork();
809	if (evlist->workload.pid < 0) {
810		perror("failed to fork");
811		goto out_close_pipes;
812	}
813
814	if (!evlist->workload.pid) {
815		if (opts->pipe_output)
816			dup2(2, 1);
817
818		close(child_ready_pipe[0]);
819		close(go_pipe[1]);
820		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
821
822		/*
823		 * Do a dummy execvp to get the PLT entry resolved,
824		 * so we avoid the resolver overhead on the real
825		 * execvp call.
826		 */
827		execvp("", (char **)argv);
828
829		/*
830		 * Tell the parent we're ready to go
831		 */
832		close(child_ready_pipe[1]);
833
834		/*
835		 * Wait until the parent tells us to go.
836		 */
837		if (read(go_pipe[0], &bf, 1) == -1)
838			perror("unable to read pipe");
839
840		execvp(argv[0], (char **)argv);
841
842		perror(argv[0]);
843		kill(getppid(), SIGUSR1);
844		exit(-1);
845	}
846
847	if (perf_target__none(&opts->target))
848		evlist->threads->map[0] = evlist->workload.pid;
849
850	close(child_ready_pipe[1]);
851	close(go_pipe[0]);
852	/*
853	 * wait for child to settle
854	 */
855	if (read(child_ready_pipe[0], &bf, 1) == -1) {
856		perror("unable to read pipe");
857		goto out_close_pipes;
858	}
859
860	evlist->workload.cork_fd = go_pipe[1];
861	close(child_ready_pipe[0]);
862	return 0;
863
864out_close_pipes:
865	close(go_pipe[0]);
866	close(go_pipe[1]);
867out_close_ready_pipe:
868	close(child_ready_pipe[0]);
869	close(child_ready_pipe[1]);
870	return -1;
871}
872
873int perf_evlist__start_workload(struct perf_evlist *evlist)
874{
875	if (evlist->workload.cork_fd > 0) {
876		/*
877		 * Remove the cork, let it rip!
878		 */
879		return close(evlist->workload.cork_fd);
880	}
881
882	return 0;
883}
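/*
 * Editorial sketch: how the pieces in this file compose for a
 * record-style session.  Allocation of the evlist, event parsing and
 * all error handling are elided; opts and argv are assumptions:
 *
 *	perf_evlist__create_maps(evlist, &opts->target);
 *	perf_evlist__prepare_workload(evlist, opts, argv);
 *	perf_evlist__open(evlist, false);
 *	perf_evlist__mmap(evlist, UINT_MAX, false);
 *	perf_evlist__start_workload(evlist);
 *
 * The forked child blocks on the "go" pipe inside prepare_workload()
 * until start_workload() closes the cork fd, so counters and mmaps are
 * in place before the workload's execvp() actually runs.
 */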