tools/lib/perf/evlist.c at two kernel releases, v6.2 and v6.8:

v6.2
// SPDX-License-Identifier: GPL-2.0
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <sys/ioctl.h>
#include <internal/evlist.h>
#include <internal/evsel.h>
#include <internal/xyarray.h>
#include <internal/mmap.h>
#include <internal/cpumap.h>
#include <internal/threadmap.h>
#include <internal/lib.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <signal.h>
#include <poll.h>
#include <sys/mman.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <api/fd/array.h>
#include "internal.h"

void perf_evlist__init(struct perf_evlist *evlist)
{
	INIT_LIST_HEAD(&evlist->entries);
	evlist->nr_entries = 0;
	fdarray__init(&evlist->pollfd, 64);
	perf_evlist__reset_id_hash(evlist);
}

static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	/*
	 * We already have cpus for evsel (via PMU sysfs) so
	 * keep it, if there's no target cpu list defined.
	 */
	if (evsel->system_wide) {
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__new(NULL);
	} else if (!evsel->own_cpus || evlist->has_user_cpus ||
		   (!evsel->requires_cpu && perf_cpu_map__empty(evlist->user_requested_cpus))) {
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
	}

	if (evsel->system_wide) {
		perf_thread_map__put(evsel->threads);
		evsel->threads = perf_thread_map__new_dummy();
	} else {
		perf_thread_map__put(evsel->threads);
		evsel->threads = perf_thread_map__get(evlist->threads);
	}

	evlist->all_cpus = perf_cpu_map__merge(evlist->all_cpus, evsel->cpus);
}

static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist->needs_map_propagation = true;

	perf_evlist__for_each_evsel(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__add(struct perf_evlist *evlist,
		      struct perf_evsel *evsel)
{
	evsel->idx = evlist->nr_entries;
	list_add_tail(&evsel->node, &evlist->entries);
	evlist->nr_entries += 1;

	if (evlist->needs_map_propagation)
		__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__remove(struct perf_evlist *evlist,
			 struct perf_evsel *evsel)
{
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist);

	return evlist;
}

struct perf_evsel *
perf_evlist__next(struct perf_evlist *evlist, struct perf_evsel *prev)
{
	struct perf_evsel *next;

	if (!prev) {
		next = list_first_entry(&evlist->entries,
					struct perf_evsel,
					node);
	} else {
		next = list_next_entry(prev, node);
	}

	/* Empty list is noticed here so don't need checking on entry. */
	if (&next->node == &evlist->entries)
		return NULL;

	return next;
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	perf_evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	perf_cpu_map__put(evlist->user_requested_cpus);
	perf_cpu_map__put(evlist->all_cpus);
	perf_thread_map__put(evlist->threads);
	evlist->user_requested_cpus = NULL;
	evlist->all_cpus = NULL;
	evlist->threads = NULL;
	fdarray__exit(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	if (evlist == NULL)
		return;

	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__set_maps(struct perf_evlist *evlist,
			   struct perf_cpu_map *cpus,
			   struct perf_thread_map *threads)
{
	/*
	 * Allow for the possibility that one or another of the maps isn't being
	 * changed i.e. don't put it.  Note we are assuming the maps that are
	 * being applied are brand new and evlist is taking ownership of the
	 * original reference count of 1.  If that is not the case it is up to
	 * the caller to increase the reference count.
	 */
	if (cpus != evlist->user_requested_cpus) {
		perf_cpu_map__put(evlist->user_requested_cpus);
		evlist->user_requested_cpus = perf_cpu_map__get(cpus);
	}

	if (threads != evlist->threads) {
		perf_thread_map__put(evlist->threads);
		evlist->threads = perf_thread_map__get(threads);
	}

	perf_evlist__propagate_maps(evlist);
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__for_each_entry(evlist, evsel) {
		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;

out_err:
	perf_evlist__close(evlist);
	return err;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry_reverse(evlist, evsel)
		perf_evsel__close(evsel);
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__enable(evsel);
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__disable(evsel);
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	return first->attr.read_format;
}

#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__reset_id_hash(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
}

void perf_evlist__id_add(struct perf_evlist *evlist,
			 struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get the event id. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
			    void *ptr, short revent, enum fdarray_flags flags)
{
	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP, flags);

	if (pos >= 0) {
		evlist->pollfd.priv[pos].ptr = ptr;
		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
					 void *arg __maybe_unused)
{
	struct perf_mmap *map = fda->priv[fd].ptr;

	if (map)
		perf_mmap__put(map);
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered, NULL);
}

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}

static struct perf_mmap* perf_evlist__alloc_mmap(struct perf_evlist *evlist, bool overwrite)
{
	int i;
	struct perf_mmap *map;

	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *prev = i ? &map[i - 1] : NULL;

		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		perf_mmap__init(&map[i], prev, overwrite, NULL);
	}

	return map;
}

static void perf_evsel__set_sid_idx(struct perf_evsel *evsel, int idx, int cpu, int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->idx = idx;
	sid->cpu = perf_cpu_map__cpu(evsel->cpus, cpu);
	sid->tid = perf_thread_map__pid(evsel->threads, thread);
}

static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx)
{
	struct perf_mmap *maps;

	maps = overwrite ? evlist->mmap_ovw : evlist->mmap;

	if (!maps) {
		maps = perf_evlist__alloc_mmap(evlist, overwrite);
		if (!maps)
			return NULL;

		if (overwrite)
			evlist->mmap_ovw = maps;
		else
			evlist->mmap = maps;
	}

	return &maps[idx];
}

#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))

static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
			  int output, struct perf_cpu cpu)
{
	return perf_mmap__mmap(map, mp, output, cpu);
}

static void perf_evlist__set_mmap_first(struct perf_evlist *evlist, struct perf_mmap *map,
					bool overwrite)
{
	if (overwrite)
		evlist->mmap_ovw_first = map;
	else
		evlist->mmap_first = map;
}

static int
mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	       int idx, struct perf_mmap_param *mp, int cpu_idx,
	       int thread, int *_output, int *_output_overwrite, int *nr_mmaps)
{
	struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->all_cpus, cpu_idx);
	struct perf_evsel *evsel;
	int revent;

	perf_evlist__for_each_entry(evlist, evsel) {
		bool overwrite = evsel->attr.write_backward;
		enum fdarray_flags flgs;
		struct perf_mmap *map;
		int *output, fd, cpu;

		if (evsel->system_wide && thread)
			continue;

		cpu = perf_cpu_map__idx(evsel->cpus, evlist_cpu);
		if (cpu == -1)
			continue;

		map = ops->get(evlist, overwrite, idx);
		if (map == NULL)
			return -ENOMEM;

		if (overwrite) {
			mp->prot = PROT_READ;
			output   = _output_overwrite;
		} else {
			mp->prot = PROT_READ | PROT_WRITE;
			output   = _output;
		}

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;

			/*
			 * The last one will be done at perf_mmap__consume(), so that we
			 * make sure we don't prevent tools from consuming every last event in
			 * the ring buffer.
			 *
			 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
			 * anymore, but the last events for it are still in the ring buffer,
			 * waiting to be consumed.
			 *
			 * Tools can choose to ignore this at their own discretion, but the
			 * evlist layer can't just drop it when filtering events in
			 * perf_evlist__filter_pollfd().
			 */
			refcount_set(&map->refcnt, 2);

			if (ops->idx)
				ops->idx(evlist, evsel, mp, idx);

			/* Debug message used by test scripts */
			pr_debug("idx %d: mmapping fd %d\n", idx, *output);
			if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
				return -1;

			*nr_mmaps += 1;

			if (!idx)
				perf_evlist__set_mmap_first(evlist, map, overwrite);
		} else {
			/* Debug message used by test scripts */
			pr_debug("idx %d: set output fd %d -> %d\n", idx, fd, *output);
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_mmap__get(map);
		}

		revent = !overwrite ? POLLIN : 0;

		flgs = evsel->system_wide ? fdarray_flag__nonfilterable : fdarray_flag__default;
		if (perf_evlist__add_pollfd(evlist, fd, map, revent, flgs) < 0) {
			perf_mmap__put(map);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evsel__set_sid_idx(evsel, idx, cpu, thread);
		}
	}

	return 0;
}

static int
mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
		struct perf_mmap_param *mp)
{
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nr_cpus    = perf_cpu_map__nr(evlist->all_cpus);
	int cpu, thread, idx = 0;
	int nr_mmaps = 0;

	pr_debug("%s: nr cpu values (may include -1) %d nr threads %d\n",
		 __func__, nr_cpus, nr_threads);

	/* per-thread mmaps */
	for (thread = 0; thread < nr_threads; thread++, idx++) {
		int output = -1;
		int output_overwrite = -1;

		if (mmap_per_evsel(evlist, ops, idx, mp, 0, thread, &output,
				   &output_overwrite, &nr_mmaps))
			goto out_unmap;
	}

	/* system-wide mmaps i.e. per-cpu */
	for (cpu = 1; cpu < nr_cpus; cpu++, idx++) {
		int output = -1;
		int output_overwrite = -1;

		if (mmap_per_evsel(evlist, ops, idx, mp, cpu, 0, &output,
				   &output_overwrite, &nr_mmaps))
			goto out_unmap;
	}

	if (nr_mmaps != evlist->nr_mmaps)
		pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}

static int
mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	     struct perf_mmap_param *mp)
{
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nr_cpus    = perf_cpu_map__nr(evlist->all_cpus);
	int nr_mmaps = 0;
	int cpu, thread;

	pr_debug("%s: nr cpu values %d nr threads %d\n", __func__, nr_cpus, nr_threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;
		int output_overwrite = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			if (mmap_per_evsel(evlist, ops, cpu, mp, cpu,
					   thread, &output, &output_overwrite, &nr_mmaps))
				goto out_unmap;
		}
	}

	if (nr_mmaps != evlist->nr_mmaps)
		pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}

static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
{
	int nr_mmaps;

	/* One for each CPU */
	nr_mmaps = perf_cpu_map__nr(evlist->all_cpus);
	if (perf_cpu_map__empty(evlist->all_cpus)) {
		/* Plus one for each thread */
		nr_mmaps += perf_thread_map__nr(evlist->threads);
		/* Minus the per-thread CPU (-1) */
		nr_mmaps -= 1;
	}

	return nr_mmaps;
}

int perf_evlist__mmap_ops(struct perf_evlist *evlist,
			  struct perf_evlist_mmap_ops *ops,
			  struct perf_mmap_param *mp)
{
	const struct perf_cpu_map *cpus = evlist->all_cpus;
	struct perf_evsel *evsel;

	if (!ops || !ops->get || !ops->mmap)
		return -EINVAL;

	mp->mask = evlist->mmap_len - page_size - 1;

	evlist->nr_mmaps = perf_evlist__nr_mmaps(evlist);

	perf_evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, evsel->fd->max_x, evsel->fd->max_y) < 0)
			return -ENOMEM;
	}

	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	if (perf_cpu_map__empty(cpus))
		return mmap_per_thread(evlist, ops, mp);

	return mmap_per_cpu(evlist, ops, mp);
}

int perf_evlist__mmap(struct perf_evlist *evlist, int pages)
{
	struct perf_mmap_param mp;
	struct perf_evlist_mmap_ops ops = {
		.get  = perf_evlist__mmap_cb_get,
		.mmap = perf_evlist__mmap_cb_mmap,
	};

	evlist->mmap_len = (pages + 1) * page_size;

	return perf_evlist__mmap_ops(evlist, &ops, &mp);
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap) {
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i]);
	}

	if (evlist->mmap_ovw) {
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap_ovw[i]);
	}

	zfree(&evlist->mmap);
	zfree(&evlist->mmap_ovw);
}

struct perf_mmap*
perf_evlist__next_mmap(struct perf_evlist *evlist, struct perf_mmap *map,
		       bool overwrite)
{
	if (map)
		return map->next;

	return overwrite ? evlist->mmap_ovw_first : evlist->mmap_first;
}

void __perf_evlist__set_leader(struct list_head *list, struct perf_evsel *leader)
{
	struct perf_evsel *first, *last, *evsel;

	first = list_first_entry(list, struct perf_evsel, node);
	last = list_last_entry(list, struct perf_evsel, node);

	leader->nr_members = last->idx - first->idx + 1;

	__perf_evlist__for_each_entry(list, evsel)
		evsel->leader = leader;
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		struct perf_evsel *first = list_entry(evlist->entries.next,
						struct perf_evsel, node);

		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries, first);
	}
}
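
Usage note: the functions above form the core of libperf's public evlist API. A minimal counting sketch, modeled on the examples shipped in tools/lib/perf/Documentation, is shown below; the software event choice, the pid-0 (self) thread map, and the bare-bones error handling are illustrative assumptions, not part of evlist.c itself. Build against libperf, e.g. cc count.c -lperf.

#include <linux/perf_event.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/threadmap.h>
#include <stdio.h>

int main(void)
{
	/* Measure the current process: one thread map entry, pid 0 = self. */
	struct perf_thread_map *threads = perf_thread_map__new_dummy();
	struct perf_event_attr attr = {
		.type   = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_CPU_CLOCK,
	};
	struct perf_counts_values counts;
	struct perf_evlist *evlist;
	struct perf_evsel *evsel;

	perf_thread_map__set_pid(threads, 0, 0);

	evlist = perf_evlist__new();
	evsel = perf_evsel__new(&attr);
	perf_evlist__add(evlist, evsel);

	/* cpus == NULL: the map propagation above picks the evsel maps. */
	perf_evlist__set_maps(evlist, NULL, threads);

	if (perf_evlist__open(evlist) < 0)
		return 1;

	perf_evlist__enable(evlist);
	/* ... run the workload to be measured ... */
	perf_evlist__disable(evlist);

	if (!perf_evsel__read(evsel, 0, 0, &counts))
		printf("cpu-clock: %llu\n", (unsigned long long)counts.val);

	perf_evlist__close(evlist);
	perf_evlist__delete(evlist);	/* purges and deletes the evsels too */
	perf_thread_map__put(threads);
	return 0;
}
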
v6.8
// SPDX-License-Identifier: GPL-2.0
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <sys/ioctl.h>
#include <internal/evlist.h>
#include <internal/evsel.h>
#include <internal/xyarray.h>
#include <internal/mmap.h>
#include <internal/cpumap.h>
#include <internal/threadmap.h>
#include <internal/lib.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <signal.h>
#include <poll.h>
#include <sys/mman.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <api/fd/array.h>
#include "internal.h"

void perf_evlist__init(struct perf_evlist *evlist)
{
	INIT_LIST_HEAD(&evlist->entries);
	evlist->nr_entries = 0;
	fdarray__init(&evlist->pollfd, 64);
	perf_evlist__reset_id_hash(evlist);
}

static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
					  struct perf_evsel *evsel)
{
	if (evsel->system_wide) {
		/* System wide: set the cpu map of the evsel to all online CPUs. */
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__new_online_cpus();
	} else if (evlist->has_user_cpus && evsel->is_pmu_core) {
		/*
		 * User requested CPUs on a core PMU, ensure the requested CPUs
		 * are valid by intersecting with those of the PMU.
		 */
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__intersect(evlist->user_requested_cpus, evsel->own_cpus);
	} else if (!evsel->own_cpus || evlist->has_user_cpus ||
		(!evsel->requires_cpu && perf_cpu_map__has_any_cpu(evlist->user_requested_cpus))) {
		/*
		 * The PMU didn't specify a default cpu map, this isn't a core
		 * event and the user requested CPUs or the evlist user
		 * requested CPUs have the "any CPU" (aka dummy) CPU value. In
		 * which case use the user requested CPUs rather than the PMU
		 * ones.
		 */
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
	} else if (evsel->cpus != evsel->own_cpus) {
		/*
		 * No user requested cpu map but the PMU cpu map doesn't match
		 * the evsel's. Reset it back to the PMU cpu map.
		 */
		perf_cpu_map__put(evsel->cpus);
		evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
	}

	if (evsel->system_wide) {
		perf_thread_map__put(evsel->threads);
		evsel->threads = perf_thread_map__new_dummy();
	} else {
		perf_thread_map__put(evsel->threads);
		evsel->threads = perf_thread_map__get(evlist->threads);
	}

	evlist->all_cpus = perf_cpu_map__merge(evlist->all_cpus, evsel->cpus);
}

static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist->needs_map_propagation = true;

	perf_evlist__for_each_evsel(evlist, evsel)
		__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__add(struct perf_evlist *evlist,
		      struct perf_evsel *evsel)
{
	evsel->idx = evlist->nr_entries;
	list_add_tail(&evsel->node, &evlist->entries);
	evlist->nr_entries += 1;

	if (evlist->needs_map_propagation)
		__perf_evlist__propagate_maps(evlist, evsel);
}

void perf_evlist__remove(struct perf_evlist *evlist,
			 struct perf_evsel *evsel)
{
	list_del_init(&evsel->node);
	evlist->nr_entries -= 1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist);

	return evlist;
}

struct perf_evsel *
perf_evlist__next(struct perf_evlist *evlist, struct perf_evsel *prev)
{
	struct perf_evsel *next;

	if (!prev) {
		next = list_first_entry(&evlist->entries,
					struct perf_evsel,
					node);
	} else {
		next = list_next_entry(prev, node);
	}

	/* Empty list is noticed here so don't need checking on entry. */
	if (&next->node == &evlist->entries)
		return NULL;

	return next;
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	perf_evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	perf_cpu_map__put(evlist->user_requested_cpus);
	perf_cpu_map__put(evlist->all_cpus);
	perf_thread_map__put(evlist->threads);
	evlist->user_requested_cpus = NULL;
	evlist->all_cpus = NULL;
	evlist->threads = NULL;
	fdarray__exit(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	if (evlist == NULL)
		return;

	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__set_maps(struct perf_evlist *evlist,
			   struct perf_cpu_map *cpus,
			   struct perf_thread_map *threads)
{
	/*
	 * Allow for the possibility that one or another of the maps isn't being
	 * changed i.e. don't put it.  Note we are assuming the maps that are
	 * being applied are brand new and evlist is taking ownership of the
	 * original reference count of 1.  If that is not the case it is up to
	 * the caller to increase the reference count.
	 */
	if (cpus != evlist->user_requested_cpus) {
		perf_cpu_map__put(evlist->user_requested_cpus);
		evlist->user_requested_cpus = perf_cpu_map__get(cpus);
	}

	if (threads != evlist->threads) {
		perf_thread_map__put(evlist->threads);
		evlist->threads = perf_thread_map__get(threads);
	}

	perf_evlist__propagate_maps(evlist);
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__for_each_entry(evlist, evsel) {
		err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;

out_err:
	perf_evlist__close(evlist);
	return err;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry_reverse(evlist, evsel)
		perf_evsel__close(evsel);
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__enable(evsel);
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel)
		perf_evsel__disable(evsel);
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	return first->attr.read_format;
}

#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__reset_id_hash(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
}

void perf_evlist__id_add(struct perf_evlist *evlist,
			 struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

int perf_evlist__id_add_fd(struct perf_evlist *evlist,
			   struct perf_evsel *evsel,
			   int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get the event id. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	perf_evlist__for_each_entry(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
			    void *ptr, short revent, enum fdarray_flags flags)
{
	int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP, flags);

	if (pos >= 0) {
		evlist->pollfd.priv[pos].ptr = ptr;
		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
					 void *arg __maybe_unused)
{
	struct perf_mmap *map = fda->priv[fd].ptr;

	if (map)
		perf_mmap__put(map);
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered, NULL);
}

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}

static struct perf_mmap* perf_evlist__alloc_mmap(struct perf_evlist *evlist, bool overwrite)
{
	int i;
	struct perf_mmap *map;

	map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		struct perf_mmap *prev = i ? &map[i - 1] : NULL;

		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		perf_mmap__init(&map[i], prev, overwrite, NULL);
	}

	return map;
}

static void perf_evsel__set_sid_idx(struct perf_evsel *evsel, int idx, int cpu, int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->idx = idx;
	sid->cpu = perf_cpu_map__cpu(evsel->cpus, cpu);
	sid->tid = perf_thread_map__pid(evsel->threads, thread);
}

static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx)
{
	struct perf_mmap *maps;

	maps = overwrite ? evlist->mmap_ovw : evlist->mmap;

	if (!maps) {
		maps = perf_evlist__alloc_mmap(evlist, overwrite);
		if (!maps)
			return NULL;

		if (overwrite)
			evlist->mmap_ovw = maps;
		else
			evlist->mmap = maps;
	}

	return &maps[idx];
}

#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))

static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
			  int output, struct perf_cpu cpu)
{
	return perf_mmap__mmap(map, mp, output, cpu);
}

static void perf_evlist__set_mmap_first(struct perf_evlist *evlist, struct perf_mmap *map,
					bool overwrite)
{
	if (overwrite)
		evlist->mmap_ovw_first = map;
	else
		evlist->mmap_first = map;
}

static int
mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	       int idx, struct perf_mmap_param *mp, int cpu_idx,
	       int thread, int *_output, int *_output_overwrite, int *nr_mmaps)
{
	struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->all_cpus, cpu_idx);
	struct perf_evsel *evsel;
	int revent;

	perf_evlist__for_each_entry(evlist, evsel) {
		bool overwrite = evsel->attr.write_backward;
		enum fdarray_flags flgs;
		struct perf_mmap *map;
		int *output, fd, cpu;

		if (evsel->system_wide && thread)
			continue;

		cpu = perf_cpu_map__idx(evsel->cpus, evlist_cpu);
		if (cpu == -1)
			continue;

		map = ops->get(evlist, overwrite, idx);
		if (map == NULL)
			return -ENOMEM;

		if (overwrite) {
			mp->prot = PROT_READ;
			output   = _output_overwrite;
		} else {
			mp->prot = PROT_READ | PROT_WRITE;
			output   = _output;
		}

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;

			/*
			 * The last one will be done at perf_mmap__consume(), so that we
			 * make sure we don't prevent tools from consuming every last event in
			 * the ring buffer.
			 *
			 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
			 * anymore, but the last events for it are still in the ring buffer,
			 * waiting to be consumed.
			 *
			 * Tools can choose to ignore this at their own discretion, but the
			 * evlist layer can't just drop it when filtering events in
			 * perf_evlist__filter_pollfd().
			 */
			refcount_set(&map->refcnt, 2);

			if (ops->idx)
				ops->idx(evlist, evsel, mp, idx);

			/* Debug message used by test scripts */
			pr_debug("idx %d: mmapping fd %d\n", idx, *output);
			if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
				return -1;

			*nr_mmaps += 1;

			if (!idx)
				perf_evlist__set_mmap_first(evlist, map, overwrite);
		} else {
			/* Debug message used by test scripts */
			pr_debug("idx %d: set output fd %d -> %d\n", idx, fd, *output);
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_mmap__get(map);
		}

		revent = !overwrite ? POLLIN : 0;

		flgs = evsel->system_wide ? fdarray_flag__nonfilterable : fdarray_flag__default;
		if (perf_evlist__add_pollfd(evlist, fd, map, revent, flgs) < 0) {
			perf_mmap__put(map);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evsel__set_sid_idx(evsel, idx, cpu, thread);
		}
	}

	return 0;
}

static int
mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
		struct perf_mmap_param *mp)
{
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nr_cpus    = perf_cpu_map__nr(evlist->all_cpus);
	int cpu, thread, idx = 0;
	int nr_mmaps = 0;

	pr_debug("%s: nr cpu values (may include -1) %d nr threads %d\n",
		 __func__, nr_cpus, nr_threads);

	/* per-thread mmaps */
	for (thread = 0; thread < nr_threads; thread++, idx++) {
		int output = -1;
		int output_overwrite = -1;

		if (mmap_per_evsel(evlist, ops, idx, mp, 0, thread, &output,
				   &output_overwrite, &nr_mmaps))
			goto out_unmap;
	}

	/* system-wide mmaps i.e. per-cpu */
	for (cpu = 1; cpu < nr_cpus; cpu++, idx++) {
		int output = -1;
		int output_overwrite = -1;

		if (mmap_per_evsel(evlist, ops, idx, mp, cpu, 0, &output,
				   &output_overwrite, &nr_mmaps))
			goto out_unmap;
	}

	if (nr_mmaps != evlist->nr_mmaps)
		pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}

static int
mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
	     struct perf_mmap_param *mp)
{
	int nr_threads = perf_thread_map__nr(evlist->threads);
	int nr_cpus    = perf_cpu_map__nr(evlist->all_cpus);
	int nr_mmaps = 0;
	int cpu, thread;

	pr_debug("%s: nr cpu values %d nr threads %d\n", __func__, nr_cpus, nr_threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;
		int output_overwrite = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			if (mmap_per_evsel(evlist, ops, cpu, mp, cpu,
					   thread, &output, &output_overwrite, &nr_mmaps))
				goto out_unmap;
		}
	}

	if (nr_mmaps != evlist->nr_mmaps)
		pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);

	return 0;

out_unmap:
	perf_evlist__munmap(evlist);
	return -1;
}

static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
{
	int nr_mmaps;

	/* One for each CPU */
	nr_mmaps = perf_cpu_map__nr(evlist->all_cpus);
	if (perf_cpu_map__has_any_cpu_or_is_empty(evlist->all_cpus)) {
		/* Plus one for each thread */
		nr_mmaps += perf_thread_map__nr(evlist->threads);
		/* Minus the per-thread CPU (-1) */
		nr_mmaps -= 1;
	}

	return nr_mmaps;
}

int perf_evlist__mmap_ops(struct perf_evlist *evlist,
			  struct perf_evlist_mmap_ops *ops,
			  struct perf_mmap_param *mp)
{
	const struct perf_cpu_map *cpus = evlist->all_cpus;
	struct perf_evsel *evsel;

	if (!ops || !ops->get || !ops->mmap)
		return -EINVAL;

	mp->mask = evlist->mmap_len - page_size - 1;

	evlist->nr_mmaps = perf_evlist__nr_mmaps(evlist);

	perf_evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, evsel->fd->max_x, evsel->fd->max_y) < 0)
			return -ENOMEM;
	}

	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	if (perf_cpu_map__has_any_cpu_or_is_empty(cpus))
		return mmap_per_thread(evlist, ops, mp);

	return mmap_per_cpu(evlist, ops, mp);
}

int perf_evlist__mmap(struct perf_evlist *evlist, int pages)
{
	struct perf_mmap_param mp;
	struct perf_evlist_mmap_ops ops = {
		.get  = perf_evlist__mmap_cb_get,
		.mmap = perf_evlist__mmap_cb_mmap,
	};

	evlist->mmap_len = (pages + 1) * page_size;

	return perf_evlist__mmap_ops(evlist, &ops, &mp);
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap) {
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i]);
	}

	if (evlist->mmap_ovw) {
		for (i = 0; i < evlist->nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap_ovw[i]);
	}

	zfree(&evlist->mmap);
	zfree(&evlist->mmap_ovw);
}

struct perf_mmap*
perf_evlist__next_mmap(struct perf_evlist *evlist, struct perf_mmap *map,
		       bool overwrite)
{
	if (map)
		return map->next;

	return overwrite ? evlist->mmap_ovw_first : evlist->mmap_first;
}

void __perf_evlist__set_leader(struct list_head *list, struct perf_evsel *leader)
{
	struct perf_evsel *evsel;
	int n = 0;

	__perf_evlist__for_each_entry(list, evsel) {
		evsel->leader = leader;
		n++;
	}
	leader->nr_members = n;
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		struct perf_evsel *first = list_entry(evlist->entries.next,
						struct perf_evsel, node);

		__perf_evlist__set_leader(&evlist->entries, first);
	}
}

int perf_evlist__nr_groups(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int nr_groups = 0;

	perf_evlist__for_each_evsel(evlist, evsel) {
		/*
		 * evsels by default have a nr_members of 1, and they are their
		 * own leader. If the nr_members is >1 then this is an
		 * indication of a group.
		 */
		if (evsel->leader == evsel && evsel->nr_members > 1)
			nr_groups++;
	}
	return nr_groups;
}

void perf_evlist__go_system_wide(struct perf_evlist *evlist, struct perf_evsel *evsel)
{
	if (!evsel->system_wide) {
		evsel->system_wide = true;
		if (evlist->needs_map_propagation)
			__perf_evlist__propagate_maps(evlist, evsel);
	}
}
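
A companion sketch of the mmap consumption path: perf_evlist__mmap() sizes and creates the ring buffers through the ops callbacks above, and perf_evlist__for_each_mmap() (a header macro backed by perf_evlist__next_mmap()) walks them. This assumes an evlist that was already opened with a sampling evsel (attr.sample_period set) and elides error handling; it is modeled on the libperf documentation examples, not taken from evlist.c.

#include <perf/evlist.h>
#include <perf/mmap.h>
#include <perf/event.h>
#include <stdio.h>

/* Map 4 pages per ring buffer, then drain each buffer once. */
static void drain_once(struct perf_evlist *evlist)
{
	struct perf_mmap *map;
	union perf_event *event;

	if (perf_evlist__mmap(evlist, 4) < 0)
		return;

	perf_evlist__for_each_mmap(evlist, map, false) {
		if (perf_mmap__read_init(map) < 0)
			continue;	/* ring buffer is empty */

		while ((event = perf_mmap__read_event(map)) != NULL) {
			printf("event type %u\n", event->header.type);
			perf_mmap__consume(map);
		}
		perf_mmap__read_done(map);
	}

	perf_evlist__munmap(evlist);
}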