Linux Audio

Check our new training course

Loading...
Linux v5.4 — tools/perf/perf.h (excerpt)
 1/* SPDX-License-Identifier: GPL-2.0 */
 2#ifndef _PERF_PERF_H
 3#define _PERF_PERF_H
 4
 5#include <stdbool.h>
 
 
 6
/*
 * Upper bound on the number of CPUs perf will track; may be
 * overridden at build time by defining MAX_NR_CPUS earlier.
 */
#ifndef MAX_NR_CPUS
#define MAX_NR_CPUS			2048
#endif

/* Defined elsewhere in tools/perf; presumably the input file name for
 * report-style commands — TODO confirm at the definition site. */
extern const char *input_name;
extern bool perf_host, perf_guest;
extern const char perf_version_string[];
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
15void pthread__unblock_sigwinch(void);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
/*
 * Affinity modes for perf's ring-buffer processing threads.
 * NOTE(review): per-value semantics inferred from the names only —
 * confirm against the users of this enum elsewhere in tools/perf.
 */
enum perf_affinity {
	PERF_AFFINITY_SYS = 0,	/* leave the system default affinity */
	PERF_AFFINITY_NODE,
	PERF_AFFINITY_CPU,
	PERF_AFFINITY_MAX	/* sentinel: count of modes, not a mode */
};
23
24extern int version_verbose;
 
25#endif
Linux v3.1 — tools/perf/perf.h
 
  1#ifndef _PERF_PERF_H
  2#define _PERF_PERF_H
  3
  4struct winsize;
  5
  6void get_term_dimensions(struct winsize *ws);
  7
/*
 * Per-architecture primitives used when reading the perf mmap ring
 * buffer:
 *
 *   rmb()       - read memory barrier, orders the data_head load
 *                 against subsequent reads of ring-buffer data
 *   cpu_relax() - busy-wait hint to the CPU
 *
 * Each arch block also pulls in the kernel's unistd.h for the
 * __NR_perf_event_open syscall number.
 *
 * NOTE(review): the i386/x86_64/powerpc/s390/hppa cpu_relax()
 * definitions carry a stray trailing ';' inside the macro.  Harmless
 * in a plain `cpu_relax();` statement, but it would break an unbraced
 * if/else user — worth cleaning up to match the other arches.
 */
#if defined(__i386__)
#include "../../arch/x86/include/asm/unistd.h"
#define rmb()		asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define cpu_relax()	asm volatile("rep; nop" ::: "memory");
#endif

#if defined(__x86_64__)
#include "../../arch/x86/include/asm/unistd.h"
#define rmb()		asm volatile("lfence" ::: "memory")
#define cpu_relax()	asm volatile("rep; nop" ::: "memory");
#endif

#ifdef __powerpc__
#include "../../arch/powerpc/include/asm/unistd.h"
#define rmb()		asm volatile ("sync" ::: "memory")
#define cpu_relax()	asm volatile ("" ::: "memory");
#endif

#ifdef __s390__
#include "../../arch/s390/include/asm/unistd.h"
#define rmb()		asm volatile("bcr 15,0" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory");
#endif

#ifdef __sh__
#include "../../arch/sh/include/asm/unistd.h"
#if defined(__SH4A__) || defined(__SH5__)
# define rmb()		asm volatile("synco" ::: "memory")
#else
# define rmb()		asm volatile("" ::: "memory")
#endif
#define cpu_relax()	asm volatile("" ::: "memory")
#endif

#ifdef __hppa__
#include "../../arch/parisc/include/asm/unistd.h"
#define rmb()		asm volatile("" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory");
#endif

#ifdef __sparc__
#include "../../arch/sparc/include/asm/unistd.h"
#define rmb()		asm volatile("":::"memory")
#define cpu_relax()	asm volatile("":::"memory")
#endif

#ifdef __alpha__
#include "../../arch/alpha/include/asm/unistd.h"
#define rmb()		asm volatile("mb" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#endif

#ifdef __ia64__
#include "../../arch/ia64/include/asm/unistd.h"
#define rmb()		asm volatile ("mf" ::: "memory")
#define cpu_relax()	asm volatile ("hint @pause" ::: "memory")
#endif

#ifdef __arm__
#include "../../arch/arm/include/asm/unistd.h"
/*
 * Use the __kuser_memory_barrier helper in the CPU helper page. See
 * arch/arm/kernel/entry-armv.S in the kernel source for details.
 */
#define rmb()		((void(*)(void))0xffff0fa0)()
#define cpu_relax()	asm volatile("":::"memory")
#endif

#ifdef __mips__
#include "../../arch/mips/include/asm/unistd.h"
#define rmb()		asm volatile(					\
				".set	mips2\n\t"			\
				"sync\n\t"				\
				".set	mips0"				\
				: /* no output */			\
				: /* no input */			\
				: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#endif
 87
 88#include <time.h>
 89#include <unistd.h>
 90#include <sys/types.h>
 91#include <sys/syscall.h>
 92
 93#include "../../include/linux/perf_event.h"
 94#include "util/types.h"
 95#include <stdbool.h>
 96
/* State for one mmap'd perf event ring buffer. */
struct perf_mmap {
	void			*base;	/* start of the mapping; a struct perf_event_mmap_page (see perf_mmap__read_head) */
	int			mask;	/* presumably data-area size minus one, used to wrap offsets — TODO confirm at users */
	unsigned int		prev;	/* presumably the last head seen by the reader — verify against callers */
};
102
103static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm)
104{
105	struct perf_event_mmap_page *pc = mm->base;
106	int head = pc->data_head;
107	rmb();
108	return head;
109}
110
/*
 * Publish the consumer position: tell the kernel we have finished
 * reading the ring buffer up to @tail, so it may overwrite that space.
 *
 * NOTE(review): the store below must be ordered after all reads of the
 * consumed data, but the barrier is commented out — so no ordering is
 * actually enforced here.  Confirm whether this is safe on the targeted
 * architectures or whether the mb() needs to be reinstated.
 */
static inline void perf_mmap__write_tail(struct perf_mmap *md,
					 unsigned long tail)
{
	struct perf_event_mmap_page *pc = md->base;

	/*
	 * ensure all reads are done before we write the tail out.
	 */
	/* mb(); */
	pc->data_tail = tail;
}
122
123/*
124 * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
125 * counters in the current task.
126 */
127#define PR_TASK_PERF_EVENTS_DISABLE   31
128#define PR_TASK_PERF_EVENTS_ENABLE    32
129
/* Nanoseconds per second; guarded in case a kernel header defined it. */
#ifndef NSEC_PER_SEC
# define NSEC_PER_SEC			1000000000ULL
#endif

/*
 * Return the current CLOCK_MONOTONIC time in nanoseconds.
 * Use NSEC_PER_SEC (defined just above) instead of repeating the
 * magic constant 1000000000ULL inline.
 */
static inline unsigned long long rdclock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}
141
/*
 * Pick up some kernel type conventions:
 */
#define __user
#define asmlinkage

/* Branch-prediction hint, as in the kernel proper. */
#define unlikely(x)	__builtin_expect(!!(x), 0)
/*
 * Type-safe min(): the (void) pointer comparison makes the compiler
 * warn when x and y have different types, and the statement expression
 * evaluates each argument exactly once.
 */
#define min(x, y) ({				\
	typeof(x) _min1 = (x);			\
	typeof(y) _min2 = (y);			\
	(void) (&_min1 == &_min2);		\
	_min1 < _min2 ? _min1 : _min2; })
154
155static inline int
156sys_perf_event_open(struct perf_event_attr *attr,
157		      pid_t pid, int cpu, int group_fd,
158		      unsigned long flags)
159{
160	attr->size = sizeof(*attr);
161	return syscall(__NR_perf_event_open, attr, pid, cpu,
162		       group_fd, flags);
163}
164
165#define MAX_COUNTERS			256
166#define MAX_NR_CPUS			256
167
168struct ip_callchain {
169	u64 nr;
170	u64 ips[0];
171};
172
173extern bool perf_host, perf_guest;
174
175#endif