1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _PERF_PERF_H
3#define _PERF_PERF_H
4
5#include <stdbool.h>
6
7#ifndef MAX_NR_CPUS
8#define MAX_NR_CPUS 2048
9#endif
10
/* Shared globals, defined in a .c file elsewhere in the tool. */
extern const char *input_name;			/* input data file name, if any */
extern bool perf_host, perf_guest;		/* NOTE(review): presumably host/guest event selection -- confirm at use sites */
extern const char perf_version_string[];	/* version string (definition not visible here) */

/* Unblocks SIGWINCH for the calling thread; implemented elsewhere. */
void pthread__unblock_sigwinch(void);
16
/*
 * Affinity scopes: whole system, NUMA node, or single CPU.
 * NOTE(review): the consuming code is not visible in this header.
 */
enum perf_affinity {
	PERF_AFFINITY_SYS = 0,
	PERF_AFFINITY_NODE,
	PERF_AFFINITY_CPU,
	PERF_AFFINITY_MAX	/* count of modes, for iteration/bounds checks */
};
23
24extern int version_verbose;
25#endif
/*
 * NOTE(review): this repeats the _PERF_PERF_H include guard that already
 * appears near the top of this file.  If both copies are compiled as one
 * translation unit, everything from here down is skipped by the
 * preprocessor.  The two sections look like two versions of the same
 * header pasted together -- confirm and keep only one.
 */
#ifndef _PERF_PERF_H
#define _PERF_PERF_H
3
4#include <asm/unistd.h>
5
#if defined(__i386__)
/* i386: a locked add to the stack top acts as a full fence for all barriers. */
#define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
/*
 * Fix: dropped the stray trailing semicolon from the macro body.  With it,
 * "cpu_relax();" expanded to a double semicolon, breaking constructs such
 * as "if (x) cpu_relax(); else ...".
 */
#define cpu_relax() asm volatile("rep; nop" ::: "memory")
#define CPUINFO_PROC "model name"
/* Fallback syscall numbers for toolchains whose asm/unistd.h predates them. */
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 336
#endif
#ifndef __NR_futex
# define __NR_futex 240
#endif
#endif
19
#if defined(__x86_64__)
/* x86-64: dedicated fence instructions for full/store/load barriers. */
#define mb() asm volatile("mfence" ::: "memory")
#define wmb() asm volatile("sfence" ::: "memory")
#define rmb() asm volatile("lfence" ::: "memory")
/*
 * Fix: dropped the stray trailing semicolon from the macro body.  With it,
 * "cpu_relax();" expanded to a double semicolon, breaking constructs such
 * as "if (x) cpu_relax(); else ...".
 */
#define cpu_relax() asm volatile("rep; nop" ::: "memory")
#define CPUINFO_PROC "model name"
/* Fallback syscall numbers for toolchains whose asm/unistd.h predates them. */
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 298
#endif
#ifndef __NR_futex
# define __NR_futex 202
#endif
#endif
33
/* powerpc: "sync" serves as mb/wmb/rmb alike. */
#ifdef __powerpc__
#include "../../arch/powerpc/include/uapi/asm/unistd.h"
#define mb() asm volatile ("sync" ::: "memory")
#define wmb() asm volatile ("sync" ::: "memory")
#define rmb() asm volatile ("sync" ::: "memory")
#define CPUINFO_PROC "cpu"
#endif

/* s390: "bcr 15,0" for all three barriers. */
#ifdef __s390__
#define mb() asm volatile("bcr 15,0" ::: "memory")
#define wmb() asm volatile("bcr 15,0" ::: "memory")
#define rmb() asm volatile("bcr 15,0" ::: "memory")
#endif

/* sh: "synco" where the CPU has it (SH4A/SH5), else compiler-only barriers. */
#ifdef __sh__
#if defined(__SH4A__) || defined(__SH5__)
# define mb() asm volatile("synco" ::: "memory")
# define wmb() asm volatile("synco" ::: "memory")
# define rmb() asm volatile("synco" ::: "memory")
#else
# define mb() asm volatile("" ::: "memory")
# define wmb() asm volatile("" ::: "memory")
# define rmb() asm volatile("" ::: "memory")
#endif
#define CPUINFO_PROC "cpu type"
#endif

/* hppa: compiler-only barriers (empty asm with a "memory" clobber). */
#ifdef __hppa__
#define mb() asm volatile("" ::: "memory")
#define wmb() asm volatile("" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC "cpu"
#endif

/*
 * sparc: 64-bit mb() issues membar #StoreLoad (executed via the
 * annulling-branch idiom); 32-bit mb() and wmb/rmb on both are
 * compiler-only barriers.
 */
#ifdef __sparc__
#ifdef __LP64__
#define mb() asm volatile("ba,pt %%xcc, 1f\n" \
 "membar #StoreLoad\n" \
 "1:\n":::"memory")
#else
#define mb() asm volatile("":::"memory")
#endif
#define wmb() asm volatile("":::"memory")
#define rmb() asm volatile("":::"memory")
#define CPUINFO_PROC "cpu"
#endif

/* alpha: dedicated mb/wmb instructions; rmb() reuses "mb". */
#ifdef __alpha__
#define mb() asm volatile("mb" ::: "memory")
#define wmb() asm volatile("wmb" ::: "memory")
#define rmb() asm volatile("mb" ::: "memory")
#define CPUINFO_PROC "cpu model"
#endif

/* ia64: "mf" memory fence for all barriers; "hint @pause" for cpu_relax. */
#ifdef __ia64__
#define mb() asm volatile ("mf" ::: "memory")
#define wmb() asm volatile ("mf" ::: "memory")
#define rmb() asm volatile ("mf" ::: "memory")
#define cpu_relax() asm volatile ("hint @pause" ::: "memory")
#define CPUINFO_PROC "model name"
#endif

#ifdef __arm__
/*
 * Use the __kuser_memory_barrier helper in the CPU helper page. See
 * arch/arm/kernel/entry-armv.S in the kernel source for details.
 * (0xffff0fa0 is the fixed address of that kernel-provided helper.)
 */
#define mb() ((void(*)(void))0xffff0fa0)()
#define wmb() ((void(*)(void))0xffff0fa0)()
#define rmb() ((void(*)(void))0xffff0fa0)()
#define CPUINFO_PROC "Processor"
#endif

/* aarch64: inner-shareable dmb variants; "yield" as the relax hint. */
#ifdef __aarch64__
#define mb() asm volatile("dmb ish" ::: "memory")
#define wmb() asm volatile("dmb ishst" ::: "memory")
#define rmb() asm volatile("dmb ishld" ::: "memory")
#define cpu_relax() asm volatile("yield" ::: "memory")
#endif

/* mips: temporarily select the mips2 ISA so "sync" assembles everywhere. */
#ifdef __mips__
#define mb() asm volatile( \
 ".set mips2\n\t" \
 "sync\n\t" \
 ".set mips0" \
 : /* no output */ \
 : /* no input */ \
 : "memory")
#define wmb() mb()
#define rmb() mb()
#define CPUINFO_PROC "cpu model"
#endif

/* arc: compiler-only barriers. */
#ifdef __arc__
#define mb() asm volatile("" ::: "memory")
#define wmb() asm volatile("" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC "Processor"
#endif

/* metag: compiler-only barriers. */
#ifdef __metag__
#define mb() asm volatile("" ::: "memory")
#define wmb() asm volatile("" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC "CPU"
#endif

/*
 * xtensa: "memw" for mb/wmb.  NOTE(review): rmb() is compiler-only here
 * while mb/wmb use memw -- looks deliberate but worth confirming.
 */
#ifdef __xtensa__
#define mb() asm volatile("memw" ::: "memory")
#define wmb() asm volatile("memw" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC "core ID"
#endif

/* tile: "mf" fence for all barriers; SPR read of PASS as the relax hint. */
#ifdef __tile__
#define mb() asm volatile ("mf" ::: "memory")
#define wmb() asm volatile ("mf" ::: "memory")
#define rmb() asm volatile ("mf" ::: "memory")
#define cpu_relax() asm volatile ("mfspr zero, PASS" ::: "memory")
#define CPUINFO_PROC "model name"
#endif
155
/* Compiler-only barrier: blocks compiler reordering, not CPU reordering. */
#define barrier() asm volatile ("" ::: "memory")

/* Arches that defined no pause/relax hint above fall back to barrier(). */
#ifndef cpu_relax
#define cpu_relax() barrier()
#endif

/* Force each access through a volatile lvalue so reads/writes of x are not merged. */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
163
164
165#include <time.h>
166#include <unistd.h>
167#include <sys/types.h>
168#include <sys/syscall.h>
169
170#include <linux/perf_event.h>
171#include "util/types.h"
172#include <stdbool.h>
173
/*
 * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
 * counters in the current task.
 * NOTE(review): these literal values must stay in sync with the
 * kernel's <linux/prctl.h> -- confirm before changing.
 */
#define PR_TASK_PERF_EVENTS_DISABLE 31
#define PR_TASK_PERF_EVENTS_ENABLE 32
180
181#ifndef NSEC_PER_SEC
182# define NSEC_PER_SEC 1000000000ULL
183#endif
184#ifndef NSEC_PER_USEC
185# define NSEC_PER_USEC 1000ULL
186#endif
187
188static inline unsigned long long rdclock(void)
189{
190 struct timespec ts;
191
192 clock_gettime(CLOCK_MONOTONIC, &ts);
193 return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
194}
195
/*
 * Pick up some kernel type conventions:
 * __user and asmlinkage are annotations in kernel code; user space
 * defines them away so shared headers/snippets still compile.
 */
#define __user
#define asmlinkage

/* Branch-prediction hint: the condition is expected to be false. */
#define unlikely(x)	__builtin_expect(!!(x), 0)
/*
 * Type-checked min(): a GNU statement expression that evaluates each
 * argument exactly once; the (void)(&_min1 == &_min2) comparison makes
 * the compiler warn when x and y have incompatible pointer-to types.
 */
#define min(x, y) ({ \
 typeof(x) _min1 = (x); \
 typeof(y) _min2 = (y); \
 (void) (&_min1 == &_min2); \
 _min1 < _min2 ? _min1 : _min2; })
208
/*
 * Attribute-validation test hooks, defined elsewhere.  When
 * test_attr__enabled is set, sys_perf_event_open() below reports every
 * event open to test_attr__open().
 */
extern bool test_attr__enabled;
void test_attr__init(void);
void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
		     int fd, int group_fd, unsigned long flags);
213
214static inline int
215sys_perf_event_open(struct perf_event_attr *attr,
216 pid_t pid, int cpu, int group_fd,
217 unsigned long flags)
218{
219 int fd;
220
221 fd = syscall(__NR_perf_event_open, attr, pid, cpu,
222 group_fd, flags);
223
224 if (unlikely(test_attr__enabled))
225 test_attr__open(attr, pid, cpu, fd, group_fd, flags);
226
227 return fd;
228}
229
#define MAX_COUNTERS 256
/*
 * Fix: guard MAX_NR_CPUS so this does not conflict with an earlier or
 * build-system-provided definition (this file's other section defines
 * it as 2048 under the same guard).
 */
#ifndef MAX_NR_CPUS
#define MAX_NR_CPUS 256
#endif
232
233struct ip_callchain {
234 u64 nr;
235 u64 ips[0];
236};
237
/*
 * Per-branch flags; the five bit-fields pack into a single 64-bit word
 * (1+1+1+1+60 bits).
 */
struct branch_flags {
	u64 mispred:1;		/* branch was mispredicted */
	u64 predicted:1;	/* branch was predicted */
	u64 in_tx:1;		/* NOTE(review): presumably "inside transaction" -- confirm */
	u64 abort:1;		/* NOTE(review): presumably "transaction abort" -- confirm */
	u64 reserved:60;	/* pad to 64 bits */
};
245
/* One recorded branch: source address, target address, and its flags. */
struct branch_entry {
	u64 from;	/* branch source address */
	u64 to;		/* branch target address */
	struct branch_flags flags;
};
251
252struct branch_stack {
253 u64 nr;
254 struct branch_entry entries[0];
255};
256
/*
 * NOTE(review): these declarations duplicate the ones near the top of
 * this file -- further evidence that two versions of this header were
 * concatenated.  Harmless (redundant extern declarations are legal),
 * but one copy should be removed.
 */
extern const char *input_name;
extern bool perf_host, perf_guest;
extern const char perf_version_string[];

void pthread__unblock_sigwinch(void);

#include "util/target.h"
264
/* How call chains are collected for samples. */
enum perf_call_graph_mode {
	CALLCHAIN_NONE,		/* no call-graph recording */
	CALLCHAIN_FP,		/* NOTE(review): presumably frame-pointer unwinding -- confirm */
	CALLCHAIN_DWARF,	/* NOTE(review): presumably DWARF-based unwinding -- confirm */
	CALLCHAIN_MAX		/* count of modes, for bounds checks */
};
271
/*
 * Options for an event-recording session, filled in from the command
 * line.  NOTE(review): per-field semantics are not visible in this
 * header; the comments below are best-effort readings of the names --
 * verify against the record command implementation.
 */
struct record_opts {
	struct target target;		/* which processes/threads/CPUs to record */
	int call_graph;			/* presumably an enum perf_call_graph_mode value */
	bool call_graph_enabled;
	bool group;
	bool inherit_stat;
	bool no_buffering;
	bool no_inherit;
	bool no_inherit_set;		/* whether no_inherit was set explicitly? */
	bool no_samples;
	bool raw_samples;
	bool sample_address;
	bool sample_weight;
	bool sample_time;
	bool period;
	unsigned int freq;		/* sampling frequency? */
	unsigned int mmap_pages;	/* ring-buffer size in pages? */
	unsigned int user_freq;		/* user-requested frequency? */
	u64 branch_stack;		/* presumably a PERF_SAMPLE_BRANCH_* mask */
	u64 default_interval;
	u64 user_interval;		/* user-requested sample period? */
	u16 stack_dump_size;
	bool sample_transaction;
	unsigned initial_delay;
};
297
298#endif