#ifndef _PERF_PERF_H
#define _PERF_PERF_H

struct winsize;

void get_term_dimensions(struct winsize *ws);

#if defined(__i386__)
#include "../../arch/x86/include/asm/unistd.h"
#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define cpu_relax() asm volatile("rep; nop" ::: "memory")
#define CPUINFO_PROC "model name"
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 336
#endif
#endif

#if defined(__x86_64__)
#include "../../arch/x86/include/asm/unistd.h"
#define rmb() asm volatile("lfence" ::: "memory")
#define cpu_relax() asm volatile("rep; nop" ::: "memory")
#define CPUINFO_PROC "model name"
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 298
#endif
#endif

#ifdef __powerpc__
#include "../../arch/powerpc/include/asm/unistd.h"
#define rmb() asm volatile ("sync" ::: "memory")
#define cpu_relax() asm volatile ("" ::: "memory")
#define CPUINFO_PROC "cpu"
#endif

#ifdef __s390__
#include "../../arch/s390/include/asm/unistd.h"
#define rmb() asm volatile("bcr 15,0" ::: "memory")
#define cpu_relax() asm volatile("" ::: "memory")
#endif

#ifdef __sh__
#include "../../arch/sh/include/asm/unistd.h"
#if defined(__SH4A__) || defined(__SH5__)
# define rmb() asm volatile("synco" ::: "memory")
#else
# define rmb() asm volatile("" ::: "memory")
#endif
#define cpu_relax() asm volatile("" ::: "memory")
#define CPUINFO_PROC "cpu type"
#endif

#ifdef __hppa__
#include "../../arch/parisc/include/asm/unistd.h"
#define rmb() asm volatile("" ::: "memory")
#define cpu_relax() asm volatile("" ::: "memory")
#define CPUINFO_PROC "cpu"
#endif

#ifdef __sparc__
#include "../../arch/sparc/include/asm/unistd.h"
#define rmb() asm volatile("":::"memory")
#define cpu_relax() asm volatile("":::"memory")
#define CPUINFO_PROC "cpu"
#endif

#ifdef __alpha__
#include "../../arch/alpha/include/asm/unistd.h"
#define rmb() asm volatile("mb" ::: "memory")
#define cpu_relax() asm volatile("" ::: "memory")
#define CPUINFO_PROC "cpu model"
#endif

#ifdef __ia64__
#include "../../arch/ia64/include/asm/unistd.h"
#define rmb() asm volatile ("mf" ::: "memory")
#define cpu_relax() asm volatile ("hint @pause" ::: "memory")
#define CPUINFO_PROC "model name"
#endif

#ifdef __arm__
#include "../../arch/arm/include/asm/unistd.h"
/*
 * Use the __kuser_memory_barrier helper in the CPU helper page. See
 * arch/arm/kernel/entry-armv.S in the kernel source for details.
 */
#define rmb() ((void(*)(void))0xffff0fa0)()
#define cpu_relax() asm volatile("":::"memory")
#define CPUINFO_PROC "Processor"
#endif

#ifdef __mips__
#include "../../arch/mips/include/asm/unistd.h"
#define rmb() asm volatile( \
	".set mips2\n\t" \
	"sync\n\t" \
	".set mips0" \
	: /* no output */ \
	: /* no input */ \
	: "memory")
#define cpu_relax() asm volatile("" ::: "memory")
#define CPUINFO_PROC "cpu model"
#endif

#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include "../../include/linux/perf_event.h"
#include "util/types.h"
#include <stdbool.h>

struct perf_mmap {
	void *base;
	int mask;
	unsigned int prev;
};

static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->base;
	unsigned int head = pc->data_head;
	/* Don't read ring-buffer data until data_head has been read. */
	rmb();
	return head;
}

static inline void perf_mmap__write_tail(struct perf_mmap *md,
					 unsigned long tail)
{
	struct perf_event_mmap_page *pc = md->base;

	/*
	 * ensure all reads are done before we write the tail out.
	 */
	/* mb(); */
	pc->data_tail = tail;
}
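
/*
 * Usage sketch (not part of the original header): the canonical
 * consumer loop pairs perf_mmap__read_head() with
 * perf_mmap__write_tail(). "page_size" and "process_event" are
 * hypothetical; wrap-around of records at the ring edge is elided.
 *
 *	unsigned int head = perf_mmap__read_head(md);
 *	unsigned int old = md->prev;
 *
 *	while (old != head) {
 *		struct perf_event_header *hdr =
 *			md->base + page_size + (old & md->mask);
 *
 *		process_event(hdr);
 *		old += hdr->size;
 *	}
 *
 *	md->prev = old;
 *	perf_mmap__write_tail(md, old);
 */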

/*
 * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
 * counters in the current task.
 */
#define PR_TASK_PERF_EVENTS_DISABLE 31
#define PR_TASK_PERF_EVENTS_ENABLE 32
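
/*
 * Sketch (assumes <sys/prctl.h> for prctl()): bracket a region so its
 * cost is excluded from every counter attached to the task. The
 * setup_phase() call is hypothetical.
 *
 *	prctl(PR_TASK_PERF_EVENTS_DISABLE);
 *	setup_phase();
 *	prctl(PR_TASK_PERF_EVENTS_ENABLE);
 */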

#ifndef NSEC_PER_SEC
# define NSEC_PER_SEC 1000000000ULL
#endif

static inline unsigned long long rdclock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}
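
/*
 * Example (illustrative): rdclock() returns monotonic nanoseconds, so
 * elapsed time is a plain difference. workload() is hypothetical and
 * printf() assumes <stdio.h>.
 *
 *	unsigned long long t0 = rdclock();
 *
 *	workload();
 *	printf("%.3f ms\n", (rdclock() - t0) / 1e6);
 */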

/*
 * Pick up some kernel type conventions:
 */
#define __user
#define asmlinkage

#define unlikely(x) __builtin_expect(!!(x), 0)
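/*
 * min() with a cheap type check: comparing &_min1 and &_min2 is dead
 * code at runtime, but the compiler warns when x and y have
 * incompatible types, since the pointer comparison is then invalid.
 */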
#define min(x, y) ({ \
	typeof(x) _min1 = (x); \
	typeof(y) _min2 = (y); \
	(void) (&_min1 == &_min2); \
	_min1 < _min2 ? _min1 : _min2; })

static inline int
sys_perf_event_open(struct perf_event_attr *attr,
		    pid_t pid, int cpu, int group_fd,
		    unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu,
		       group_fd, flags);
}
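
/*
 * Usage sketch (error handling elided; the attr values are
 * illustrative, not mandated by this header): count CPU cycles in the
 * current task (pid == 0) on any CPU (cpu == -1), with no group
 * leader (group_fd == -1).
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.size	= sizeof(attr),
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	int fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
 *	u64 count;
 *
 *	read(fd, &count, sizeof(count));
 *	close(fd);
 */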

#define MAX_COUNTERS 256
#define MAX_NR_CPUS 256

struct ip_callchain {
	u64 nr;
	u64 ips[0];
};
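
/*
 * Illustrative walk of a recorded callchain (the pointer is assumed to
 * come from a PERF_SAMPLE_CALLCHAIN sample; printf() assumes <stdio.h>):
 *
 *	void print_chain(const struct ip_callchain *chain)
 *	{
 *		u64 i;
 *
 *		for (i = 0; i < chain->nr; i++)
 *			printf("  %#llx\n", (unsigned long long)chain->ips[i]);
 *	}
 */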

struct branch_flags {
	u64 mispred:1;
	u64 predicted:1;
	u64 reserved:62;
};

struct branch_entry {
	u64 from;
	u64 to;
	struct branch_flags flags;
};

struct branch_stack {
	u64 nr;
	struct branch_entry entries[0];
};

extern bool perf_host, perf_guest;
extern const char perf_version_string[];

void pthread__unblock_sigwinch(void);

#include "util/target.h"

struct perf_record_opts {
	struct perf_target target;
	bool call_graph;
	bool group;
	bool inherit_stat;
	bool no_delay;
	bool no_inherit;
	bool no_samples;
	bool pipe_output;
	bool raw_samples;
	bool sample_address;
	bool sample_time;
	bool sample_id_all_missing;
	bool exclude_guest_missing;
	bool period;
	unsigned int freq;
	unsigned int mmap_pages;
	unsigned int user_freq;
	u64 branch_stack;
	u64 default_interval;
	u64 user_interval;
};

#endif
#ifndef _PERF_PERF_H
#define _PERF_PERF_H

#include <asm/unistd.h>

#if defined(__i386__)
#define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define cpu_relax() asm volatile("rep; nop" ::: "memory")
#define CPUINFO_PROC "model name"
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 336
#endif
#ifndef __NR_futex
# define __NR_futex 240
#endif
#endif

#if defined(__x86_64__)
#define mb() asm volatile("mfence" ::: "memory")
#define wmb() asm volatile("sfence" ::: "memory")
#define rmb() asm volatile("lfence" ::: "memory")
#define cpu_relax() asm volatile("rep; nop" ::: "memory")
#define CPUINFO_PROC "model name"
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 298
#endif
#ifndef __NR_futex
# define __NR_futex 202
#endif
#endif

#ifdef __powerpc__
#include "../../arch/powerpc/include/uapi/asm/unistd.h"
#define mb() asm volatile ("sync" ::: "memory")
#define wmb() asm volatile ("sync" ::: "memory")
#define rmb() asm volatile ("sync" ::: "memory")
#define CPUINFO_PROC "cpu"
#endif

#ifdef __s390__
#define mb() asm volatile("bcr 15,0" ::: "memory")
#define wmb() asm volatile("bcr 15,0" ::: "memory")
#define rmb() asm volatile("bcr 15,0" ::: "memory")
#endif

#ifdef __sh__
#if defined(__SH4A__) || defined(__SH5__)
# define mb() asm volatile("synco" ::: "memory")
# define wmb() asm volatile("synco" ::: "memory")
# define rmb() asm volatile("synco" ::: "memory")
#else
# define mb() asm volatile("" ::: "memory")
# define wmb() asm volatile("" ::: "memory")
# define rmb() asm volatile("" ::: "memory")
#endif
#define CPUINFO_PROC "cpu type"
#endif

#ifdef __hppa__
#define mb() asm volatile("" ::: "memory")
#define wmb() asm volatile("" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC "cpu"
#endif

#ifdef __sparc__
#ifdef __LP64__
#define mb() asm volatile("ba,pt %%xcc, 1f\n" \
			  "membar #StoreLoad\n" \
			  "1:\n":::"memory")
#else
#define mb() asm volatile("":::"memory")
#endif
#define wmb() asm volatile("":::"memory")
#define rmb() asm volatile("":::"memory")
#define CPUINFO_PROC "cpu"
#endif

#ifdef __alpha__
#define mb() asm volatile("mb" ::: "memory")
#define wmb() asm volatile("wmb" ::: "memory")
#define rmb() asm volatile("mb" ::: "memory")
#define CPUINFO_PROC "cpu model"
#endif

#ifdef __ia64__
#define mb() asm volatile ("mf" ::: "memory")
#define wmb() asm volatile ("mf" ::: "memory")
#define rmb() asm volatile ("mf" ::: "memory")
#define cpu_relax() asm volatile ("hint @pause" ::: "memory")
#define CPUINFO_PROC "model name"
#endif

#ifdef __arm__
/*
 * Use the __kuser_memory_barrier helper in the CPU helper page. See
 * arch/arm/kernel/entry-armv.S in the kernel source for details.
 */
#define mb() ((void(*)(void))0xffff0fa0)()
#define wmb() ((void(*)(void))0xffff0fa0)()
#define rmb() ((void(*)(void))0xffff0fa0)()
#define CPUINFO_PROC "Processor"
#endif

#ifdef __aarch64__
#define mb() asm volatile("dmb ish" ::: "memory")
#define wmb() asm volatile("dmb ishst" ::: "memory")
#define rmb() asm volatile("dmb ishld" ::: "memory")
#define cpu_relax() asm volatile("yield" ::: "memory")
#endif

#ifdef __mips__
#define mb() asm volatile( \
	".set mips2\n\t" \
	"sync\n\t" \
	".set mips0" \
	: /* no output */ \
	: /* no input */ \
	: "memory")
#define wmb() mb()
#define rmb() mb()
#define CPUINFO_PROC "cpu model"
#endif

#ifdef __arc__
#define mb() asm volatile("" ::: "memory")
#define wmb() asm volatile("" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC "Processor"
#endif

#ifdef __metag__
#define mb() asm volatile("" ::: "memory")
#define wmb() asm volatile("" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC "CPU"
#endif

#ifdef __xtensa__
#define mb() asm volatile("memw" ::: "memory")
#define wmb() asm volatile("memw" ::: "memory")
#define rmb() asm volatile("" ::: "memory")
#define CPUINFO_PROC "core ID"
#endif

#ifdef __tile__
#define mb() asm volatile ("mf" ::: "memory")
#define wmb() asm volatile ("mf" ::: "memory")
#define rmb() asm volatile ("mf" ::: "memory")
#define cpu_relax() asm volatile ("mfspr zero, PASS" ::: "memory")
#define CPUINFO_PROC "model name"
#endif

#define barrier() asm volatile ("" ::: "memory")

#ifndef cpu_relax
#define cpu_relax() barrier()
#endif

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
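
/*
 * Sketch of how ACCESS_ONCE() and cpu_relax() combine in a polling
 * loop ("flag" is a hypothetical variable shared with another
 * thread): the volatile cast forces a fresh load on every iteration,
 * and cpu_relax() keeps the spin polite on SMT CPUs.
 *
 *	while (!ACCESS_ONCE(flag))
 *		cpu_relax();
 */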
#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include <linux/perf_event.h>
#include "util/types.h"
#include <stdbool.h>

/*
 * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
 * counters in the current task.
 */
#define PR_TASK_PERF_EVENTS_DISABLE 31
#define PR_TASK_PERF_EVENTS_ENABLE 32

#ifndef NSEC_PER_SEC
# define NSEC_PER_SEC 1000000000ULL
#endif
#ifndef NSEC_PER_USEC
# define NSEC_PER_USEC 1000ULL
#endif

static inline unsigned long long rdclock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}

/*
 * Pick up some kernel type conventions:
 */
#define __user
#define asmlinkage

#define unlikely(x) __builtin_expect(!!(x), 0)
#define min(x, y) ({ \
	typeof(x) _min1 = (x); \
	typeof(y) _min2 = (y); \
	(void) (&_min1 == &_min2); \
	_min1 < _min2 ? _min1 : _min2; })

extern bool test_attr__enabled;
void test_attr__init(void);
void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
		     int fd, int group_fd, unsigned long flags);

static inline int
sys_perf_event_open(struct perf_event_attr *attr,
		    pid_t pid, int cpu, int group_fd,
		    unsigned long flags)
{
	int fd;

	fd = syscall(__NR_perf_event_open, attr, pid, cpu,
		     group_fd, flags);

	if (unlikely(test_attr__enabled))
		test_attr__open(attr, pid, cpu, fd, group_fd, flags);

	return fd;
}
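
/*
 * Usage sketch for the group_fd parameter (attr setup elided;
 * cycles_attr and insns_attr are hypothetical): the first counter is
 * opened as group leader (group_fd == -1) and the second joins its
 * group, so both are scheduled onto the PMU together.
 *
 *	int leader = sys_perf_event_open(&cycles_attr, 0, -1, -1, 0);
 *	int member = sys_perf_event_open(&insns_attr, 0, -1, leader, 0);
 */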

#define MAX_COUNTERS 256
#define MAX_NR_CPUS 256

struct ip_callchain {
	u64 nr;
	u64 ips[0];
};

struct branch_flags {
	u64 mispred:1;
	u64 predicted:1;
	u64 in_tx:1;
	u64 abort:1;
	u64 reserved:60;
};

struct branch_entry {
	u64 from;
	u64 to;
	struct branch_flags flags;
};

struct branch_stack {
	u64 nr;
	struct branch_entry entries[0];
};
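
/*
 * Illustrative walk of a sampled branch stack (the pointer is assumed
 * to come from a PERF_SAMPLE_BRANCH_STACK sample; printf() assumes
 * <stdio.h>):
 *
 *	void print_branches(const struct branch_stack *bs)
 *	{
 *		u64 i;
 *
 *		for (i = 0; i < bs->nr; i++)
 *			printf("%#llx -> %#llx%s\n",
 *			       (unsigned long long)bs->entries[i].from,
 *			       (unsigned long long)bs->entries[i].to,
 *			       bs->entries[i].flags.mispred ? " (mispredicted)" : "");
 *	}
 */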

extern const char *input_name;
extern bool perf_host, perf_guest;
extern const char perf_version_string[];

void pthread__unblock_sigwinch(void);

#include "util/target.h"

enum perf_call_graph_mode {
	CALLCHAIN_NONE,
	CALLCHAIN_FP,
	CALLCHAIN_DWARF,
	CALLCHAIN_MAX
};

struct record_opts {
	struct target target;
	int call_graph;
	bool call_graph_enabled;
	bool group;
	bool inherit_stat;
	bool no_buffering;
	bool no_inherit;
	bool no_inherit_set;
	bool no_samples;
	bool raw_samples;
	bool sample_address;
	bool sample_weight;
	bool sample_time;
	bool period;
	unsigned int freq;
	unsigned int mmap_pages;
	unsigned int user_freq;
	u64 branch_stack;
	u64 default_interval;
	u64 user_interval;
	u16 stack_dump_size;
	bool sample_transaction;
	unsigned initial_delay;
};
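
/*
 * Hypothetical initialization sketch (the values are illustrative
 * assumptions, not defaults mandated by this header):
 *
 *	struct record_opts opts = {
 *		.freq		     = 4000,	// samples per second
 *		.mmap_pages	     = 128,	// ring size in pages
 *		.call_graph	     = CALLCHAIN_FP,
 *		.call_graph_enabled  = true,
 *	};
 */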

#endif