#ifndef _PERF_PERF_H
#define _PERF_PERF_H

struct winsize;

void get_term_dimensions(struct winsize *ws);

#if defined(__i386__)
#include "../../arch/x86/include/asm/unistd.h"
#define rmb()		asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define cpu_relax()	asm volatile("rep; nop" ::: "memory")
#define CPUINFO_PROC	"model name"
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 336
#endif
#endif

#if defined(__x86_64__)
#include "../../arch/x86/include/asm/unistd.h"
#define rmb()		asm volatile("lfence" ::: "memory")
#define cpu_relax()	asm volatile("rep; nop" ::: "memory")
#define CPUINFO_PROC	"model name"
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 298
#endif
#endif

#ifdef __powerpc__
#include "../../arch/powerpc/include/asm/unistd.h"
#define rmb()		asm volatile ("sync" ::: "memory")
#define cpu_relax()	asm volatile ("" ::: "memory")
#define CPUINFO_PROC	"cpu"
#endif

#ifdef __s390__
#include "../../arch/s390/include/asm/unistd.h"
#define rmb()		asm volatile("bcr 15,0" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#endif

#ifdef __sh__
#include "../../arch/sh/include/asm/unistd.h"
#if defined(__SH4A__) || defined(__SH5__)
# define rmb()		asm volatile("synco" ::: "memory")
#else
# define rmb()		asm volatile("" ::: "memory")
#endif
#define cpu_relax()	asm volatile("" ::: "memory")
#define CPUINFO_PROC	"cpu type"
#endif

#ifdef __hppa__
#include "../../arch/parisc/include/asm/unistd.h"
#define rmb()		asm volatile("" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#define CPUINFO_PROC	"cpu"
#endif

#ifdef __sparc__
#include "../../arch/sparc/include/asm/unistd.h"
#define rmb()		asm volatile("" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#define CPUINFO_PROC	"cpu"
#endif

#ifdef __alpha__
#include "../../arch/alpha/include/asm/unistd.h"
#define rmb()		asm volatile("mb" ::: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#define CPUINFO_PROC	"cpu model"
#endif

#ifdef __ia64__
#include "../../arch/ia64/include/asm/unistd.h"
#define rmb()		asm volatile ("mf" ::: "memory")
#define cpu_relax()	asm volatile ("hint @pause" ::: "memory")
#define CPUINFO_PROC	"model name"
#endif

#ifdef __arm__
#include "../../arch/arm/include/asm/unistd.h"
/*
 * Use the __kuser_memory_barrier helper in the CPU helper page. See
 * arch/arm/kernel/entry-armv.S in the kernel source for details.
 */
#define rmb()		((void(*)(void))0xffff0fa0)()
#define cpu_relax()	asm volatile("" ::: "memory")
#define CPUINFO_PROC	"Processor"
#endif

#ifdef __mips__
#include "../../arch/mips/include/asm/unistd.h"
#define rmb()		asm volatile(		\
				".set	mips2\n\t" \
				"sync\n\t"	\
				".set	mips0"	\
				: /* no output */ \
				: /* no input */ \
				: "memory")
#define cpu_relax()	asm volatile("" ::: "memory")
#define CPUINFO_PROC	"cpu model"
#endif

#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include "../../include/linux/perf_event.h"
#include "util/types.h"
#include <stdbool.h>

struct perf_mmap {
	void		*base;	/* mmap'ed area; starts with the perf_event_mmap_page control page */
	int		mask;	/* data size - 1; data offsets are ANDed with this to wrap */
	unsigned int	prev;	/* head position we last consumed up to */
};

static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm)
{
	struct perf_event_mmap_page *pc = mm->base;
	int head = pc->data_head;
	/*
	 * Pairs with the kernel's write barrier when it publishes data_head:
	 * no sample data may be read before data_head itself has been read.
	 */
	rmb();
	return head;
}

static inline void perf_mmap__write_tail(struct perf_mmap *md,
					 unsigned long tail)
{
	struct perf_event_mmap_page *pc = md->base;

	/*
	 * ensure all reads are done before we write the tail out.
	 */
	/* mb(); */
	pc->data_tail = tail;
}
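
/*
 * Illustrative sketch, not part of the original header: one way a
 * consumer could drain the mmap ring buffer with the two helpers
 * above. perf_mmap__consume_example() and its callback are made-up
 * names; the real perf tool does this (including wrap-around
 * handling) in its event-processing loop.
 */
static inline void
perf_mmap__consume_example(struct perf_mmap *md,
			   void (*handle)(struct perf_event_header *hdr))
{
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	/* sample data starts one page past the control page */
	unsigned char *data = (unsigned char *)md->base + sysconf(_SC_PAGESIZE);

	while (old != head) {
		struct perf_event_header *hdr =
			(struct perf_event_header *)&data[old & md->mask];

		/*
		 * NB: a record may wrap past the end of the buffer;
		 * real code copies such records out before use.
		 */
		handle(hdr);
		old += hdr->size;
	}

	md->prev = old;
	perf_mmap__write_tail(md, old);	/* hand the space back to the kernel */
}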

/*
 * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
 * counters in the current task.
 */
#define PR_TASK_PERF_EVENTS_DISABLE	31
#define PR_TASK_PERF_EVENTS_ENABLE	32
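
/*
 * Illustrative usage (assumes <sys/prctl.h> for prctl(2)): bracket a
 * region whose cost should not be counted.
 *
 *	prctl(PR_TASK_PERF_EVENTS_DISABLE);
 *	... uninstrumented work ...
 *	prctl(PR_TASK_PERF_EVENTS_ENABLE);
 */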

#ifndef NSEC_PER_SEC
# define NSEC_PER_SEC	1000000000ULL
#endif

static inline unsigned long long rdclock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
}
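
/*
 * Illustrative usage (run_workload() is a made-up name):
 *
 *	unsigned long long t0 = rdclock();
 *	run_workload();
 *	printf("%.6f s\n", (rdclock() - t0) / (double)NSEC_PER_SEC);
 */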

/*
 * Pick up some kernel type conventions:
 */
#define __user
#define asmlinkage

#define unlikely(x)	__builtin_expect(!!(x), 0)
#define min(x, y) ({				\
	typeof(x) _min1 = (x);			\
	typeof(y) _min2 = (y);			\
	(void) (&_min1 == &_min2);	/* type check: warns if x and y differ */ \
	_min1 < _min2 ? _min1 : _min2; })

static inline int
sys_perf_event_open(struct perf_event_attr *attr,
		    pid_t pid, int cpu, int group_fd,
		    unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu,
		       group_fd, flags);
}
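
/*
 * Illustrative sketch, not part of the original header:
 * count_cycles_example() is a made-up name showing one way to open a
 * hardware cycle counter on the calling thread and read it once.
 */
static inline u64 count_cycles_example(void)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_HARDWARE,
		.size	= sizeof(struct perf_event_attr),
		.config	= PERF_COUNT_HW_CPU_CYCLES,
	};
	u64 count = 0;
	int fd;

	/* pid 0 = calling task, cpu -1 = any CPU, group_fd -1 = no group */
	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0)
		return 0;

	/*
	 * ... run the code to be measured here; the counter counts from
	 * open because attr.disabled is left at zero ...
	 */

	if (read(fd, &count, sizeof(count)) != sizeof(count))
		count = 0;
	close(fd);
	return count;
}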

#define MAX_COUNTERS			256
#define MAX_NR_CPUS			256

struct ip_callchain {
	u64 nr;
	u64 ips[0];	/* nr entries follow the header */
};

struct branch_flags {
	u64 mispred:1;
	u64 predicted:1;
	u64 reserved:62;
};

struct branch_entry {
	u64			from;
	u64			to;
	struct branch_flags	flags;
};

struct branch_stack {
	u64			nr;
	struct branch_entry	entries[0];	/* nr entries follow the header */
};

extern bool perf_host, perf_guest;
extern const char perf_version_string[];

void pthread__unblock_sigwinch(void);

#include "util/target.h"

struct perf_record_opts {
	struct perf_target target;
	bool	     call_graph;
	bool	     group;
	bool	     inherit_stat;
	bool	     no_delay;
	bool	     no_inherit;
	bool	     no_samples;
	bool	     pipe_output;
	bool	     raw_samples;
	bool	     sample_address;
	bool	     sample_time;
	bool	     sample_id_all_missing;
	bool	     exclude_guest_missing;
	bool	     period;
	unsigned int freq;
	unsigned int mmap_pages;
	unsigned int user_freq;
	u64	     branch_stack;
	u64	     default_interval;
	u64	     user_interval;
};

#endif