#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
	struct irq_work			irq_work;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order */
#endif
	int				nr_pages;	/* nr of data pages */
	int				overwrite;	/* can overwrite itself */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position */
	local_t				nest;		/* nested writers */
	local_t				events;		/* event limit */
	local_t				wakeup;		/* wakeup stamp */
	local_t				lost;		/* nr records lost */

	long				watermark;	/* wakeup watermark */
	long				aux_watermark;
	/* poll wakeup bookkeeping */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	local_t				aux_head;
	local_t				aux_nest;
	local_t				aux_wakeup;
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	atomic_t			aux_refcount;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};
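
/*
 * data_pages[] is a flexible array member: rb_alloc() sizes the
 * allocation so that nr_pages page pointers fit behind the struct.
 * A sketch of what that sizing looks like (the real code lives in
 * ring_buffer.c; treat this as illustrative only):
 *
 *	size_t size = offsetof(struct ring_buffer, data_pages[nr_pages]);
 *	struct ring_buffer *rb = kzalloc(size, GFP_KERNEL);
 */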

extern void rb_free(struct ring_buffer *rb);

static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct ring_buffer *rb;

	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
	rb_free(rb);
}
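
/*
 * rb_free_rcu() is shaped as an RCU callback: lookups done under
 * rcu_read_lock() stay valid because the final release is expected to
 * go through something like (illustrative; the actual call site is in
 * core.c):
 *
 *	if (atomic_dec_and_test(&rb->refcount))
 *		call_rcu(&rb->rcu_head, rb_free_rcu);
 */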

extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);

static inline bool rb_has_aux(struct ring_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}
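
/*
 * Note the asymmetry above: with CONFIG_PERF_USE_VMALLOC the data area
 * is one high-order mapping, so the allocator is expected to fold the
 * page count into rb->page_order (leaving rb->nr_pages at 0 or 1),
 * which perf_data_size() undoes. AUX pages are accounted one by one,
 * hence the plain shift in perf_aux_size().
 */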

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size    = min(handle->size, len);			\
		written = memcpy_func(handle->addr, buf, size);		\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		buf += written;						\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}
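
/*
 * Convention assumed by DEFINE_OUTPUT_COPY(): memcpy_func returns the
 * number of bytes it could NOT copy (0 on complete success), mirroring
 * copy_from_user(). The generated helper loops, wrapping from one data
 * page to the next, and returns the bytes left uncopied. Illustrative
 * call:
 *
 *	unsigned long left = __output_copy(handle, &rec, sizeof(rec));
 *
 * left is non-zero only for copy functions that can stop short, e.g.
 * the user-space variant further down.
 */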

static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
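
/*
 * memcpy_skip() reports success without touching memory, so
 * __output_skip() merely advances the handle over len bytes; handy for
 * leaving a gap (e.g. record padding) in the output.
 */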

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
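
/*
 * __output_copy_user() pulls sample payload in from user space; src is
 * really a user pointer passed through the plain const void * signature
 * the macro dictates. With page faults disabled the copy can never
 * sleep, so it is safe in IRQ/NMI context; a faulting access makes
 * __copy_from_user_inatomic() return the bytes not copied, which ends
 * the copy loop early and is reflected in the return value.
 */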

/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);

static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}
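
/*
 * One recursion counter per context level (task, softirq, hardirq,
 * NMI) lets a perf event that fires inside perf itself be dropped
 * rather than recurse. Usage sketch with a hypothetical per-CPU array
 * (the in-tree users keep theirs in callchain.c and core.c):
 *
 *	static DEFINE_PER_CPU(int, my_recursion[4]);
 *
 *	int rctx = get_recursion_context(this_cpu_ptr(my_recursion));
 *	if (rctx < 0)
 *		return;		// already active at this level
 *	// ... non-reentrant work ...
 *	put_recursion_context(this_cpu_ptr(my_recursion), rctx);
 */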

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */