/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/refcount.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

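/*
 * A perf_buffer backs a perf event's mmap() region: one
 * perf_event_mmap_page shared with user space followed by 2^n data
 * pages (the write side masks with nr_pages - 1), plus an optional
 * AUX area for e.g. hardware trace data.
 */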
struct perf_buffer {
	refcount_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order */
#endif
	int				nr_pages;	/* nr of data pages */
	int				overwrite;	/* can overwrite itself */
	int				paused;		/* can write into ring buffer */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position */
	unsigned int			nest;		/* nested writers */
	local_t				events;		/* event limit */
	local_t				wakeup;		/* wakeup stamp */
	local_t				lost;		/* nr records lost */

	long				watermark;	/* wakeup watermark */
	long				aux_watermark;
	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	struct mutex			aux_mutex;
	long				aux_head;
	unsigned int			aux_nest;
	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	refcount_t			aux_refcount;
	int				aux_in_sampling;
	int				aux_in_pause_resume;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[];
};

extern void rb_free(struct perf_buffer *rb);

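/* RCU callback: resolve the rcu_head back to its perf_buffer and free it. */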
static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct perf_buffer *rb;

	rb = container_of(rcu_head, struct perf_buffer, rcu_head);
	rb_free(rb);
}

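/* A buffer with no data pages can never be un-paused. */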
static inline void rb_toggle_paused(struct perf_buffer *rb, bool pause)
{
	if (!pause && rb->nr_pages)
		rb->paused = 0;
	else
		rb->paused = 1;
}

extern struct perf_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct perf_buffer *rb);
extern struct perf_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct perf_buffer *rb);

static inline bool rb_has_aux(struct perf_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

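/*
 * Translate a page offset within the event's mmap() region to the
 * backing struct page (user page, data page or AUX page).
 */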
extern struct page *
perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct perf_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct perf_buffer *rb)
{
	return 0;
}
#endif

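/*
 * With CONFIG_PERF_USE_VMALLOC each data_pages[] slot covers a virtually
 * contiguous run of 2^page_order pages, so the size helpers below fold
 * the allocation order into their calculations.
 */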
static inline int data_page_nr(struct perf_buffer *rb)
{
	return rb->nr_pages << page_order(rb);
}

static inline unsigned long perf_data_size(struct perf_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct perf_buffer *rb)
{
	return (unsigned long)rb->aux_nr_pages << PAGE_SHIFT;
}

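/*
 * Copy loop shared by all output-copy variants below. memcpy_func
 * follows the copy_from_user() convention and returns the number of
 * bytes NOT copied, so "written = size - written" recovers how many
 * were. When the current handle page is exhausted, the loop wraps to
 * the next data page (nr_pages is a power of two, hence the mask).
 * The generated function returns the number of bytes left uncopied;
 * an illustrative caller (sketch):
 *
 *	unsigned long rem = __output_copy(handle, data, len);
 *	// len - rem bytes made it into the ring buffer
 */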
#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size = min(handle->size, len);				\
		written = memcpy_func(__VA_ARGS__);			\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		if (advance_buf)					\
			buf += written;					\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct perf_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)

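/*
 * Like DEFINE_OUTPUT_COPY(), but the copy callback is supplied at run
 * time and receives the running offset (orig_len - len) instead of an
 * advancing buf pointer, hence advance_buf == false.
 */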
static inline unsigned long
__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
		const void *buf, unsigned long len)
{
	unsigned long orig_len = len;
	__DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
				  orig_len - len, size)
}

static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

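/*
 * A "copy" that touches nothing yet reports full success, so
 * __output_skip() merely advances the handle by len bytes.
 */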
static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

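/*
 * Default user-space copy, used unless the architecture provides its
 * own: page faults are disabled because the output path can run in
 * atomic (even NMI) context; as with __copy_from_user_inatomic(), the
 * return value is the number of bytes not copied.
 */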
#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)

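/*
 * Per-context recursion guard: one counter per level reported by
 * interrupt_context_level() (task, softirq, hardirq, NMI). Typical
 * pairing (sketch, assuming a per-CPU u8 recursion[4] array):
 *
 *	int rctx = get_recursion_context(recursion);
 *	if (rctx < 0)
 *		return;			// already active at this level
 *	// ... emit the event ...
 *	put_recursion_context(recursion, rctx);
 */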
static inline int get_recursion_context(u8 *recursion)
{
	unsigned char rctx = interrupt_context_level();

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(u8 *recursion, unsigned char rctx)
{
	barrier();
	recursion[rctx]--;
}

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */