/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */
	int				paused;		/* can write into ring buffer */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;
	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	long				aux_head;
	local_t				aux_nest;
	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	atomic_t			aux_refcount;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};

extern void rb_free(struct ring_buffer *rb);

static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct ring_buffer *rb;

	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
	rb_free(rb);
}

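/*
 * A paused ring buffer refuses new writes. Note that a buffer without
 * data pages can never be unpaused: it only carries the user_page and,
 * possibly, an AUX area.
 */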
static inline void rb_toggle_paused(struct ring_buffer *rb, bool pause)
{
	if (!pause && rb->nr_pages)
		rb->paused = 0;
	else
		rb->paused = 1;
}

extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);

static inline bool rb_has_aux(struct ring_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

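/*
 * Sizes of the data and AUX areas in bytes. With
 * CONFIG_PERF_USE_VMALLOC each data_pages[] slot spans 2^page_order
 * real pages, hence the page_order() term in the shift.
 */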
static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}

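/*
 * Common body of the output-copy functions below. memcpy_func must
 * return the number of bytes it did NOT copy (copy_from_user()
 * convention); the loop keeps advancing through the ring-buffer pages,
 * wrapping via the nr_pages - 1 mask, until len is exhausted or a short
 * copy signals a fault. Returns the number of bytes left uncopied.
 */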
#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size = min(handle->size, len);				\
		written = memcpy_func(__VA_ARGS__);			\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		if (advance_buf)					\
			buf += written;					\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)

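/*
 * __output_custom() differs from the DEFINE_OUTPUT_COPY() variants in
 * that the callback receives the running offset (orig_len - len) rather
 * than an advancing buf pointer, hence advance_buf == false here.
 */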
static inline unsigned long
__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
		const void *buf, unsigned long len)
{
	unsigned long orig_len = len;
	__DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
				  orig_len - len, size)
}

static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

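/*
 * An architecture can override arch_perf_out_copy_user with its own
 * (macro) implementation; the self-referencing #define below simply
 * marks this generic, pagefault-safe fallback as present.
 */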
#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)

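/*
 * One recursion counter per context: task (0), softirq (1), hardirq (2)
 * and NMI (3). A non-zero count means a perf operation is already in
 * flight in that context, so the caller must bail out rather than
 * recurse.
 */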
static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (unlikely(in_nmi()))
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

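/*
 * User-stack dumping is opt-in per architecture; without
 * CONFIG_HAVE_PERF_USER_STACK_DUMP the stack-pointer helper degrades to
 * a constant 0 and arch_perf_have_user_stack_dump() reports false.
 */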
#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */