kernel/events/internal.h at v4.10.11:

#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */
	int				paused;		/* can write into ring buffer */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;
	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	local_t				aux_head;
	local_t				aux_nest;
	local_t				aux_wakeup;
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	atomic_t			aux_refcount;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};
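The zero-length data_pages[0] member is a flexible array tail: the page-pointer table is carved out of the same allocation as the struct itself. A hedged sketch of how such an allocation is sized (this mirrors, roughly, what rb_alloc() in kernel/events/ring_buffer.c does; nr_pages is a caller-supplied count):

	struct ring_buffer *rb;
	size_t size = sizeof(struct ring_buffer) +
		      nr_pages * sizeof(void *);	/* tail: data_pages[nr_pages] */

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		return NULL;
	rb->nr_pages = nr_pages;	/* the data pages themselves are allocated separately */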

extern void rb_free(struct ring_buffer *rb);

static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct ring_buffer *rb;

	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
	rb_free(rb);
}
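rb_free_rcu() has exactly the shape of a call_rcu() callback: container_of() recovers the ring_buffer from its embedded rcu_head, and the real free runs only after a grace period. A hedged sketch of the drop-the-last-reference path (it mirrors ring_buffer_put() in kernel/events/core.c):

	if (!atomic_dec_and_test(&rb->refcount))
		return;
	/* defer rb_free() until concurrent RCU readers are done with rb */
	call_rcu(&rb->rcu_head, rb_free_rcu);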

static inline void rb_toggle_paused(struct ring_buffer *rb, bool pause)
{
	if (!pause && rb->nr_pages)
		rb->paused = 0;
	else
		rb->paused = 1;
}

extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);

static inline bool rb_has_aux(struct ring_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}
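For concreteness, assuming 4 KiB pages (PAGE_SHIFT == 12): a buffer with nr_pages == 8 and page_order() == 0 spans 8 << 12 = 32 KiB. In the CONFIG_PERF_USE_VMALLOC case the data area is one virtually contiguous block, so, as the vmalloc variant of rb_alloc() sets things up, nr_pages is 1 and page_order() carries the size instead: 1 << (12 + 3) is the same 32 KiB.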

#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size    = min(handle->size, len);			\
		written = memcpy_func(__VA_ARGS__);			\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		if (advance_buf)					\
			buf += written;					\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}
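Note the convention this body relies on: memcpy_func returns the number of bytes it could not copy (0 on complete success), so written = size - written yields the bytes actually written. The loop advances the output handle, wrapping to the next data page with handle->page &= rb->nr_pages - 1, a mask that is only correct because perf ring buffers always have a power-of-two number of pages, and it stops once len is exhausted or a short copy signals failure. The value returned is the number of bytes left uncopied.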

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)

static inline unsigned long
__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
		const void *buf, unsigned long len)
{
	unsigned long orig_len = len;
	__DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
				  orig_len - len, size)
}

static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
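Since the macro indirection obscures the generated code, here is a hand-expansion (not part of the file, but mechanical) of the DEFINE_OUTPUT_COPY(__output_copy, memcpy_common) line above, with the constant advance_buf test folded away:

static inline unsigned long
__output_copy(struct perf_output_handle *handle,
	      const void *buf, unsigned long len)
{
	unsigned long size, written;

	do {
		size    = min(handle->size, len);
		written = memcpy_common(handle->addr, buf, size);
		written = size - written;	/* memcpy_common returns 0, so written == size */

		len -= written;
		handle->addr += written;
		buf += written;			/* advance_buf was true */
		handle->size -= written;
		if (!handle->size) {		/* current page exhausted: wrap to the next */
			struct ring_buffer *rb = handle->rb;

			handle->page++;
			handle->page &= rb->nr_pages - 1;
			handle->addr = rb->data_pages[handle->page];
			handle->size = PAGE_SIZE << page_order(rb);
		}
	} while (len && written == size);

	return len;
}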

static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
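All three generated copiers follow the same return convention, and __copy_from_user_inatomic() already reports bytes-not-copied, which is why arch_perf_out_copy_user() can return its result unmodified. __output_custom() is the odd one out: because it instantiates the body with advance_buf == false, buf never moves, and the perf_copy_f callback instead receives the running offset (orig_len - len). A hedged sketch of such a callback (my_copy is a hypothetical name; the four-argument shape matches the call site above):

static unsigned long
my_copy(void *dst, const void *src, unsigned long off, unsigned long len)
{
	/* resume from the offset consumed by earlier loop iterations */
	memcpy(dst, src + off, len);
	return 0;	/* 0 bytes left uncopied */
}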

/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);

static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}
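get_recursion_context() picks a counter slot by execution context (0 task, 1 softirq, 2 hardirq, 3 NMI) and refuses re-entry within the same context; the barrier() on each side is a compiler barrier that keeps the protected work inside the marked window. A hedged usage sketch inside a hypothetical void handler (perf itself keeps these counters in per-CPU data, e.g. for software events; a flat array is used here only for illustration):

	static int recursion[4];	/* one counter per context level */
	int rctx;

	rctx = get_recursion_context(recursion);
	if (rctx < 0)
		return;		/* already handling an event in this context */

	/* ... emit or process the event safely ... */

	put_recursion_context(recursion, rctx);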

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */
The same file, kernel/events/internal.h, at v4.17:
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */
	int				paused;		/* can write into ring buffer */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;
	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	long				aux_head;
	local_t				aux_nest;
	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	atomic_t			aux_refcount;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};
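Compared with the v4.10.11 layout above, aux_head and aux_wakeup are now plain long rather than local_t (and aux_wakeup gained its comment). The apparent rationale, hedged here since this header does not state it, is that these fields are only updated by the writer that currently owns the AUX output handle, with aux_nest still serializing nesting, so local_t read-modify-write ops buy nothing. Illustrative fragments, not code from either file:

	/* v4.10.11: local_t, accessed through local_* ops */
	head = local_read(&rb->aux_head);
	local_add(size, &rb->aux_head);

	/* v4.17: plain long, updated only while holding the output handle */
	head = rb->aux_head;
	rb->aux_head += size;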

extern void rb_free(struct ring_buffer *rb);

static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct ring_buffer *rb;

	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
	rb_free(rb);
}

static inline void rb_toggle_paused(struct ring_buffer *rb, bool pause)
{
	if (!pause && rb->nr_pages)
		rb->paused = 0;
	else
		rb->paused = 1;
}

extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);

static inline bool rb_has_aux(struct ring_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}

#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size    = min(handle->size, len);			\
		written = memcpy_func(__VA_ARGS__);			\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		if (advance_buf)					\
			buf += written;					\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)

static inline unsigned long
__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
		const void *buf, unsigned long len)
{
	unsigned long orig_len = len;
	__DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
				  orig_len - len, size)
}

static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)

static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (unlikely(in_nmi()))
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}
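Relative to v4.10.11, this function is unchanged except for the unlikely() branch hint on the NMI test. Note also that the "/* Callchain handling */" declaration of perf_callchain(), which sat just above this function in v4.10.11, is no longer in this header by v4.17.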

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */