v6.13.7 — kernel/events/internal.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/refcount.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct perf_buffer {
	refcount_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */
	int				paused;		/* can write into ring buffer */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	unsigned int			nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;
	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	struct mutex			aux_mutex;
	long				aux_head;
	unsigned int			aux_nest;
	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	refcount_t			aux_refcount;
	int				aux_in_sampling;
	int				aux_in_pause_resume;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[];
};
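
/*
 * Illustrative sketch, not part of this header: how user space maps the
 * buffer this struct describes.  The perf mmap layout places user_page
 * (struct perf_event_mmap_page) at file offset 0, followed by nr_pages
 * data pages, and nr_pages must be a power of two.  map_perf_buffer()
 * and perf_fd below are assumed names; perf_fd would come from
 * perf_event_open().
 */
#if 0	/* example only: user-space code */
#include <sys/mman.h>
#include <unistd.h>
#include <linux/perf_event.h>

static struct perf_event_mmap_page *map_perf_buffer(int perf_fd, int nr_pages)
{
	/* one header page + a power-of-two number of data pages */
	size_t len = (size_t)(1 + nr_pages) * sysconf(_SC_PAGESIZE);
	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
			  perf_fd, 0);

	return base == MAP_FAILED ? NULL : base;
}
#endif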

extern void rb_free(struct perf_buffer *rb);

static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct perf_buffer *rb;

	rb = container_of(rcu_head, struct perf_buffer, rcu_head);
	rb_free(rb);
}

static inline void rb_toggle_paused(struct perf_buffer *rb, bool pause)
{
	if (!pause && rb->nr_pages)
		rb->paused = 0;
	else
		rb->paused = 1;
}
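
/*
 * Illustrative sketch, not part of this header: rb_free_rcu() has the
 * callback shape call_rcu() expects, so dropping the last reference
 * while RCU readers may still hold the buffer pointer looks like this
 * (modelled on ring_buffer_put() in kernel/events/core.c):
 */
#if 0	/* example only */
	if (refcount_dec_and_test(&rb->refcount))
		call_rcu(&rb->rcu_head, rb_free_rcu);
#endif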

extern struct perf_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct perf_buffer *rb);
extern struct perf_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct perf_buffer *rb);

static inline bool rb_has_aux(struct perf_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct perf_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct perf_buffer *rb)
{
	return 0;
}
#endif

static inline int data_page_nr(struct perf_buffer *rb)
{
	return rb->nr_pages << page_order(rb);
}

static inline unsigned long perf_data_size(struct perf_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct perf_buffer *rb)
{
	return (unsigned long)rb->aux_nr_pages << PAGE_SHIFT;
}
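
/*
 * Worked example (illustrative): with CONFIG_PERF_USE_VMALLOC the data
 * area is one virtually-contiguous allocation, so a single data_pages[]
 * entry can span 2^page_order real pages.  For nr_pages = 1 and
 * page_order = 3:
 *
 *	data_page_nr()	 = 1 << 3 = 8 real pages
 *	perf_data_size() = 1 << (PAGE_SHIFT + 3) = 8 * PAGE_SIZE bytes
 *
 * Without vmalloc backing, page_order() is 0 and both helpers reduce to
 * plain nr_pages pages.
 */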

#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size    = min(handle->size, len);			\
		written = memcpy_func(__VA_ARGS__);			\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		if (advance_buf)					\
			buf += written;					\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct perf_buffer *rb = handle->rb;	\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)
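
/*
 * Worked trace (illustrative): writing len = 5000 bytes with
 * handle->size = 4096 left on the current page and PAGE_SIZE = 4096:
 *
 *	pass 1: size = min(4096, 5000) = 4096, all copied, len -> 904;
 *		handle->size hits 0, so the handle wraps to the next
 *		data page (index masked by nr_pages - 1, which is why
 *		nr_pages must be a power of two)
 *	pass 2: size = min(4096, 904) = 904, all copied, len -> 0
 *
 * memcpy_func follows the copy_from_user() convention of returning the
 * number of bytes NOT copied, hence "written = size - written" above.
 * A short copy (e.g. a faulting user access) breaks the loop early and
 * the nonzero remainder is returned to the caller.
 */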

static inline unsigned long
__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
		const void *buf, unsigned long len)
{
	unsigned long orig_len = len;
	__DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
				  orig_len - len, size)
}

static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
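
/*
 * Note (illustrative): memcpy_skip() copies nothing but still returns 0
 * ("all bytes copied"), so __output_skip() advances the handle exactly
 * as if the data had been written.  That is how a hole is left in a
 * record, as in perf_output_skip():
 */
#if 0	/* example only */
	/* reserve 'len' bytes in the record without populating them */
	__output_skip(handle, NULL, len);
#endif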

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
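
/*
 * Note (illustrative): __copy_from_user_inatomic() also returns the
 * number of bytes left uncopied, matching the convention the copy loop
 * expects.  pagefault_disable() turns a hard fault into a short copy
 * instead of a sleeping fault, which matters because perf output can
 * run from NMI context where faulting in user pages is not an option.
 */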

static inline int get_recursion_context(u8 *recursion)
{
	unsigned char rctx = interrupt_context_level();

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(u8 *recursion, unsigned char rctx)
{
	barrier();
	recursion[rctx]--;
}
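
/*
 * Illustrative sketch, not part of this header: the intended pairing,
 * modelled on perf_swevent_get_recursion_context().  The caller keeps a
 * per-CPU array with one counter per context level (task, softirq,
 * hardirq, NMI); a nonzero counter means that context is already in the
 * middle of emitting an event, so the nested event is dropped rather
 * than recursing into the same buffer.
 */
#if 0	/* example only; 'recursion' is the caller's per-CPU u8[4] */
	int rctx = get_recursion_context(recursion);

	if (rctx < 0)
		return;			/* already nested in this context */

	/* ... write the event to the ring buffer ... */

	put_recursion_context(recursion, rctx);
#endif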

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */
v5.4 — kernel/events/internal.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/refcount.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	refcount_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */
	int				paused;		/* can write into ring buffer */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	unsigned int			nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;
	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	long				aux_head;
	unsigned int			aux_nest;
	long				aux_wakeup;	/* last aux_watermark boundary crossed by aux_head */
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	refcount_t			aux_refcount;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};

extern void rb_free(struct ring_buffer *rb);

static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct ring_buffer *rb;

	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
	rb_free(rb);
}

static inline void rb_toggle_paused(struct ring_buffer *rb, bool pause)
{
	if (!pause && rb->nr_pages)
		rb->paused = 0;
	else
		rb->paused = 1;
}

extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);

static inline bool rb_has_aux(struct ring_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}

#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size    = min(handle->size, len);			\
		written = memcpy_func(__VA_ARGS__);			\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		if (advance_buf)					\
			buf += written;					\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)

static inline unsigned long
__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
		const void *buf, unsigned long len)
{
	unsigned long orig_len = len;
	__DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
				  orig_len - len, size)
}

static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)

static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (unlikely(in_nmi()))
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}
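
/*
 * Note (illustrative): this v5.4 variant open-codes the context
 * classification (task = 0, softirq = 1, hardirq = 2, NMI = 3), testing
 * from the most restrictive context down, where the v6.13.7 copy above
 * obtains essentially the same level from interrupt_context_level().
 * Either way the recursion array needs one counter per level, i.e. four
 * entries.
 */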

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */