/* kernel/events/internal.h (Linux v4.10.11) */
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE		0x01

struct ring_buffer {
	atomic_t			refcount;
	struct rcu_head			rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
	struct work_struct		work;
	int				page_order;	/* allocation order  */
#endif
	int				nr_pages;	/* nr of data pages  */
	int				overwrite;	/* can overwrite itself */
	int				paused;		/* can write into ring buffer */

	atomic_t			poll;		/* POLL_ for wakeups */

	local_t				head;		/* write position    */
	local_t				nest;		/* nested writers    */
	local_t				events;		/* event limit       */
	local_t				wakeup;		/* wakeup stamp      */
	local_t				lost;		/* nr records lost   */

	long				watermark;	/* wakeup watermark  */
	long				aux_watermark;
	/* poll crap */
	spinlock_t			event_lock;
	struct list_head		event_list;

	atomic_t			mmap_count;
	unsigned long			mmap_locked;
	struct user_struct		*mmap_user;

	/* AUX area */
	local_t				aux_head;
	local_t				aux_nest;
	local_t				aux_wakeup;
	unsigned long			aux_pgoff;
	int				aux_nr_pages;
	int				aux_overwrite;
	atomic_t			aux_mmap_count;
	unsigned long			aux_mmap_locked;
	void				(*free_aux)(void *);
	atomic_t			aux_refcount;
	void				**aux_pages;
	void				*aux_priv;

	struct perf_event_mmap_page	*user_page;
	void				*data_pages[0];
};
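
/*
 * Allocation sketch (assumption, modeled on rb_alloc() in
 * kernel/events/ring_buffer.c): data_pages[] is a flexible array member,
 * so the struct is sized for nr_pages page pointers at allocation time,
 * roughly:
 *
 *	size = sizeof(struct ring_buffer);
 *	size += nr_pages * sizeof(void *);
 *	rb = kzalloc(size, GFP_KERNEL);
 */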

extern void rb_free(struct ring_buffer *rb);

static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
	struct ring_buffer *rb;

	rb = container_of(rcu_head, struct ring_buffer, rcu_head);
	rb_free(rb);
}
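
/*
 * Usage sketch (illustrative): rb_free_rcu() is meant as a call_rcu()
 * callback, so the final reference drops the buffer only after all RCU
 * readers are done with it, e.g. as ring_buffer_put() does in
 * kernel/events/core.c:
 *
 *	if (atomic_dec_and_test(&rb->refcount))
 *		call_rcu(&rb->rcu_head, rb_free_rcu);
 */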

static inline void rb_toggle_paused(struct ring_buffer *rb, bool pause)
{
	if (!pause && rb->nr_pages)
		rb->paused = 0;
	else
		rb->paused = 1;
}
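
/*
 * Note (editorial): a buffer with no data pages (nr_pages == 0) can never
 * be unpaused; while paused, writers bail out of perf_output_begin()
 * instead of storing records (assumption, based on the paused check in
 * kernel/events/ring_buffer.c).
 */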

extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
			pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);

static inline bool rb_has_aux(struct ring_buffer *rb)
{
	return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
			  unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
	return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
	return 0;
}
#endif
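
/*
 * Background (assumption, based on the vmalloc path in
 * kernel/events/ring_buffer.c): with CONFIG_PERF_USE_VMALLOC the whole
 * data area is one contiguous vmalloc allocation; rb_alloc() then records
 * ilog2(nr_pages) in rb->page_order and sets rb->nr_pages to 1, so the
 * size helpers below still report the full buffer size.
 */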

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
	return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
	return rb->aux_nr_pages << PAGE_SHIFT;
}
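
/*
 * Worked example (illustrative): with 4 KiB pages (PAGE_SHIFT == 12) and
 * 8 data pages, perf_data_size() is 8 << 12 == 32768 bytes in the
 * page-backed case (nr_pages == 8, page_order == 0), and
 * 1 << (12 + 3) == 32768 bytes in the vmalloc case (nr_pages == 1,
 * page_order == 3). Either way the data area is a power-of-two size,
 * which is what lets the copy loop below wrap with the
 * (nr_pages - 1) mask.
 */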

#define __DEFINE_OUTPUT_COPY_BODY(advance_buf, memcpy_func, ...)	\
{									\
	unsigned long size, written;					\
									\
	do {								\
		size    = min(handle->size, len);			\
		written = memcpy_func(__VA_ARGS__);			\
		written = size - written;				\
									\
		len -= written;						\
		handle->addr += written;				\
		if (advance_buf)					\
			buf += written;					\
		handle->size -= written;				\
		if (!handle->size) {					\
			struct ring_buffer *rb = handle->rb;		\
									\
			handle->page++;					\
			handle->page &= rb->nr_pages - 1;		\
			handle->addr = rb->data_pages[handle->page];	\
			handle->size = PAGE_SIZE << page_order(rb);	\
		}							\
	} while (len && written == size);				\
									\
	return len;							\
}

#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)			\
static inline unsigned long						\
func_name(struct perf_output_handle *handle,				\
	  const void *buf, unsigned long len)				\
__DEFINE_OUTPUT_COPY_BODY(true, memcpy_func, handle->addr, buf, size)
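
/*
 * Convention note and expansion sketch (assumption): memcpy_func returns
 * the number of bytes it did NOT copy (0 on full success), so
 * "written = size - written" in the body yields the bytes actually
 * written, and a short copy terminates the loop. For example,
 * DEFINE_OUTPUT_COPY(__output_copy, memcpy_common) expands to roughly:
 *
 *	static inline unsigned long
 *	__output_copy(struct perf_output_handle *handle,
 *		      const void *buf, unsigned long len)
 *	{
 *		... loop calling memcpy_common(handle->addr, buf, size),
 *		    advancing page-wise through rb->data_pages[] ...
 *	}
 *
 * The return value is the number of bytes left uncopied, 0 on success.
 */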

static inline unsigned long
__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
		const void *buf, unsigned long len)
{
	unsigned long orig_len = len;
	__DEFINE_OUTPUT_COPY_BODY(false, copy_func, handle->addr, buf,
				  orig_len - len, size)
}

static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
	memcpy(dst, src, n);
	return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
	return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)
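
/*
 * Note (editorial): memcpy_skip() claims every byte was "copied" without
 * touching memory, so __output_skip() just advances the handle across the
 * buffer. Callers such as perf_output_skip() in kernel/events/ring_buffer.c
 * (assumption) use this to leave a gap in a record.
 */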

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
	unsigned long ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
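
/*
 * Note (editorial): __copy_from_user_inatomic() already returns the number
 * of bytes not copied, matching the memcpy_func convention above, and the
 * pagefault_disable()/pagefault_enable() pair keeps the copy from sleeping
 * on a fault, which matters because perf output can run from NMI context.
 */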

/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);

static inline int get_recursion_context(int *recursion)
{
	int rctx;

	if (in_nmi())
		rctx = 3;
	else if (in_irq())
		rctx = 2;
	else if (in_softirq())
		rctx = 1;
	else
		rctx = 0;

	if (recursion[rctx])
		return -1;

	recursion[rctx]++;
	barrier();

	return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
	barrier();
	recursion[rctx]--;
}
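
/*
 * Usage sketch (illustrative, modeled on
 * perf_swevent_get_recursion_context() in kernel/events/core.c): each CPU
 * keeps one recursion counter per context level (task, softirq, hardirq,
 * NMI), and event code brackets its work with the pair above:
 *
 *	rctx = get_recursion_context(swhash->recursion);
 *	if (rctx < 0)
 *		return;		(already nested at this level, bail out)
 *	... emit the event ...
 *	put_recursion_context(swhash->recursion, rctx);
 *
 * where swhash points at this CPU's software-event state. The barrier()s
 * keep the compiler from moving the protected work outside the bracket.
 */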

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
	return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
	return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */