kernel/events/ring_buffer.c, v3.5.6
  1/*
  2 * Performance events ring-buffer code:
  3 *
  4 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
  5 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
  6 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  7 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  8 *
  9 * For licensing details see kernel-base/COPYING
 10 */
 11
 12#include <linux/perf_event.h>
 13#include <linux/vmalloc.h>
 14#include <linux/slab.h>
 15
 16#include "internal.h"
 17
 18static bool perf_output_space(struct ring_buffer *rb, unsigned long tail,
 19			      unsigned long offset, unsigned long head)
 20{
 21	unsigned long mask;
 22
 23	if (!rb->writable)
 24		return true;
 25
 26	mask = perf_data_size(rb) - 1;
 27
 28	offset = (offset - tail) & mask;
 29	head   = (head   - tail) & mask;
 30
 31	if ((int)(head - offset) < 0)
 32		return false;
 33
 34	return true;
 35}
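The check above relies on the data area being a power of two in size: masking the distances from tail keeps the comparison correct across wrap-around. A minimal standalone sketch of the same arithmetic, using a hypothetical 16-byte data area (not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Same test as perf_output_space(), hard-coded to a 16-byte data area. */
static bool output_space(unsigned long tail, unsigned long offset,
			 unsigned long head)
{
	unsigned long mask = 16 - 1;

	offset = (offset - tail) & mask;	/* tail -> current head */
	head   = (head   - tail) & mask;	/* tail -> proposed new head */

	return (long)(head - offset) >= 0;	/* new head must not pass tail */
}

int main(void)
{
	/* reader at 4, writer at 14: a 4-byte record (head 18) still fits */
	printf("%d\n", output_space(4, 14, 18));	/* prints 1 */
	/* ...but an 8-byte record (head 22) would overwrite unread data */
	printf("%d\n", output_space(4, 14, 22));	/* prints 0 */
	return 0;
}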
 36
 37static void perf_output_wakeup(struct perf_output_handle *handle)
 38{
 39	atomic_set(&handle->rb->poll, POLL_IN);
 40
 41	handle->event->pending_wakeup = 1;
 42	irq_work_queue(&handle->event->pending);
 43}
 44
 45/*
 46 * We need to ensure a later event_id doesn't publish a head when a former
 47 * event isn't done writing. However since we need to deal with NMIs we
 48 * cannot fully serialize things.
 49 *
 50 * We only publish the head (and generate a wakeup) when the outer-most
 51 * event completes.
 52 */
 53static void perf_output_get_handle(struct perf_output_handle *handle)
 54{
 55	struct ring_buffer *rb = handle->rb;
 56
 57	preempt_disable();
 58	local_inc(&rb->nest);
 59	handle->wakeup = local_read(&rb->wakeup);
 60}
 61
 62static void perf_output_put_handle(struct perf_output_handle *handle)
 63{
 64	struct ring_buffer *rb = handle->rb;
 65	unsigned long head;
 66
 67again:
 68	head = local_read(&rb->head);
 69
 70	/*
 71	 * IRQ/NMI can happen here, which means we can miss a head update.
 72	 */
 73
 74	if (!local_dec_and_test(&rb->nest))
 75		goto out;
 76
 77	/*
 78	 * Publish the known good head. Rely on the full barrier implied
  79	 * by atomic_dec_and_test() to order the rb->head read and this
 80	 * write.
 81	 */
 82	rb->user_page->data_head = head;
 83
 84	/*
 85	 * Now check if we missed an update, rely on the (compiler)
 86	 * barrier in atomic_dec_and_test() to re-read rb->head.
 87	 */
 88	if (unlikely(head != local_read(&rb->head))) {
 89		local_inc(&rb->nest);
 90		goto again;
 91	}
 92
 93	if (handle->wakeup != local_read(&rb->wakeup))
 94		perf_output_wakeup(handle);
 95
 96out:
 97	preempt_enable();
 98}
 99
100int perf_output_begin(struct perf_output_handle *handle,
101		      struct perf_event *event, unsigned int size)
102{
103	struct ring_buffer *rb;
104	unsigned long tail, offset, head;
105	int have_lost;
106	struct perf_sample_data sample_data;
107	struct {
108		struct perf_event_header header;
109		u64			 id;
110		u64			 lost;
111	} lost_event;
112
113	rcu_read_lock();
114	/*
115	 * For inherited events we send all the output towards the parent.
116	 */
117	if (event->parent)
118		event = event->parent;
119
120	rb = rcu_dereference(event->rb);
121	if (!rb)
122		goto out;
123
124	handle->rb	= rb;
125	handle->event	= event;
126
127	if (!rb->nr_pages)
128		goto out;
129
130	have_lost = local_read(&rb->lost);
131	if (have_lost) {
132		lost_event.header.size = sizeof(lost_event);
133		perf_event_header__init_id(&lost_event.header, &sample_data,
134					   event);
135		size += lost_event.header.size;
136	}
137
138	perf_output_get_handle(handle);
139
140	do {
141		/*
142		 * Userspace could choose to issue a mb() before updating the
143		 * tail pointer. So that all reads will be completed before the
144		 * write is issued.
145		 */
146		tail = ACCESS_ONCE(rb->user_page->data_tail);
147		smp_rmb();
148		offset = head = local_read(&rb->head);
149		head += size;
150		if (unlikely(!perf_output_space(rb, tail, offset, head)))
151			goto fail;
152	} while (local_cmpxchg(&rb->head, offset, head) != offset);
153
154	if (head - local_read(&rb->wakeup) > rb->watermark)
155		local_add(rb->watermark, &rb->wakeup);
156
157	handle->page = offset >> (PAGE_SHIFT + page_order(rb));
158	handle->page &= rb->nr_pages - 1;
159	handle->size = offset & ((PAGE_SIZE << page_order(rb)) - 1);
160	handle->addr = rb->data_pages[handle->page];
161	handle->addr += handle->size;
162	handle->size = (PAGE_SIZE << page_order(rb)) - handle->size;
163
164	if (have_lost) {
165		lost_event.header.type = PERF_RECORD_LOST;
166		lost_event.header.misc = 0;
167		lost_event.id          = event->id;
168		lost_event.lost        = local_xchg(&rb->lost, 0);
169
170		perf_output_put(handle, lost_event);
171		perf_event__output_id_sample(event, handle, &sample_data);
172	}
173
174	return 0;
175
176fail:
177	local_inc(&rb->lost);
178	perf_output_put_handle(handle);
179out:
180	rcu_read_unlock();
181
182	return -ENOSPC;
183}
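perf_output_begin() reserves size bytes in the buffer and fills in the handle; the caller then copies its record and releases the handle. A hedged sketch of the usual writer pattern, in the style of the lost_event path above (the record type and payload are purely illustrative; real samples go through perf_output_sample()):

static void emit_example_record(struct perf_event *event, u64 value)
{
	struct perf_output_handle handle;
	struct {
		struct perf_event_header header;
		u64			 value;
	} rec;

	rec.header.type = PERF_RECORD_SAMPLE;	/* illustrative only */
	rec.header.misc = 0;
	rec.header.size = sizeof(rec);
	rec.value       = value;

	if (perf_output_begin(&handle, event, rec.header.size))
		return;				/* -ENOSPC, rb->lost was bumped */

	perf_output_put(&handle, rec);		/* copy the record into the ring */
	perf_output_end(&handle);		/* publish head, wake readers if due */
}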
184
185void perf_output_copy(struct perf_output_handle *handle,
186		      const void *buf, unsigned int len)
187{
188	__output_copy(handle, buf, len);
189}
190
191void perf_output_end(struct perf_output_handle *handle)
192{
193	perf_output_put_handle(handle);
194	rcu_read_unlock();
195}
196
197static void
198ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
199{
200	long max_size = perf_data_size(rb);
201
202	if (watermark)
203		rb->watermark = min(max_size, watermark);
204
205	if (!rb->watermark)
206		rb->watermark = max_size / 2;
207
208	if (flags & RING_BUFFER_WRITABLE)
209		rb->writable = 1;
210
211	atomic_set(&rb->refcount, 1);
212
213	INIT_LIST_HEAD(&rb->event_list);
214	spin_lock_init(&rb->event_lock);
215}
216
217#ifndef CONFIG_PERF_USE_VMALLOC
218
219/*
220 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
221 */
222
223struct page *
224perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
225{
226	if (pgoff > rb->nr_pages)
227		return NULL;
228
229	if (pgoff == 0)
230		return virt_to_page(rb->user_page);
231
232	return virt_to_page(rb->data_pages[pgoff - 1]);
233}
234
235static void *perf_mmap_alloc_page(int cpu)
236{
237	struct page *page;
238	int node;
239
240	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
241	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
242	if (!page)
243		return NULL;
244
245	return page_address(page);
246}
247
248struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
249{
250	struct ring_buffer *rb;
251	unsigned long size;
252	int i;
253
254	size = sizeof(struct ring_buffer);
255	size += nr_pages * sizeof(void *);
256
257	rb = kzalloc(size, GFP_KERNEL);
258	if (!rb)
259		goto fail;
260
261	rb->user_page = perf_mmap_alloc_page(cpu);
262	if (!rb->user_page)
263		goto fail_user_page;
264
265	for (i = 0; i < nr_pages; i++) {
266		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
267		if (!rb->data_pages[i])
268			goto fail_data_pages;
269	}
270
271	rb->nr_pages = nr_pages;
272
273	ring_buffer_init(rb, watermark, flags);
274
275	return rb;
276
277fail_data_pages:
278	for (i--; i >= 0; i--)
279		free_page((unsigned long)rb->data_pages[i]);
280
281	free_page((unsigned long)rb->user_page);
282
283fail_user_page:
284	kfree(rb);
285
286fail:
287	return NULL;
288}
289
290static void perf_mmap_free_page(unsigned long addr)
291{
292	struct page *page = virt_to_page((void *)addr);
293
294	page->mapping = NULL;
295	__free_page(page);
296}
297
298void rb_free(struct ring_buffer *rb)
299{
300	int i;
301
302	perf_mmap_free_page((unsigned long)rb->user_page);
303	for (i = 0; i < rb->nr_pages; i++)
304		perf_mmap_free_page((unsigned long)rb->data_pages[i]);
305	kfree(rb);
306}
307
308#else
309
310struct page *
311perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
312{
313	if (pgoff > (1UL << page_order(rb)))
314		return NULL;
315
316	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
317}
318
319static void perf_mmap_unmark_page(void *addr)
320{
321	struct page *page = vmalloc_to_page(addr);
322
323	page->mapping = NULL;
324}
325
326static void rb_free_work(struct work_struct *work)
327{
328	struct ring_buffer *rb;
329	void *base;
330	int i, nr;
331
332	rb = container_of(work, struct ring_buffer, work);
333	nr = 1 << page_order(rb);
334
335	base = rb->user_page;
336	for (i = 0; i < nr + 1; i++)
337		perf_mmap_unmark_page(base + (i * PAGE_SIZE));
338
339	vfree(base);
340	kfree(rb);
341}
342
343void rb_free(struct ring_buffer *rb)
344{
345	schedule_work(&rb->work);
346}
347
348struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
349{
350	struct ring_buffer *rb;
351	unsigned long size;
352	void *all_buf;
353
354	size = sizeof(struct ring_buffer);
355	size += sizeof(void *);
356
357	rb = kzalloc(size, GFP_KERNEL);
358	if (!rb)
359		goto fail;
360
361	INIT_WORK(&rb->work, rb_free_work);
362
363	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
364	if (!all_buf)
365		goto fail_all_buf;
366
367	rb->user_page = all_buf;
368	rb->data_pages[0] = all_buf + PAGE_SIZE;
369	rb->page_order = ilog2(nr_pages);
370	rb->nr_pages = 1;
371
372	ring_buffer_init(rb, watermark, flags);
373
374	return rb;
375
376fail_all_buf:
377	kfree(rb);
378
379fail:
380	return NULL;
381}
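In this CONFIG_PERF_USE_VMALLOC variant the whole buffer is one contiguous vmalloc_user() allocation, so rb->nr_pages stays 1 and page_order(rb) records how many real pages that single "data page" spans (the v4.10.11 copy below additionally guards the ilog2() call against nr_pages == 0). A worked example of the resulting layout, assuming PAGE_SIZE == 4096 and a request for nr_pages == 8:

/*
 * Hypothetical sizes for nr_pages == 8, PAGE_SIZE == 4096:
 *
 *   all_buf           = vmalloc_user((8 + 1) * 4096)	-- 36 KiB total
 *   rb->user_page     = all_buf			-- pgoff 0
 *   rb->data_pages[0] = all_buf + 4096			-- start of sample data
 *   rb->nr_pages      = 1
 *   rb->page_order    = ilog2(8) = 3
 *
 *   perf_data_size(rb) == nr_pages << (PAGE_SHIFT + page_order)
 *                      == 1 << (12 + 3) == 32768 bytes of sample space
 */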
382
383#endif
kernel/events/ring_buffer.c, v4.10.11
  1/*
  2 * Performance events ring-buffer code:
  3 *
  4 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
  5 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
  6 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
  7 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  8 *
  9 * For licensing details see kernel-base/COPYING
 10 */
 11
 12#include <linux/perf_event.h>
 13#include <linux/vmalloc.h>
 14#include <linux/slab.h>
 15#include <linux/circ_buf.h>
 16#include <linux/poll.h>
 17
 18#include "internal.h"
 19
 20static void perf_output_wakeup(struct perf_output_handle *handle)
 21{
 22	atomic_set(&handle->rb->poll, POLLIN);
 23
 24	handle->event->pending_wakeup = 1;
 25	irq_work_queue(&handle->event->pending);
 26}
 27
 28/*
 29 * We need to ensure a later event_id doesn't publish a head when a former
 30 * event isn't done writing. However since we need to deal with NMIs we
 31 * cannot fully serialize things.
 32 *
 33 * We only publish the head (and generate a wakeup) when the outer-most
 34 * event completes.
 35 */
 36static void perf_output_get_handle(struct perf_output_handle *handle)
 37{
 38	struct ring_buffer *rb = handle->rb;
 39
 40	preempt_disable();
 41	local_inc(&rb->nest);
 42	handle->wakeup = local_read(&rb->wakeup);
 43}
 44
 45static void perf_output_put_handle(struct perf_output_handle *handle)
 46{
 47	struct ring_buffer *rb = handle->rb;
 48	unsigned long head;
 49
 50again:
 51	head = local_read(&rb->head);
 52
 53	/*
 54	 * IRQ/NMI can happen here, which means we can miss a head update.
 55	 */
 56
 57	if (!local_dec_and_test(&rb->nest))
 58		goto out;
 59
 60	/*
 61	 * Since the mmap() consumer (userspace) can run on a different CPU:
 62	 *
 63	 *   kernel				user
 64	 *
 65	 *   if (LOAD ->data_tail) {		LOAD ->data_head
 66	 *			(A)		smp_rmb()	(C)
 67	 *	STORE $data			LOAD $data
 68	 *	smp_wmb()	(B)		smp_mb()	(D)
 69	 *	STORE ->data_head		STORE ->data_tail
 70	 *   }
 71	 *
 72	 * Where A pairs with D, and B pairs with C.
 73	 *
 74	 * In our case (A) is a control dependency that separates the load of
 75	 * the ->data_tail and the stores of $data. In case ->data_tail
 76	 * indicates there is no room in the buffer to store $data we do not.
 77	 *
 78	 * D needs to be a full barrier since it separates the data READ
 79	 * from the tail WRITE.
 80	 *
 81	 * For B a WMB is sufficient since it separates two WRITEs, and for C
 82	 * an RMB is sufficient since it separates two READs.
 83	 *
 84	 * See perf_output_begin().
 85	 */
 86	smp_wmb(); /* B, matches C */
 87	rb->user_page->data_head = head;
 88
 89	/*
 90	 * Now check if we missed an update -- rely on previous implied
 91	 * compiler barriers to force a re-read.
 92	 */
 93	if (unlikely(head != local_read(&rb->head))) {
 94		local_inc(&rb->nest);
 95		goto again;
 96	}
 97
 98	if (handle->wakeup != local_read(&rb->wakeup))
 99		perf_output_wakeup(handle);
100
101out:
102	preempt_enable();
103}
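The A/B/C/D diagram above is the contract with the mmap() consumer. A hedged userspace sketch of the matching read loop (C is the acquire load of data_head, D the full barrier before the data_tail store; record parsing and wrap handling are omitted, and the data_offset/data_size fields assume a 4.1+ kernel):

#include <linux/perf_event.h>
#include <stdint.h>

static void drain(void *base, void (*handle_record)(struct perf_event_header *))
{
	struct perf_event_mmap_page *up = base;
	uint8_t *data = (uint8_t *)base + up->data_offset;
	uint64_t size = up->data_size;			/* power of two */
	uint64_t tail = up->data_tail;
	uint64_t head = __atomic_load_n(&up->data_head, __ATOMIC_ACQUIRE); /* C */

	while (tail != head) {
		struct perf_event_header *hdr =
			(void *)(data + (tail & (size - 1)));

		handle_record(hdr);	/* NB: a record may wrap; not handled */
		tail += hdr->size;
	}

	__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* D, pairs with A */
	__atomic_store_n(&up->data_tail, tail, __ATOMIC_RELAXED);
}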
104
105static bool __always_inline
106ring_buffer_has_space(unsigned long head, unsigned long tail,
107		      unsigned long data_size, unsigned int size,
108		      bool backward)
109{
110	if (!backward)
111		return CIRC_SPACE(head, tail, data_size) >= size;
112	else
113		return CIRC_SPACE(tail, head, data_size) >= size;
114}
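CIRC_SPACE() comes from <linux/circ_buf.h>; for a power-of-two data_size it is the number of bytes the producer may still write before catching up with the consumer. Forward writers measure the space in front of head, backward writers the space behind it, which is why the arguments are simply swapped above. A hedged restatement as plain arithmetic (not the kernel macro itself):

/* Free bytes for a producer at @head with a consumer at @tail, assuming
 * @size is a power of two -- mirrors what CIRC_SPACE() evaluates to. */
static unsigned long circ_space(unsigned long head, unsigned long tail,
				unsigned long size)
{
	return (tail - (head + 1)) & (size - 1);
}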
115
116static int __always_inline
117__perf_output_begin(struct perf_output_handle *handle,
118		    struct perf_event *event, unsigned int size,
119		    bool backward)
120{
121	struct ring_buffer *rb;
122	unsigned long tail, offset, head;
123	int have_lost, page_shift;
124	struct {
125		struct perf_event_header header;
126		u64			 id;
127		u64			 lost;
128	} lost_event;
129
130	rcu_read_lock();
131	/*
132	 * For inherited events we send all the output towards the parent.
133	 */
134	if (event->parent)
135		event = event->parent;
136
137	rb = rcu_dereference(event->rb);
138	if (unlikely(!rb))
139		goto out;
140
141	if (unlikely(rb->paused)) {
142		if (rb->nr_pages)
143			local_inc(&rb->lost);
144		goto out;
145	}
146
147	handle->rb    = rb;
148	handle->event = event;
149
150	have_lost = local_read(&rb->lost);
151	if (unlikely(have_lost)) {
152		size += sizeof(lost_event);
153		if (event->attr.sample_id_all)
154			size += event->id_header_size;
155	}
156
157	perf_output_get_handle(handle);
158
159	do {
160		tail = READ_ONCE(rb->user_page->data_tail);
161		offset = head = local_read(&rb->head);
162		if (!rb->overwrite) {
163			if (unlikely(!ring_buffer_has_space(head, tail,
164							    perf_data_size(rb),
165							    size, backward)))
166				goto fail;
167		}
168
169		/*
170		 * The above forms a control dependency barrier separating the
171		 * @tail load above from the data stores below. Since the @tail
172		 * load is required to compute the branch to fail below.
173		 *
174		 * A, matches D; the full memory barrier userspace SHOULD issue
175		 * after reading the data and before storing the new tail
176		 * position.
177		 *
178		 * See perf_output_put_handle().
179		 */
180
181		if (!backward)
182			head += size;
183		else
184			head -= size;
185	} while (local_cmpxchg(&rb->head, offset, head) != offset);
186
187	if (backward) {
188		offset = head;
189		head = (u64)(-head);
190	}
191
192	/*
193	 * We rely on the implied barrier() by local_cmpxchg() to ensure
194	 * none of the data stores below can be lifted up by the compiler.
195	 */
196
197	if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
198		local_add(rb->watermark, &rb->wakeup);
199
200	page_shift = PAGE_SHIFT + page_order(rb);
201
202	handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
203	offset &= (1UL << page_shift) - 1;
204	handle->addr = rb->data_pages[handle->page] + offset;
205	handle->size = (1UL << page_shift) - offset;
206
207	if (unlikely(have_lost)) {
208		struct perf_sample_data sample_data;
209
210		lost_event.header.size = sizeof(lost_event);
211		lost_event.header.type = PERF_RECORD_LOST;
212		lost_event.header.misc = 0;
213		lost_event.id          = event->id;
214		lost_event.lost        = local_xchg(&rb->lost, 0);
215
216		perf_event_header__init_id(&lost_event.header,
217					   &sample_data, event);
218		perf_output_put(handle, lost_event);
219		perf_event__output_id_sample(event, handle, &sample_data);
220	}
221
222	return 0;
223
224fail:
225	local_inc(&rb->lost);
226	perf_output_put_handle(handle);
227out:
228	rcu_read_unlock();
229
230	return -ENOSPC;
231}
232
233int perf_output_begin_forward(struct perf_output_handle *handle,
234			     struct perf_event *event, unsigned int size)
235{
236	return __perf_output_begin(handle, event, size, false);
237}
238
239int perf_output_begin_backward(struct perf_output_handle *handle,
240			       struct perf_event *event, unsigned int size)
241{
242	return __perf_output_begin(handle, event, size, true);
243}
244
245int perf_output_begin(struct perf_output_handle *handle,
246		      struct perf_event *event, unsigned int size)
247{
248
249	return __perf_output_begin(handle, event, size,
250				   unlikely(is_write_backward(event)));
251}
252
253unsigned int perf_output_copy(struct perf_output_handle *handle,
254		      const void *buf, unsigned int len)
255{
256	return __output_copy(handle, buf, len);
257}
258
259unsigned int perf_output_skip(struct perf_output_handle *handle,
260			      unsigned int len)
261{
262	return __output_skip(handle, NULL, len);
263}
264
265void perf_output_end(struct perf_output_handle *handle)
266{
267	perf_output_put_handle(handle);
268	rcu_read_unlock();
269}
270
271static void
272ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
273{
274	long max_size = perf_data_size(rb);
275
276	if (watermark)
277		rb->watermark = min(max_size, watermark);
278
279	if (!rb->watermark)
280		rb->watermark = max_size / 2;
281
282	if (flags & RING_BUFFER_WRITABLE)
283		rb->overwrite = 0;
284	else
285		rb->overwrite = 1;
286
287	atomic_set(&rb->refcount, 1);
288
289	INIT_LIST_HEAD(&rb->event_list);
290	spin_lock_init(&rb->event_lock);
291
292	/*
293	 * perf_output_begin() only checks rb->paused, therefore
294	 * rb->paused must be true if we have no pages for output.
295	 */
296	if (!rb->nr_pages)
297		rb->paused = 1;
298}
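The RING_BUFFER_WRITABLE flag is passed in by perf_mmap() in kernel/events/core.c based on whether userspace mapped the buffer with PROT_WRITE, so the consumer effectively selects the mode: a writable mapping means it will advance data_tail and writers must stop with -ENOSPC, while a read-only mapping yields overwrite ("flight recorder") mode. A hedged userspace sketch of the two choices (fd is an already-configured perf event descriptor; error handling omitted):

#include <sys/mman.h>
#include <unistd.h>

static void *map_ring(int fd, int writable)
{
	size_t len = (1 + 8) * sysconf(_SC_PAGESIZE);	/* user page + 8 data pages */
	int prot = writable ? PROT_READ | PROT_WRITE	/* rb->overwrite == 0 */
			    : PROT_READ;		/* rb->overwrite == 1 */

	return mmap(NULL, len, prot, MAP_SHARED, fd, 0);
}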
299
300/*
301 * This is called before hardware starts writing to the AUX area to
302 * obtain an output handle and make sure there's room in the buffer.
303 * When the capture completes, call perf_aux_output_end() to commit
304 * the recorded data to the buffer.
305 *
306 * The ordering is similar to that of perf_output_{begin,end}, with
307 * the exception of (B), which should be taken care of by the pmu
308 * driver, since ordering rules will differ depending on hardware.
309 *
310 * Call this from pmu::start(); see the comment in perf_aux_output_end()
311 * about its use in pmu callbacks. Both can also be called from the PMI
312 * handler if needed.
313 */
314void *perf_aux_output_begin(struct perf_output_handle *handle,
315			    struct perf_event *event)
316{
317	struct perf_event *output_event = event;
318	unsigned long aux_head, aux_tail;
319	struct ring_buffer *rb;
320
321	if (output_event->parent)
322		output_event = output_event->parent;
323
324	/*
325	 * Since this will typically be open across pmu::add/pmu::del, we
326	 * grab ring_buffer's refcount instead of holding rcu read lock
327	 * to make sure it doesn't disappear under us.
328	 */
329	rb = ring_buffer_get(output_event);
330	if (!rb)
331		return NULL;
332
333	if (!rb_has_aux(rb))
334		goto err;
335
336	/*
337	 * If aux_mmap_count is zero, the aux buffer is in perf_mmap_close(),
338	 * about to get freed, so we leave immediately.
339	 *
340	 * Checking rb::aux_mmap_count and rb::refcount has to be done in
341	 * the same order, see perf_mmap_close. Otherwise we end up freeing
342	 * aux pages in this path, which is a bug, because in_atomic().
343	 */
344	if (!atomic_read(&rb->aux_mmap_count))
345		goto err;
346
347	if (!atomic_inc_not_zero(&rb->aux_refcount))
348		goto err;
349
350	/*
351	 * Nesting is not supported for AUX area, make sure nested
352	 * writers are caught early
353	 */
354	if (WARN_ON_ONCE(local_xchg(&rb->aux_nest, 1)))
355		goto err_put;
356
357	aux_head = local_read(&rb->aux_head);
358
359	handle->rb = rb;
360	handle->event = event;
361	handle->head = aux_head;
362	handle->size = 0;
363
364	/*
365	 * In overwrite mode, AUX data stores do not depend on aux_tail,
366	 * therefore (A) control dependency barrier does not exist. The
367	 * (B) <-> (C) ordering is still observed by the pmu driver.
368	 */
369	if (!rb->aux_overwrite) {
370		aux_tail = ACCESS_ONCE(rb->user_page->aux_tail);
371		handle->wakeup = local_read(&rb->aux_wakeup) + rb->aux_watermark;
372		if (aux_head - aux_tail < perf_aux_size(rb))
373			handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));
374
375		/*
376		 * handle->size computation depends on aux_tail load; this forms a
377		 * control dependency barrier separating aux_tail load from aux data
378		 * store that will be enabled on successful return
379		 */
380		if (!handle->size) { /* A, matches D */
381			event->pending_disable = 1;
382			perf_output_wakeup(handle);
383			local_set(&rb->aux_nest, 0);
384			goto err_put;
385		}
386	}
387
388	return handle->rb->aux_priv;
389
390err_put:
391	/* can't be last */
392	rb_free_aux(rb);
393
394err:
395	ring_buffer_put(rb);
396	handle->event = NULL;
397
398	return NULL;
399}
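A hedged sketch of how a PMU driver is expected to drive this from its pmu::start()/pmu::stop() callbacks; the my_pmu_*() helpers and the static handle are hypothetical (real drivers keep the handle per CPU):

static struct perf_output_handle my_handle;	/* illustration: should be per-CPU */

static void my_pmu_start(struct perf_event *event, int flags)
{
	void *buf = perf_aux_output_begin(&my_handle, event);

	if (!buf)
		return;		/* no AUX buffer, no space, or nested writer */

	/* Point the hardware at buf + my_handle.head, my_handle.size bytes,
	 * providing the (B) ordering mentioned in the comment above. */
	my_pmu_enable_trace(buf, &my_handle);
}

static void my_pmu_stop(struct perf_event *event, int flags)
{
	unsigned long written = my_pmu_disable_trace();

	/* Commits the data, emits PERF_RECORD_AUX, drops the AUX reference. */
	perf_aux_output_end(&my_handle, written, false);
}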
400
401/*
402 * Commit the data written by hardware into the ring buffer by adjusting
403 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
404 * pmu driver's responsibility to observe ordering rules of the hardware,
405 * so that all the data is externally visible before this is called.
406 *
407 * Note: this has to be called from pmu::stop() callback, as the assumption
408 * of the AUX buffer management code is that after pmu::stop(), the AUX
409 * transaction must be stopped and therefore drop the AUX reference count.
410 */
411void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
412			 bool truncated)
413{
414	struct ring_buffer *rb = handle->rb;
415	bool wakeup = truncated;
416	unsigned long aux_head;
417	u64 flags = 0;
418
419	if (truncated)
420		flags |= PERF_AUX_FLAG_TRUNCATED;
421
422	/* in overwrite mode, driver provides aux_head via handle */
423	if (rb->aux_overwrite) {
424		flags |= PERF_AUX_FLAG_OVERWRITE;
425
426		aux_head = handle->head;
427		local_set(&rb->aux_head, aux_head);
428	} else {
429		aux_head = local_read(&rb->aux_head);
430		local_add(size, &rb->aux_head);
431	}
432
433	if (size || flags) {
434		/*
435		 * Only send RECORD_AUX if we have something useful to communicate
436		 */
437
438		perf_event_aux_event(handle->event, aux_head, size, flags);
439	}
440
441	aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
442
443	if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
444		wakeup = true;
445		local_add(rb->aux_watermark, &rb->aux_wakeup);
446	}
447
448	if (wakeup) {
449		if (truncated)
450			handle->event->pending_disable = 1;
451		perf_output_wakeup(handle);
452	}
453
454	handle->event = NULL;
455
456	local_set(&rb->aux_nest, 0);
457	/* can't be last */
458	rb_free_aux(rb);
459	ring_buffer_put(rb);
460}
461
462/*
463 * Skip over a given number of bytes in the AUX buffer, due to, for example,
464 * hardware's alignment constraints.
465 */
466int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
467{
468	struct ring_buffer *rb = handle->rb;
469	unsigned long aux_head;
470
471	if (size > handle->size)
472		return -ENOSPC;
473
474	local_add(size, &rb->aux_head);
475
476	aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
477	if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
478		perf_output_wakeup(handle);
479		local_add(rb->aux_watermark, &rb->aux_wakeup);
480		handle->wakeup = local_read(&rb->aux_wakeup) +
481				 rb->aux_watermark;
482	}
483
484	handle->head = aux_head;
485	handle->size -= size;
486
487	return 0;
488}
489
490void *perf_get_aux(struct perf_output_handle *handle)
491{
492	/* this is only valid between perf_aux_output_begin and *_end */
493	if (!handle->event)
494		return NULL;
495
496	return handle->rb->aux_priv;
497}
498
499#define PERF_AUX_GFP	(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)
500
501static struct page *rb_alloc_aux_page(int node, int order)
502{
503	struct page *page;
504
505	if (order > MAX_ORDER)
506		order = MAX_ORDER;
507
508	do {
509		page = alloc_pages_node(node, PERF_AUX_GFP, order);
510	} while (!page && order--);
511
512	if (page && order) {
513		/*
514		 * Communicate the allocation size to the driver:
515		 * if we managed to secure a high-order allocation,
516		 * set its first page's private to this order;
517		 * !PagePrivate(page) means it's just a normal page.
518		 */
519		split_page(page, order);
520		SetPagePrivate(page);
521		set_page_private(page, order);
522	}
523
524	return page;
525}
526
527static void rb_free_aux_page(struct ring_buffer *rb, int idx)
528{
529	struct page *page = virt_to_page(rb->aux_pages[idx]);
530
531	ClearPagePrivate(page);
532	page->mapping = NULL;
533	__free_page(page);
534}
535
536static void __rb_free_aux(struct ring_buffer *rb)
537{
538	int pg;
539
540	/*
541	 * Should never happen, the last reference should be dropped from
542	 * perf_mmap_close() path, which first stops aux transactions (which
543	 * in turn are the atomic holders of aux_refcount) and then does the
544	 * last rb_free_aux().
545	 */
546	WARN_ON_ONCE(in_atomic());
547
548	if (rb->aux_priv) {
549		rb->free_aux(rb->aux_priv);
550		rb->free_aux = NULL;
551		rb->aux_priv = NULL;
552	}
553
554	if (rb->aux_nr_pages) {
555		for (pg = 0; pg < rb->aux_nr_pages; pg++)
556			rb_free_aux_page(rb, pg);
557
558		kfree(rb->aux_pages);
559		rb->aux_nr_pages = 0;
560	}
561}
562
563int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
564		 pgoff_t pgoff, int nr_pages, long watermark, int flags)
565{
566	bool overwrite = !(flags & RING_BUFFER_WRITABLE);
567	int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
568	int ret = -ENOMEM, max_order = 0;
569
570	if (!has_aux(event))
571		return -ENOTSUPP;
572
573	if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) {
574		/*
575		 * We need to start with the max_order that fits in nr_pages,
576		 * not the other way around, hence ilog2() and not get_order.
577		 */
578		max_order = ilog2(nr_pages);
579
580		/*
581		 * PMU requests more than one contiguous chunks of memory
582		 * for SW double buffering
583		 */
584		if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_SW_DOUBLEBUF) &&
585		    !overwrite) {
586			if (!max_order)
587				return -EINVAL;
588
589			max_order--;
590		}
591	}
592
593	rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node);
594	if (!rb->aux_pages)
595		return -ENOMEM;
596
597	rb->free_aux = event->pmu->free_aux;
598	for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
599		struct page *page;
600		int last, order;
601
602		order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
603		page = rb_alloc_aux_page(node, order);
604		if (!page)
605			goto out;
606
607		for (last = rb->aux_nr_pages + (1 << page_private(page));
608		     last > rb->aux_nr_pages; rb->aux_nr_pages++)
609			rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
610	}
611
612	/*
613	 * In overwrite mode, PMUs that don't support SG may not handle more
614	 * than one contiguous allocation, since they rely on PMI to do double
615	 * buffering. In this case, the entire buffer has to be one contiguous
616	 * chunk.
617	 */
618	if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
619	    overwrite) {
620		struct page *page = virt_to_page(rb->aux_pages[0]);
621
622		if (page_private(page) != max_order)
623			goto out;
624	}
625
626	rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
627					     overwrite);
628	if (!rb->aux_priv)
629		goto out;
630
631	ret = 0;
632
633	/*
634	 * aux_pages (and pmu driver's private data, aux_priv) will be
635	 * referenced in both producer's and consumer's contexts, thus
636	 * we keep a refcount here to make sure either of the two can
637	 * reference them safely.
638	 */
639	atomic_set(&rb->aux_refcount, 1);
640
641	rb->aux_overwrite = overwrite;
642	rb->aux_watermark = watermark;
643
644	if (!rb->aux_watermark && !rb->aux_overwrite)
645		rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1);
646
647out:
648	if (!ret)
649		rb->aux_pgoff = pgoff;
650	else
651		__rb_free_aux(rb);
652
653	return ret;
654}
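A hedged walk-through of the loop above for a hypothetical PMU that sets both PERF_PMU_CAP_AUX_NO_SG and PERF_PMU_CAP_AUX_SW_DOUBLEBUF and maps nr_pages == 16 in non-overwrite mode (assuming every high-order allocation succeeds):

	/*
	 *   max_order = ilog2(16) = 4, then max_order-- for double
	 *   buffering => 3
	 *
	 *   iteration 1: order = min(3, ilog2(16 - 0)) = 3 -> 8 pages,
	 *                aux_nr_pages = 8
	 *   iteration 2: order = min(3, ilog2(16 - 8)) = 3 -> 8 pages,
	 *                aux_nr_pages = 16
	 *
	 * Result: two physically contiguous 8-page chunks, one per software
	 * buffer half; page_private() of each chunk's first page records
	 * the order (3) for the driver's benefit.
	 */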
655
656void rb_free_aux(struct ring_buffer *rb)
657{
658	if (atomic_dec_and_test(&rb->aux_refcount))
659		__rb_free_aux(rb);
660}
661
662#ifndef CONFIG_PERF_USE_VMALLOC
663
664/*
665 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
666 */
667
668static struct page *
669__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
670{
671	if (pgoff > rb->nr_pages)
672		return NULL;
673
674	if (pgoff == 0)
675		return virt_to_page(rb->user_page);
676
677	return virt_to_page(rb->data_pages[pgoff - 1]);
678}
679
680static void *perf_mmap_alloc_page(int cpu)
681{
682	struct page *page;
683	int node;
684
685	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
686	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
687	if (!page)
688		return NULL;
689
690	return page_address(page);
691}
692
693struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
694{
695	struct ring_buffer *rb;
696	unsigned long size;
697	int i;
698
699	size = sizeof(struct ring_buffer);
700	size += nr_pages * sizeof(void *);
701
702	rb = kzalloc(size, GFP_KERNEL);
703	if (!rb)
704		goto fail;
705
706	rb->user_page = perf_mmap_alloc_page(cpu);
707	if (!rb->user_page)
708		goto fail_user_page;
709
710	for (i = 0; i < nr_pages; i++) {
711		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
712		if (!rb->data_pages[i])
713			goto fail_data_pages;
714	}
715
716	rb->nr_pages = nr_pages;
717
718	ring_buffer_init(rb, watermark, flags);
719
720	return rb;
721
722fail_data_pages:
723	for (i--; i >= 0; i--)
724		free_page((unsigned long)rb->data_pages[i]);
725
726	free_page((unsigned long)rb->user_page);
727
728fail_user_page:
729	kfree(rb);
730
731fail:
732	return NULL;
733}
734
735static void perf_mmap_free_page(unsigned long addr)
736{
737	struct page *page = virt_to_page((void *)addr);
738
739	page->mapping = NULL;
740	__free_page(page);
741}
742
743void rb_free(struct ring_buffer *rb)
744{
745	int i;
746
747	perf_mmap_free_page((unsigned long)rb->user_page);
748	for (i = 0; i < rb->nr_pages; i++)
749		perf_mmap_free_page((unsigned long)rb->data_pages[i]);
750	kfree(rb);
751}
752
753#else
754static int data_page_nr(struct ring_buffer *rb)
755{
756	return rb->nr_pages << page_order(rb);
757}
758
759static struct page *
760__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
761{
762	/* The '>' counts in the user page. */
763	if (pgoff > data_page_nr(rb))
764		return NULL;
765
766	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
767}
768
769static void perf_mmap_unmark_page(void *addr)
770{
771	struct page *page = vmalloc_to_page(addr);
772
773	page->mapping = NULL;
774}
775
776static void rb_free_work(struct work_struct *work)
777{
778	struct ring_buffer *rb;
779	void *base;
780	int i, nr;
781
782	rb = container_of(work, struct ring_buffer, work);
783	nr = data_page_nr(rb);
784
785	base = rb->user_page;
786	/* The '<=' counts in the user page. */
787	for (i = 0; i <= nr; i++)
788		perf_mmap_unmark_page(base + (i * PAGE_SIZE));
789
790	vfree(base);
791	kfree(rb);
792}
793
794void rb_free(struct ring_buffer *rb)
795{
796	schedule_work(&rb->work);
797}
798
799struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
800{
801	struct ring_buffer *rb;
802	unsigned long size;
803	void *all_buf;
804
805	size = sizeof(struct ring_buffer);
806	size += sizeof(void *);
807
808	rb = kzalloc(size, GFP_KERNEL);
809	if (!rb)
810		goto fail;
811
812	INIT_WORK(&rb->work, rb_free_work);
813
814	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
815	if (!all_buf)
816		goto fail_all_buf;
817
818	rb->user_page = all_buf;
819	rb->data_pages[0] = all_buf + PAGE_SIZE;
820	if (nr_pages) {
821		rb->nr_pages = 1;
822		rb->page_order = ilog2(nr_pages);
823	}
824
825	ring_buffer_init(rb, watermark, flags);
826
827	return rb;
828
829fail_all_buf:
830	kfree(rb);
831
832fail:
833	return NULL;
834}
835
836#endif
837
838struct page *
839perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
840{
841	if (rb->aux_nr_pages) {
842		/* above AUX space */
843		if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
844			return NULL;
845
846		/* AUX space */
847		if (pgoff >= rb->aux_pgoff)
848			return virt_to_page(rb->aux_pages[pgoff - rb->aux_pgoff]);
849	}
850
851	return __perf_mmap_to_page(rb, pgoff);
852}
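Taken together, the single perf mmap is routed as: pgoff 0 is the perf_event_mmap_page, pgoff 1..nr_pages are the sample data pages, and the AUX area begins at aux_pgoff, the page offset userspace picked via the aux_offset field before mapping it. A hedged example of the routing for 8 data pages and 16 AUX pages mapped at aux_pgoff == 16:

/*
 * Hypothetical layout, nr_pages == 8, aux_nr_pages == 16, aux_pgoff == 16:
 *
 *   pgoff 0        -> rb->user_page         (struct perf_event_mmap_page)
 *   pgoff 1  .. 8  -> rb->data_pages[0..7]  (sample records)
 *   pgoff 16 .. 31 -> rb->aux_pages[0..15]  (AUX data, e.g. HW trace)
 *
 * __perf_mmap_to_page() resolves the first two ranges; the AUX branch in
 * perf_mmap_to_page() resolves the third.
 */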