kernel/events/ring_buffer.c (v3.5.6)
 
  1/*
  2 * Performance events ring-buffer code:
  3 *
  4 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
  5 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
  6 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  7 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  8 *
  9 * For licensing details see kernel-base/COPYING
 10 */
 11
 12#include <linux/perf_event.h>
 13#include <linux/vmalloc.h>
 14#include <linux/slab.h>
 15
 16#include "internal.h"
 17
 18static bool perf_output_space(struct ring_buffer *rb, unsigned long tail,
 19			      unsigned long offset, unsigned long head)
 20{
 21	unsigned long mask;
 22
 23	if (!rb->writable)
 24		return true;
 25
 26	mask = perf_data_size(rb) - 1;
 27
 28	offset = (offset - tail) & mask;
 29	head   = (head   - tail) & mask;
 30
 31	if ((int)(head - offset) < 0)
 32		return false;
 33
 34	return true;
 35}
 36
 37static void perf_output_wakeup(struct perf_output_handle *handle)
 38{
 39	atomic_set(&handle->rb->poll, POLL_IN);
 40
 41	handle->event->pending_wakeup = 1;
 42	irq_work_queue(&handle->event->pending);
 43}
 44
 45/*
 46 * We need to ensure a later event_id doesn't publish a head when a former
 47 * event isn't done writing. However since we need to deal with NMIs we
 48 * cannot fully serialize things.
 49 *
 50 * We only publish the head (and generate a wakeup) when the outer-most
 51 * event completes.
 52 */
 53static void perf_output_get_handle(struct perf_output_handle *handle)
 54{
 55	struct ring_buffer *rb = handle->rb;
 56
 57	preempt_disable();
 58	local_inc(&rb->nest);
 59	handle->wakeup = local_read(&rb->wakeup);
 60}
 61
 62static void perf_output_put_handle(struct perf_output_handle *handle)
 63{
 64	struct ring_buffer *rb = handle->rb;
 65	unsigned long head;
 66
 67again:
 68	head = local_read(&rb->head);
 69
 70	/*
 71	 * IRQ/NMI can happen here, which means we can miss a head update.
 72	 */
 73
 74	if (!local_dec_and_test(&rb->nest))
 75		goto out;
 76
 77	/*
 78	 * Publish the known good head. Rely on the full barrier implied
 79	 * by local_dec_and_test() to order the rb->head read and this
 80	 * write.
 81	 */
 82	rb->user_page->data_head = head;
 83
 84	/*
 85	 * Now check if we missed an update, rely on the (compiler)
 86	 * barrier in local_dec_and_test() to re-read rb->head.
 87	 */
 88	if (unlikely(head != local_read(&rb->head))) {
 89		local_inc(&rb->nest);
 90		goto again;
 91	}
 92
 93	if (handle->wakeup != local_read(&rb->wakeup))
 94		perf_output_wakeup(handle);
 95
 96out:
 97	preempt_enable();
 98}
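The nest counter and the re-check above guard against a nested writer racing with the publish. A worked interleaving, added here purely as an illustration (it is not part of the original file):

/*
 * Illustration: why the 'again' loop is needed.
 *
 *   task context                          NMI
 *   ------------------------------       ------------------------------
 *   perf_output_get_handle()
 *       rb->nest = 1
 *   ... write record ...
 *   perf_output_put_handle()
 *       head = rb->head            (about to go stale)
 *                                        perf_output_get_handle()
 *                                            rb->nest = 2
 *                                        ... write record, rb->head advances ...
 *                                        perf_output_put_handle()
 *                                            nest drops to 1, not 0: no publish
 *       local_dec_and_test() -> true
 *       data_head = head           (publishes the stale value)
 *       head != rb->head           -> nest = 1, goto again, publish the new head
 */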
 99
100int perf_output_begin(struct perf_output_handle *handle,
101		      struct perf_event *event, unsigned int size)
102{
103	struct ring_buffer *rb;
104	unsigned long tail, offset, head;
105	int have_lost;
106	struct perf_sample_data sample_data;
107	struct {
108		struct perf_event_header header;
109		u64			 id;
110		u64			 lost;
111	} lost_event;
112
113	rcu_read_lock();
114	/*
115	 * For inherited events we send all the output towards the parent.
116	 */
117	if (event->parent)
118		event = event->parent;
119
120	rb = rcu_dereference(event->rb);
121	if (!rb)
122		goto out;
123
124	handle->rb	= rb;
125	handle->event	= event;
126
127	if (!rb->nr_pages)
128		goto out;
129
130	have_lost = local_read(&rb->lost);
131	if (have_lost) {
132		lost_event.header.size = sizeof(lost_event);
133		perf_event_header__init_id(&lost_event.header, &sample_data,
134					   event);
135		size += lost_event.header.size;
136	}
137
138	perf_output_get_handle(handle);
139
140	do {
141		/*
142		 * Userspace could choose to issue a mb() before updating the
143		 * tail pointer, so that all reads will be completed before the
144		 * write is issued.
145		 */
146		tail = ACCESS_ONCE(rb->user_page->data_tail);
147		smp_rmb();
148		offset = head = local_read(&rb->head);
149		head += size;
150		if (unlikely(!perf_output_space(rb, tail, offset, head)))
151			goto fail;
152	} while (local_cmpxchg(&rb->head, offset, head) != offset);
153
154	if (head - local_read(&rb->wakeup) > rb->watermark)
155		local_add(rb->watermark, &rb->wakeup);
156
157	handle->page = offset >> (PAGE_SHIFT + page_order(rb));
158	handle->page &= rb->nr_pages - 1;
159	handle->size = offset & ((PAGE_SIZE << page_order(rb)) - 1);
160	handle->addr = rb->data_pages[handle->page];
161	handle->addr += handle->size;
162	handle->size = (PAGE_SIZE << page_order(rb)) - handle->size;
163
164	if (have_lost) {
165		lost_event.header.type = PERF_RECORD_LOST;
166		lost_event.header.misc = 0;
167		lost_event.id          = event->id;
168		lost_event.lost        = local_xchg(&rb->lost, 0);
169
170		perf_output_put(handle, lost_event);
171		perf_event__output_id_sample(event, handle, &sample_data);
172	}
173
174	return 0;
175
176fail:
177	local_inc(&rb->lost);
178	perf_output_put_handle(handle);
179out:
180	rcu_read_unlock();
181
182	return -ENOSPC;
183}
184
185void perf_output_copy(struct perf_output_handle *handle,
186		      const void *buf, unsigned int len)
187{
188	__output_copy(handle, buf, len);
189}
190
191void perf_output_end(struct perf_output_handle *handle)
192{
193	perf_output_put_handle(handle);
194	rcu_read_unlock();
195}
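Taken together, perf_output_begin(), perf_output_copy()/perf_output_put() and perf_output_end() form the record-emission API of this kernel version. The sketch below shows the usual calling pattern, modelled on perf_event_output() in kernel/events/core.c; it is illustrative only, not part of this file, and example_emit_u64() is a made-up caller.

/*
 * Illustrative caller sketch (not part of this file).  Emits a minimal
 * record consisting of a header plus one u64 payload.
 */
static void example_emit_u64(struct perf_event *event, u64 value)
{
	struct perf_output_handle handle;
	struct perf_event_header header = {
		.type = PERF_RECORD_SAMPLE,	/* type chosen for illustration only */
		.misc = 0,
		.size = sizeof(header) + sizeof(value),
	};

	/* Reserves header.size bytes; takes the RCU read lock on success. */
	if (perf_output_begin(&handle, event, header.size))
		return;		/* -ENOSPC: the record is accounted in rb->lost */

	perf_output_put(&handle, header);	/* copy the header */
	perf_output_put(&handle, value);	/* copy the payload */

	/* Publishes data_head (outermost nesting) and drops the RCU lock. */
	perf_output_end(&handle);
}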
196
197static void
198ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
199{
200	long max_size = perf_data_size(rb);
201
202	if (watermark)
203		rb->watermark = min(max_size, watermark);
204
205	if (!rb->watermark)
206		rb->watermark = max_size / 2;
207
208	if (flags & RING_BUFFER_WRITABLE)
209		rb->writable = 1;
210
211	atomic_set(&rb->refcount, 1);
212
213	INIT_LIST_HEAD(&rb->event_list);
214	spin_lock_init(&rb->event_lock);
215}
216
217#ifndef CONFIG_PERF_USE_VMALLOC
218
219/*
220 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
221 */
222
223struct page *
224perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
225{
226	if (pgoff > rb->nr_pages)
227		return NULL;
228
229	if (pgoff == 0)
230		return virt_to_page(rb->user_page);
231
232	return virt_to_page(rb->data_pages[pgoff - 1]);
233}
234
235static void *perf_mmap_alloc_page(int cpu)
236{
237	struct page *page;
238	int node;
239
240	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
241	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
242	if (!page)
243		return NULL;
244
245	return page_address(page);
246}
247
248struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
249{
250	struct ring_buffer *rb;
251	unsigned long size;
252	int i;
253
254	size = sizeof(struct ring_buffer);
255	size += nr_pages * sizeof(void *);
256
257	rb = kzalloc(size, GFP_KERNEL);
258	if (!rb)
259		goto fail;
260
261	rb->user_page = perf_mmap_alloc_page(cpu);
262	if (!rb->user_page)
263		goto fail_user_page;
264
265	for (i = 0; i < nr_pages; i++) {
266		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
267		if (!rb->data_pages[i])
268			goto fail_data_pages;
269	}
270
271	rb->nr_pages = nr_pages;
272
273	ring_buffer_init(rb, watermark, flags);
274
275	return rb;
276
277fail_data_pages:
278	for (i--; i >= 0; i--)
279		free_page((unsigned long)rb->data_pages[i]);
280
281	free_page((unsigned long)rb->user_page);
282
283fail_user_page:
284	kfree(rb);
285
286fail:
287	return NULL;
288}
289
290static void perf_mmap_free_page(unsigned long addr)
291{
292	struct page *page = virt_to_page((void *)addr);
293
294	page->mapping = NULL;
295	__free_page(page);
296}
297
298void rb_free(struct ring_buffer *rb)
299{
300	int i;
301
302	perf_mmap_free_page((unsigned long)rb->user_page);
303	for (i = 0; i < rb->nr_pages; i++)
304		perf_mmap_free_page((unsigned long)rb->data_pages[i]);
305	kfree(rb);
306}
307
308#else
309
310struct page *
311perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
312{
313	if (pgoff > (1UL << page_order(rb)))
314		return NULL;
315
316	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
317}
318
319static void perf_mmap_unmark_page(void *addr)
320{
321	struct page *page = vmalloc_to_page(addr);
322
323	page->mapping = NULL;
324}
325
326static void rb_free_work(struct work_struct *work)
327{
328	struct ring_buffer *rb;
329	void *base;
330	int i, nr;
331
332	rb = container_of(work, struct ring_buffer, work);
333	nr = 1 << page_order(rb);
334
335	base = rb->user_page;
336	for (i = 0; i < nr + 1; i++)
337		perf_mmap_unmark_page(base + (i * PAGE_SIZE));
338
339	vfree(base);
340	kfree(rb);
341}
342
343void rb_free(struct ring_buffer *rb)
344{
345	schedule_work(&rb->work);
346}
347
348struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
349{
350	struct ring_buffer *rb;
351	unsigned long size;
352	void *all_buf;
353
354	size = sizeof(struct ring_buffer);
355	size += sizeof(void *);
356
357	rb = kzalloc(size, GFP_KERNEL);
358	if (!rb)
359		goto fail;
360
361	INIT_WORK(&rb->work, rb_free_work);
362
363	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
364	if (!all_buf)
365		goto fail_all_buf;
366
367	rb->user_page = all_buf;
368	rb->data_pages[0] = all_buf + PAGE_SIZE;
369	rb->page_order = ilog2(nr_pages);
370	rb->nr_pages = 1;
371
372	ring_buffer_init(rb, watermark, flags);
373
374	return rb;
375
376fail_all_buf:
377	kfree(rb);
378
379fail:
380	return NULL;
381}
382
383#endif
kernel/events/ring_buffer.c (v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Performance events ring-buffer code:
  4 *
  5 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
  6 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
  7 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
  8 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  9 */
 10
 11#include <linux/perf_event.h>
 12#include <linux/vmalloc.h>
 13#include <linux/slab.h>
 14#include <linux/circ_buf.h>
 15#include <linux/poll.h>
 16#include <linux/nospec.h>
 17
 18#include "internal.h"
 19
 20static void perf_output_wakeup(struct perf_output_handle *handle)
 21{
 22	atomic_set(&handle->rb->poll, EPOLLIN);
 23
 24	handle->event->pending_wakeup = 1;
 25
 26	if (*perf_event_fasync(handle->event) && !handle->event->pending_kill)
 27		handle->event->pending_kill = POLL_IN;
 28
 29	irq_work_queue(&handle->event->pending_irq);
 30}
 31
 32/*
 33 * We need to ensure a later event_id doesn't publish a head when a former
 34 * event isn't done writing. However since we need to deal with NMIs we
 35 * cannot fully serialize things.
 36 *
 37 * We only publish the head (and generate a wakeup) when the outer-most
 38 * event completes.
 39 */
 40static void perf_output_get_handle(struct perf_output_handle *handle)
 41{
 42	struct perf_buffer *rb = handle->rb;
 43
 44	preempt_disable();
 45
 46	/*
 47	 * Avoid an explicit LOAD/STORE such that architectures with memops
 48	 * can use them.
 49	 */
 50	(*(volatile unsigned int *)&rb->nest)++;
 51	handle->wakeup = local_read(&rb->wakeup);
 52}
 53
 54static void perf_output_put_handle(struct perf_output_handle *handle)
 55{
 56	struct perf_buffer *rb = handle->rb;
 57	unsigned long head;
 58	unsigned int nest;
 59
 60	/*
 61	 * If this isn't the outermost nesting, we don't have to update
 62	 * @rb->user_page->data_head.
 63	 */
 64	nest = READ_ONCE(rb->nest);
 65	if (nest > 1) {
 66		WRITE_ONCE(rb->nest, nest - 1);
 67		goto out;
 68	}
 69
 70again:
 71	/*
 72	 * In order to avoid publishing a head value that goes backwards,
 73	 * we must ensure the load of @rb->head happens after we've
 74	 * incremented @rb->nest.
 75	 *
 76	 * Otherwise we can observe a @rb->head value before one published
 77	 * by an IRQ/NMI happening between the load and the increment.
 78	 */
 79	barrier();
 80	head = local_read(&rb->head);
 81
 82	/*
 83	 * IRQ/NMI can happen here and advance @rb->head, causing our
 84	 * load above to be stale.
 85	 */
 86
 87	/*
 88	 * Since the mmap() consumer (userspace) can run on a different CPU:
 89	 *
 90	 *   kernel				user
 91	 *
 92	 *   if (LOAD ->data_tail) {		LOAD ->data_head
 93	 *			(A)		smp_rmb()	(C)
 94	 *	STORE $data			LOAD $data
 95	 *	smp_wmb()	(B)		smp_mb()	(D)
 96	 *	STORE ->data_head		STORE ->data_tail
 97	 *   }
 98	 *
 99	 * Where A pairs with D, and B pairs with C.
100	 *
101	 * In our case (A) is a control dependency that separates the load of
102	 * the ->data_tail and the stores of $data. In case ->data_tail
103	 * indicates there is no room in the buffer to store $data we do not.
104	 *
105	 * D needs to be a full barrier since it separates the data READ
106	 * from the tail WRITE.
107	 *
108	 * For B a WMB is sufficient since it separates two WRITEs, and for C
109	 * an RMB is sufficient since it separates two READs.
110	 *
111	 * See perf_output_begin().
112	 */
113	smp_wmb(); /* B, matches C */
114	WRITE_ONCE(rb->user_page->data_head, head);
115
116	/*
117	 * We must publish the head before decrementing the nest count,
118	 * otherwise an IRQ/NMI can publish a more recent head value and our
119	 * write will (temporarily) publish a stale value.
120	 */
121	barrier();
122	WRITE_ONCE(rb->nest, 0);
123
124	/*
125	 * Ensure we decrement @rb->nest before we validate the @rb->head.
126	 * Otherwise we cannot be sure we caught the 'last' nested update.
127	 */
128	barrier();
129	if (unlikely(head != local_read(&rb->head))) {
130		WRITE_ONCE(rb->nest, 1);
131		goto again;
132	}
133
134	if (handle->wakeup != local_read(&rb->wakeup))
135		perf_output_wakeup(handle);
136
137out:
138	preempt_enable();
139}
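The A/B/C/D pairing in the comment above also constrains the mmap() consumer. The user-space sketch below follows that discipline with an acquire load of data_head (C) and a release store of data_tail (D); it is illustrative only, not part of this file, and omits handling of records that wrap past the end of the data area.

/*
 * Illustrative user-space consumer (not part of this file).
 * 'up' is the mapped struct perf_event_mmap_page, 'data' the start of the
 * data area and 'data_size' its size in bytes (a power of two).
 */
#include <linux/perf_event.h>
#include <stddef.h>

static void drain_ring(struct perf_event_mmap_page *up, char *data,
		       __u64 data_size,
		       void (*consume)(const void *rec, size_t len))
{
	/* C, matches B: acquire-load orders the data reads after this. */
	__u64 head = __atomic_load_n(&up->data_head, __ATOMIC_ACQUIRE);
	__u64 tail = up->data_tail;

	while (tail != head) {
		struct perf_event_header *hdr =
			(void *)(data + (tail & (data_size - 1)));

		/*
		 * A record wrapping the end of the buffer would need to be
		 * copied out piecewise first; omitted for brevity.
		 */
		consume(hdr, hdr->size);
		tail += hdr->size;
	}

	/* D, matches A: release-store; all data reads complete before it. */
	__atomic_store_n(&up->data_tail, tail, __ATOMIC_RELEASE);
}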
140
141static __always_inline bool
142ring_buffer_has_space(unsigned long head, unsigned long tail,
143		      unsigned long data_size, unsigned int size,
144		      bool backward)
145{
146	if (!backward)
147		return CIRC_SPACE(head, tail, data_size) >= size;
148	else
149		return CIRC_SPACE(tail, head, data_size) >= size;
150}
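CIRC_SPACE() comes from <linux/circ_buf.h> and always leaves one byte unused so that head == tail unambiguously means "empty". A small worked example of the arithmetic, added here for illustration only:

/*
 * Worked example (not part of this file), using the definitions from
 * <linux/circ_buf.h>:
 *
 *   CIRC_CNT(head, tail, size)   = ((head) - (tail)) & ((size) - 1)
 *   CIRC_SPACE(head, tail, size) = CIRC_CNT((tail), (head) + 1, (size))
 *
 * With data_size = 8, head = 6, tail = 2 (4 bytes in use):
 *
 *   CIRC_SPACE(6, 2, 8) = ((2 - 7) & 7) = 3
 *
 * so a 4-byte record is rejected even though 4 bytes are nominally free;
 * the one reserved byte keeps 'full' distinguishable from 'empty'.
 */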
151
152static __always_inline int
153__perf_output_begin(struct perf_output_handle *handle,
154		    struct perf_sample_data *data,
155		    struct perf_event *event, unsigned int size,
156		    bool backward)
157{
158	struct perf_buffer *rb;
159	unsigned long tail, offset, head;
160	int have_lost, page_shift;
161	struct {
162		struct perf_event_header header;
163		u64			 id;
164		u64			 lost;
165	} lost_event;
166
167	rcu_read_lock();
168	/*
169	 * For inherited events we send all the output towards the parent.
170	 */
171	if (event->parent)
172		event = event->parent;
173
174	rb = rcu_dereference(event->rb);
175	if (unlikely(!rb))
176		goto out;
177
178	if (unlikely(rb->paused)) {
179		if (rb->nr_pages) {
180			local_inc(&rb->lost);
181			atomic64_inc(&event->lost_samples);
182		}
183		goto out;
184	}
185
186	handle->rb    = rb;
187	handle->event = event;
188
189	have_lost = local_read(&rb->lost);
190	if (unlikely(have_lost)) {
191		size += sizeof(lost_event);
192		if (event->attr.sample_id_all)
193			size += event->id_header_size;
194	}
195
196	perf_output_get_handle(handle);
197
198	offset = local_read(&rb->head);
199	do {
200		head = offset;
201		tail = READ_ONCE(rb->user_page->data_tail);
202		if (!rb->overwrite) {
203			if (unlikely(!ring_buffer_has_space(head, tail,
204							    perf_data_size(rb),
205							    size, backward)))
206				goto fail;
207		}
208
209		/*
210		 * The above forms a control dependency barrier separating the
211		 * @tail load above from the data stores below, since the @tail
212		 * load is required to compute the branch to fail below.
213		 *
214		 * A, matches D; the full memory barrier userspace SHOULD issue
215		 * after reading the data and before storing the new tail
216		 * position.
217		 *
218		 * See perf_output_put_handle().
219		 */
220
221		if (!backward)
222			head += size;
223		else
224			head -= size;
225	} while (!local_try_cmpxchg(&rb->head, &offset, head));
226
227	if (backward) {
228		offset = head;
229		head = (u64)(-head);
230	}
231
232	/*
233	 * We rely on the implied barrier() by local_cmpxchg() to ensure
234	 * none of the data stores below can be lifted up by the compiler.
235	 */
236
237	if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
238		local_add(rb->watermark, &rb->wakeup);
239
240	page_shift = PAGE_SHIFT + page_order(rb);
241
242	handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
243	offset &= (1UL << page_shift) - 1;
244	handle->addr = rb->data_pages[handle->page] + offset;
245	handle->size = (1UL << page_shift) - offset;
246
247	if (unlikely(have_lost)) {
248		lost_event.header.size = sizeof(lost_event);
249		lost_event.header.type = PERF_RECORD_LOST;
250		lost_event.header.misc = 0;
251		lost_event.id          = event->id;
252		lost_event.lost        = local_xchg(&rb->lost, 0);
253
254		/* XXX mostly redundant; @data is already fully initialized */
255		perf_event_header__init_id(&lost_event.header, data, event);
256		perf_output_put(handle, lost_event);
257		perf_event__output_id_sample(event, handle, data);
258	}
259
260	return 0;
261
262fail:
263	local_inc(&rb->lost);
264	atomic64_inc(&event->lost_samples);
265	perf_output_put_handle(handle);
266out:
267	rcu_read_unlock();
268
269	return -ENOSPC;
270}
271
272int perf_output_begin_forward(struct perf_output_handle *handle,
273			      struct perf_sample_data *data,
274			      struct perf_event *event, unsigned int size)
275{
276	return __perf_output_begin(handle, data, event, size, false);
277}
278
279int perf_output_begin_backward(struct perf_output_handle *handle,
280			       struct perf_sample_data *data,
281			       struct perf_event *event, unsigned int size)
282{
283	return __perf_output_begin(handle, data, event, size, true);
284}
285
286int perf_output_begin(struct perf_output_handle *handle,
287		      struct perf_sample_data *data,
288		      struct perf_event *event, unsigned int size)
289{
290
291	return __perf_output_begin(handle, data, event, size,
292				   unlikely(is_write_backward(event)));
293}
294
295unsigned int perf_output_copy(struct perf_output_handle *handle,
296		      const void *buf, unsigned int len)
297{
298	return __output_copy(handle, buf, len);
299}
300
301unsigned int perf_output_skip(struct perf_output_handle *handle,
302			      unsigned int len)
303{
304	return __output_skip(handle, NULL, len);
305}
306
307void perf_output_end(struct perf_output_handle *handle)
308{
309	perf_output_put_handle(handle);
310	rcu_read_unlock();
311}
312
313static void
314ring_buffer_init(struct perf_buffer *rb, long watermark, int flags)
315{
316	long max_size = perf_data_size(rb);
317
318	if (watermark)
319		rb->watermark = min(max_size, watermark);
320
321	if (!rb->watermark)
322		rb->watermark = max_size / 2;
323
324	if (flags & RING_BUFFER_WRITABLE)
325		rb->overwrite = 0;
326	else
327		rb->overwrite = 1;
328
329	refcount_set(&rb->refcount, 1);
330
331	INIT_LIST_HEAD(&rb->event_list);
332	spin_lock_init(&rb->event_lock);
333
334	/*
335	 * perf_output_begin() only checks rb->paused, therefore
336	 * rb->paused must be true if we have no pages for output.
337	 */
338	if (!rb->nr_pages)
339		rb->paused = 1;
340
341	mutex_init(&rb->aux_mutex);
342}
343
344void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags)
345{
346	/*
347	 * OVERWRITE is determined by perf_aux_output_end() and can't
348	 * be passed in directly.
349	 */
350	if (WARN_ON_ONCE(flags & PERF_AUX_FLAG_OVERWRITE))
351		return;
352
353	handle->aux_flags |= flags;
354}
355EXPORT_SYMBOL_GPL(perf_aux_output_flag);
356
357/*
358 * This is called before hardware starts writing to the AUX area to
359 * obtain an output handle and make sure there's room in the buffer.
360 * When the capture completes, call perf_aux_output_end() to commit
361 * the recorded data to the buffer.
362 *
363 * The ordering is similar to that of perf_output_{begin,end}, with
364 * the exception of (B), which should be taken care of by the pmu
365 * driver, since ordering rules will differ depending on hardware.
366 *
367 * Call this from pmu::start(); see the comment in perf_aux_output_end()
368 * about its use in pmu callbacks. Both can also be called from the PMI
369 * handler if needed.
370 */
371void *perf_aux_output_begin(struct perf_output_handle *handle,
372			    struct perf_event *event)
373{
374	struct perf_event *output_event = event;
375	unsigned long aux_head, aux_tail;
376	struct perf_buffer *rb;
377	unsigned int nest;
378
379	if (output_event->parent)
380		output_event = output_event->parent;
381
382	/*
383	 * Since this will typically be open across pmu::add/pmu::del, we
384	 * grab ring_buffer's refcount instead of holding rcu read lock
385	 * to make sure it doesn't disappear under us.
386	 */
387	rb = ring_buffer_get(output_event);
388	if (!rb)
389		return NULL;
390
391	if (!rb_has_aux(rb))
392		goto err;
393
394	/*
395	 * If aux_mmap_count is zero, the aux buffer is in perf_mmap_close(),
396	 * about to get freed, so we leave immediately.
397	 *
398	 * Checking rb::aux_mmap_count and rb::refcount has to be done in
399	 * the same order, see perf_mmap_close. Otherwise we end up freeing
400	 * aux pages in this path, which is a bug, because in_atomic().
401	 */
402	if (!atomic_read(&rb->aux_mmap_count))
403		goto err;
404
405	if (!refcount_inc_not_zero(&rb->aux_refcount))
406		goto err;
407
408	nest = READ_ONCE(rb->aux_nest);
409	/*
410	 * Nesting is not supported for AUX area, make sure nested
411	 * writers are caught early
412	 */
413	if (WARN_ON_ONCE(nest))
414		goto err_put;
415
416	WRITE_ONCE(rb->aux_nest, nest + 1);
417
418	aux_head = rb->aux_head;
419
420	handle->rb = rb;
421	handle->event = event;
422	handle->head = aux_head;
423	handle->size = 0;
424	handle->aux_flags = 0;
425
426	/*
427	 * In overwrite mode, AUX data stores do not depend on aux_tail,
428	 * therefore (A) control dependency barrier does not exist. The
429	 * (B) <-> (C) ordering is still observed by the pmu driver.
430	 */
431	if (!rb->aux_overwrite) {
432		aux_tail = READ_ONCE(rb->user_page->aux_tail);
433		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
434		if (aux_head - aux_tail < perf_aux_size(rb))
435			handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));
436
437		/*
438		 * handle->size computation depends on aux_tail load; this forms a
439		 * control dependency barrier separating aux_tail load from aux data
440		 * store that will be enabled on successful return
441		 */
442		if (!handle->size) { /* A, matches D */
443			event->pending_disable = smp_processor_id();
444			perf_output_wakeup(handle);
445			WRITE_ONCE(rb->aux_nest, 0);
446			goto err_put;
447		}
448	}
449
450	return handle->rb->aux_priv;
451
452err_put:
453	/* can't be last */
454	rb_free_aux(rb);
455
456err:
457	ring_buffer_put(rb);
458	handle->event = NULL;
459
460	return NULL;
461}
462EXPORT_SYMBOL_GPL(perf_aux_output_begin);
463
464static __always_inline bool rb_need_aux_wakeup(struct perf_buffer *rb)
465{
466	if (rb->aux_overwrite)
467		return false;
468
469	if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {
470		rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
471		return true;
472	}
473
474	return false;
475}
476
477/*
478 * Commit the data written by hardware into the ring buffer by adjusting
479 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
480 * pmu driver's responsibility to observe ordering rules of the hardware,
481 * so that all the data is externally visible before this is called.
482 *
483 * Note: this has to be called from pmu::stop() callback, as the assumption
484 * of the AUX buffer management code is that after pmu::stop(), the AUX
485 * transaction must be stopped and therefore drop the AUX reference count.
486 */
487void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
488{
489	bool wakeup = !!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED);
490	struct perf_buffer *rb = handle->rb;
491	unsigned long aux_head;
492
493	/* in overwrite mode, driver provides aux_head via handle */
494	if (rb->aux_overwrite) {
495		handle->aux_flags |= PERF_AUX_FLAG_OVERWRITE;
496
497		aux_head = handle->head;
498		rb->aux_head = aux_head;
499	} else {
500		handle->aux_flags &= ~PERF_AUX_FLAG_OVERWRITE;
501
502		aux_head = rb->aux_head;
503		rb->aux_head += size;
504	}
505
506	/*
507	 * Only send RECORD_AUX if we have something useful to communicate
508	 *
509	 * Note: the OVERWRITE records by themselves are not considered
510	 * useful, as they don't communicate any *new* information,
511	 * aside from the short-lived offset, that becomes history at
512	 * the next event sched-in and therefore isn't useful.
513	 * The userspace that needs to copy out AUX data in overwrite
514	 * mode should know to use user_page::aux_head for the actual
515	 * offset. So, from now on we don't output AUX records that
516	 * have *only* OVERWRITE flag set.
517	 */
518	if (size || (handle->aux_flags & ~(u64)PERF_AUX_FLAG_OVERWRITE))
519		perf_event_aux_event(handle->event, aux_head, size,
520				     handle->aux_flags);
521
522	WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
523	if (rb_need_aux_wakeup(rb))
524		wakeup = true;
525
526	if (wakeup) {
527		if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)
528			handle->event->pending_disable = smp_processor_id();
529		perf_output_wakeup(handle);
530	}
531
532	handle->event = NULL;
533
534	WRITE_ONCE(rb->aux_nest, 0);
535	/* can't be last */
536	rb_free_aux(rb);
537	ring_buffer_put(rb);
538}
539EXPORT_SYMBOL_GPL(perf_aux_output_end);
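As the comments above spell out, perf_aux_output_begin() belongs in pmu::start() and perf_aux_output_end() in pmu::stop(). A condensed, hypothetical driver sketch follows; the per-CPU handle and the my_pmu_hw_*() helpers are invented stand-ins for a real driver's state and hardware programming, and the sketch is not part of this file.

/*
 * Hypothetical PMU driver sketch (not part of this file).
 * The my_pmu_hw_*() helpers stand in for real hardware programming.
 */
static void my_pmu_hw_start(void *buf, unsigned long head, unsigned long size) { }
static unsigned long my_pmu_hw_stop(void) { return 0; }

static DEFINE_PER_CPU(struct perf_output_handle, my_pmu_handle);

static void my_pmu_start(struct perf_event *event, int flags)
{
	struct perf_output_handle *handle = this_cpu_ptr(&my_pmu_handle);
	void *buf;

	buf = perf_aux_output_begin(handle, event);
	if (!buf)
		return;			/* no AUX buffer mapped, or no space */

	/* Ordering rule (B) is the driver's job when programming the hw. */
	my_pmu_hw_start(buf, handle->head, handle->size);
}

static void my_pmu_stop(struct perf_event *event, int flags)
{
	struct perf_output_handle *handle = this_cpu_ptr(&my_pmu_handle);
	unsigned long written = my_pmu_hw_stop();	/* bytes the hw produced */

	if (handle->event)		/* only if _begin() succeeded */
		perf_aux_output_end(handle, written);
}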
540
541/*
542 * Skip over a given number of bytes in the AUX buffer, due to, for example,
543 * hardware's alignment constraints.
544 */
545int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
546{
547	struct perf_buffer *rb = handle->rb;
548
549	if (size > handle->size)
550		return -ENOSPC;
551
552	rb->aux_head += size;
553
554	WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
555	if (rb_need_aux_wakeup(rb)) {
556		perf_output_wakeup(handle);
557		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
558	}
559
560	handle->head = rb->aux_head;
561	handle->size -= size;
562
563	return 0;
564}
565EXPORT_SYMBOL_GPL(perf_aux_output_skip);
566
567void *perf_get_aux(struct perf_output_handle *handle)
568{
569	/* this is only valid between perf_aux_output_begin and *_end */
570	if (!handle->event)
571		return NULL;
572
573	return handle->rb->aux_priv;
574}
575EXPORT_SYMBOL_GPL(perf_get_aux);
576
577/*
578 * Copy out AUX data from an AUX handle.
579 */
580long perf_output_copy_aux(struct perf_output_handle *aux_handle,
581			  struct perf_output_handle *handle,
582			  unsigned long from, unsigned long to)
583{
584	struct perf_buffer *rb = aux_handle->rb;
585	unsigned long tocopy, remainder, len = 0;
586	void *addr;
587
588	from &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;
589	to &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;
590
591	do {
592		tocopy = PAGE_SIZE - offset_in_page(from);
593		if (to > from)
594			tocopy = min(tocopy, to - from);
595		if (!tocopy)
596			break;
597
598		addr = rb->aux_pages[from >> PAGE_SHIFT];
599		addr += offset_in_page(from);
600
601		remainder = perf_output_copy(handle, addr, tocopy);
602		if (remainder)
603			return -EFAULT;
604
605		len += tocopy;
606		from += tocopy;
607		from &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;
608	} while (to != from);
609
610	return len;
611}
612
613#define PERF_AUX_GFP	(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)
614
615static struct page *rb_alloc_aux_page(int node, int order)
616{
617	struct page *page;
618
619	if (order > MAX_PAGE_ORDER)
620		order = MAX_PAGE_ORDER;
621
622	do {
623		page = alloc_pages_node(node, PERF_AUX_GFP, order);
624	} while (!page && order--);
625
626	if (page && order) {
627		/*
628		 * Communicate the allocation size to the driver:
629		 * if we managed to secure a high-order allocation,
630		 * set its first page's private to this order;
631		 * !PagePrivate(page) means it's just a normal page.
632		 */
633		split_page(page, order);
634		SetPagePrivate(page);
635		set_page_private(page, order);
636	}
637
638	return page;
639}
640
641static void rb_free_aux_page(struct perf_buffer *rb, int idx)
642{
643	struct page *page = virt_to_page(rb->aux_pages[idx]);
644
645	ClearPagePrivate(page);
646	page->mapping = NULL;
647	__free_page(page);
648}
649
650static void __rb_free_aux(struct perf_buffer *rb)
651{
652	int pg;
653
654	/*
655	 * Should never happen, the last reference should be dropped from
656	 * perf_mmap_close() path, which first stops aux transactions (which
657	 * in turn are the atomic holders of aux_refcount) and then does the
658	 * last rb_free_aux().
659	 */
660	WARN_ON_ONCE(in_atomic());
661
662	if (rb->aux_priv) {
663		rb->free_aux(rb->aux_priv);
664		rb->free_aux = NULL;
665		rb->aux_priv = NULL;
666	}
667
668	if (rb->aux_nr_pages) {
669		for (pg = 0; pg < rb->aux_nr_pages; pg++)
670			rb_free_aux_page(rb, pg);
671
672		kfree(rb->aux_pages);
673		rb->aux_nr_pages = 0;
674	}
675}
676
677int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
678		 pgoff_t pgoff, int nr_pages, long watermark, int flags)
679{
680	bool overwrite = !(flags & RING_BUFFER_WRITABLE);
681	int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
682	int ret = -ENOMEM, max_order;
683
684	if (!has_aux(event))
685		return -EOPNOTSUPP;
686
687	if (nr_pages <= 0)
688		return -EINVAL;
689
690	if (!overwrite) {
691		/*
692		 * Watermark defaults to half the buffer, and so does the
693		 * max_order, to aid PMU drivers in double buffering.
694		 */
695		if (!watermark)
696			watermark = min_t(unsigned long,
697					  U32_MAX,
698					  (unsigned long)nr_pages << (PAGE_SHIFT - 1));
699
700		/*
701		 * Use aux_watermark as the basis for chunking to
702		 * help PMU drivers honor the watermark.
703		 */
704		max_order = get_order(watermark);
705	} else {
706		/*
707		 * We need to start with the max_order that fits in nr_pages,
708		 * not the other way around, hence ilog2() and not get_order.
709		 */
710		max_order = ilog2(nr_pages);
711		watermark = 0;
712	}
713
714	/*
715	 * kcalloc_node() is unable to allocate a buffer if the size is larger
716	 * than: PAGE_SIZE << MAX_PAGE_ORDER; directly bail out in this case.
717	 */
718	if (get_order((unsigned long)nr_pages * sizeof(void *)) > MAX_PAGE_ORDER)
719		return -ENOMEM;
720	rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL,
721				     node);
722	if (!rb->aux_pages)
723		return -ENOMEM;
724
725	rb->free_aux = event->pmu->free_aux;
726	for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
727		struct page *page;
728		int last, order;
729
730		order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
731		page = rb_alloc_aux_page(node, order);
732		if (!page)
733			goto out;
734
735		for (last = rb->aux_nr_pages + (1 << page_private(page));
736		     last > rb->aux_nr_pages; rb->aux_nr_pages++)
737			rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
738	}
739
740	/*
741	 * In overwrite mode, PMUs that don't support SG may not handle more
742	 * than one contiguous allocation, since they rely on PMI to do double
743	 * buffering. In this case, the entire buffer has to be one contiguous
744	 * chunk.
745	 */
746	if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
747	    overwrite) {
748		struct page *page = virt_to_page(rb->aux_pages[0]);
749
750		if (page_private(page) != max_order)
751			goto out;
752	}
753
754	rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages,
755					     overwrite);
756	if (!rb->aux_priv)
757		goto out;
758
759	ret = 0;
760
761	/*
762	 * aux_pages (and pmu driver's private data, aux_priv) will be
763	 * referenced in both producer's and consumer's contexts, thus
764	 * we keep a refcount here to make sure either of the two can
765	 * reference them safely.
766	 */
767	refcount_set(&rb->aux_refcount, 1);
768
769	rb->aux_overwrite = overwrite;
770	rb->aux_watermark = watermark;
771
772out:
773	if (!ret)
774		rb->aux_pgoff = pgoff;
775	else
776		__rb_free_aux(rb);
777
778	return ret;
779}
780
781void rb_free_aux(struct perf_buffer *rb)
782{
783	if (refcount_dec_and_test(&rb->aux_refcount))
784		__rb_free_aux(rb);
785}
786
787#ifndef CONFIG_PERF_USE_VMALLOC
788
789/*
790 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
791 */
792
793static struct page *
794__perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
795{
796	if (pgoff > rb->nr_pages)
797		return NULL;
798
799	if (pgoff == 0)
800		return virt_to_page(rb->user_page);
801
802	return virt_to_page(rb->data_pages[pgoff - 1]);
803}
804
805static void *perf_mmap_alloc_page(int cpu)
806{
807	struct page *page;
808	int node;
809
810	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
811	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
812	if (!page)
813		return NULL;
814
815	return page_address(page);
816}
817
818static void perf_mmap_free_page(void *addr)
819{
820	struct page *page = virt_to_page(addr);
821
822	page->mapping = NULL;
823	__free_page(page);
824}
825
826struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
827{
828	struct perf_buffer *rb;
829	unsigned long size;
830	int i, node;
831
832	size = sizeof(struct perf_buffer);
833	size += nr_pages * sizeof(void *);
834
835	if (order_base_2(size) > PAGE_SHIFT+MAX_PAGE_ORDER)
836		goto fail;
837
838	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
839	rb = kzalloc_node(size, GFP_KERNEL, node);
840	if (!rb)
841		goto fail;
842
843	rb->user_page = perf_mmap_alloc_page(cpu);
844	if (!rb->user_page)
845		goto fail_user_page;
846
847	for (i = 0; i < nr_pages; i++) {
848		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
849		if (!rb->data_pages[i])
850			goto fail_data_pages;
851	}
852
853	rb->nr_pages = nr_pages;
854
855	ring_buffer_init(rb, watermark, flags);
856
857	return rb;
858
859fail_data_pages:
860	for (i--; i >= 0; i--)
861		perf_mmap_free_page(rb->data_pages[i]);
862
863	perf_mmap_free_page(rb->user_page);
864
865fail_user_page:
866	kfree(rb);
867
868fail:
869	return NULL;
870}
871
872void rb_free(struct perf_buffer *rb)
873{
874	int i;
875
876	perf_mmap_free_page(rb->user_page);
877	for (i = 0; i < rb->nr_pages; i++)
878		perf_mmap_free_page(rb->data_pages[i]);
879	kfree(rb);
880}
881
882#else
883static struct page *
884__perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
885{
886	/* The '>' counts in the user page. */
887	if (pgoff > data_page_nr(rb))
888		return NULL;
889
890	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
891}
892
893static void perf_mmap_unmark_page(void *addr)
894{
895	struct page *page = vmalloc_to_page(addr);
896
897	page->mapping = NULL;
898}
899
900static void rb_free_work(struct work_struct *work)
901{
902	struct perf_buffer *rb;
903	void *base;
904	int i, nr;
905
906	rb = container_of(work, struct perf_buffer, work);
907	nr = data_page_nr(rb);
908
909	base = rb->user_page;
910	/* The '<=' counts in the user page. */
911	for (i = 0; i <= nr; i++)
912		perf_mmap_unmark_page(base + (i * PAGE_SIZE));
913
914	vfree(base);
915	kfree(rb);
916}
917
918void rb_free(struct perf_buffer *rb)
919{
920	schedule_work(&rb->work);
921}
922
923struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
924{
925	struct perf_buffer *rb;
926	unsigned long size;
927	void *all_buf;
928	int node;
929
930	size = sizeof(struct perf_buffer);
931	size += sizeof(void *);
932
933	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
934	rb = kzalloc_node(size, GFP_KERNEL, node);
935	if (!rb)
936		goto fail;
937
938	INIT_WORK(&rb->work, rb_free_work);
939
940	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
941	if (!all_buf)
942		goto fail_all_buf;
943
944	rb->user_page = all_buf;
945	rb->data_pages[0] = all_buf + PAGE_SIZE;
946	if (nr_pages) {
947		rb->nr_pages = 1;
948		rb->page_order = ilog2(nr_pages);
949	}
950
951	ring_buffer_init(rb, watermark, flags);
952
953	return rb;
954
955fail_all_buf:
956	kfree(rb);
957
958fail:
959	return NULL;
960}
961
962#endif
963
964struct page *
965perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
966{
967	if (rb->aux_nr_pages) {
968		/* above AUX space */
969		if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
970			return NULL;
971
972		/* AUX space */
973		if (pgoff >= rb->aux_pgoff) {
974			int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages);
975			return virt_to_page(rb->aux_pages[aux_pgoff]);
976		}
977	}
978
979	return __perf_mmap_to_page(rb, pgoff);
980}