v4.6 (kernel/events/ring_buffer.c)
 
  1/*
  2 * Performance events ring-buffer code:
  3 *
  4 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
  5 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
  6 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
  7 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  8 *
  9 * For licensing details see kernel-base/COPYING
 10 */
 11
 12#include <linux/perf_event.h>
 13#include <linux/vmalloc.h>
 14#include <linux/slab.h>
 15#include <linux/circ_buf.h>
 16#include <linux/poll.h>
 17
 18#include "internal.h"
 19
 20static void perf_output_wakeup(struct perf_output_handle *handle)
 21{
 22	atomic_set(&handle->rb->poll, POLLIN);
 23
 24	handle->event->pending_wakeup = 1;
 25	irq_work_queue(&handle->event->pending);
 26}
 27
 28/*
 29 * We need to ensure a later event_id doesn't publish a head when a former
 30 * event isn't done writing. However since we need to deal with NMIs we
 31 * cannot fully serialize things.
 32 *
 33 * We only publish the head (and generate a wakeup) when the outer-most
 34 * event completes.
 35 */
 36static void perf_output_get_handle(struct perf_output_handle *handle)
 37{
 38	struct ring_buffer *rb = handle->rb;
 39
 40	preempt_disable();
 41	local_inc(&rb->nest);
 42	handle->wakeup = local_read(&rb->wakeup);
 43}
 44
 45static void perf_output_put_handle(struct perf_output_handle *handle)
 46{
 47	struct ring_buffer *rb = handle->rb;
 48	unsigned long head;
 49
 50again:
 51	head = local_read(&rb->head);
 52
 53	/*
 54	 * IRQ/NMI can happen here, which means we can miss a head update.
 55	 */
 56
 57	if (!local_dec_and_test(&rb->nest))
 58		goto out;
 59
 60	/*
 61	 * Since the mmap() consumer (userspace) can run on a different CPU:
 62	 *
 63	 *   kernel				user
 64	 *
 65	 *   if (LOAD ->data_tail) {		LOAD ->data_head
 66	 *			(A)		smp_rmb()	(C)
 67	 *	STORE $data			LOAD $data
 68	 *	smp_wmb()	(B)		smp_mb()	(D)
 69	 *	STORE ->data_head		STORE ->data_tail
 70	 *   }
 71	 *
 72	 * Where A pairs with D, and B pairs with C.
 73	 *
 74	 * In our case (A) is a control dependency that separates the load of
 75	 * the ->data_tail and the stores of $data. In case ->data_tail
 76	 * indicates there is no room in the buffer to store $data we do not.
 77	 *
 78	 * D needs to be a full barrier since it separates the data READ
 79	 * from the tail WRITE.
 80	 *
 81	 * For B a WMB is sufficient since it separates two WRITEs, and for C
 82	 * an RMB is sufficient since it separates two READs.
 83	 *
 84	 * See perf_output_begin().
 85	 */
 86	smp_wmb(); /* B, matches C */
 87	rb->user_page->data_head = head;
 88
 89	/*
 90	 * Now check if we missed an update -- rely on previous implied
 91	 * compiler barriers to force a re-read.
 92	 */
 93	if (unlikely(head != local_read(&rb->head))) {
 94		local_inc(&rb->nest);
 95		goto again;
 96	}
 97
 98	if (handle->wakeup != local_read(&rb->wakeup))
 99		perf_output_wakeup(handle);
100
101out:
102	preempt_enable();
103}
104
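For reference, the userspace half of the A/B/C/D pairing documented above mirrors this function: an acquire-load of data_head before reading records, and a release-store of data_tail once they have been consumed, which is what the perf tool's ring-buffer helpers do. A minimal sketch, assuming the caller already mapped the metadata page and knows the data area and its (power-of-two) size, and ignoring records that wrap past the end of the buffer:

#include <linux/perf_event.h>
#include <stdint.h>

static void drain_ring(struct perf_event_mmap_page *meta, uint8_t *data,
		       uint64_t data_size /* bytes, power of two */)
{
	/* C: pairs with the kernel's smp_wmb() (B) before the data_head store */
	uint64_t head = __atomic_load_n(&meta->data_head, __ATOMIC_ACQUIRE);
	uint64_t tail = meta->data_tail;

	while (tail < head) {
		struct perf_event_header *hdr =
			(void *)(data + (tail & (data_size - 1)));

		/* ... consume one record here (real readers must copy out
		 * records that wrap around the end of the buffer) ... */
		tail += hdr->size;
	}

	/* D: pairs with the control dependency (A) in perf_output_begin();
	 * nothing read above may be reordered past this store. */
	__atomic_store_n(&meta->data_tail, tail, __ATOMIC_RELEASE);
}
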
105int perf_output_begin(struct perf_output_handle *handle,
106		      struct perf_event *event, unsigned int size)
107{
108	struct ring_buffer *rb;
109	unsigned long tail, offset, head;
110	int have_lost, page_shift;
111	struct {
112		struct perf_event_header header;
113		u64			 id;
114		u64			 lost;
115	} lost_event;
116
117	rcu_read_lock();
118	/*
119	 * For inherited events we send all the output towards the parent.
120	 */
121	if (event->parent)
122		event = event->parent;
123
124	rb = rcu_dereference(event->rb);
125	if (unlikely(!rb))
126		goto out;
127
128	if (unlikely(!rb->nr_pages))
129		goto out;
130
131	handle->rb    = rb;
132	handle->event = event;
133
134	have_lost = local_read(&rb->lost);
135	if (unlikely(have_lost)) {
136		size += sizeof(lost_event);
137		if (event->attr.sample_id_all)
138			size += event->id_header_size;
139	}
140
141	perf_output_get_handle(handle);
142
143	do {
144		tail = READ_ONCE(rb->user_page->data_tail);
145		offset = head = local_read(&rb->head);
146		if (!rb->overwrite &&
147		    unlikely(CIRC_SPACE(head, tail, perf_data_size(rb)) < size))
148			goto fail;
149
150		/*
151		 * The above forms a control dependency barrier separating the
152		 * @tail load above from the data stores below. Since the @tail
153		 * load is required to compute the branch to fail below.
154		 *
155		 * A, matches D; the full memory barrier userspace SHOULD issue
156		 * after reading the data and before storing the new tail
157		 * position.
158		 *
159		 * See perf_output_put_handle().
160		 */
161
162		head += size;
163	} while (local_cmpxchg(&rb->head, offset, head) != offset);
164
165	/*
166	 * We rely on the implied barrier() by local_cmpxchg() to ensure
167	 * none of the data stores below can be lifted up by the compiler.
168	 */
169
170	if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
171		local_add(rb->watermark, &rb->wakeup);
172
173	page_shift = PAGE_SHIFT + page_order(rb);
174
175	handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
176	offset &= (1UL << page_shift) - 1;
177	handle->addr = rb->data_pages[handle->page] + offset;
178	handle->size = (1UL << page_shift) - offset;
179
180	if (unlikely(have_lost)) {
181		struct perf_sample_data sample_data;
182
183		lost_event.header.size = sizeof(lost_event);
184		lost_event.header.type = PERF_RECORD_LOST;
185		lost_event.header.misc = 0;
186		lost_event.id          = event->id;
187		lost_event.lost        = local_xchg(&rb->lost, 0);
188
189		perf_event_header__init_id(&lost_event.header,
190					   &sample_data, event);
191		perf_output_put(handle, lost_event);
192		perf_event__output_id_sample(event, handle, &sample_data);
193	}
194
195	return 0;
196
197fail:
198	local_inc(&rb->lost);
199	perf_output_put_handle(handle);
200out:
201	rcu_read_unlock();
202
203	return -ENOSPC;
204}
205
206unsigned int perf_output_copy(struct perf_output_handle *handle,
207		      const void *buf, unsigned int len)
208{
209	return __output_copy(handle, buf, len);
210}
211
212unsigned int perf_output_skip(struct perf_output_handle *handle,
213			      unsigned int len)
214{
215	return __output_skip(handle, NULL, len);
216}
217
218void perf_output_end(struct perf_output_handle *handle)
219{
220	perf_output_put_handle(handle);
221	rcu_read_unlock();
222}
223
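Taken together, perf_output_begin()/perf_output_put()/perf_output_copy()/perf_output_end() form the writer-side API the rest of the kernel uses to emit a record. A hypothetical caller under the v4.6 signature (the mydrv_* name and payload are invented for illustration):

static void mydrv_emit_record(struct perf_event *event,
			      const void *payload, unsigned int len)
{
	struct perf_output_handle handle;
	struct perf_event_header header = {
		.type = PERF_RECORD_SAMPLE,	/* record type being emitted */
		.misc = 0,
		.size = sizeof(header) + len,
	};

	/* reserves header.size bytes; nothing is published until _end() */
	if (perf_output_begin(&handle, event, header.size))
		return;		/* no buffer mapped, or no space left */

	perf_output_put(&handle, header);		/* copies sizeof(header) */
	perf_output_copy(&handle, payload, len);	/* copies the payload */
	perf_output_end(&handle);			/* publishes data_head */
}
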
224static void rb_irq_work(struct irq_work *work);
225
226static void
227ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
228{
229	long max_size = perf_data_size(rb);
230
231	if (watermark)
232		rb->watermark = min(max_size, watermark);
233
234	if (!rb->watermark)
235		rb->watermark = max_size / 2;
236
237	if (flags & RING_BUFFER_WRITABLE)
238		rb->overwrite = 0;
239	else
240		rb->overwrite = 1;
241
242	atomic_set(&rb->refcount, 1);
243
244	INIT_LIST_HEAD(&rb->event_list);
245	spin_lock_init(&rb->event_lock);
246	init_irq_work(&rb->irq_work, rb_irq_work);
247}
248
249static void ring_buffer_put_async(struct ring_buffer *rb)
250{
251	if (!atomic_dec_and_test(&rb->refcount))
252		return;
253
254	rb->rcu_head.next = (void *)rb;
255	irq_work_queue(&rb->irq_work);
256}
257
258/*
259 * This is called before hardware starts writing to the AUX area to
260 * obtain an output handle and make sure there's room in the buffer.
261 * When the capture completes, call perf_aux_output_end() to commit
262 * the recorded data to the buffer.
263 *
264 * The ordering is similar to that of perf_output_{begin,end}, with
265 * the exception of (B), which should be taken care of by the pmu
266 * driver, since ordering rules will differ depending on hardware.
267 */
268void *perf_aux_output_begin(struct perf_output_handle *handle,
269			    struct perf_event *event)
270{
271	struct perf_event *output_event = event;
272	unsigned long aux_head, aux_tail;
273	struct ring_buffer *rb;
274
275	if (output_event->parent)
276		output_event = output_event->parent;
277
278	/*
279	 * Since this will typically be open across pmu::add/pmu::del, we
280	 * grab ring_buffer's refcount instead of holding rcu read lock
281	 * to make sure it doesn't disappear under us.
282	 */
283	rb = ring_buffer_get(output_event);
284	if (!rb)
285		return NULL;
286
287	if (!rb_has_aux(rb) || !atomic_inc_not_zero(&rb->aux_refcount))
288		goto err;
289
290	/*
291	 * Nesting is not supported for AUX area, make sure nested
292	 * writers are caught early
293	 */
294	if (WARN_ON_ONCE(local_xchg(&rb->aux_nest, 1)))
295		goto err_put;
296
297	aux_head = local_read(&rb->aux_head);
298
299	handle->rb = rb;
300	handle->event = event;
301	handle->head = aux_head;
302	handle->size = 0;
303
304	/*
305	 * In overwrite mode, AUX data stores do not depend on aux_tail,
306	 * therefore (A) control dependency barrier does not exist. The
307	 * (B) <-> (C) ordering is still observed by the pmu driver.
308	 */
309	if (!rb->aux_overwrite) {
310		aux_tail = ACCESS_ONCE(rb->user_page->aux_tail);
311		handle->wakeup = local_read(&rb->aux_wakeup) + rb->aux_watermark;
312		if (aux_head - aux_tail < perf_aux_size(rb))
313			handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));
314
315		/*
316		 * handle->size computation depends on aux_tail load; this forms a
317		 * control dependency barrier separating aux_tail load from aux data
318		 * store that will be enabled on successful return
319		 */
320		if (!handle->size) { /* A, matches D */
321			event->pending_disable = 1;
322			perf_output_wakeup(handle);
323			local_set(&rb->aux_nest, 0);
324			goto err_put;
325		}
326	}
327
328	return handle->rb->aux_priv;
329
330err_put:
331	rb_free_aux(rb);
332
333err:
334	ring_buffer_put_async(rb);
335	handle->event = NULL;
336
337	return NULL;
338}
339
340/*
341 * Commit the data written by hardware into the ring buffer by adjusting
342 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
343 * pmu driver's responsibility to observe ordering rules of the hardware,
344 * so that all the data is externally visible before this is called.
345 */
346void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
347			 bool truncated)
348{
349	struct ring_buffer *rb = handle->rb;
350	bool wakeup = truncated;
351	unsigned long aux_head;
352	u64 flags = 0;
353
354	if (truncated)
355		flags |= PERF_AUX_FLAG_TRUNCATED;
356
357	/* in overwrite mode, driver provides aux_head via handle */
358	if (rb->aux_overwrite) {
359		flags |= PERF_AUX_FLAG_OVERWRITE;
360
361		aux_head = handle->head;
362		local_set(&rb->aux_head, aux_head);
363	} else {
364		aux_head = local_read(&rb->aux_head);
365		local_add(size, &rb->aux_head);
366	}
367
368	if (size || flags) {
369		/*
370		 * Only send RECORD_AUX if we have something useful to communicate
371		 */
372
373		perf_event_aux_event(handle->event, aux_head, size, flags);
374	}
375
376	aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
377
378	if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
379		wakeup = true;
380		local_add(rb->aux_watermark, &rb->aux_wakeup);
381	}
382
383	if (wakeup) {
384		if (truncated)
385			handle->event->pending_disable = 1;
386		perf_output_wakeup(handle);
387	}
388
389	handle->event = NULL;
390
391	local_set(&rb->aux_nest, 0);
392	rb_free_aux(rb);
393	ring_buffer_put_async(rb);
394}
395
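The intended call sequence for the two AUX functions above: pmu::start() (or the PMI handler) calls perf_aux_output_begin() to reserve space and obtain the pmu::setup_aux() private data, the hardware then writes into the AUX pages, and pmu::stop()/PMI commits the result with perf_aux_output_end(). A hypothetical driver skeleton (the per-CPU handle and all mypmu_* names are invented; real users are drivers such as intel_pt):

static DEFINE_PER_CPU(struct perf_output_handle, mypmu_handle);

static void mypmu_start(struct perf_event *event, int flags)
{
	struct perf_output_handle *handle = this_cpu_ptr(&mypmu_handle);
	void *priv;

	priv = perf_aux_output_begin(handle, event);
	if (!priv)
		return;
	/*
	 * Program the hardware to write into the AUX buffer described by
	 * @priv, starting at handle->head, for at most handle->size bytes.
	 */
}

static void mypmu_stop(struct perf_event *event, int flags)
{
	struct perf_output_handle *handle = this_cpu_ptr(&mypmu_handle);
	unsigned long written = 0;	/* bytes the hardware actually wrote */

	if (!handle->event)		/* begin() failed or already ended */
		return;

	/* stop/flush the hardware first, then commit what it produced */
	perf_aux_output_end(handle, written, false /* not truncated */);
}
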
396/*
397 * Skip over a given number of bytes in the AUX buffer, due to, for example,
398 * hardware's alignment constraints.
399 */
400int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
401{
402	struct ring_buffer *rb = handle->rb;
403	unsigned long aux_head;
404
405	if (size > handle->size)
406		return -ENOSPC;
407
408	local_add(size, &rb->aux_head);
409
410	aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
411	if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
412		perf_output_wakeup(handle);
413		local_add(rb->aux_watermark, &rb->aux_wakeup);
414		handle->wakeup = local_read(&rb->aux_wakeup) +
415				 rb->aux_watermark;
416	}
417
418	handle->head = aux_head;
419	handle->size -= size;
420
421	return 0;
422}
423
424void *perf_get_aux(struct perf_output_handle *handle)
425{
426	/* this is only valid between perf_aux_output_begin and *_end */
427	if (!handle->event)
428		return NULL;
429
430	return handle->rb->aux_priv;
431}
432
433#define PERF_AUX_GFP	(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)
434
435static struct page *rb_alloc_aux_page(int node, int order)
436{
437	struct page *page;
438
439	if (order > MAX_ORDER)
440		order = MAX_ORDER;
441
442	do {
443		page = alloc_pages_node(node, PERF_AUX_GFP, order);
444	} while (!page && order--);
445
446	if (page && order) {
447		/*
448		 * Communicate the allocation size to the driver:
449		 * if we managed to secure a high-order allocation,
450		 * set its first page's private to this order;
451		 * !PagePrivate(page) means it's just a normal page.
452		 */
453		split_page(page, order);
454		SetPagePrivate(page);
455		set_page_private(page, order);
456	}
457
458	return page;
459}
460
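On the consuming side, a driver walking the pages handed to pmu::setup_aux() can recover the chunk size exactly as the comment above describes. A sketch (the helper name is invented):

static int mypmu_aux_chunk_order(struct page *page)
{
	/* first page of a high-order chunk carries the order in ->private */
	if (PagePrivate(page))
		return page_private(page);

	return 0;	/* ordinary order-0 page */
}
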
461static void rb_free_aux_page(struct ring_buffer *rb, int idx)
462{
463	struct page *page = virt_to_page(rb->aux_pages[idx]);
464
465	ClearPagePrivate(page);
466	page->mapping = NULL;
467	__free_page(page);
468}
469
470static void __rb_free_aux(struct ring_buffer *rb)
471{
472	int pg;
473
474	if (rb->aux_priv) {
475		rb->free_aux(rb->aux_priv);
476		rb->free_aux = NULL;
477		rb->aux_priv = NULL;
478	}
479
480	if (rb->aux_nr_pages) {
481		for (pg = 0; pg < rb->aux_nr_pages; pg++)
482			rb_free_aux_page(rb, pg);
483
484		kfree(rb->aux_pages);
485		rb->aux_nr_pages = 0;
486	}
487}
488
489int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
490		 pgoff_t pgoff, int nr_pages, long watermark, int flags)
491{
492	bool overwrite = !(flags & RING_BUFFER_WRITABLE);
493	int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
494	int ret = -ENOMEM, max_order = 0;
495
496	if (!has_aux(event))
497		return -ENOTSUPP;
498
499	if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) {
500		/*
501		 * We need to start with the max_order that fits in nr_pages,
502		 * not the other way around, hence ilog2() and not get_order.
503		 */
504		max_order = ilog2(nr_pages);
505
506		/*
 507		 * PMU requests more than one contiguous chunk of memory
508		 * for SW double buffering
509		 */
510		if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_SW_DOUBLEBUF) &&
511		    !overwrite) {
512			if (!max_order)
513				return -EINVAL;
514
515			max_order--;
516		}
517	}
518
519	rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node);
520	if (!rb->aux_pages)
521		return -ENOMEM;
522
523	rb->free_aux = event->pmu->free_aux;
524	for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
525		struct page *page;
526		int last, order;
527
528		order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
529		page = rb_alloc_aux_page(node, order);
530		if (!page)
531			goto out;
532
533		for (last = rb->aux_nr_pages + (1 << page_private(page));
534		     last > rb->aux_nr_pages; rb->aux_nr_pages++)
535			rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
536	}
537
538	/*
539	 * In overwrite mode, PMUs that don't support SG may not handle more
540	 * than one contiguous allocation, since they rely on PMI to do double
541	 * buffering. In this case, the entire buffer has to be one contiguous
542	 * chunk.
543	 */
544	if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
545	    overwrite) {
546		struct page *page = virt_to_page(rb->aux_pages[0]);
547
548		if (page_private(page) != max_order)
549			goto out;
550	}
551
552	rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
553					     overwrite);
554	if (!rb->aux_priv)
555		goto out;
556
557	ret = 0;
558
559	/*
560	 * aux_pages (and pmu driver's private data, aux_priv) will be
561	 * referenced in both producer's and consumer's contexts, thus
562	 * we keep a refcount here to make sure either of the two can
563	 * reference them safely.
564	 */
565	atomic_set(&rb->aux_refcount, 1);
566
567	rb->aux_overwrite = overwrite;
568	rb->aux_watermark = watermark;
569
570	if (!rb->aux_watermark && !rb->aux_overwrite)
571		rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1);
572
573out:
574	if (!ret)
575		rb->aux_pgoff = pgoff;
576	else
577		__rb_free_aux(rb);
578
579	return ret;
580}
581
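A worked example of the chunking above: for a 128-page AUX buffer on a PERF_PMU_CAP_AUX_NO_SG PMU with PERF_PMU_CAP_AUX_SW_DOUBLEBUF in non-overwrite mode, max_order starts at ilog2(128) = 7 and is decremented to 6, so the allocation loop produces two contiguous 64-page chunks. In overwrite mode the later check requires the first chunk's order to equal max_order, i.e. all 128 pages must come from a single contiguous allocation.
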
582void rb_free_aux(struct ring_buffer *rb)
583{
584	if (atomic_dec_and_test(&rb->aux_refcount))
585		irq_work_queue(&rb->irq_work);
586}
587
588static void rb_irq_work(struct irq_work *work)
589{
590	struct ring_buffer *rb = container_of(work, struct ring_buffer, irq_work);
591
592	if (!atomic_read(&rb->aux_refcount))
593		__rb_free_aux(rb);
594
595	if (rb->rcu_head.next == (void *)rb)
596		call_rcu(&rb->rcu_head, rb_free_rcu);
597}
598
599#ifndef CONFIG_PERF_USE_VMALLOC
600
601/*
602 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
603 */
604
605static struct page *
606__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
607{
608	if (pgoff > rb->nr_pages)
609		return NULL;
610
611	if (pgoff == 0)
612		return virt_to_page(rb->user_page);
613
614	return virt_to_page(rb->data_pages[pgoff - 1]);
615}
616
617static void *perf_mmap_alloc_page(int cpu)
618{
619	struct page *page;
620	int node;
621
622	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
623	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
624	if (!page)
625		return NULL;
626
627	return page_address(page);
628}
629
630struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
631{
632	struct ring_buffer *rb;
633	unsigned long size;
634	int i;
635
636	size = sizeof(struct ring_buffer);
637	size += nr_pages * sizeof(void *);
638
639	rb = kzalloc(size, GFP_KERNEL);
640	if (!rb)
641		goto fail;
642
643	rb->user_page = perf_mmap_alloc_page(cpu);
644	if (!rb->user_page)
645		goto fail_user_page;
646
647	for (i = 0; i < nr_pages; i++) {
648		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
649		if (!rb->data_pages[i])
650			goto fail_data_pages;
651	}
652
653	rb->nr_pages = nr_pages;
654
655	ring_buffer_init(rb, watermark, flags);
656
657	return rb;
658
659fail_data_pages:
660	for (i--; i >= 0; i--)
661		free_page((unsigned long)rb->data_pages[i]);
662
663	free_page((unsigned long)rb->user_page);
664
665fail_user_page:
666	kfree(rb);
667
668fail:
669	return NULL;
670}
671
672static void perf_mmap_free_page(unsigned long addr)
673{
674	struct page *page = virt_to_page((void *)addr);
675
676	page->mapping = NULL;
677	__free_page(page);
678}
679
680void rb_free(struct ring_buffer *rb)
681{
682	int i;
683
684	perf_mmap_free_page((unsigned long)rb->user_page);
685	for (i = 0; i < rb->nr_pages; i++)
686		perf_mmap_free_page((unsigned long)rb->data_pages[i]);
687	kfree(rb);
688}
689
690#else
691static int data_page_nr(struct ring_buffer *rb)
692{
693	return rb->nr_pages << page_order(rb);
694}
695
696static struct page *
697__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
698{
699	/* The '>' counts in the user page. */
700	if (pgoff > data_page_nr(rb))
701		return NULL;
702
703	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
704}
705
706static void perf_mmap_unmark_page(void *addr)
707{
708	struct page *page = vmalloc_to_page(addr);
709
710	page->mapping = NULL;
711}
712
713static void rb_free_work(struct work_struct *work)
714{
715	struct ring_buffer *rb;
716	void *base;
717	int i, nr;
718
719	rb = container_of(work, struct ring_buffer, work);
720	nr = data_page_nr(rb);
721
722	base = rb->user_page;
723	/* The '<=' counts in the user page. */
724	for (i = 0; i <= nr; i++)
725		perf_mmap_unmark_page(base + (i * PAGE_SIZE));
726
727	vfree(base);
728	kfree(rb);
729}
730
731void rb_free(struct ring_buffer *rb)
732{
733	schedule_work(&rb->work);
734}
735
736struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
737{
738	struct ring_buffer *rb;
739	unsigned long size;
740	void *all_buf;
741
742	size = sizeof(struct ring_buffer);
743	size += sizeof(void *);
744
745	rb = kzalloc(size, GFP_KERNEL);
746	if (!rb)
747		goto fail;
748
749	INIT_WORK(&rb->work, rb_free_work);
750
751	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
752	if (!all_buf)
753		goto fail_all_buf;
754
755	rb->user_page = all_buf;
756	rb->data_pages[0] = all_buf + PAGE_SIZE;
757	if (nr_pages) {
758		rb->nr_pages = 1;
759		rb->page_order = ilog2(nr_pages);
760	}
761
762	ring_buffer_init(rb, watermark, flags);
763
764	return rb;
765
766fail_all_buf:
767	kfree(rb);
768
769fail:
770	return NULL;
771}
772
773#endif
774
775struct page *
776perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
777{
778	if (rb->aux_nr_pages) {
779		/* above AUX space */
780		if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
781			return NULL;
782
783		/* AUX space */
784		if (pgoff >= rb->aux_pgoff)
785			return virt_to_page(rb->aux_pages[pgoff - rb->aux_pgoff]);
786	}
787
788	return __perf_mmap_to_page(rb, pgoff);
789}
v6.13.7 (kernel/events/ring_buffer.c)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Performance events ring-buffer code:
  4 *
  5 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
  6 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
  7 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
  8 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  9 */
 10
 11#include <linux/perf_event.h>
 12#include <linux/vmalloc.h>
 13#include <linux/slab.h>
 14#include <linux/circ_buf.h>
 15#include <linux/poll.h>
 16#include <linux/nospec.h>
 17
 18#include "internal.h"
 19
 20static void perf_output_wakeup(struct perf_output_handle *handle)
 21{
 22	atomic_set(&handle->rb->poll, EPOLLIN);
 23
 24	handle->event->pending_wakeup = 1;
 25
 26	if (*perf_event_fasync(handle->event) && !handle->event->pending_kill)
 27		handle->event->pending_kill = POLL_IN;
 28
 29	irq_work_queue(&handle->event->pending_irq);
 30}
 31
 32/*
 33 * We need to ensure a later event_id doesn't publish a head when a former
 34 * event isn't done writing. However since we need to deal with NMIs we
 35 * cannot fully serialize things.
 36 *
 37 * We only publish the head (and generate a wakeup) when the outer-most
 38 * event completes.
 39 */
 40static void perf_output_get_handle(struct perf_output_handle *handle)
 41{
 42	struct perf_buffer *rb = handle->rb;
 43
 44	preempt_disable();
 45
 46	/*
 47	 * Avoid an explicit LOAD/STORE such that architectures with memops
 48	 * can use them.
 49	 */
 50	(*(volatile unsigned int *)&rb->nest)++;
 51	handle->wakeup = local_read(&rb->wakeup);
 52}
 53
 54static void perf_output_put_handle(struct perf_output_handle *handle)
 55{
 56	struct perf_buffer *rb = handle->rb;
 57	unsigned long head;
 58	unsigned int nest;
 59
 60	/*
 61	 * If this isn't the outermost nesting, we don't have to update
 62	 * @rb->user_page->data_head.
 63	 */
 64	nest = READ_ONCE(rb->nest);
 65	if (nest > 1) {
 66		WRITE_ONCE(rb->nest, nest - 1);
 67		goto out;
 68	}
 69
 70again:
 71	/*
 72	 * In order to avoid publishing a head value that goes backwards,
 73	 * we must ensure the load of @rb->head happens after we've
 74	 * incremented @rb->nest.
 75	 *
 76	 * Otherwise we can observe a @rb->head value before one published
 77	 * by an IRQ/NMI happening between the load and the increment.
 78	 */
 79	barrier();
 80	head = local_read(&rb->head);
 81
 82	/*
 83	 * IRQ/NMI can happen here and advance @rb->head, causing our
 84	 * load above to be stale.
 85	 */
 86
 87	/*
 88	 * Since the mmap() consumer (userspace) can run on a different CPU:
 89	 *
 90	 *   kernel				user
 91	 *
 92	 *   if (LOAD ->data_tail) {		LOAD ->data_head
 93	 *			(A)		smp_rmb()	(C)
 94	 *	STORE $data			LOAD $data
 95	 *	smp_wmb()	(B)		smp_mb()	(D)
 96	 *	STORE ->data_head		STORE ->data_tail
 97	 *   }
 98	 *
 99	 * Where A pairs with D, and B pairs with C.
100	 *
101	 * In our case (A) is a control dependency that separates the load of
102	 * the ->data_tail and the stores of $data. In case ->data_tail
103	 * indicates there is no room in the buffer to store $data we do not.
104	 *
105	 * D needs to be a full barrier since it separates the data READ
106	 * from the tail WRITE.
107	 *
108	 * For B a WMB is sufficient since it separates two WRITEs, and for C
109	 * an RMB is sufficient since it separates two READs.
110	 *
111	 * See perf_output_begin().
112	 */
113	smp_wmb(); /* B, matches C */
114	WRITE_ONCE(rb->user_page->data_head, head);
115
116	/*
117	 * We must publish the head before decrementing the nest count,
118	 * otherwise an IRQ/NMI can publish a more recent head value and our
119	 * write will (temporarily) publish a stale value.
120	 */
121	barrier();
122	WRITE_ONCE(rb->nest, 0);
123
124	/*
125	 * Ensure we decrement @rb->nest before we validate the @rb->head.
126	 * Otherwise we cannot be sure we caught the 'last' nested update.
127	 */
128	barrier();
129	if (unlikely(head != local_read(&rb->head))) {
130		WRITE_ONCE(rb->nest, 1);
131		goto again;
132	}
133
134	if (handle->wakeup != local_read(&rb->wakeup))
135		perf_output_wakeup(handle);
136
137out:
138	preempt_enable();
139}
140
141static __always_inline bool
142ring_buffer_has_space(unsigned long head, unsigned long tail,
143		      unsigned long data_size, unsigned int size,
144		      bool backward)
145{
146	if (!backward)
147		return CIRC_SPACE(head, tail, data_size) >= size;
148	else
149		return CIRC_SPACE(tail, head, data_size) >= size;
150}
151
152static __always_inline int
153__perf_output_begin(struct perf_output_handle *handle,
154		    struct perf_sample_data *data,
155		    struct perf_event *event, unsigned int size,
156		    bool backward)
157{
158	struct perf_buffer *rb;
159	unsigned long tail, offset, head;
160	int have_lost, page_shift;
161	struct {
162		struct perf_event_header header;
163		u64			 id;
164		u64			 lost;
165	} lost_event;
166
167	rcu_read_lock();
168	/*
169	 * For inherited events we send all the output towards the parent.
170	 */
171	if (event->parent)
172		event = event->parent;
173
174	rb = rcu_dereference(event->rb);
175	if (unlikely(!rb))
176		goto out;
177
178	if (unlikely(rb->paused)) {
179		if (rb->nr_pages) {
180			local_inc(&rb->lost);
181			atomic64_inc(&event->lost_samples);
182		}
183		goto out;
184	}
185
186	handle->rb    = rb;
187	handle->event = event;
188
189	have_lost = local_read(&rb->lost);
190	if (unlikely(have_lost)) {
191		size += sizeof(lost_event);
192		if (event->attr.sample_id_all)
193			size += event->id_header_size;
194	}
195
196	perf_output_get_handle(handle);
197
198	offset = local_read(&rb->head);
199	do {
200		head = offset;
201		tail = READ_ONCE(rb->user_page->data_tail);
202		if (!rb->overwrite) {
203			if (unlikely(!ring_buffer_has_space(head, tail,
204							    perf_data_size(rb),
205							    size, backward)))
206				goto fail;
207		}
208
209		/*
210		 * The above forms a control dependency barrier separating the
211		 * @tail load above from the data stores below. Since the @tail
212		 * load is required to compute the branch to fail below.
213		 *
214		 * A, matches D; the full memory barrier userspace SHOULD issue
215		 * after reading the data and before storing the new tail
216		 * position.
217		 *
218		 * See perf_output_put_handle().
219		 */
220
221		if (!backward)
222			head += size;
223		else
224			head -= size;
225	} while (!local_try_cmpxchg(&rb->head, &offset, head));
226
227	if (backward) {
228		offset = head;
229		head = (u64)(-head);
230	}
231
232	/*
233	 * We rely on the implied barrier() by local_cmpxchg() to ensure
234	 * none of the data stores below can be lifted up by the compiler.
235	 */
236
237	if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
238		local_add(rb->watermark, &rb->wakeup);
239
240	page_shift = PAGE_SHIFT + page_order(rb);
241
242	handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
243	offset &= (1UL << page_shift) - 1;
244	handle->addr = rb->data_pages[handle->page] + offset;
245	handle->size = (1UL << page_shift) - offset;
246
247	if (unlikely(have_lost)) {
248		lost_event.header.size = sizeof(lost_event);
249		lost_event.header.type = PERF_RECORD_LOST;
250		lost_event.header.misc = 0;
251		lost_event.id          = event->id;
252		lost_event.lost        = local_xchg(&rb->lost, 0);
253
 254		/* XXX mostly redundant; @data is already fully initialized */
255		perf_event_header__init_id(&lost_event.header, data, event);
256		perf_output_put(handle, lost_event);
257		perf_event__output_id_sample(event, handle, data);
258	}
259
260	return 0;
261
262fail:
263	local_inc(&rb->lost);
264	atomic64_inc(&event->lost_samples);
265	perf_output_put_handle(handle);
266out:
267	rcu_read_unlock();
268
269	return -ENOSPC;
270}
271
272int perf_output_begin_forward(struct perf_output_handle *handle,
273			      struct perf_sample_data *data,
274			      struct perf_event *event, unsigned int size)
275{
276	return __perf_output_begin(handle, data, event, size, false);
277}
278
279int perf_output_begin_backward(struct perf_output_handle *handle,
280			       struct perf_sample_data *data,
281			       struct perf_event *event, unsigned int size)
282{
283	return __perf_output_begin(handle, data, event, size, true);
284}
285
286int perf_output_begin(struct perf_output_handle *handle,
287		      struct perf_sample_data *data,
288		      struct perf_event *event, unsigned int size)
289{
290
291	return __perf_output_begin(handle, data, event, size,
292				   unlikely(is_write_backward(event)));
293}
294
295unsigned int perf_output_copy(struct perf_output_handle *handle,
296		      const void *buf, unsigned int len)
297{
298	return __output_copy(handle, buf, len);
299}
300
301unsigned int perf_output_skip(struct perf_output_handle *handle,
302			      unsigned int len)
303{
304	return __output_skip(handle, NULL, len);
305}
306
307void perf_output_end(struct perf_output_handle *handle)
308{
309	perf_output_put_handle(handle);
310	rcu_read_unlock();
311}
312
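Relative to the v4.6 API earlier on this page, callers now pass the perf_sample_data they have already prepared. A hypothetical caller in the style of perf_log_throttle() (the mydrv_* name and payload are invented for illustration):

static void mydrv_emit_record(struct perf_event *event,
			      const void *payload, unsigned int len)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	struct perf_event_header header = {
		.type = PERF_RECORD_SAMPLE,
		.misc = 0,
		.size = sizeof(header) + len,
	};

	/* fills @sample and grows header.size if sample_id_all is set */
	perf_event_header__init_id(&header, &sample, event);

	if (perf_output_begin(&handle, &sample, event, header.size))
		return;

	perf_output_put(&handle, header);
	perf_output_copy(&handle, payload, len);
	perf_event__output_id_sample(event, &handle, &sample);
	perf_output_end(&handle);
}
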
313static void
314ring_buffer_init(struct perf_buffer *rb, long watermark, int flags)
315{
316	long max_size = perf_data_size(rb);
317
318	if (watermark)
319		rb->watermark = min(max_size, watermark);
320
321	if (!rb->watermark)
322		rb->watermark = max_size / 2;
323
324	if (flags & RING_BUFFER_WRITABLE)
325		rb->overwrite = 0;
326	else
327		rb->overwrite = 1;
328
329	refcount_set(&rb->refcount, 1);
330
331	INIT_LIST_HEAD(&rb->event_list);
332	spin_lock_init(&rb->event_lock);
333
334	/*
335	 * perf_output_begin() only checks rb->paused, therefore
336	 * rb->paused must be true if we have no pages for output.
337	 */
338	if (!rb->nr_pages)
339		rb->paused = 1;
340
341	mutex_init(&rb->aux_mutex);
342}
343
344void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags)
345{
346	/*
347	 * OVERWRITE is determined by perf_aux_output_end() and can't
348	 * be passed in directly.
349	 */
350	if (WARN_ON_ONCE(flags & PERF_AUX_FLAG_OVERWRITE))
351		return;
352
353	handle->aux_flags |= flags;
354}
355EXPORT_SYMBOL_GPL(perf_aux_output_flag);
356
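A PMU driver calls this on its in-flight handle before closing the transaction, typically from its pmu::stop() or interrupt path. A sketch (the function and condition names are invented):

static void mypmu_mark_aux_state(struct perf_output_handle *handle,
				 bool filled, bool data_lost)
{
	if (filled)	/* ran out of space: output was cut short */
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	if (data_lost)	/* gaps in the trace data itself */
		perf_aux_output_flag(handle, PERF_AUX_FLAG_PARTIAL);
}
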
357/*
358 * This is called before hardware starts writing to the AUX area to
359 * obtain an output handle and make sure there's room in the buffer.
360 * When the capture completes, call perf_aux_output_end() to commit
361 * the recorded data to the buffer.
362 *
363 * The ordering is similar to that of perf_output_{begin,end}, with
364 * the exception of (B), which should be taken care of by the pmu
365 * driver, since ordering rules will differ depending on hardware.
366 *
367 * Call this from pmu::start(); see the comment in perf_aux_output_end()
368 * about its use in pmu callbacks. Both can also be called from the PMI
369 * handler if needed.
370 */
371void *perf_aux_output_begin(struct perf_output_handle *handle,
372			    struct perf_event *event)
373{
374	struct perf_event *output_event = event;
375	unsigned long aux_head, aux_tail;
376	struct perf_buffer *rb;
377	unsigned int nest;
378
379	if (output_event->parent)
380		output_event = output_event->parent;
381
382	/*
383	 * Since this will typically be open across pmu::add/pmu::del, we
384	 * grab ring_buffer's refcount instead of holding rcu read lock
385	 * to make sure it doesn't disappear under us.
386	 */
387	rb = ring_buffer_get(output_event);
388	if (!rb)
389		return NULL;
390
391	if (!rb_has_aux(rb))
392		goto err;
393
394	/*
395	 * If aux_mmap_count is zero, the aux buffer is in perf_mmap_close(),
396	 * about to get freed, so we leave immediately.
397	 *
398	 * Checking rb::aux_mmap_count and rb::refcount has to be done in
399	 * the same order, see perf_mmap_close. Otherwise we end up freeing
400	 * aux pages in this path, which is a bug, because in_atomic().
401	 */
402	if (!atomic_read(&rb->aux_mmap_count))
403		goto err;
404
405	if (!refcount_inc_not_zero(&rb->aux_refcount))
406		goto err;
407
408	nest = READ_ONCE(rb->aux_nest);
409	/*
410	 * Nesting is not supported for AUX area, make sure nested
411	 * writers are caught early
412	 */
413	if (WARN_ON_ONCE(nest))
414		goto err_put;
415
416	WRITE_ONCE(rb->aux_nest, nest + 1);
417
418	aux_head = rb->aux_head;
419
420	handle->rb = rb;
421	handle->event = event;
422	handle->head = aux_head;
423	handle->size = 0;
424	handle->aux_flags = 0;
425
426	/*
427	 * In overwrite mode, AUX data stores do not depend on aux_tail,
428	 * therefore (A) control dependency barrier does not exist. The
429	 * (B) <-> (C) ordering is still observed by the pmu driver.
430	 */
431	if (!rb->aux_overwrite) {
432		aux_tail = READ_ONCE(rb->user_page->aux_tail);
433		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
434		if (aux_head - aux_tail < perf_aux_size(rb))
435			handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));
436
437		/*
438		 * handle->size computation depends on aux_tail load; this forms a
439		 * control dependency barrier separating aux_tail load from aux data
440		 * store that will be enabled on successful return
441		 */
442		if (!handle->size) { /* A, matches D */
443			event->pending_disable = smp_processor_id();
444			perf_output_wakeup(handle);
445			WRITE_ONCE(rb->aux_nest, 0);
446			goto err_put;
447		}
448	}
449
450	return handle->rb->aux_priv;
451
452err_put:
453	/* can't be last */
454	rb_free_aux(rb);
455
456err:
457	ring_buffer_put(rb);
458	handle->event = NULL;
459
460	return NULL;
461}
462EXPORT_SYMBOL_GPL(perf_aux_output_begin);
463
464static __always_inline bool rb_need_aux_wakeup(struct perf_buffer *rb)
465{
466	if (rb->aux_overwrite)
467		return false;
468
469	if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {
470		rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
471		return true;
472	}
473
474	return false;
475}
476
477/*
478 * Commit the data written by hardware into the ring buffer by adjusting
479 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
480 * pmu driver's responsibility to observe ordering rules of the hardware,
481 * so that all the data is externally visible before this is called.
482 *
483 * Note: this has to be called from pmu::stop() callback, as the assumption
484 * of the AUX buffer management code is that after pmu::stop(), the AUX
485 * transaction must be stopped and therefore drop the AUX reference count.
486 */
487void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
488{
489	bool wakeup = !!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED);
490	struct perf_buffer *rb = handle->rb;
491	unsigned long aux_head;
492
493	/* in overwrite mode, driver provides aux_head via handle */
494	if (rb->aux_overwrite) {
495		handle->aux_flags |= PERF_AUX_FLAG_OVERWRITE;
496
497		aux_head = handle->head;
498		rb->aux_head = aux_head;
499	} else {
500		handle->aux_flags &= ~PERF_AUX_FLAG_OVERWRITE;
501
502		aux_head = rb->aux_head;
503		rb->aux_head += size;
504	}
505
506	/*
507	 * Only send RECORD_AUX if we have something useful to communicate
508	 *
509	 * Note: the OVERWRITE records by themselves are not considered
510	 * useful, as they don't communicate any *new* information,
511	 * aside from the short-lived offset, that becomes history at
512	 * the next event sched-in and therefore isn't useful.
513	 * The userspace that needs to copy out AUX data in overwrite
514	 * mode should know to use user_page::aux_head for the actual
515	 * offset. So, from now on we don't output AUX records that
516	 * have *only* OVERWRITE flag set.
517	 */
518	if (size || (handle->aux_flags & ~(u64)PERF_AUX_FLAG_OVERWRITE))
519		perf_event_aux_event(handle->event, aux_head, size,
520				     handle->aux_flags);
521
522	WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
523	if (rb_need_aux_wakeup(rb))
524		wakeup = true;
525
526	if (wakeup) {
527		if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)
528			handle->event->pending_disable = smp_processor_id();
529		perf_output_wakeup(handle);
530	}
531
532	handle->event = NULL;
533
534	WRITE_ONCE(rb->aux_nest, 0);
535	/* can't be last */
536	rb_free_aux(rb);
537	ring_buffer_put(rb);
538}
539EXPORT_SYMBOL_GPL(perf_aux_output_end);
540
541/*
542 * Skip over a given number of bytes in the AUX buffer, due to, for example,
543 * hardware's alignment constraints.
544 */
545int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
546{
547	struct perf_buffer *rb = handle->rb;
548
549	if (size > handle->size)
550		return -ENOSPC;
551
552	rb->aux_head += size;
553
554	WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
555	if (rb_need_aux_wakeup(rb)) {
556		perf_output_wakeup(handle);
557		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
558	}
559
560	handle->head = rb->aux_head;
561	handle->size -= size;
562
563	return 0;
564}
565EXPORT_SYMBOL_GPL(perf_aux_output_skip);
566
567void *perf_get_aux(struct perf_output_handle *handle)
568{
569	/* this is only valid between perf_aux_output_begin and *_end */
570	if (!handle->event)
571		return NULL;
572
573	return handle->rb->aux_priv;
574}
575EXPORT_SYMBOL_GPL(perf_get_aux);
576
577/*
578 * Copy out AUX data from an AUX handle.
579 */
580long perf_output_copy_aux(struct perf_output_handle *aux_handle,
581			  struct perf_output_handle *handle,
582			  unsigned long from, unsigned long to)
583{
584	struct perf_buffer *rb = aux_handle->rb;
585	unsigned long tocopy, remainder, len = 0;
586	void *addr;
587
588	from &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;
589	to &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;
590
591	do {
592		tocopy = PAGE_SIZE - offset_in_page(from);
593		if (to > from)
594			tocopy = min(tocopy, to - from);
595		if (!tocopy)
596			break;
597
598		addr = rb->aux_pages[from >> PAGE_SHIFT];
599		addr += offset_in_page(from);
600
601		remainder = perf_output_copy(handle, addr, tocopy);
602		if (remainder)
603			return -EFAULT;
604
605		len += tocopy;
606		from += tocopy;
607		from &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;
608	} while (to != from);
609
610	return len;
611}
612
613#define PERF_AUX_GFP	(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)
614
615static struct page *rb_alloc_aux_page(int node, int order)
616{
617	struct page *page;
618
619	if (order > MAX_PAGE_ORDER)
620		order = MAX_PAGE_ORDER;
621
622	do {
623		page = alloc_pages_node(node, PERF_AUX_GFP, order);
624	} while (!page && order--);
625
626	if (page && order) {
627		/*
628		 * Communicate the allocation size to the driver:
629		 * if we managed to secure a high-order allocation,
630		 * set its first page's private to this order;
631		 * !PagePrivate(page) means it's just a normal page.
632		 */
633		split_page(page, order);
634		SetPagePrivate(page);
635		set_page_private(page, order);
636	}
637
638	return page;
639}
640
641static void rb_free_aux_page(struct perf_buffer *rb, int idx)
642{
643	struct page *page = virt_to_page(rb->aux_pages[idx]);
644
645	ClearPagePrivate(page);
646	page->mapping = NULL;
647	__free_page(page);
648}
649
650static void __rb_free_aux(struct perf_buffer *rb)
651{
652	int pg;
653
654	/*
655	 * Should never happen, the last reference should be dropped from
656	 * perf_mmap_close() path, which first stops aux transactions (which
657	 * in turn are the atomic holders of aux_refcount) and then does the
658	 * last rb_free_aux().
659	 */
660	WARN_ON_ONCE(in_atomic());
661
662	if (rb->aux_priv) {
663		rb->free_aux(rb->aux_priv);
664		rb->free_aux = NULL;
665		rb->aux_priv = NULL;
666	}
667
668	if (rb->aux_nr_pages) {
669		for (pg = 0; pg < rb->aux_nr_pages; pg++)
670			rb_free_aux_page(rb, pg);
671
672		kfree(rb->aux_pages);
673		rb->aux_nr_pages = 0;
674	}
675}
676
677int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
678		 pgoff_t pgoff, int nr_pages, long watermark, int flags)
679{
680	bool overwrite = !(flags & RING_BUFFER_WRITABLE);
681	int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
682	int ret = -ENOMEM, max_order;
683
684	if (!has_aux(event))
685		return -EOPNOTSUPP;
686
687	if (nr_pages <= 0)
688		return -EINVAL;
689
690	if (!overwrite) {
691		/*
692		 * Watermark defaults to half the buffer, and so does the
693		 * max_order, to aid PMU drivers in double buffering.
694		 */
695		if (!watermark)
696			watermark = min_t(unsigned long,
697					  U32_MAX,
698					  (unsigned long)nr_pages << (PAGE_SHIFT - 1));
699
700		/*
701		 * Use aux_watermark as the basis for chunking to
702		 * help PMU drivers honor the watermark.
703		 */
704		max_order = get_order(watermark);
705	} else {
706		/*
707		 * We need to start with the max_order that fits in nr_pages,
708		 * not the other way around, hence ilog2() and not get_order.
709		 */
710		max_order = ilog2(nr_pages);
711		watermark = 0;
712	}
713
714	/*
715	 * kcalloc_node() is unable to allocate buffer if the size is larger
716	 * than: PAGE_SIZE << MAX_PAGE_ORDER; directly bail out in this case.
717	 */
718	if (get_order((unsigned long)nr_pages * sizeof(void *)) > MAX_PAGE_ORDER)
719		return -ENOMEM;
720	rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL,
721				     node);
722	if (!rb->aux_pages)
723		return -ENOMEM;
724
725	rb->free_aux = event->pmu->free_aux;
726	for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
727		struct page *page;
728		int last, order;
729
730		order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
731		page = rb_alloc_aux_page(node, order);
732		if (!page)
733			goto out;
734
735		for (last = rb->aux_nr_pages + (1 << page_private(page));
736		     last > rb->aux_nr_pages; rb->aux_nr_pages++)
737			rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
738	}
739
740	/*
741	 * In overwrite mode, PMUs that don't support SG may not handle more
742	 * than one contiguous allocation, since they rely on PMI to do double
743	 * buffering. In this case, the entire buffer has to be one contiguous
744	 * chunk.
745	 */
746	if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
747	    overwrite) {
748		struct page *page = virt_to_page(rb->aux_pages[0]);
749
750		if (page_private(page) != max_order)
751			goto out;
752	}
753
754	rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages,
755					     overwrite);
756	if (!rb->aux_priv)
757		goto out;
758
759	ret = 0;
760
761	/*
762	 * aux_pages (and pmu driver's private data, aux_priv) will be
763	 * referenced in both producer's and consumer's contexts, thus
764	 * we keep a refcount here to make sure either of the two can
765	 * reference them safely.
766	 */
767	refcount_set(&rb->aux_refcount, 1);
768
769	rb->aux_overwrite = overwrite;
770	rb->aux_watermark = watermark;
771
772out:
773	if (!ret)
774		rb->aux_pgoff = pgoff;
775	else
776		__rb_free_aux(rb);
777
778	return ret;
779}
780
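A worked example of the defaults above, assuming 4 KiB pages: for a 64-page AUX buffer in non-overwrite mode with no explicit watermark, watermark = 64 << (PAGE_SHIFT - 1) = 131072 bytes (128 KiB, half the buffer), and max_order = get_order(131072) = 5, so the allocation loop hands out chunks of at most 32 contiguous pages, i.e. two 32-page chunks for this buffer.
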
781void rb_free_aux(struct perf_buffer *rb)
782{
783	if (refcount_dec_and_test(&rb->aux_refcount))
784		__rb_free_aux(rb);
785}
786
787#ifndef CONFIG_PERF_USE_VMALLOC
788
789/*
790 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
791 */
792
793static struct page *
794__perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
795{
796	if (pgoff > rb->nr_pages)
797		return NULL;
798
799	if (pgoff == 0)
800		return virt_to_page(rb->user_page);
801
802	return virt_to_page(rb->data_pages[pgoff - 1]);
803}
804
805static void *perf_mmap_alloc_page(int cpu)
806{
807	struct page *page;
808	int node;
809
810	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
811	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
812	if (!page)
813		return NULL;
814
815	return page_address(page);
816}
817
818static void perf_mmap_free_page(void *addr)
819{
820	struct page *page = virt_to_page(addr);
821
822	page->mapping = NULL;
823	__free_page(page);
824}
825
826struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
827{
828	struct perf_buffer *rb;
829	unsigned long size;
830	int i, node;
831
832	size = sizeof(struct perf_buffer);
833	size += nr_pages * sizeof(void *);
834
835	if (order_base_2(size) > PAGE_SHIFT+MAX_PAGE_ORDER)
836		goto fail;
837
838	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
839	rb = kzalloc_node(size, GFP_KERNEL, node);
840	if (!rb)
841		goto fail;
842
843	rb->user_page = perf_mmap_alloc_page(cpu);
844	if (!rb->user_page)
845		goto fail_user_page;
846
847	for (i = 0; i < nr_pages; i++) {
848		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
849		if (!rb->data_pages[i])
850			goto fail_data_pages;
851	}
852
853	rb->nr_pages = nr_pages;
854
855	ring_buffer_init(rb, watermark, flags);
856
857	return rb;
858
859fail_data_pages:
860	for (i--; i >= 0; i--)
861		perf_mmap_free_page(rb->data_pages[i]);
862
863	perf_mmap_free_page(rb->user_page);
864
865fail_user_page:
866	kfree(rb);
867
868fail:
869	return NULL;
870}
871
872void rb_free(struct perf_buffer *rb)
873{
874	int i;
875
876	perf_mmap_free_page(rb->user_page);
877	for (i = 0; i < rb->nr_pages; i++)
878		perf_mmap_free_page(rb->data_pages[i]);
879	kfree(rb);
880}
881
882#else
883static struct page *
884__perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
885{
886	/* The '>' counts in the user page. */
887	if (pgoff > data_page_nr(rb))
888		return NULL;
889
890	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
891}
892
893static void perf_mmap_unmark_page(void *addr)
894{
895	struct page *page = vmalloc_to_page(addr);
896
897	page->mapping = NULL;
898}
899
900static void rb_free_work(struct work_struct *work)
901{
902	struct perf_buffer *rb;
903	void *base;
904	int i, nr;
905
906	rb = container_of(work, struct perf_buffer, work);
907	nr = data_page_nr(rb);
908
909	base = rb->user_page;
910	/* The '<=' counts in the user page. */
911	for (i = 0; i <= nr; i++)
912		perf_mmap_unmark_page(base + (i * PAGE_SIZE));
913
914	vfree(base);
915	kfree(rb);
916}
917
918void rb_free(struct perf_buffer *rb)
919{
920	schedule_work(&rb->work);
921}
922
923struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
924{
925	struct perf_buffer *rb;
926	unsigned long size;
927	void *all_buf;
928	int node;
929
930	size = sizeof(struct perf_buffer);
931	size += sizeof(void *);
932
933	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
934	rb = kzalloc_node(size, GFP_KERNEL, node);
935	if (!rb)
936		goto fail;
937
938	INIT_WORK(&rb->work, rb_free_work);
939
940	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
941	if (!all_buf)
942		goto fail_all_buf;
943
944	rb->user_page = all_buf;
945	rb->data_pages[0] = all_buf + PAGE_SIZE;
946	if (nr_pages) {
947		rb->nr_pages = 1;
948		rb->page_order = ilog2(nr_pages);
949	}
950
951	ring_buffer_init(rb, watermark, flags);
952
953	return rb;
954
955fail_all_buf:
956	kfree(rb);
957
958fail:
959	return NULL;
960}
961
962#endif
963
964struct page *
965perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
966{
967	if (rb->aux_nr_pages) {
968		/* above AUX space */
969		if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
970			return NULL;
971
972		/* AUX space */
973		if (pgoff >= rb->aux_pgoff) {
974			int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages);
975			return virt_to_page(rb->aux_pages[aux_pgoff]);
976		}
977	}
978
979	return __perf_mmap_to_page(rb, pgoff);
980}
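
The pgoff ranges handled above correspond to how userspace maps the buffers: offset 0 covers the perf_event_mmap_page plus the data pages, and the AUX area is mapped at the file offset userspace advertises in aux_offset/aux_size. A minimal userspace sketch (error handling trimmed; assumes the event was opened with its fd in perf_fd and that data_pages is a power of two):

#include <linux/perf_event.h>
#include <sys/mman.h>
#include <unistd.h>

static struct perf_event_mmap_page *
map_ring_with_aux(int perf_fd, size_t data_pages, size_t aux_pages, void **aux)
{
	long psz = sysconf(_SC_PAGESIZE);
	struct perf_event_mmap_page *meta;

	/* pgoff 0: user page + 2^n data pages */
	meta = mmap(NULL, (data_pages + 1) * psz, PROT_READ | PROT_WRITE,
		    MAP_SHARED, perf_fd, 0);
	if (meta == MAP_FAILED)
		return NULL;

	/* advertise the AUX area, then map it at that file offset;
	 * perf_mmap_to_page() resolves those pages via rb->aux_pgoff */
	meta->aux_offset = (data_pages + 1) * psz;
	meta->aux_size   = aux_pages * psz;

	*aux = mmap(NULL, meta->aux_size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, perf_fd, meta->aux_offset);
	if (*aux == MAP_FAILED)
		*aux = NULL;

	return meta;
}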