1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Performance events ring-buffer code:
4 *
5 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
6 * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
7 * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
8 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
9 */
10
11#include <linux/perf_event.h>
12#include <linux/vmalloc.h>
13#include <linux/slab.h>
14#include <linux/circ_buf.h>
15#include <linux/poll.h>
16#include <linux/nospec.h>
17
18#include "internal.h"
19
20static void perf_output_wakeup(struct perf_output_handle *handle)
21{
22 atomic_set(&handle->rb->poll, EPOLLIN);
23
24 handle->event->pending_wakeup = 1;
25 irq_work_queue(&handle->event->pending);
26}
27
28/*
29 * We need to ensure a later event_id doesn't publish a head when a former
30 * event isn't done writing. However, since we need to deal with NMIs we
31 * cannot fully serialize things.
32 *
33 * We only publish the head (and generate a wakeup) when the outer-most
34 * event completes.
35 */
36static void perf_output_get_handle(struct perf_output_handle *handle)
37{
38 struct ring_buffer *rb = handle->rb;
39
40 preempt_disable();
41
42 /*
43 * Avoid an explicit LOAD/STORE such that architectures with memops
44 * can use them.
45 */
46 (*(volatile unsigned int *)&rb->nest)++;
47 handle->wakeup = local_read(&rb->wakeup);
48}
49
50static void perf_output_put_handle(struct perf_output_handle *handle)
51{
52 struct ring_buffer *rb = handle->rb;
53 unsigned long head;
54 unsigned int nest;
55
56 /*
57 * If this isn't the outermost nesting, we don't have to update
58 * @rb->user_page->data_head.
59 */
60 nest = READ_ONCE(rb->nest);
61 if (nest > 1) {
62 WRITE_ONCE(rb->nest, nest - 1);
63 goto out;
64 }
65
66again:
67 /*
68 * In order to avoid publishing a head value that goes backwards,
69 * we must ensure the load of @rb->head happens after we've
70 * incremented @rb->nest.
71 *
72 * Otherwise we can observe a @rb->head value before one published
73 * by an IRQ/NMI happening between the load and the increment.
74 */
75 barrier();
76 head = local_read(&rb->head);
77
78 /*
79 * IRQ/NMI can happen here and advance @rb->head, causing our
80 * load above to be stale.
81 */
82
83 /*
84 * Since the mmap() consumer (userspace) can run on a different CPU:
85 *
86 * kernel user
87 *
88 * if (LOAD ->data_tail) { LOAD ->data_head
89 * (A) smp_rmb() (C)
90 * STORE $data LOAD $data
91 * smp_wmb() (B) smp_mb() (D)
92 * STORE ->data_head STORE ->data_tail
93 * }
94 *
95 * Where A pairs with D, and B pairs with C.
96 *
97 * In our case (A) is a control dependency that separates the load of
98 * the ->data_tail and the stores of $data. In case ->data_tail
99 * indicates there is no room in the buffer to store $data, we do not store it.
100 *
101 * D needs to be a full barrier since it separates the data READ
102 * from the tail WRITE.
103 *
104 * For B a WMB is sufficient since it separates two WRITEs, and for C
105 * an RMB is sufficient since it separates two READs.
106 *
107 * See perf_output_begin().
108 */
109 smp_wmb(); /* B, matches C */
110 WRITE_ONCE(rb->user_page->data_head, head);
111
112 /*
113 * We must publish the head before decrementing the nest count,
114 * otherwise an IRQ/NMI can publish a more recent head value and our
115 * write will (temporarily) publish a stale value.
116 */
117 barrier();
118 WRITE_ONCE(rb->nest, 0);
119
120 /*
121 * Ensure we decrement @rb->nest before we validate the @rb->head.
122 * Otherwise we cannot be sure we caught the 'last' nested update.
123 */
124 barrier();
125 if (unlikely(head != local_read(&rb->head))) {
126 WRITE_ONCE(rb->nest, 1);
127 goto again;
128 }
129
130 if (handle->wakeup != local_read(&rb->wakeup))
131 perf_output_wakeup(handle);
132
133out:
134 preempt_enable();
135}
136
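/*
 * Illustrative user-space consumer for the scheme described above: (C) is
 * the acquire load of ->data_head, (D) is the full barrier before the new
 * ->data_tail is published.  This is only a sketch: it assumes the buffer
 * was mmap()ed as one metadata page followed by 2^n data pages, it ignores
 * records that wrap around the end of the buffer, and handle_record() is a
 * hypothetical callback, not part of this file.
 */
#if 0	/* user-space sketch, not kernel code */
#include <linux/perf_event.h>
#include <stdint.h>

static void consume_ring(struct perf_event_mmap_page *meta, uint64_t data_size,
			 void (*handle_record)(void *rec, uint16_t len))
{
	uint8_t *data = (uint8_t *)meta + meta->data_offset;
	uint64_t tail = meta->data_tail;
	uint64_t head = __atomic_load_n(&meta->data_head, __ATOMIC_ACQUIRE); /* C */

	while (tail != head) {
		struct perf_event_header *hdr =
			(void *)(data + (tail & (data_size - 1)));

		handle_record(hdr, hdr->size);
		tail += hdr->size;
	}

	/* D: full barrier between the data reads and the tail store */
	__atomic_store_n(&meta->data_tail, tail, __ATOMIC_SEQ_CST);
}
#endif
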
137static __always_inline bool
138ring_buffer_has_space(unsigned long head, unsigned long tail,
139 unsigned long data_size, unsigned int size,
140 bool backward)
141{
142 if (!backward)
143 return CIRC_SPACE(head, tail, data_size) >= size;
144 else
145 return CIRC_SPACE(tail, head, data_size) >= size;
146}
147
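/*
 * Worked example of the check above (illustrative numbers): with a 16-byte
 * data area, head == 10 and tail == 4, CIRC_SPACE() keeps one byte free,
 * so a forward writer may claim at most ((4 - (10 + 1)) & 15) == 9 bytes;
 * a backward writer, which moves head towards tail, gets
 * CIRC_SPACE(tail, head, 16) == 5 bytes.
 */
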
148static __always_inline int
149__perf_output_begin(struct perf_output_handle *handle,
150 struct perf_event *event, unsigned int size,
151 bool backward)
152{
153 struct ring_buffer *rb;
154 unsigned long tail, offset, head;
155 int have_lost, page_shift;
156 struct {
157 struct perf_event_header header;
158 u64 id;
159 u64 lost;
160 } lost_event;
161
162 rcu_read_lock();
163 /*
164 * For inherited events we send all the output towards the parent.
165 */
166 if (event->parent)
167 event = event->parent;
168
169 rb = rcu_dereference(event->rb);
170 if (unlikely(!rb))
171 goto out;
172
173 if (unlikely(rb->paused)) {
174 if (rb->nr_pages)
175 local_inc(&rb->lost);
176 goto out;
177 }
178
179 handle->rb = rb;
180 handle->event = event;
181
182 have_lost = local_read(&rb->lost);
183 if (unlikely(have_lost)) {
184 size += sizeof(lost_event);
185 if (event->attr.sample_id_all)
186 size += event->id_header_size;
187 }
188
189 perf_output_get_handle(handle);
190
191 do {
192 tail = READ_ONCE(rb->user_page->data_tail);
193 offset = head = local_read(&rb->head);
194 if (!rb->overwrite) {
195 if (unlikely(!ring_buffer_has_space(head, tail,
196 perf_data_size(rb),
197 size, backward)))
198 goto fail;
199 }
200
201 /*
202 * The above forms a control dependency barrier separating the
203 * @tail load above from the data stores below, since the @tail
204 * load is required to compute the branch to the fail label below.
205 *
206 * A, matches D; the full memory barrier userspace SHOULD issue
207 * after reading the data and before storing the new tail
208 * position.
209 *
210 * See perf_output_put_handle().
211 */
212
213 if (!backward)
214 head += size;
215 else
216 head -= size;
217 } while (local_cmpxchg(&rb->head, offset, head) != offset);
218
219 if (backward) {
220 offset = head;
221 head = (u64)(-head);
222 }
223
224 /*
225 * We rely on the implied barrier() by local_cmpxchg() to ensure
226 * none of the data stores below can be lifted up by the compiler.
227 */
228
229 if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
230 local_add(rb->watermark, &rb->wakeup);
231
232 page_shift = PAGE_SHIFT + page_order(rb);
233
234 handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
235 offset &= (1UL << page_shift) - 1;
236 handle->addr = rb->data_pages[handle->page] + offset;
237 handle->size = (1UL << page_shift) - offset;
238
239 if (unlikely(have_lost)) {
240 struct perf_sample_data sample_data;
241
242 lost_event.header.size = sizeof(lost_event);
243 lost_event.header.type = PERF_RECORD_LOST;
244 lost_event.header.misc = 0;
245 lost_event.id = event->id;
246 lost_event.lost = local_xchg(&rb->lost, 0);
247
248 perf_event_header__init_id(&lost_event.header,
249 &sample_data, event);
250 perf_output_put(handle, lost_event);
251 perf_event__output_id_sample(event, handle, &sample_data);
252 }
253
254 return 0;
255
256fail:
257 local_inc(&rb->lost);
258 perf_output_put_handle(handle);
259out:
260 rcu_read_unlock();
261
262 return -ENOSPC;
263}
264
265int perf_output_begin_forward(struct perf_output_handle *handle,
266 struct perf_event *event, unsigned int size)
267{
268 return __perf_output_begin(handle, event, size, false);
269}
270
271int perf_output_begin_backward(struct perf_output_handle *handle,
272 struct perf_event *event, unsigned int size)
273{
274 return __perf_output_begin(handle, event, size, true);
275}
276
277int perf_output_begin(struct perf_output_handle *handle,
278 struct perf_event *event, unsigned int size)
279{
280
281 return __perf_output_begin(handle, event, size,
282 unlikely(is_write_backward(event)));
283}
284
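/*
 * Sketch of the producer-side calling convention wrapped by the functions
 * above: reserve space, copy the payload, commit.  The record emitted here
 * (a bare PERF_RECORD_SAMPLE header) is purely illustrative; a real record
 * carries a type-specific body and the real callers live in
 * kernel/events/core.c.
 */
#if 0	/* illustrative only */
static void example_emit_record(struct perf_event *event)
{
	struct perf_output_handle handle;
	struct perf_event_header header = {
		.type = PERF_RECORD_SAMPLE,
		.misc = 0,
		.size = sizeof(header),
	};

	/* returns -ENOSPC when there is no room; nothing to undo in that case */
	if (perf_output_begin(&handle, event, header.size))
		return;

	perf_output_put(&handle, header);
	perf_output_end(&handle);
}
#endif
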
285unsigned int perf_output_copy(struct perf_output_handle *handle,
286 const void *buf, unsigned int len)
287{
288 return __output_copy(handle, buf, len);
289}
290
291unsigned int perf_output_skip(struct perf_output_handle *handle,
292 unsigned int len)
293{
294 return __output_skip(handle, NULL, len);
295}
296
297void perf_output_end(struct perf_output_handle *handle)
298{
299 perf_output_put_handle(handle);
300 rcu_read_unlock();
301}
302
303static void
304ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
305{
306 long max_size = perf_data_size(rb);
307
308 if (watermark)
309 rb->watermark = min(max_size, watermark);
310
311 if (!rb->watermark)
312 rb->watermark = max_size / 2;
313
314 if (flags & RING_BUFFER_WRITABLE)
315 rb->overwrite = 0;
316 else
317 rb->overwrite = 1;
318
319 refcount_set(&rb->refcount, 1);
320
321 INIT_LIST_HEAD(&rb->event_list);
322 spin_lock_init(&rb->event_lock);
323
324 /*
325 * perf_output_begin() only checks rb->paused, therefore
326 * rb->paused must be true if we have no pages for output.
327 */
328 if (!rb->nr_pages)
329 rb->paused = 1;
330}
331
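/*
 * Illustrative numbers for the defaults above: a writable buffer with
 * 8 data pages of 4KiB and watermark == 0 gets rb->watermark =
 * perf_data_size(rb) / 2 == 16KiB, i.e. a wakeup is generated roughly
 * every time another half-buffer's worth of data has been written.
 */
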
332void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags)
333{
334 /*
335 * OVERWRITE is determined by perf_aux_output_end() and can't
336 * be passed in directly.
337 */
338 if (WARN_ON_ONCE(flags & PERF_AUX_FLAG_OVERWRITE))
339 return;
340
341 handle->aux_flags |= flags;
342}
343EXPORT_SYMBOL_GPL(perf_aux_output_flag);
344
345/*
346 * This is called before hardware starts writing to the AUX area to
347 * obtain an output handle and make sure there's room in the buffer.
348 * When the capture completes, call perf_aux_output_end() to commit
349 * the recorded data to the buffer.
350 *
351 * The ordering is similar to that of perf_output_{begin,end}, with
352 * the exception of (B), which should be taken care of by the pmu
353 * driver, since ordering rules will differ depending on hardware.
354 *
355 * Call this from pmu::start(); see the comment in perf_aux_output_end()
356 * about its use in pmu callbacks. Both can also be called from the PMI
357 * handler if needed.
358 */
359void *perf_aux_output_begin(struct perf_output_handle *handle,
360 struct perf_event *event)
361{
362 struct perf_event *output_event = event;
363 unsigned long aux_head, aux_tail;
364 struct ring_buffer *rb;
365 unsigned int nest;
366
367 if (output_event->parent)
368 output_event = output_event->parent;
369
370 /*
371 * Since this will typically be open across pmu::add/pmu::del, we
372 * grab ring_buffer's refcount instead of holding rcu read lock
373 * to make sure it doesn't disappear under us.
374 */
375 rb = ring_buffer_get(output_event);
376 if (!rb)
377 return NULL;
378
379 if (!rb_has_aux(rb))
380 goto err;
381
382 /*
383 * If aux_mmap_count is zero, the aux buffer is in perf_mmap_close(),
384 * about to get freed, so we leave immediately.
385 *
386 * Checking rb::aux_mmap_count and rb::refcount has to be done in
387 * the same order; see perf_mmap_close(). Otherwise we end up freeing
388 * aux pages in this path, which is a bug because we are in_atomic().
389 */
390 if (!atomic_read(&rb->aux_mmap_count))
391 goto err;
392
393 if (!refcount_inc_not_zero(&rb->aux_refcount))
394 goto err;
395
396 nest = READ_ONCE(rb->aux_nest);
397 /*
398 * Nesting is not supported for the AUX area; make sure nested
399 * writers are caught early.
400 */
401 if (WARN_ON_ONCE(nest))
402 goto err_put;
403
404 WRITE_ONCE(rb->aux_nest, nest + 1);
405
406 aux_head = rb->aux_head;
407
408 handle->rb = rb;
409 handle->event = event;
410 handle->head = aux_head;
411 handle->size = 0;
412 handle->aux_flags = 0;
413
414 /*
415 * In overwrite mode, AUX data stores do not depend on aux_tail,
416 * therefore (A) control dependency barrier does not exist. The
417 * (B) <-> (C) ordering is still observed by the pmu driver.
418 */
419 if (!rb->aux_overwrite) {
420 aux_tail = READ_ONCE(rb->user_page->aux_tail);
421 handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
422 if (aux_head - aux_tail < perf_aux_size(rb))
423 handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));
424
425 /*
426 * The handle->size computation depends on the aux_tail load; this forms
427 * a control dependency barrier separating the aux_tail load from the aux
428 * data stores that will be enabled on successful return.
429 */
430 if (!handle->size) { /* A, matches D */
431 event->pending_disable = smp_processor_id();
432 perf_output_wakeup(handle);
433 WRITE_ONCE(rb->aux_nest, 0);
434 goto err_put;
435 }
436 }
437
438 return handle->rb->aux_priv;
439
440err_put:
441 /* can't be last */
442 rb_free_aux(rb);
443
444err:
445 ring_buffer_put(rb);
446 handle->event = NULL;
447
448 return NULL;
449}
450EXPORT_SYMBOL_GPL(perf_aux_output_begin);
451
452static __always_inline bool rb_need_aux_wakeup(struct ring_buffer *rb)
453{
454 if (rb->aux_overwrite)
455 return false;
456
457 if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {
458 rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
459 return true;
460 }
461
462 return false;
463}
464
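/*
 * Worked example for the helper above (illustrative numbers): with
 * aux_watermark == 16KiB and aux_wakeup == 0, a commit that moves aux_head
 * to 40KiB triggers a wakeup and sets aux_wakeup = rounddown(40KiB, 16KiB)
 * == 32KiB, so the next wakeup fires once aux_head reaches 48KiB.
 */
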
465/*
466 * Commit the data written by hardware into the ring buffer by adjusting
467 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
468 * pmu driver's responsibility to observe ordering rules of the hardware,
469 * so that all the data is externally visible before this is called.
470 *
471 * Note: this has to be called from pmu::stop() callback, as the assumption
472 * of the AUX buffer management code is that after pmu::stop(), the AUX
473 * transaction must be stopped and therefore drop the AUX reference count.
474 */
475void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
476{
477 bool wakeup = !!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED);
478 struct ring_buffer *rb = handle->rb;
479 unsigned long aux_head;
480
481 /* in overwrite mode, driver provides aux_head via handle */
482 if (rb->aux_overwrite) {
483 handle->aux_flags |= PERF_AUX_FLAG_OVERWRITE;
484
485 aux_head = handle->head;
486 rb->aux_head = aux_head;
487 } else {
488 handle->aux_flags &= ~PERF_AUX_FLAG_OVERWRITE;
489
490 aux_head = rb->aux_head;
491 rb->aux_head += size;
492 }
493
494 /*
495 * Only send RECORD_AUX if we have something useful to communicate
496 *
497 * Note: the OVERWRITE records by themselves are not considered
498 * useful, as they don't communicate any *new* information,
499 * aside from the short-lived offset, which becomes history at
500 * the next event sched-in and therefore isn't useful.
501 * Userspace that needs to copy out AUX data in overwrite
502 * mode should know to use user_page::aux_head for the actual
503 * offset. So, from now on we don't output AUX records that
504 * have *only* the OVERWRITE flag set.
505 */
506 if (size || (handle->aux_flags & ~(u64)PERF_AUX_FLAG_OVERWRITE))
507 perf_event_aux_event(handle->event, aux_head, size,
508 handle->aux_flags);
509
510 WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
511 if (rb_need_aux_wakeup(rb))
512 wakeup = true;
513
514 if (wakeup) {
515 if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)
516 handle->event->pending_disable = smp_processor_id();
517 perf_output_wakeup(handle);
518 }
519
520 handle->event = NULL;
521
522 WRITE_ONCE(rb->aux_nest, 0);
523 /* can't be last */
524 rb_free_aux(rb);
525 ring_buffer_put(rb);
526}
527EXPORT_SYMBOL_GPL(perf_aux_output_end);
528
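/*
 * Hedged sketch of a PMU driver driving perf_aux_output_begin()/_end()
 * from its pmu::start()/pmu::stop() callbacks.  The per-CPU handle, the
 * hw_trace_start()/hw_trace_stop() helpers and struct example_aux_buf are
 * hypothetical placeholders, not part of this file or of any existing
 * driver.
 */
#if 0	/* illustrative only */
struct example_aux_buf {		/* what pmu::setup_aux() returned */
	void	**pages;
	int	nr_pages;
};

static DEFINE_PER_CPU(struct perf_output_handle, example_handle);

static void example_pmu_start(struct perf_event *event, int flags)
{
	struct perf_output_handle *handle = this_cpu_ptr(&example_handle);
	struct example_aux_buf *buf;

	buf = perf_aux_output_begin(handle, event);
	if (!buf)
		return;		/* no AUX buffer, no space, or nested */

	/* point the hardware at the free space the handle describes */
	hw_trace_start(buf, handle->head, handle->size);
}

static void example_pmu_stop(struct perf_event *event, int flags)
{
	struct perf_output_handle *handle = this_cpu_ptr(&example_handle);
	unsigned long written;

	if (!perf_get_aux(handle))	/* no transaction in flight */
		return;

	written = hw_trace_stop();	/* bytes the hardware produced */
	if (written == handle->size)	/* hypothetical "ran out of space" signal */
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	perf_aux_output_end(handle, written);
}
#endif
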
529/*
530 * Skip over a given number of bytes in the AUX buffer, due to, for example,
531 * hardware's alignment constraints.
532 */
533int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
534{
535 struct ring_buffer *rb = handle->rb;
536
537 if (size > handle->size)
538 return -ENOSPC;
539
540 rb->aux_head += size;
541
542 WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
543 if (rb_need_aux_wakeup(rb)) {
544 perf_output_wakeup(handle);
545 handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
546 }
547
548 handle->head = rb->aux_head;
549 handle->size -= size;
550
551 return 0;
552}
553EXPORT_SYMBOL_GPL(perf_aux_output_skip);
554
555void *perf_get_aux(struct perf_output_handle *handle)
556{
557 /* this is only valid between perf_aux_output_begin and *_end */
558 if (!handle->event)
559 return NULL;
560
561 return handle->rb->aux_priv;
562}
563EXPORT_SYMBOL_GPL(perf_get_aux);
564
565#define PERF_AUX_GFP (GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)
566
567static struct page *rb_alloc_aux_page(int node, int order)
568{
569 struct page *page;
570
571 if (order > MAX_ORDER)
572 order = MAX_ORDER;
573
574 do {
575 page = alloc_pages_node(node, PERF_AUX_GFP, order);
576 } while (!page && order--);
577
578 if (page && order) {
579 /*
580 * Communicate the allocation size to the driver:
581 * if we managed to secure a high-order allocation,
582 * set its first page's private to this order;
583 * !PagePrivate(page) means it's just a normal page.
584 */
585 split_page(page, order);
586 SetPagePrivate(page);
587 set_page_private(page, order);
588 }
589
590 return page;
591}
592
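/*
 * Sketch of how a pmu::setup_aux() implementation might recover the
 * high-order runs encoded above: the first page of each run carries the
 * run's order in page_private(), every other page reads as order 0.
 * example_walk_aux_pages() is illustrative only.
 */
#if 0	/* illustrative only */
static void example_walk_aux_pages(void **pages, int nr_pages)
{
	int i, order;

	for (i = 0; i < nr_pages; i += 1 << order) {
		struct page *page = virt_to_page(pages[i]);

		order = PagePrivate(page) ? page_private(page) : 0;
		pr_debug("contiguous run of %d page(s) at index %d\n",
			 1 << order, i);
	}
}
#endif
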
593static void rb_free_aux_page(struct ring_buffer *rb, int idx)
594{
595 struct page *page = virt_to_page(rb->aux_pages[idx]);
596
597 ClearPagePrivate(page);
598 page->mapping = NULL;
599 __free_page(page);
600}
601
602static void __rb_free_aux(struct ring_buffer *rb)
603{
604 int pg;
605
606 /*
607 * This should never happen; the last reference should be dropped from
608 * the perf_mmap_close() path, which first stops aux transactions (which
609 * in turn are the atomic holders of aux_refcount) and then does the
610 * last rb_free_aux().
611 */
612 WARN_ON_ONCE(in_atomic());
613
614 if (rb->aux_priv) {
615 rb->free_aux(rb->aux_priv);
616 rb->free_aux = NULL;
617 rb->aux_priv = NULL;
618 }
619
620 if (rb->aux_nr_pages) {
621 for (pg = 0; pg < rb->aux_nr_pages; pg++)
622 rb_free_aux_page(rb, pg);
623
624 kfree(rb->aux_pages);
625 rb->aux_nr_pages = 0;
626 }
627}
628
629int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
630 pgoff_t pgoff, int nr_pages, long watermark, int flags)
631{
632 bool overwrite = !(flags & RING_BUFFER_WRITABLE);
633 int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
634 int ret = -ENOMEM, max_order;
635
636 if (!has_aux(event))
637 return -EOPNOTSUPP;
638
639 /*
640 * We need to start with the max_order that fits in nr_pages,
641 * not the other way around, hence ilog2() and not get_order.
642 */
643 max_order = ilog2(nr_pages);
644
645 /*
646 * The PMU requests more than one contiguous chunk of memory
647 * for SW double buffering.
648 */
649 if (!overwrite) {
650 if (!max_order)
651 return -EINVAL;
652
653 max_order--;
654 }
655
656 rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL,
657 node);
658 if (!rb->aux_pages)
659 return -ENOMEM;
660
661 rb->free_aux = event->pmu->free_aux;
662 for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
663 struct page *page;
664 int last, order;
665
666 order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
667 page = rb_alloc_aux_page(node, order);
668 if (!page)
669 goto out;
670
671 for (last = rb->aux_nr_pages + (1 << page_private(page));
672 last > rb->aux_nr_pages; rb->aux_nr_pages++)
673 rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
674 }
675
676 /*
677 * In overwrite mode, PMUs that don't support SG may not handle more
678 * than one contiguous allocation, since they rely on PMI to do double
679 * buffering. In this case, the entire buffer has to be one contiguous
680 * chunk.
681 */
682 if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
683 overwrite) {
684 struct page *page = virt_to_page(rb->aux_pages[0]);
685
686 if (page_private(page) != max_order)
687 goto out;
688 }
689
690 rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages,
691 overwrite);
692 if (!rb->aux_priv)
693 goto out;
694
695 ret = 0;
696
697 /*
698 * aux_pages (and pmu driver's private data, aux_priv) will be
699 * referenced in both producer's and consumer's contexts, thus
700 * we keep a refcount here to make sure either of the two can
701 * reference them safely.
702 */
703 refcount_set(&rb->aux_refcount, 1);
704
705 rb->aux_overwrite = overwrite;
706 rb->aux_watermark = watermark;
707
708 if (!rb->aux_watermark && !rb->aux_overwrite)
709 rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1);
710
711out:
712 if (!ret)
713 rb->aux_pgoff = pgoff;
714 else
715 __rb_free_aux(rb);
716
717 return ret;
718}
719
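/*
 * Worked example of the sizing logic above (illustrative numbers): for a
 * 64-page, non-overwrite AUX buffer, max_order = ilog2(64) = 6 is lowered
 * to 5, so the allocator hands out runs of at most 32 pages and the PMU
 * always has at least two chunks for SW double buffering.  With
 * watermark == 0, aux_watermark defaults to 64 << (PAGE_SHIFT - 1), i.e.
 * half of the 64-page area on 4KiB pages.
 */
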
720void rb_free_aux(struct ring_buffer *rb)
721{
722 if (refcount_dec_and_test(&rb->aux_refcount))
723 __rb_free_aux(rb);
724}
725
726#ifndef CONFIG_PERF_USE_VMALLOC
727
728/*
729 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
730 */
731
732static struct page *
733__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
734{
735 if (pgoff > rb->nr_pages)
736 return NULL;
737
738 if (pgoff == 0)
739 return virt_to_page(rb->user_page);
740
741 return virt_to_page(rb->data_pages[pgoff - 1]);
742}
743
744static void *perf_mmap_alloc_page(int cpu)
745{
746 struct page *page;
747 int node;
748
749 node = (cpu == -1) ? cpu : cpu_to_node(cpu);
750 page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
751 if (!page)
752 return NULL;
753
754 return page_address(page);
755}
756
757struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
758{
759 struct ring_buffer *rb;
760 unsigned long size;
761 int i;
762
763 size = sizeof(struct ring_buffer);
764 size += nr_pages * sizeof(void *);
765
766 if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
767 goto fail;
768
769 rb = kzalloc(size, GFP_KERNEL);
770 if (!rb)
771 goto fail;
772
773 rb->user_page = perf_mmap_alloc_page(cpu);
774 if (!rb->user_page)
775 goto fail_user_page;
776
777 for (i = 0; i < nr_pages; i++) {
778 rb->data_pages[i] = perf_mmap_alloc_page(cpu);
779 if (!rb->data_pages[i])
780 goto fail_data_pages;
781 }
782
783 rb->nr_pages = nr_pages;
784
785 ring_buffer_init(rb, watermark, flags);
786
787 return rb;
788
789fail_data_pages:
790 for (i--; i >= 0; i--)
791 free_page((unsigned long)rb->data_pages[i]);
792
793 free_page((unsigned long)rb->user_page);
794
795fail_user_page:
796 kfree(rb);
797
798fail:
799 return NULL;
800}
801
802static void perf_mmap_free_page(unsigned long addr)
803{
804 struct page *page = virt_to_page((void *)addr);
805
806 page->mapping = NULL;
807 __free_page(page);
808}
809
810void rb_free(struct ring_buffer *rb)
811{
812 int i;
813
814 perf_mmap_free_page((unsigned long)rb->user_page);
815 for (i = 0; i < rb->nr_pages; i++)
816 perf_mmap_free_page((unsigned long)rb->data_pages[i]);
817 kfree(rb);
818}
819
820#else
821static int data_page_nr(struct ring_buffer *rb)
822{
823 return rb->nr_pages << page_order(rb);
824}
825
826static struct page *
827__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
828{
829 /* The '>' counts in the user page. */
830 if (pgoff > data_page_nr(rb))
831 return NULL;
832
833 return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
834}
835
836static void perf_mmap_unmark_page(void *addr)
837{
838 struct page *page = vmalloc_to_page(addr);
839
840 page->mapping = NULL;
841}
842
843static void rb_free_work(struct work_struct *work)
844{
845 struct ring_buffer *rb;
846 void *base;
847 int i, nr;
848
849 rb = container_of(work, struct ring_buffer, work);
850 nr = data_page_nr(rb);
851
852 base = rb->user_page;
853 /* The '<=' counts in the user page. */
854 for (i = 0; i <= nr; i++)
855 perf_mmap_unmark_page(base + (i * PAGE_SIZE));
856
857 vfree(base);
858 kfree(rb);
859}
860
861void rb_free(struct ring_buffer *rb)
862{
863 schedule_work(&rb->work);
864}
865
866struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
867{
868 struct ring_buffer *rb;
869 unsigned long size;
870 void *all_buf;
871
872 size = sizeof(struct ring_buffer);
873 size += sizeof(void *);
874
875 rb = kzalloc(size, GFP_KERNEL);
876 if (!rb)
877 goto fail;
878
879 INIT_WORK(&rb->work, rb_free_work);
880
881 all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
882 if (!all_buf)
883 goto fail_all_buf;
884
885 rb->user_page = all_buf;
886 rb->data_pages[0] = all_buf + PAGE_SIZE;
887 if (nr_pages) {
888 rb->nr_pages = 1;
889 rb->page_order = ilog2(nr_pages);
890 }
891
892 ring_buffer_init(rb, watermark, flags);
893
894 return rb;
895
896fail_all_buf:
897 kfree(rb);
898
899fail:
900 return NULL;
901}
902
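/*
 * Illustrative example of the vmalloc layout above: rb_alloc(8, ...) makes
 * one vmalloc_user() allocation of 9 pages (user page + data), records
 * rb->nr_pages = 1 and rb->page_order = ilog2(8) = 3, so perf_data_size()
 * still reports 8 pages and the single data_pages[0] pointer covers the
 * whole 32KiB data area.
 */
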
903#endif
904
905struct page *
906perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
907{
908 if (rb->aux_nr_pages) {
909 /* above AUX space */
910 if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
911 return NULL;
912
913 /* AUX space */
914 if (pgoff >= rb->aux_pgoff) {
915 int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages);
916 return virt_to_page(rb->aux_pages[aux_pgoff]);
917 }
918 }
919
920 return __perf_mmap_to_page(rb, pgoff);
921}
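
/*
 * Worked example of the mmap page layout resolved above (illustrative
 * numbers): with 8 data pages and a 16-page AUX area mapped at
 * aux_pgoff == 16, pgoff 0 is the user page, pgoff 1..8 are data pages,
 * pgoff 16..31 resolve into rb->aux_pages[], and offsets above the AUX
 * area return NULL and fault the mapping.
 */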