1/*
2 * Generic ring buffer
3 *
4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5 */
6#include <linux/ring_buffer.h>
7#include <linux/trace_clock.h>
8#include <linux/spinlock.h>
9#include <linux/debugfs.h>
10#include <linux/uaccess.h>
11#include <linux/hardirq.h>
12#include <linux/kmemcheck.h>
13#include <linux/module.h>
14#include <linux/percpu.h>
15#include <linux/mutex.h>
16#include <linux/slab.h>
17#include <linux/init.h>
18#include <linux/hash.h>
19#include <linux/list.h>
20#include <linux/cpu.h>
21#include <linux/fs.h>
22
23#include <asm/local.h>
24#include "trace.h"
25
26/*
27 * The ring buffer header is special. We must keep it up to date by hand.
28 */
29int ring_buffer_print_entry_header(struct trace_seq *s)
30{
31 int ret;
32
33 ret = trace_seq_printf(s, "# compressed entry header\n");
34 ret = trace_seq_printf(s, "\ttype_len : 5 bits\n");
35 ret = trace_seq_printf(s, "\ttime_delta : 27 bits\n");
36 ret = trace_seq_printf(s, "\tarray : 32 bits\n");
37 ret = trace_seq_printf(s, "\n");
38 ret = trace_seq_printf(s, "\tpadding : type == %d\n",
39 RINGBUF_TYPE_PADDING);
40 ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
41 RINGBUF_TYPE_TIME_EXTEND);
42 ret = trace_seq_printf(s, "\tdata max type_len == %d\n",
43 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
44
45 return ret;
46}
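
/*
 * A worked example of the compressed header above (illustrative only,
 * assuming RB_FORCE_8BYTE_ALIGNMENT == 0): a 12-byte payload is stored
 * with type_len = 12 / RB_ALIGNMENT = 3 and the payload following
 * immediately at array[0], so the whole event takes
 * RB_EVNT_HDR_SIZE (4 bytes) + 12 = 16 bytes on the page. Payloads too
 * big for the 5-bit type_len field use type_len == 0 and keep the byte
 * count in array[0] instead; see rb_update_event() and
 * rb_event_data_length() below.
 */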
47
48/*
49 * The ring buffer is made up of a list of pages. A separate list of pages is
50 * allocated for each CPU. A writer may only write to a buffer that is
51 * associated with the CPU it is currently executing on. A reader may read
52 * from any per cpu buffer.
53 *
54 * The reader is special. For each per cpu buffer, the reader has its own
55 * reader page. When a reader has read the entire reader page, this reader
56 * page is swapped with another page in the ring buffer.
57 *
58 * Now, as long as the writer is off the reader page, the reader can do
59 * whatever it wants with that page. The writer will never write to that page
60 * again (as long as it is out of the ring buffer).
61 *
62 * Here's some silly ASCII art.
63 *
64 * +------+
65 * |reader| RING BUFFER
66 * |page |
67 * +------+ +---+ +---+ +---+
68 * | |-->| |-->| |
69 * +---+ +---+ +---+
70 * ^ |
71 * | |
72 * +---------------+
73 *
74 *
75 * +------+
76 * |reader| RING BUFFER
77 * |page |------------------v
78 * +------+ +---+ +---+ +---+
79 * | |-->| |-->| |
80 * +---+ +---+ +---+
81 * ^ |
82 * | |
83 * +---------------+
84 *
85 *
86 * +------+
87 * |reader| RING BUFFER
88 * |page |------------------v
89 * +------+ +---+ +---+ +---+
90 * ^ | |-->| |-->| |
91 * | +---+ +---+ +---+
92 * | |
93 * | |
94 * +------------------------------+
95 *
96 *
97 * +------+
98 * |buffer| RING BUFFER
99 * |page |------------------v
100 * +------+ +---+ +---+ +---+
101 * ^ | | | |-->| |
102 * | New +---+ +---+ +---+
103 * | Reader------^ |
104 * | page |
105 * +------------------------------+
106 *
107 *
108 * After we make this swap, the reader can hand this page off to the splice
109 * code and be done with it. It can even allocate a new page if it needs to
110 * and swap that into the ring buffer.
111 *
112 * We will be using cmpxchg soon to make all this lockless.
113 *
114 */
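
/*
 * A minimal consumer sketch (illustrative only, not part of this file).
 * ring_buffer_consume() is declared in linux/ring_buffer.h and defined
 * later in this file; process() is a placeholder for the caller's code,
 * and buffer/cpu are whatever ring buffer and CPU the caller reads:
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *	unsigned long lost;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)))
 *		process(ring_buffer_event_data(event),
 *			ring_buffer_event_length(event));
 *
 * Each iteration pulls the next event from the given per cpu buffer;
 * the reader page swap described above happens inside the call, so the
 * caller never sees it.
 */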
115
116/*
117 * A fast way to enable or disable all ring buffers is to
118 * call tracing_on or tracing_off. Turning off the ring buffers
119 * prevents all ring buffers from being recorded to.
120 * Turning this switch on makes it OK to write to the
121 * ring buffer, if the ring buffer is enabled itself.
122 *
123 * There are three layers that must be on in order to write
124 * to the ring buffer.
125 *
126 * 1) This global flag must be set.
127 * 2) The ring buffer must be enabled for recording.
128 * 3) The per cpu buffer must be enabled for recording.
129 *
130 * In case of an anomaly, this global flag has a bit set that
131 * will permanently disable all ring buffers.
132 */
133
134/*
135 * Global flag to disable all recording to ring buffers
136 * This has two bits: ON, DISABLED
137 *
138 * ON DISABLED
139 * ---- ----------
140 * 0 0 : ring buffers are off
141 * 1 0 : ring buffers are on
142 * X 1 : ring buffers are permanently disabled
143 */
144
145enum {
146 RB_BUFFERS_ON_BIT = 0,
147 RB_BUFFERS_DISABLED_BIT = 1,
148};
149
150enum {
151 RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT,
152 RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
153};
154
155static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
156
157#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
158
159/**
160 * tracing_on - enable all tracing buffers
161 *
162 * This function enables all tracing buffers that may have been
163 * disabled with tracing_off.
164 */
165void tracing_on(void)
166{
167 set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
168}
169EXPORT_SYMBOL_GPL(tracing_on);
170
171/**
172 * tracing_off - turn off all tracing buffers
173 *
174 * This function stops all tracing buffers from recording data.
175 * It does not disable any overhead the tracers themselves may
176 * be causing. This function simply causes all recording to
177 * the ring buffers to fail.
178 */
179void tracing_off(void)
180{
181 clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
182}
183EXPORT_SYMBOL_GPL(tracing_off);
184
185/**
186 * tracing_off_permanent - permanently disable ring buffers
187 *
188 * This function, once called, will disable all ring buffers
189 * permanently.
190 */
191void tracing_off_permanent(void)
192{
193 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
194}
195
196/**
197 * tracing_is_on - show state of ring buffers enabled
198 */
199int tracing_is_on(void)
200{
201 return ring_buffer_flags == RB_BUFFERS_ON;
202}
203EXPORT_SYMBOL_GPL(tracing_is_on);
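
/*
 * Example of how the global switch is meant to be used (a sketch; the
 * critical section do_something_critical() is hypothetical):
 *
 *	tracing_off();
 *	do_something_critical();
 *	if (!tracing_is_on())
 *		pr_debug("ring buffers are quiescent\n");
 *	tracing_on();
 *
 * Note that tracing_is_on() also returns false after
 * tracing_off_permanent(), because the DISABLED bit makes the flag
 * word differ from RB_BUFFERS_ON.
 */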
204
205#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
206#define RB_ALIGNMENT 4U
207#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
208#define RB_EVNT_MIN_SIZE 8U /* two 32bit words */
209
210#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
211# define RB_FORCE_8BYTE_ALIGNMENT 0
212# define RB_ARCH_ALIGNMENT RB_ALIGNMENT
213#else
214# define RB_FORCE_8BYTE_ALIGNMENT 1
215# define RB_ARCH_ALIGNMENT 8U
216#endif
217
218/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
219#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
220
221enum {
222 RB_LEN_TIME_EXTEND = 8,
223 RB_LEN_TIME_STAMP = 16,
224};
225
226#define skip_time_extend(event) \
227 ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
228
229static inline int rb_null_event(struct ring_buffer_event *event)
230{
231 return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
232}
233
234static void rb_event_set_padding(struct ring_buffer_event *event)
235{
236 /* padding has a NULL time_delta */
237 event->type_len = RINGBUF_TYPE_PADDING;
238 event->time_delta = 0;
239}
240
241static unsigned
242rb_event_data_length(struct ring_buffer_event *event)
243{
244 unsigned length;
245
246 if (event->type_len)
247 length = event->type_len * RB_ALIGNMENT;
248 else
249 length = event->array[0];
250 return length + RB_EVNT_HDR_SIZE;
251}
252
253/*
254 * Return the length of the given event. Will return
255 * the length of the time extend if the event is a
256 * time extend.
257 */
258static inline unsigned
259rb_event_length(struct ring_buffer_event *event)
260{
261 switch (event->type_len) {
262 case RINGBUF_TYPE_PADDING:
263 if (rb_null_event(event))
264 /* undefined */
265 return -1;
266 return event->array[0] + RB_EVNT_HDR_SIZE;
267
268 case RINGBUF_TYPE_TIME_EXTEND:
269 return RB_LEN_TIME_EXTEND;
270
271 case RINGBUF_TYPE_TIME_STAMP:
272 return RB_LEN_TIME_STAMP;
273
274 case RINGBUF_TYPE_DATA:
275 return rb_event_data_length(event);
276 default:
277 BUG();
278 }
279 /* not hit */
280 return 0;
281}
282
283/*
284 * Return total length of time extend and data,
285 * or just the event length for all other events.
286 */
287static inline unsigned
288rb_event_ts_length(struct ring_buffer_event *event)
289{
290 unsigned len = 0;
291
292 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
293 /* time extends include the data event after it */
294 len = RB_LEN_TIME_EXTEND;
295 event = skip_time_extend(event);
296 }
297 return len + rb_event_length(event);
298}
299
300/**
301 * ring_buffer_event_length - return the length of the event
302 * @event: the event to get the length of
303 *
304 * Returns the size of the data load of a data event.
305 * If the event is something other than a data event, it
306 * returns the size of the event itself. With the exception
307 * of a TIME EXTEND, where it still returns the size of the
308 * data load of the data event after it.
309 */
310unsigned ring_buffer_event_length(struct ring_buffer_event *event)
311{
312 unsigned length;
313
314 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
315 event = skip_time_extend(event);
316
317 length = rb_event_length(event);
318 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
319 return length;
320 length -= RB_EVNT_HDR_SIZE;
321 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
322 length -= sizeof(event->array[0]);
323 return length;
324}
325EXPORT_SYMBOL_GPL(ring_buffer_event_length);
326
327/* inline for ring buffer fast paths */
328static void *
329rb_event_data(struct ring_buffer_event *event)
330{
331 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
332 event = skip_time_extend(event);
333 BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
334 /* If length is in len field, then array[0] has the data */
335 if (event->type_len)
336 return (void *)&event->array[0];
337 /* Otherwise length is in array[0] and array[1] has the data */
338 return (void *)&event->array[1];
339}
340
341/**
342 * ring_buffer_event_data - return the data of the event
343 * @event: the event to get the data from
344 */
345void *ring_buffer_event_data(struct ring_buffer_event *event)
346{
347 return rb_event_data(event);
348}
349EXPORT_SYMBOL_GPL(ring_buffer_event_data);
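
/*
 * Typical write-side pairing with ring_buffer_event_data() (a sketch;
 * ring_buffer_lock_reserve() and ring_buffer_unlock_commit() are
 * declared in linux/ring_buffer.h and defined later in this file, and
 * struct my_entry / entry are placeholders for the caller's payload):
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *body;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*body));
 *	if (event) {
 *		body = ring_buffer_event_data(event);
 *		memcpy(body, &entry, sizeof(*body));
 *		ring_buffer_unlock_commit(buffer, event);
 *	}
 */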
350
351#define for_each_buffer_cpu(buffer, cpu) \
352 for_each_cpu(cpu, buffer->cpumask)
353
354#define TS_SHIFT 27
355#define TS_MASK ((1ULL << TS_SHIFT) - 1)
356#define TS_DELTA_TEST (~TS_MASK)
357
358/* Flag when events were overwritten */
359#define RB_MISSED_EVENTS (1 << 31)
360/* Missed count stored at end */
361#define RB_MISSED_STORED (1 << 30)
362
363struct buffer_data_page {
364 u64 time_stamp; /* page time stamp */
365 local_t commit; /* write committed index */
366 unsigned char data[]; /* data of buffer page */
367};
368
369/*
370 * Note, the buffer_page list must be first. The buffer pages
371 * are allocated in cache lines, which means that each buffer
372 * page will be at the beginning of a cache line, and thus
373 * the least significant bits will be zero. We use this to
374 * add flags in the list struct pointers, to make the ring buffer
375 * lockless.
376 */
377struct buffer_page {
378 struct list_head list; /* list of buffer pages */
379 local_t write; /* index for next write */
380 unsigned read; /* index for next read */
381 local_t entries; /* entries on this page */
382 unsigned long real_end; /* real end of data */
383 struct buffer_data_page *page; /* Actual data page */
384};
385
386/*
387 * The buffer page counters, write and entries, must be reset
388 * atomically when crossing page boundaries. To synchronize this
389 * update, two counters are inserted into the number. One is
390 * the actual counter for the write position or count on the page.
391 *
392 * The other is a counter of updaters. Before an update happens
393 * the update partition of the counter is incremented. This will
394 * allow the updater to update the counter atomically.
395 *
396 * The counter is 20 bits, and the state data is 12.
397 */
398#define RB_WRITE_MASK 0xfffff
399#define RB_WRITE_INTCNT (1 << 20)
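
/*
 * Worked example of the split above: if bpage->write reads 0x00300040,
 * the low 20 bits (0x00300040 & RB_WRITE_MASK == 0x40) are the byte
 * index of the next write on the page, while the upper bits (3 here)
 * count updaters that bumped the field with
 * local_add_return(RB_WRITE_INTCNT, ...). See rb_tail_page_update()
 * below for how that counter part is used and then cleared.
 */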
400
401static void rb_init_page(struct buffer_data_page *bpage)
402{
403 local_set(&bpage->commit, 0);
404}
405
406/**
407 * ring_buffer_page_len - the size of data on the page.
408 * @page: The page to read
409 *
410 * Returns the amount of data on the page, including buffer page header.
411 */
412size_t ring_buffer_page_len(void *page)
413{
414 return local_read(&((struct buffer_data_page *)page)->commit)
415 + BUF_PAGE_HDR_SIZE;
416}
417
418/*
419 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
420 * this issue out.
421 */
422static void free_buffer_page(struct buffer_page *bpage)
423{
424 free_page((unsigned long)bpage->page);
425 kfree(bpage);
426}
427
428/*
429 * We need to fit the time_stamp delta into 27 bits.
430 */
431static inline int test_time_stamp(u64 delta)
432{
433 if (delta & TS_DELTA_TEST)
434 return 1;
435 return 0;
436}
437
438#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
439
440/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
441#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
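
/*
 * For reference, assuming 4K pages and an 8-byte local_t (so
 * BUF_PAGE_HDR_SIZE is 8 + 8 = 16): BUF_PAGE_SIZE is 4096 - 16 = 4080
 * bytes of event space per page, and BUF_MAX_DATA_SIZE is 4080 - 8 =
 * 4072 bytes for the largest single event payload.
 */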
442
443int ring_buffer_print_page_header(struct trace_seq *s)
444{
445 struct buffer_data_page field;
446 int ret;
447
448 ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
449 "offset:0;\tsize:%u;\tsigned:%u;\n",
450 (unsigned int)sizeof(field.time_stamp),
451 (unsigned int)is_signed_type(u64));
452
453 ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
454 "offset:%u;\tsize:%u;\tsigned:%u;\n",
455 (unsigned int)offsetof(typeof(field), commit),
456 (unsigned int)sizeof(field.commit),
457 (unsigned int)is_signed_type(long));
458
459 ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
460 "offset:%u;\tsize:%u;\tsigned:%u;\n",
461 (unsigned int)offsetof(typeof(field), commit),
462 1,
463 (unsigned int)is_signed_type(long));
464
465 ret = trace_seq_printf(s, "\tfield: char data;\t"
466 "offset:%u;\tsize:%u;\tsigned:%u;\n",
467 (unsigned int)offsetof(typeof(field), data),
468 (unsigned int)BUF_PAGE_SIZE,
469 (unsigned int)is_signed_type(char));
470
471 return ret;
472}
473
474/*
475 * head_page == tail_page && head == tail then buffer is empty.
476 */
477struct ring_buffer_per_cpu {
478 int cpu;
479 atomic_t record_disabled;
480 struct ring_buffer *buffer;
481 spinlock_t reader_lock; /* serialize readers */
482 arch_spinlock_t lock;
483 struct lock_class_key lock_key;
484 struct list_head *pages;
485 struct buffer_page *head_page; /* read from head */
486 struct buffer_page *tail_page; /* write to tail */
487 struct buffer_page *commit_page; /* committed pages */
488 struct buffer_page *reader_page;
489 unsigned long lost_events;
490 unsigned long last_overrun;
491 local_t commit_overrun;
492 local_t overrun;
493 local_t entries;
494 local_t committing;
495 local_t commits;
496 unsigned long read;
497 u64 write_stamp;
498 u64 read_stamp;
499};
500
501struct ring_buffer {
502 unsigned pages;
503 unsigned flags;
504 int cpus;
505 atomic_t record_disabled;
506 cpumask_var_t cpumask;
507
508 struct lock_class_key *reader_lock_key;
509
510 struct mutex mutex;
511
512 struct ring_buffer_per_cpu **buffers;
513
514#ifdef CONFIG_HOTPLUG_CPU
515 struct notifier_block cpu_notify;
516#endif
517 u64 (*clock)(void);
518};
519
520struct ring_buffer_iter {
521 struct ring_buffer_per_cpu *cpu_buffer;
522 unsigned long head;
523 struct buffer_page *head_page;
524 struct buffer_page *cache_reader_page;
525 unsigned long cache_read;
526 u64 read_stamp;
527};
528
529/* buffer may be either ring_buffer or ring_buffer_per_cpu */
530#define RB_WARN_ON(b, cond) \
531 ({ \
532 int _____ret = unlikely(cond); \
533 if (_____ret) { \
534 if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
535 struct ring_buffer_per_cpu *__b = \
536 (void *)b; \
537 atomic_inc(&__b->buffer->record_disabled); \
538 } else \
539 atomic_inc(&b->record_disabled); \
540 WARN_ON(1); \
541 } \
542 _____ret; \
543 })
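
/*
 * Both forms are used below. For example,
 *
 *	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
 *		return NULL;
 *
 * (from rb_set_head_page()) disables recording on the ring buffer that
 * owns the per cpu buffer, while ring_buffer_resize() passes the
 * struct ring_buffer itself and takes the else branch.
 */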
544
545/* Up this if you want to test the TIME_EXTENTS and normalization */
546#define DEBUG_SHIFT 0
547
548static inline u64 rb_time_stamp(struct ring_buffer *buffer)
549{
550 /* shift to debug/test normalization and TIME_EXTENTS */
551 return buffer->clock() << DEBUG_SHIFT;
552}
553
554u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
555{
556 u64 time;
557
558 preempt_disable_notrace();
559 time = rb_time_stamp(buffer);
560 preempt_enable_no_resched_notrace();
561
562 return time;
563}
564EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
565
566void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
567 int cpu, u64 *ts)
568{
569 /* Just stupid testing the normalize function and deltas */
570 *ts >>= DEBUG_SHIFT;
571}
572EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
573
574/*
575 * Making the ring buffer lockless makes things tricky.
576 * Writes only happen on the CPU that they are on, so they
577 * only need to worry about interrupts. Reads can
578 * happen on any CPU.
579 *
580 * The reader page is always off the ring buffer, but when the
581 * reader finishes with a page, it needs to swap its page with
582 * a new one from the buffer. The reader needs to take from
583 * the head (writes go to the tail). But if a writer is in overwrite
584 * mode and wraps, it must push the head page forward.
585 *
586 * Here lies the problem.
587 *
588 * The reader must be careful to replace only the head page, and
589 * not another one. As described at the top of the file in the
590 * ASCII art, the reader sets its old page to point to the next
591 * page after head. It then sets the page after head to point to
592 * the old reader page. But if the writer moves the head page
593 * during this operation, the reader could end up with the tail.
594 *
595 * We use cmpxchg to help prevent this race. We also do something
596 * special with the page before head. We set the LSB to 1.
597 *
598 * When the writer must push the page forward, it will clear the
599 * bit that points to the head page, move the head, and then set
600 * the bit that points to the new head page.
601 *
602 * We also don't want an interrupt coming in and moving the head
603 * page on another writer. Thus we use the second LSB to catch
604 * that too. Thus:
605 *
606 * head->list->prev->next bit 1 bit 0
607 * ------- -------
608 * Normal page 0 0
609 * Points to head page 0 1
610 * New head page 1 0
611 *
612 * Note we can not trust the prev pointer of the head page, because:
613 *
614 * +----+ +-----+ +-----+
615 * | |------>| T |---X--->| N |
616 * | |<------| | | |
617 * +----+ +-----+ +-----+
618 * ^ ^ |
619 * | +-----+ | |
620 * +----------| R |----------+ |
621 * | |<-----------+
622 * +-----+
623 *
624 * Key: ---X--> HEAD flag set in pointer
625 * T Tail page
626 * R Reader page
627 * N Next page
628 *
629 * (see __rb_reserve_next() to see where this happens)
630 *
631 * What the above shows is that the reader just swapped out
632 * the reader page with a page in the buffer, but before it
633 * could make the new header point back to the new page added,
634 * it was preempted by a writer. The writer moved forward onto
635 * the new page added by the reader and is about to move forward
636 * again.
637 *
638 * You can see, it is legitimate for the previous pointer of
639 * the head (or any page) not to point back to itself. But only
640 * temporarily.
641 */
642
643#define RB_PAGE_NORMAL 0UL
644#define RB_PAGE_HEAD 1UL
645#define RB_PAGE_UPDATE 2UL
646
647
648#define RB_FLAG_MASK 3UL
649
650/* PAGE_MOVED is not part of the mask */
651#define RB_PAGE_MOVED 4UL
652
653/*
654 * rb_list_head - remove any bit
655 */
656static struct list_head *rb_list_head(struct list_head *list)
657{
658 unsigned long val = (unsigned long)list;
659
660 return (struct list_head *)(val & ~RB_FLAG_MASK);
661}
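
/*
 * Worked example of the pointer tagging: struct buffer_page is
 * allocated cache line aligned, so a ->next value such as
 * 0xffff880012345680 has its two low bits free. A stored value of
 * 0xffff880012345681 then means "the page this points to is the head
 * page", and rb_list_head() masks the flag bits back off to recover
 * the real list_head pointer. (The addresses are made up.)
 */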
662
663/*
664 * rb_is_head_page - test if the given page is the head page
665 *
666 * Because the reader may move the head_page pointer, we can
667 * not trust what the head page is (it may be pointing to
668 * the reader page). But if the next page is a header page,
669 * its flags will be non zero.
670 */
671static inline int
672rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
673 struct buffer_page *page, struct list_head *list)
674{
675 unsigned long val;
676
677 val = (unsigned long)list->next;
678
679 if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
680 return RB_PAGE_MOVED;
681
682 return val & RB_FLAG_MASK;
683}
684
685/*
686 * rb_is_reader_page
687 *
688 * The unique thing about the reader page is that, if the
689 * writer is ever on it, the previous pointer never points
690 * back to the reader page.
691 */
692static int rb_is_reader_page(struct buffer_page *page)
693{
694 struct list_head *list = page->list.prev;
695
696 return rb_list_head(list->next) != &page->list;
697}
698
699/*
700 * rb_set_list_to_head - set a list_head to be pointing to head.
701 */
702static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
703 struct list_head *list)
704{
705 unsigned long *ptr;
706
707 ptr = (unsigned long *)&list->next;
708 *ptr |= RB_PAGE_HEAD;
709 *ptr &= ~RB_PAGE_UPDATE;
710}
711
712/*
713 * rb_head_page_activate - sets up head page
714 */
715static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
716{
717 struct buffer_page *head;
718
719 head = cpu_buffer->head_page;
720 if (!head)
721 return;
722
723 /*
724 * Set the previous list pointer to have the HEAD flag.
725 */
726 rb_set_list_to_head(cpu_buffer, head->list.prev);
727}
728
729static void rb_list_head_clear(struct list_head *list)
730{
731 unsigned long *ptr = (unsigned long *)&list->next;
732
733 *ptr &= ~RB_FLAG_MASK;
734}
735
736/*
737 * rb_head_page_deactivate - clears head page ptr (for free list)
738 */
739static void
740rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
741{
742 struct list_head *hd;
743
744 /* Go through the whole list and clear any pointers found. */
745 rb_list_head_clear(cpu_buffer->pages);
746
747 list_for_each(hd, cpu_buffer->pages)
748 rb_list_head_clear(hd);
749}
750
751static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
752 struct buffer_page *head,
753 struct buffer_page *prev,
754 int old_flag, int new_flag)
755{
756 struct list_head *list;
757 unsigned long val = (unsigned long)&head->list;
758 unsigned long ret;
759
760 list = &prev->list;
761
762 val &= ~RB_FLAG_MASK;
763
764 ret = cmpxchg((unsigned long *)&list->next,
765 val | old_flag, val | new_flag);
766
767 /* check if the reader took the page */
768 if ((ret & ~RB_FLAG_MASK) != val)
769 return RB_PAGE_MOVED;
770
771 return ret & RB_FLAG_MASK;
772}
773
774static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
775 struct buffer_page *head,
776 struct buffer_page *prev,
777 int old_flag)
778{
779 return rb_head_page_set(cpu_buffer, head, prev,
780 old_flag, RB_PAGE_UPDATE);
781}
782
783static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
784 struct buffer_page *head,
785 struct buffer_page *prev,
786 int old_flag)
787{
788 return rb_head_page_set(cpu_buffer, head, prev,
789 old_flag, RB_PAGE_HEAD);
790}
791
792static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
793 struct buffer_page *head,
794 struct buffer_page *prev,
795 int old_flag)
796{
797 return rb_head_page_set(cpu_buffer, head, prev,
798 old_flag, RB_PAGE_NORMAL);
799}
800
801static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
802 struct buffer_page **bpage)
803{
804 struct list_head *p = rb_list_head((*bpage)->list.next);
805
806 *bpage = list_entry(p, struct buffer_page, list);
807}
808
809static struct buffer_page *
810rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
811{
812 struct buffer_page *head;
813 struct buffer_page *page;
814 struct list_head *list;
815 int i;
816
817 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
818 return NULL;
819
820 /* sanity check */
821 list = cpu_buffer->pages;
822 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
823 return NULL;
824
825 page = head = cpu_buffer->head_page;
826 /*
827 * It is possible that the writer moves the header behind
828 * where we started, and we miss it in one loop.
829 * A second loop should grab the header, but we'll do
830 * three loops just because I'm paranoid.
831 */
832 for (i = 0; i < 3; i++) {
833 do {
834 if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
835 cpu_buffer->head_page = page;
836 return page;
837 }
838 rb_inc_page(cpu_buffer, &page);
839 } while (page != head);
840 }
841
842 RB_WARN_ON(cpu_buffer, 1);
843
844 return NULL;
845}
846
847static int rb_head_page_replace(struct buffer_page *old,
848 struct buffer_page *new)
849{
850 unsigned long *ptr = (unsigned long *)&old->list.prev->next;
851 unsigned long val;
852 unsigned long ret;
853
854 val = *ptr & ~RB_FLAG_MASK;
855 val |= RB_PAGE_HEAD;
856
857 ret = cmpxchg(ptr, val, (unsigned long)&new->list);
858
859 return ret == val;
860}
861
862/*
863 * rb_tail_page_update - move the tail page forward
864 *
865 * Returns 1 if moved tail page, 0 if someone else did.
866 */
867static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
868 struct buffer_page *tail_page,
869 struct buffer_page *next_page)
870{
871 struct buffer_page *old_tail;
872 unsigned long old_entries;
873 unsigned long old_write;
874 int ret = 0;
875
876 /*
877 * The tail page now needs to be moved forward.
878 *
879 * We need to reset the tail page, but without messing
880 * with possible erasing of data brought in by interrupts
881 * that have moved the tail page and are currently on it.
882 *
883 * We add a counter to the write field to denote this.
884 */
885 old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
886 old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
887
888 /*
889 * Just make sure we have seen our old_write and synchronize
890 * with any interrupts that come in.
891 */
892 barrier();
893
894 /*
895 * If the tail page is still the same as what we think
896 * it is, then it is up to us to update the tail
897 * pointer.
898 */
899 if (tail_page == cpu_buffer->tail_page) {
900 /* Zero the write counter */
901 unsigned long val = old_write & ~RB_WRITE_MASK;
902 unsigned long eval = old_entries & ~RB_WRITE_MASK;
903
904 /*
905 * This will only succeed if an interrupt did
906 * not come in and change it. In which case, we
907 * do not want to modify it.
908 *
909 * We add (void) to let the compiler know that we do not care
910 * about the return value of these functions. We use the
911 * cmpxchg to only update if an interrupt did not already
912 * do it for us. If the cmpxchg fails, we don't care.
913 */
914 (void)local_cmpxchg(&next_page->write, old_write, val);
915 (void)local_cmpxchg(&next_page->entries, old_entries, eval);
916
917 /*
918 * No need to worry about races with clearing out the commit.
919 * It can only increment when a commit takes place. But that
920 * only happens in the outermost nested commit.
921 */
922 local_set(&next_page->page->commit, 0);
923
924 old_tail = cmpxchg(&cpu_buffer->tail_page,
925 tail_page, next_page);
926
927 if (old_tail == tail_page)
928 ret = 1;
929 }
930
931 return ret;
932}
933
934static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
935 struct buffer_page *bpage)
936{
937 unsigned long val = (unsigned long)bpage;
938
939 if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
940 return 1;
941
942 return 0;
943}
944
945/**
946 * rb_check_list - make sure a pointer to a list has the last bits zero
947 */
948static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
949 struct list_head *list)
950{
951 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
952 return 1;
953 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
954 return 1;
955 return 0;
956}
957
958/**
959 * rb_check_pages - integrity check of buffer pages
960 * @cpu_buffer: CPU buffer with pages to test
961 *
962 * As a safety measure we check to make sure the data pages have not
963 * been corrupted.
964 */
965static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
966{
967 struct list_head *head = cpu_buffer->pages;
968 struct buffer_page *bpage, *tmp;
969
970 rb_head_page_deactivate(cpu_buffer);
971
972 if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
973 return -1;
974 if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
975 return -1;
976
977 if (rb_check_list(cpu_buffer, head))
978 return -1;
979
980 list_for_each_entry_safe(bpage, tmp, head, list) {
981 if (RB_WARN_ON(cpu_buffer,
982 bpage->list.next->prev != &bpage->list))
983 return -1;
984 if (RB_WARN_ON(cpu_buffer,
985 bpage->list.prev->next != &bpage->list))
986 return -1;
987 if (rb_check_list(cpu_buffer, &bpage->list))
988 return -1;
989 }
990
991 rb_head_page_activate(cpu_buffer);
992
993 return 0;
994}
995
996static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
997 unsigned nr_pages)
998{
999 struct buffer_page *bpage, *tmp;
1000 LIST_HEAD(pages);
1001 unsigned i;
1002
1003 WARN_ON(!nr_pages);
1004
1005 for (i = 0; i < nr_pages; i++) {
1006 struct page *page;
1007 /*
1008 * __GFP_NORETRY flag makes sure that the allocation fails
1009 * gracefully without invoking oom-killer and the system is
1010 * not destabilized.
1011 */
1012 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1013 GFP_KERNEL | __GFP_NORETRY,
1014 cpu_to_node(cpu_buffer->cpu));
1015 if (!bpage)
1016 goto free_pages;
1017
1018 rb_check_bpage(cpu_buffer, bpage);
1019
1020 list_add(&bpage->list, &pages);
1021
1022 page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
1023 GFP_KERNEL | __GFP_NORETRY, 0);
1024 if (!page)
1025 goto free_pages;
1026 bpage->page = page_address(page);
1027 rb_init_page(bpage->page);
1028 }
1029
1030 /*
1031 * The ring buffer page list is a circular list that does not
1032 * start and end with a list head. All page list items point to
1033 * other pages.
1034 */
1035 cpu_buffer->pages = pages.next;
1036 list_del(&pages);
1037
1038 rb_check_pages(cpu_buffer);
1039
1040 return 0;
1041
1042 free_pages:
1043 list_for_each_entry_safe(bpage, tmp, &pages, list) {
1044 list_del_init(&bpage->list);
1045 free_buffer_page(bpage);
1046 }
1047 return -ENOMEM;
1048}
1049
1050static struct ring_buffer_per_cpu *
1051rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
1052{
1053 struct ring_buffer_per_cpu *cpu_buffer;
1054 struct buffer_page *bpage;
1055 struct page *page;
1056 int ret;
1057
1058 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1059 GFP_KERNEL, cpu_to_node(cpu));
1060 if (!cpu_buffer)
1061 return NULL;
1062
1063 cpu_buffer->cpu = cpu;
1064 cpu_buffer->buffer = buffer;
1065 spin_lock_init(&cpu_buffer->reader_lock);
1066 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1067 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1068
1069 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1070 GFP_KERNEL, cpu_to_node(cpu));
1071 if (!bpage)
1072 goto fail_free_buffer;
1073
1074 rb_check_bpage(cpu_buffer, bpage);
1075
1076 cpu_buffer->reader_page = bpage;
1077 page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
1078 if (!page)
1079 goto fail_free_reader;
1080 bpage->page = page_address(page);
1081 rb_init_page(bpage->page);
1082
1083 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1084
1085 ret = rb_allocate_pages(cpu_buffer, buffer->pages);
1086 if (ret < 0)
1087 goto fail_free_reader;
1088
1089 cpu_buffer->head_page
1090 = list_entry(cpu_buffer->pages, struct buffer_page, list);
1091 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1092
1093 rb_head_page_activate(cpu_buffer);
1094
1095 return cpu_buffer;
1096
1097 fail_free_reader:
1098 free_buffer_page(cpu_buffer->reader_page);
1099
1100 fail_free_buffer:
1101 kfree(cpu_buffer);
1102 return NULL;
1103}
1104
1105static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1106{
1107 struct list_head *head = cpu_buffer->pages;
1108 struct buffer_page *bpage, *tmp;
1109
1110 free_buffer_page(cpu_buffer->reader_page);
1111
1112 rb_head_page_deactivate(cpu_buffer);
1113
1114 if (head) {
1115 list_for_each_entry_safe(bpage, tmp, head, list) {
1116 list_del_init(&bpage->list);
1117 free_buffer_page(bpage);
1118 }
1119 bpage = list_entry(head, struct buffer_page, list);
1120 free_buffer_page(bpage);
1121 }
1122
1123 kfree(cpu_buffer);
1124}
1125
1126#ifdef CONFIG_HOTPLUG_CPU
1127static int rb_cpu_notify(struct notifier_block *self,
1128 unsigned long action, void *hcpu);
1129#endif
1130
1131/**
1132 * ring_buffer_alloc - allocate a new ring_buffer
1133 * @size: the size in bytes per cpu that is needed.
1134 * @flags: attributes to set for the ring buffer.
1135 *
1136 * Currently the only flag that is available is the RB_FL_OVERWRITE
1137 * flag. This flag means that the buffer will overwrite old data
1138 * when the buffer wraps. If this flag is not set, the buffer will
1139 * drop data when the tail hits the head.
1140 */
1141struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1142 struct lock_class_key *key)
1143{
1144 struct ring_buffer *buffer;
1145 int bsize;
1146 int cpu;
1147
1148 /* keep it in its own cache line */
1149 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1150 GFP_KERNEL);
1151 if (!buffer)
1152 return NULL;
1153
1154 if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1155 goto fail_free_buffer;
1156
1157 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1158 buffer->flags = flags;
1159 buffer->clock = trace_clock_local;
1160 buffer->reader_lock_key = key;
1161
1162 /* need at least two pages */
1163 if (buffer->pages < 2)
1164 buffer->pages = 2;
1165
1166 /*
1167 * In case of non-hotplug cpu, if the ring-buffer is allocated
1168 * in early initcall, it will not be notified of secondary cpus.
1169 * In that off case, we need to allocate for all possible cpus.
1170 */
1171#ifdef CONFIG_HOTPLUG_CPU
1172 get_online_cpus();
1173 cpumask_copy(buffer->cpumask, cpu_online_mask);
1174#else
1175 cpumask_copy(buffer->cpumask, cpu_possible_mask);
1176#endif
1177 buffer->cpus = nr_cpu_ids;
1178
1179 bsize = sizeof(void *) * nr_cpu_ids;
1180 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1181 GFP_KERNEL);
1182 if (!buffer->buffers)
1183 goto fail_free_cpumask;
1184
1185 for_each_buffer_cpu(buffer, cpu) {
1186 buffer->buffers[cpu] =
1187 rb_allocate_cpu_buffer(buffer, cpu);
1188 if (!buffer->buffers[cpu])
1189 goto fail_free_buffers;
1190 }
1191
1192#ifdef CONFIG_HOTPLUG_CPU
1193 buffer->cpu_notify.notifier_call = rb_cpu_notify;
1194 buffer->cpu_notify.priority = 0;
1195 register_cpu_notifier(&buffer->cpu_notify);
1196#endif
1197
1198 put_online_cpus();
1199 mutex_init(&buffer->mutex);
1200
1201 return buffer;
1202
1203 fail_free_buffers:
1204 for_each_buffer_cpu(buffer, cpu) {
1205 if (buffer->buffers[cpu])
1206 rb_free_cpu_buffer(buffer->buffers[cpu]);
1207 }
1208 kfree(buffer->buffers);
1209
1210 fail_free_cpumask:
1211 free_cpumask_var(buffer->cpumask);
1212 put_online_cpus();
1213
1214 fail_free_buffer:
1215 kfree(buffer);
1216 return NULL;
1217}
1218EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
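
/*
 * Callers normally use the ring_buffer_alloc() wrapper from
 * linux/ring_buffer.h, which supplies the lock_class_key for us. A
 * minimal sketch (error handling trimmed):
 *
 *	struct ring_buffer *rb;
 *
 *	rb = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!rb)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(rb);
 *
 * The requested size is rounded up to whole BUF_PAGE_SIZE pages per
 * cpu, with a floor of two pages, exactly as computed above.
 */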
1219
1220/**
1221 * ring_buffer_free - free a ring buffer.
1222 * @buffer: the buffer to free.
1223 */
1224void
1225ring_buffer_free(struct ring_buffer *buffer)
1226{
1227 int cpu;
1228
1229 get_online_cpus();
1230
1231#ifdef CONFIG_HOTPLUG_CPU
1232 unregister_cpu_notifier(&buffer->cpu_notify);
1233#endif
1234
1235 for_each_buffer_cpu(buffer, cpu)
1236 rb_free_cpu_buffer(buffer->buffers[cpu]);
1237
1238 put_online_cpus();
1239
1240 kfree(buffer->buffers);
1241 free_cpumask_var(buffer->cpumask);
1242
1243 kfree(buffer);
1244}
1245EXPORT_SYMBOL_GPL(ring_buffer_free);
1246
1247void ring_buffer_set_clock(struct ring_buffer *buffer,
1248 u64 (*clock)(void))
1249{
1250 buffer->clock = clock;
1251}
1252
1253static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1254
1255static void
1256rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
1257{
1258 struct buffer_page *bpage;
1259 struct list_head *p;
1260 unsigned i;
1261
1262 spin_lock_irq(&cpu_buffer->reader_lock);
1263 rb_head_page_deactivate(cpu_buffer);
1264
1265 for (i = 0; i < nr_pages; i++) {
1266 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
1267 goto out;
1268 p = cpu_buffer->pages->next;
1269 bpage = list_entry(p, struct buffer_page, list);
1270 list_del_init(&bpage->list);
1271 free_buffer_page(bpage);
1272 }
1273 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
1274 goto out;
1275
1276 rb_reset_cpu(cpu_buffer);
1277 rb_check_pages(cpu_buffer);
1278
1279out:
1280 spin_unlock_irq(&cpu_buffer->reader_lock);
1281}
1282
1283static void
1284rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
1285 struct list_head *pages, unsigned nr_pages)
1286{
1287 struct buffer_page *bpage;
1288 struct list_head *p;
1289 unsigned i;
1290
1291 spin_lock_irq(&cpu_buffer->reader_lock);
1292 rb_head_page_deactivate(cpu_buffer);
1293
1294 for (i = 0; i < nr_pages; i++) {
1295 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
1296 goto out;
1297 p = pages->next;
1298 bpage = list_entry(p, struct buffer_page, list);
1299 list_del_init(&bpage->list);
1300 list_add_tail(&bpage->list, cpu_buffer->pages);
1301 }
1302 rb_reset_cpu(cpu_buffer);
1303 rb_check_pages(cpu_buffer);
1304
1305out:
1306 spin_unlock_irq(&cpu_buffer->reader_lock);
1307}
1308
1309/**
1310 * ring_buffer_resize - resize the ring buffer
1311 * @buffer: the buffer to resize.
1312 * @size: the new size.
1313 *
1314 * Minimum size is 2 * BUF_PAGE_SIZE.
1315 *
1316 * Returns -1 on failure.
1317 */
1318int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
1319{
1320 struct ring_buffer_per_cpu *cpu_buffer;
1321 unsigned nr_pages, rm_pages, new_pages;
1322 struct buffer_page *bpage, *tmp;
1323 unsigned long buffer_size;
1324 LIST_HEAD(pages);
1325 int i, cpu;
1326
1327 /*
1328 * Always succeed at resizing a non-existent buffer:
1329 */
1330 if (!buffer)
1331 return size;
1332
1333 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1334 size *= BUF_PAGE_SIZE;
1335 buffer_size = buffer->pages * BUF_PAGE_SIZE;
1336
1337 /* we need a minimum of two pages */
1338 if (size < BUF_PAGE_SIZE * 2)
1339 size = BUF_PAGE_SIZE * 2;
1340
1341 if (size == buffer_size)
1342 return size;
1343
1344 atomic_inc(&buffer->record_disabled);
1345
1346 /* Make sure all writers are done with this buffer. */
1347 synchronize_sched();
1348
1349 mutex_lock(&buffer->mutex);
1350 get_online_cpus();
1351
1352 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1353
1354 if (size < buffer_size) {
1355
1356 /* easy case, just free pages */
1357 if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
1358 goto out_fail;
1359
1360 rm_pages = buffer->pages - nr_pages;
1361
1362 for_each_buffer_cpu(buffer, cpu) {
1363 cpu_buffer = buffer->buffers[cpu];
1364 rb_remove_pages(cpu_buffer, rm_pages);
1365 }
1366 goto out;
1367 }
1368
1369 /*
1370 * This is a bit more difficult. We only want to add pages
1371 * when we can allocate enough for all CPUs. We do this
1372 * by allocating all the pages and storing them on a local
1373 * linked list. If we succeed in our allocation, then we
1374 * add these pages to the cpu_buffers. Otherwise we just free
1375 * them all and return -ENOMEM;
1376 */
1377 if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
1378 goto out_fail;
1379
1380 new_pages = nr_pages - buffer->pages;
1381
1382 for_each_buffer_cpu(buffer, cpu) {
1383 for (i = 0; i < new_pages; i++) {
1384 struct page *page;
1385 /*
1386 * __GFP_NORETRY flag makes sure that the allocation
1387 * fails gracefully without invoking oom-killer and
1388 * the system is not destabilized.
1389 */
1390 bpage = kzalloc_node(ALIGN(sizeof(*bpage),
1391 cache_line_size()),
1392 GFP_KERNEL | __GFP_NORETRY,
1393 cpu_to_node(cpu));
1394 if (!bpage)
1395 goto free_pages;
1396 list_add(&bpage->list, &pages);
1397 page = alloc_pages_node(cpu_to_node(cpu),
1398 GFP_KERNEL | __GFP_NORETRY, 0);
1399 if (!page)
1400 goto free_pages;
1401 bpage->page = page_address(page);
1402 rb_init_page(bpage->page);
1403 }
1404 }
1405
1406 for_each_buffer_cpu(buffer, cpu) {
1407 cpu_buffer = buffer->buffers[cpu];
1408 rb_insert_pages(cpu_buffer, &pages, new_pages);
1409 }
1410
1411 if (RB_WARN_ON(buffer, !list_empty(&pages)))
1412 goto out_fail;
1413
1414 out:
1415 buffer->pages = nr_pages;
1416 put_online_cpus();
1417 mutex_unlock(&buffer->mutex);
1418
1419 atomic_dec(&buffer->record_disabled);
1420
1421 return size;
1422
1423 free_pages:
1424 list_for_each_entry_safe(bpage, tmp, &pages, list) {
1425 list_del_init(&bpage->list);
1426 free_buffer_page(bpage);
1427 }
1428 put_online_cpus();
1429 mutex_unlock(&buffer->mutex);
1430 atomic_dec(&buffer->record_disabled);
1431 return -ENOMEM;
1432
1433 /*
1434 * Something went totally wrong, and we are too paranoid
1435 * to even clean up the mess.
1436 */
1437 out_fail:
1438 put_online_cpus();
1439 mutex_unlock(&buffer->mutex);
1440 atomic_dec(&buffer->record_disabled);
1441 return -1;
1442}
1443EXPORT_SYMBOL_GPL(ring_buffer_resize);
1444
1445void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
1446{
1447 mutex_lock(&buffer->mutex);
1448 if (val)
1449 buffer->flags |= RB_FL_OVERWRITE;
1450 else
1451 buffer->flags &= ~RB_FL_OVERWRITE;
1452 mutex_unlock(&buffer->mutex);
1453}
1454EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
1455
1456static inline void *
1457__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
1458{
1459 return bpage->data + index;
1460}
1461
1462static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
1463{
1464 return bpage->page->data + index;
1465}
1466
1467static inline struct ring_buffer_event *
1468rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
1469{
1470 return __rb_page_index(cpu_buffer->reader_page,
1471 cpu_buffer->reader_page->read);
1472}
1473
1474static inline struct ring_buffer_event *
1475rb_iter_head_event(struct ring_buffer_iter *iter)
1476{
1477 return __rb_page_index(iter->head_page, iter->head);
1478}
1479
1480static inline unsigned long rb_page_write(struct buffer_page *bpage)
1481{
1482 return local_read(&bpage->write) & RB_WRITE_MASK;
1483}
1484
1485static inline unsigned rb_page_commit(struct buffer_page *bpage)
1486{
1487 return local_read(&bpage->page->commit);
1488}
1489
1490static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1491{
1492 return local_read(&bpage->entries) & RB_WRITE_MASK;
1493}
1494
1495/* Size is determined by what has been committed */
1496static inline unsigned rb_page_size(struct buffer_page *bpage)
1497{
1498 return rb_page_commit(bpage);
1499}
1500
1501static inline unsigned
1502rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
1503{
1504 return rb_page_commit(cpu_buffer->commit_page);
1505}
1506
1507static inline unsigned
1508rb_event_index(struct ring_buffer_event *event)
1509{
1510 unsigned long addr = (unsigned long)event;
1511
1512 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
1513}
1514
1515static inline int
1516rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1517 struct ring_buffer_event *event)
1518{
1519 unsigned long addr = (unsigned long)event;
1520 unsigned long index;
1521
1522 index = rb_event_index(event);
1523 addr &= PAGE_MASK;
1524
1525 return cpu_buffer->commit_page->page == (void *)addr &&
1526 rb_commit_index(cpu_buffer) == index;
1527}
1528
1529static void
1530rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1531{
1532 unsigned long max_count;
1533
1534 /*
1535 * We only race with interrupts and NMIs on this CPU.
1536 * If we own the commit event, then we can commit
1537 * all others that interrupted us, since the interruptions
1538 * are in stack format (they finish before they come
1539 * back to us). This allows us to do a simple loop to
1540 * assign the commit to the tail.
1541 */
1542 again:
1543 max_count = cpu_buffer->buffer->pages * 100;
1544
1545 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
1546 if (RB_WARN_ON(cpu_buffer, !(--max_count)))
1547 return;
1548 if (RB_WARN_ON(cpu_buffer,
1549 rb_is_reader_page(cpu_buffer->tail_page)))
1550 return;
1551 local_set(&cpu_buffer->commit_page->page->commit,
1552 rb_page_write(cpu_buffer->commit_page));
1553 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1554 cpu_buffer->write_stamp =
1555 cpu_buffer->commit_page->page->time_stamp;
1556 /* add barrier to keep gcc from optimizing too much */
1557 barrier();
1558 }
1559 while (rb_commit_index(cpu_buffer) !=
1560 rb_page_write(cpu_buffer->commit_page)) {
1561
1562 local_set(&cpu_buffer->commit_page->page->commit,
1563 rb_page_write(cpu_buffer->commit_page));
1564 RB_WARN_ON(cpu_buffer,
1565 local_read(&cpu_buffer->commit_page->page->commit) &
1566 ~RB_WRITE_MASK);
1567 barrier();
1568 }
1569
1570 /* again, keep gcc from optimizing */
1571 barrier();
1572
1573 /*
1574 * If an interrupt came in just after the first while loop
1575 * and pushed the tail page forward, we will be left with
1576 * a dangling commit that will never go forward.
1577 */
1578 if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1579 goto again;
1580}
1581
1582static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1583{
1584 cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
1585 cpu_buffer->reader_page->read = 0;
1586}
1587
1588static void rb_inc_iter(struct ring_buffer_iter *iter)
1589{
1590 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1591
1592 /*
1593 * The iterator could be on the reader page (it starts there).
1594 * But the head could have moved, since the reader was
1595 * found. Check for this case and assign the iterator
1596 * to the head page instead of next.
1597 */
1598 if (iter->head_page == cpu_buffer->reader_page)
1599 iter->head_page = rb_set_head_page(cpu_buffer);
1600 else
1601 rb_inc_page(cpu_buffer, &iter->head_page);
1602
1603 iter->read_stamp = iter->head_page->page->time_stamp;
1604 iter->head = 0;
1605}
1606
1607/* Slow path, do not inline */
1608static noinline struct ring_buffer_event *
1609rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
1610{
1611 event->type_len = RINGBUF_TYPE_TIME_EXTEND;
1612
1613 /* Not the first event on the page? */
1614 if (rb_event_index(event)) {
1615 event->time_delta = delta & TS_MASK;
1616 event->array[0] = delta >> TS_SHIFT;
1617 } else {
1618 /* nope, just zero it */
1619 event->time_delta = 0;
1620 event->array[0] = 0;
1621 }
1622
1623 return skip_time_extend(event);
1624}
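
/*
 * Worked example of the encoding above: a delta of 0x12345678 (too big
 * for the 27-bit time_delta field) is stored as
 *
 *	time_delta = 0x12345678 & TS_MASK    == 0x02345678
 *	array[0]   = 0x12345678 >> TS_SHIFT  == 0x2
 *
 * and the read side reassembles it as
 * (array[0] << TS_SHIFT) + time_delta == 0x12345678.
 */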
1625
1626/**
1627 * ring_buffer_update_event - update event type and data
1628 * @event: the event to update
1629 * @type: the type of event
1630 * @length: the size of the event field in the ring buffer
1631 *
1632 * Update the type and data fields of the event. The length
1633 * is the actual size that is written to the ring buffer,
1634 * and with this, we can determine what to place into the
1635 * data field.
1636 */
1637static void
1638rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
1639 struct ring_buffer_event *event, unsigned length,
1640 int add_timestamp, u64 delta)
1641{
1642 /* Only a commit updates the timestamp */
1643 if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
1644 delta = 0;
1645
1646 /*
1647 * If we need to add a timestamp, then we
1648 * add it to the start of the reserved space.
1649 */
1650 if (unlikely(add_timestamp)) {
1651 event = rb_add_time_stamp(event, delta);
1652 length -= RB_LEN_TIME_EXTEND;
1653 delta = 0;
1654 }
1655
1656 event->time_delta = delta;
1657 length -= RB_EVNT_HDR_SIZE;
1658 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
1659 event->type_len = 0;
1660 event->array[0] = length;
1661 } else
1662 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
1663}
1664
1665/*
1666 * rb_handle_head_page - writer hit the head page
1667 *
1668 * Returns: +1 to retry page
1669 * 0 to continue
1670 * -1 on error
1671 */
1672static int
1673rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
1674 struct buffer_page *tail_page,
1675 struct buffer_page *next_page)
1676{
1677 struct buffer_page *new_head;
1678 int entries;
1679 int type;
1680 int ret;
1681
1682 entries = rb_page_entries(next_page);
1683
1684 /*
1685 * The hard part is here. We need to move the head
1686 * forward, and protect against both readers on
1687 * other CPUs and writers coming in via interrupts.
1688 */
1689 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
1690 RB_PAGE_HEAD);
1691
1692 /*
1693 * type can be one of four:
1694 * NORMAL - an interrupt already moved it for us
1695 * HEAD - we are the first to get here.
1696 * UPDATE - we are the interrupt interrupting
1697 * a current move.
1698 * MOVED - a reader on another CPU moved the next
1699 * pointer to its reader page. Give up
1700 * and try again.
1701 */
1702
1703 switch (type) {
1704 case RB_PAGE_HEAD:
1705 /*
1706 * We changed the head to UPDATE, thus
1707 * it is our responsibility to update
1708 * the counters.
1709 */
1710 local_add(entries, &cpu_buffer->overrun);
1711
1712 /*
1713 * The entries will be zeroed out when we move the
1714 * tail page.
1715 */
1716
1717 /* still more to do */
1718 break;
1719
1720 case RB_PAGE_UPDATE:
1721 /*
1722 * This is an interrupt that interrupted the
1723 * previous update. Still more to do.
1724 */
1725 break;
1726 case RB_PAGE_NORMAL:
1727 /*
1728 * An interrupt came in before the update
1729 * and processed this for us.
1730 * Nothing left to do.
1731 */
1732 return 1;
1733 case RB_PAGE_MOVED:
1734 /*
1735 * The reader is on another CPU and just did
1736 * a swap with our next_page.
1737 * Try again.
1738 */
1739 return 1;
1740 default:
1741 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
1742 return -1;
1743 }
1744
1745 /*
1746 * Now that we are here, the old head pointer is
1747 * set to UPDATE. This will keep the reader from
1748 * swapping the head page with the reader page.
1749 * The reader (on another CPU) will spin till
1750 * we are finished.
1751 *
1752 * We just need to protect against interrupts
1753 * doing the job. We will set the next pointer
1754 * to HEAD. After that, we set the old pointer
1755 * to NORMAL, but only if it was HEAD before;
1756 * otherwise we are an interrupt, and only
1757 * want the outermost commit to reset it.
1758 */
1759 new_head = next_page;
1760 rb_inc_page(cpu_buffer, &new_head);
1761
1762 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
1763 RB_PAGE_NORMAL);
1764
1765 /*
1766 * Valid returns are:
1767 * HEAD - an interrupt came in and already set it.
1768 * NORMAL - One of two things:
1769 * 1) We really set it.
1770 * 2) A bunch of interrupts came in and moved
1771 * the page forward again.
1772 */
1773 switch (ret) {
1774 case RB_PAGE_HEAD:
1775 case RB_PAGE_NORMAL:
1776 /* OK */
1777 break;
1778 default:
1779 RB_WARN_ON(cpu_buffer, 1);
1780 return -1;
1781 }
1782
1783 /*
1784 * It is possible that an interrupt came in,
1785 * set the head up, then more interrupts came in
1786 * and moved it again. When we get back here,
1787 * the page would have been set to NORMAL but we
1788 * just set it back to HEAD.
1789 *
1790 * How do you detect this? Well, if that happened
1791 * the tail page would have moved.
1792 */
1793 if (ret == RB_PAGE_NORMAL) {
1794 /*
1795 * If the tail had moved past next, then we need
1796 * to reset the pointer.
1797 */
1798 if (cpu_buffer->tail_page != tail_page &&
1799 cpu_buffer->tail_page != next_page)
1800 rb_head_page_set_normal(cpu_buffer, new_head,
1801 next_page,
1802 RB_PAGE_HEAD);
1803 }
1804
1805 /*
1806 * If this was the outer most commit (the one that
1807 * changed the original pointer from HEAD to UPDATE),
1808 * then it is up to us to reset it to NORMAL.
1809 */
1810 if (type == RB_PAGE_HEAD) {
1811 ret = rb_head_page_set_normal(cpu_buffer, next_page,
1812 tail_page,
1813 RB_PAGE_UPDATE);
1814 if (RB_WARN_ON(cpu_buffer,
1815 ret != RB_PAGE_UPDATE))
1816 return -1;
1817 }
1818
1819 return 0;
1820}
1821
1822static unsigned rb_calculate_event_length(unsigned length)
1823{
1824 struct ring_buffer_event event; /* Used only for sizeof array */
1825
1826 /* zero length can cause confusions */
1827 if (!length)
1828 length = 1;
1829
1830 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
1831 length += sizeof(event.array[0]);
1832
1833 length += RB_EVNT_HDR_SIZE;
1834 length = ALIGN(length, RB_ARCH_ALIGNMENT);
1835
1836 return length;
1837}
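
/*
 * For example, with RB_FORCE_8BYTE_ALIGNMENT == 0 a requested length of
 * 10 bytes becomes 10 + RB_EVNT_HDR_SIZE (4) == 14, and
 * ALIGN(14, RB_ARCH_ALIGNMENT) rounds that up to 16 bytes reserved on
 * the page. A zero-length request is first bumped to 1 byte and so
 * still reserves 8 bytes.
 */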
1838
1839static inline void
1840rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
1841 struct buffer_page *tail_page,
1842 unsigned long tail, unsigned long length)
1843{
1844 struct ring_buffer_event *event;
1845
1846 /*
1847 * Only the event that crossed the page boundary
1848 * must fill the old tail_page with padding.
1849 */
1850 if (tail >= BUF_PAGE_SIZE) {
1851 /*
1852 * If the page was filled, then we still need
1853 * to update the real_end. Reset it to zero
1854 * and the reader will ignore it.
1855 */
1856 if (tail == BUF_PAGE_SIZE)
1857 tail_page->real_end = 0;
1858
1859 local_sub(length, &tail_page->write);
1860 return;
1861 }
1862
1863 event = __rb_page_index(tail_page, tail);
1864 kmemcheck_annotate_bitfield(event, bitfield);
1865
1866 /*
1867 * Save the original length to the meta data.
1868 * This will be used by the reader to add lost event
1869 * counter.
1870 */
1871 tail_page->real_end = tail;
1872
1873 /*
1874 * If this event is bigger than the minimum size, then
1875 * we need to be careful that we don't subtract the
1876 * write counter enough to allow another writer to slip
1877 * in on this page.
1878 * We put in a discarded commit instead, to make sure
1879 * that this space is not used again.
1880 *
1881 * If we are less than the minimum size, we don't need to
1882 * worry about it.
1883 */
1884 if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
1885 /* No room for any events */
1886
1887 /* Mark the rest of the page with padding */
1888 rb_event_set_padding(event);
1889
1890 /* Set the write back to the previous setting */
1891 local_sub(length, &tail_page->write);
1892 return;
1893 }
1894
1895 /* Put in a discarded event */
1896 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
1897 event->type_len = RINGBUF_TYPE_PADDING;
1898 /* time delta must be non zero */
1899 event->time_delta = 1;
1900
1901 /* Set write to end of buffer */
1902 length = (tail + length) - BUF_PAGE_SIZE;
1903 local_sub(length, &tail_page->write);
1904}
1905
1906/*
1907 * This is the slow path, force gcc not to inline it.
1908 */
1909static noinline struct ring_buffer_event *
1910rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
1911 unsigned long length, unsigned long tail,
1912 struct buffer_page *tail_page, u64 ts)
1913{
1914 struct buffer_page *commit_page = cpu_buffer->commit_page;
1915 struct ring_buffer *buffer = cpu_buffer->buffer;
1916 struct buffer_page *next_page;
1917 int ret;
1918
1919 next_page = tail_page;
1920
1921 rb_inc_page(cpu_buffer, &next_page);
1922
1923 /*
1924 * If for some reason, we had an interrupt storm that made
1925 * it all the way around the buffer, bail, and warn
1926 * about it.
1927 */
1928 if (unlikely(next_page == commit_page)) {
1929 local_inc(&cpu_buffer->commit_overrun);
1930 goto out_reset;
1931 }
1932
1933 /*
1934 * This is where the fun begins!
1935 *
1936 * We are fighting against races between a reader that
1937 * could be on another CPU trying to swap its reader
1938 * page with the buffer head.
1939 *
1940 * We are also fighting against interrupts coming in and
1941 * moving the head or tail on us as well.
1942 *
1943 * If the next page is the head page then we have filled
1944 * the buffer, unless the commit page is still on the
1945 * reader page.
1946 */
1947 if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
1948
1949 /*
1950 * If the commit is not on the reader page, then
1951 * move the header page.
1952 */
1953 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
1954 /*
1955 * If we are not in overwrite mode,
1956 * this is easy, just stop here.
1957 */
1958 if (!(buffer->flags & RB_FL_OVERWRITE))
1959 goto out_reset;
1960
1961 ret = rb_handle_head_page(cpu_buffer,
1962 tail_page,
1963 next_page);
1964 if (ret < 0)
1965 goto out_reset;
1966 if (ret)
1967 goto out_again;
1968 } else {
1969 /*
1970 * We need to be careful here too. The
1971 * commit page could still be on the reader
1972 * page. We could have a small buffer, and
1973 * have filled up the buffer with events
1974 * from interrupts and such, and wrapped.
1975 *
1976 * Note, if the tail page is also on the
1977 * reader_page, we let it move out.
1978 */
1979 if (unlikely((cpu_buffer->commit_page !=
1980 cpu_buffer->tail_page) &&
1981 (cpu_buffer->commit_page ==
1982 cpu_buffer->reader_page))) {
1983 local_inc(&cpu_buffer->commit_overrun);
1984 goto out_reset;
1985 }
1986 }
1987 }
1988
1989 ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
1990 if (ret) {
1991 /*
1992 * Nested commits always have zero deltas, so
1993 * just reread the time stamp
1994 */
1995 ts = rb_time_stamp(buffer);
1996 next_page->page->time_stamp = ts;
1997 }
1998
1999 out_again:
2000
2001 rb_reset_tail(cpu_buffer, tail_page, tail, length);
2002
2003 /* fail and let the caller try again */
2004 return ERR_PTR(-EAGAIN);
2005
2006 out_reset:
2007 /* reset write */
2008 rb_reset_tail(cpu_buffer, tail_page, tail, length);
2009
2010 return NULL;
2011}
2012
2013static struct ring_buffer_event *
2014__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
2015 unsigned long length, u64 ts,
2016 u64 delta, int add_timestamp)
2017{
2018 struct buffer_page *tail_page;
2019 struct ring_buffer_event *event;
2020 unsigned long tail, write;
2021
2022 /*
2023 * If the time delta since the last event is too big to
2024 * hold in the time field of the event, then we append a
2025 * TIME EXTEND event ahead of the data event.
2026 */
2027 if (unlikely(add_timestamp))
2028 length += RB_LEN_TIME_EXTEND;
2029
2030 tail_page = cpu_buffer->tail_page;
2031 write = local_add_return(length, &tail_page->write);
2032
2033 /* set write to only the index of the write */
2034 write &= RB_WRITE_MASK;
2035 tail = write - length;
2036
2037	/* See if we shot past the end of this buffer page */
2038 if (unlikely(write > BUF_PAGE_SIZE))
2039 return rb_move_tail(cpu_buffer, length, tail,
2040 tail_page, ts);
2041
2042 /* We reserved something on the buffer */
2043
2044 event = __rb_page_index(tail_page, tail);
2045 kmemcheck_annotate_bitfield(event, bitfield);
2046 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
2047
2048 local_inc(&tail_page->entries);
2049
2050 /*
2051 * If this is the first commit on the page, then update
2052 * its timestamp.
2053 */
2054 if (!tail)
2055 tail_page->page->time_stamp = ts;
2056
2057 return event;
2058}
2059
2060static inline int
2061rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2062 struct ring_buffer_event *event)
2063{
2064 unsigned long new_index, old_index;
2065 struct buffer_page *bpage;
2066 unsigned long index;
2067 unsigned long addr;
2068
2069 new_index = rb_event_index(event);
2070 old_index = new_index + rb_event_ts_length(event);
2071 addr = (unsigned long)event;
2072 addr &= PAGE_MASK;
2073
2074 bpage = cpu_buffer->tail_page;
2075
2076 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2077 unsigned long write_mask =
2078 local_read(&bpage->write) & ~RB_WRITE_MASK;
2079 /*
2080 * This is on the tail page. It is possible that
2081 * a write could come in and move the tail page
2082 * and write to the next page. That is fine
2083 * because we just shorten what is on this page.
2084 */
2085 old_index += write_mask;
2086 new_index += write_mask;
2087 index = local_cmpxchg(&bpage->write, old_index, new_index);
2088 if (index == old_index)
2089 return 1;
2090 }
2091
2092 /* could not discard */
2093 return 0;
2094}
2095
2096static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2097{
2098 local_inc(&cpu_buffer->committing);
2099 local_inc(&cpu_buffer->commits);
2100}
2101
2102static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2103{
2104 unsigned long commits;
2105
2106 if (RB_WARN_ON(cpu_buffer,
2107 !local_read(&cpu_buffer->committing)))
2108 return;
2109
2110 again:
2111 commits = local_read(&cpu_buffer->commits);
2112 /* synchronize with interrupts */
2113 barrier();
2114 if (local_read(&cpu_buffer->committing) == 1)
2115 rb_set_commit_to_write(cpu_buffer);
2116
2117 local_dec(&cpu_buffer->committing);
2118
2119 /* synchronize with interrupts */
2120 barrier();
2121
2122 /*
2123 * Need to account for interrupts coming in between the
2124 * updating of the commit page and the clearing of the
2125 * committing counter.
2126 */
2127 if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2128 !local_read(&cpu_buffer->committing)) {
2129 local_inc(&cpu_buffer->committing);
2130 goto again;
2131 }
2132}
2133
2134static struct ring_buffer_event *
2135rb_reserve_next_event(struct ring_buffer *buffer,
2136 struct ring_buffer_per_cpu *cpu_buffer,
2137 unsigned long length)
2138{
2139 struct ring_buffer_event *event;
2140 u64 ts, delta;
2141 int nr_loops = 0;
2142 int add_timestamp;
2143 u64 diff;
2144
2145 rb_start_commit(cpu_buffer);
2146
2147#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2148 /*
2149	 * Due to the ability to swap a cpu buffer out of a ring buffer,
2150	 * it is possible it was swapped before we committed.
2151 * (committing stops a swap). We check for it here and
2152 * if it happened, we have to fail the write.
2153 */
2154 barrier();
2155 if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
2156 local_dec(&cpu_buffer->committing);
2157 local_dec(&cpu_buffer->commits);
2158 return NULL;
2159 }
2160#endif
2161
2162 length = rb_calculate_event_length(length);
2163 again:
2164 add_timestamp = 0;
2165 delta = 0;
2166
2167 /*
2168 * We allow for interrupts to reenter here and do a trace.
2169 * If one does, it will cause this original code to loop
2170 * back here. Even with heavy interrupts happening, this
2171 * should only happen a few times in a row. If this happens
2172 * 1000 times in a row, there must be either an interrupt
2173 * storm or we have something buggy.
2174 * Bail!
2175 */
2176 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2177 goto out_fail;
2178
2179 ts = rb_time_stamp(cpu_buffer->buffer);
2180 diff = ts - cpu_buffer->write_stamp;
2181
2182 /* make sure this diff is calculated here */
2183 barrier();
2184
2185 /* Did the write stamp get updated already? */
2186 if (likely(ts >= cpu_buffer->write_stamp)) {
2187 delta = diff;
2188 if (unlikely(test_time_stamp(delta))) {
2189 int local_clock_stable = 1;
2190#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2191 local_clock_stable = sched_clock_stable;
2192#endif
2193 WARN_ONCE(delta > (1ULL << 59),
2194 KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
2195 (unsigned long long)delta,
2196 (unsigned long long)ts,
2197 (unsigned long long)cpu_buffer->write_stamp,
2198 local_clock_stable ? "" :
2199 "If you just came from a suspend/resume,\n"
2200 "please switch to the trace global clock:\n"
2201 " echo global > /sys/kernel/debug/tracing/trace_clock\n");
2202 add_timestamp = 1;
2203 }
2204 }
2205
2206 event = __rb_reserve_next(cpu_buffer, length, ts,
2207 delta, add_timestamp);
2208 if (unlikely(PTR_ERR(event) == -EAGAIN))
2209 goto again;
2210
2211 if (!event)
2212 goto out_fail;
2213
2214 return event;
2215
2216 out_fail:
2217 rb_end_commit(cpu_buffer);
2218 return NULL;
2219}
2220
2221#ifdef CONFIG_TRACING
2222
2223#define TRACE_RECURSIVE_DEPTH 16
2224
2225/* Keep this code out of the fast path cache */
2226static noinline void trace_recursive_fail(void)
2227{
2228 /* Disable all tracing before we do anything else */
2229 tracing_off_permanent();
2230
2231 printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
2232 "HC[%lu]:SC[%lu]:NMI[%lu]\n",
2233 trace_recursion_buffer(),
2234 hardirq_count() >> HARDIRQ_SHIFT,
2235 softirq_count() >> SOFTIRQ_SHIFT,
2236 in_nmi());
2237
2238 WARN_ON_ONCE(1);
2239}
2240
2241static inline int trace_recursive_lock(void)
2242{
2243 trace_recursion_inc();
2244
2245 if (likely(trace_recursion_buffer() < TRACE_RECURSIVE_DEPTH))
2246 return 0;
2247
2248 trace_recursive_fail();
2249
2250 return -1;
2251}
2252
2253static inline void trace_recursive_unlock(void)
2254{
2255 WARN_ON_ONCE(!trace_recursion_buffer());
2256
2257 trace_recursion_dec();
2258}
2259
2260#else
2261
2262#define trace_recursive_lock() (0)
2263#define trace_recursive_unlock() do { } while (0)
2264
2265#endif
2266
2267/**
2268 * ring_buffer_lock_reserve - reserve a part of the buffer
2269 * @buffer: the ring buffer to reserve from
2270 * @length: the length of the data to reserve (excluding event header)
2271 *
2272 * Returns a reserved event on the ring buffer to copy directly to.
2273 * The user of this interface will need to get the body to write into
2274 * and can use the ring_buffer_event_data() interface.
2275 *
2276 * The length is the length of the data needed, not the event length
2277 * which also includes the event header.
2278 *
2279 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
2280 * If NULL is returned, then nothing has been allocated or locked.
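 *
 * A minimal usage sketch; the my_entry payload below is purely
 * illustrative and not part of this API:
 *
 *	struct my_entry *entry;
 *	struct ring_buffer_event *event;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->val = 42;
 *	ring_buffer_unlock_commit(buffer, event);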
2281 */
2282struct ring_buffer_event *
2283ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
2284{
2285 struct ring_buffer_per_cpu *cpu_buffer;
2286 struct ring_buffer_event *event;
2287 int cpu;
2288
2289 if (ring_buffer_flags != RB_BUFFERS_ON)
2290 return NULL;
2291
2292 /* If we are tracing schedule, we don't want to recurse */
2293 preempt_disable_notrace();
2294
2295 if (atomic_read(&buffer->record_disabled))
2296 goto out_nocheck;
2297
2298 if (trace_recursive_lock())
2299 goto out_nocheck;
2300
2301 cpu = raw_smp_processor_id();
2302
2303 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2304 goto out;
2305
2306 cpu_buffer = buffer->buffers[cpu];
2307
2308 if (atomic_read(&cpu_buffer->record_disabled))
2309 goto out;
2310
2311 if (length > BUF_MAX_DATA_SIZE)
2312 goto out;
2313
2314 event = rb_reserve_next_event(buffer, cpu_buffer, length);
2315 if (!event)
2316 goto out;
2317
2318 return event;
2319
2320 out:
2321 trace_recursive_unlock();
2322
2323 out_nocheck:
2324 preempt_enable_notrace();
2325 return NULL;
2326}
2327EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
2328
2329static void
2330rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2331 struct ring_buffer_event *event)
2332{
2333 u64 delta;
2334
2335 /*
2336	 * The first event in the commit queue updates the
2337	 * time stamp.
2338 */
2339 if (rb_event_is_commit(cpu_buffer, event)) {
2340 /*
2341 * A commit event that is first on a page
2342 * updates the write timestamp with the page stamp
2343 */
2344 if (!rb_event_index(event))
2345 cpu_buffer->write_stamp =
2346 cpu_buffer->commit_page->page->time_stamp;
2347 else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
2348 delta = event->array[0];
2349 delta <<= TS_SHIFT;
2350 delta += event->time_delta;
2351 cpu_buffer->write_stamp += delta;
2352 } else
2353 cpu_buffer->write_stamp += event->time_delta;
2354 }
2355}
2356
2357static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2358 struct ring_buffer_event *event)
2359{
2360 local_inc(&cpu_buffer->entries);
2361 rb_update_write_stamp(cpu_buffer, event);
2362 rb_end_commit(cpu_buffer);
2363}
2364
2365/**
2366 * ring_buffer_unlock_commit - commit a reserved event
2367 * @buffer: The buffer to commit to
2368 * @event: The event pointer to commit.
2369 *
2370 * This commits the data to the ring buffer, and releases any locks held.
2371 *
2372 * Must be paired with ring_buffer_lock_reserve.
2373 */
2374int ring_buffer_unlock_commit(struct ring_buffer *buffer,
2375 struct ring_buffer_event *event)
2376{
2377 struct ring_buffer_per_cpu *cpu_buffer;
2378 int cpu = raw_smp_processor_id();
2379
2380 cpu_buffer = buffer->buffers[cpu];
2381
2382 rb_commit(cpu_buffer, event);
2383
2384 trace_recursive_unlock();
2385
2386 preempt_enable_notrace();
2387
2388 return 0;
2389}
2390EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
2391
2392static inline void rb_event_discard(struct ring_buffer_event *event)
2393{
2394 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
2395 event = skip_time_extend(event);
2396
2397 /* array[0] holds the actual length for the discarded event */
2398 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2399 event->type_len = RINGBUF_TYPE_PADDING;
2400	/* time delta must be non-zero */
2401 if (!event->time_delta)
2402 event->time_delta = 1;
2403}
2404
2405/*
2406 * Decrement the entries to the page that an event is on.
2407 * The event does not even need to exist, only the pointer
2408 * to the page it is on. This may only be called before the commit
2409 * takes place.
2410 */
2411static inline void
2412rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2413 struct ring_buffer_event *event)
2414{
2415 unsigned long addr = (unsigned long)event;
2416 struct buffer_page *bpage = cpu_buffer->commit_page;
2417 struct buffer_page *start;
2418
2419 addr &= PAGE_MASK;
2420
2421 /* Do the likely case first */
2422 if (likely(bpage->page == (void *)addr)) {
2423 local_dec(&bpage->entries);
2424 return;
2425 }
2426
2427 /*
2428 * Because the commit page may be on the reader page we
2429	 * start with the next page and check the end of the loop there.
2430 */
2431 rb_inc_page(cpu_buffer, &bpage);
2432 start = bpage;
2433 do {
2434 if (bpage->page == (void *)addr) {
2435 local_dec(&bpage->entries);
2436 return;
2437 }
2438 rb_inc_page(cpu_buffer, &bpage);
2439 } while (bpage != start);
2440
2441 /* commit not part of this buffer?? */
2442 RB_WARN_ON(cpu_buffer, 1);
2443}
2444
2445/**
2446 * ring_buffer_commit_discard - discard an event that has not been committed
2447 * @buffer: the ring buffer
2448 * @event: non committed event to discard
2449 *
2450 * Sometimes an event that is in the ring buffer needs to be ignored.
2451 * This function lets the user discard an event in the ring buffer
2452 * and then that event will not be read later.
2453 *
2454 * This function only works if it is called before the item has been
2455 * committed. It will try to free the event from the ring buffer
2456 * if another event has not been added behind it.
2457 *
2458 * If another event has been added behind it, it will set the event
2459 * up as discarded, and perform the commit.
2460 *
2461 * If this function is called, do not call ring_buffer_unlock_commit on
2462 * the event.
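 *
 * A sketch of the reserve/discard pattern; event_is_unwanted() is a
 * hypothetical predicate supplied by the caller:
 *
 *	event = ring_buffer_lock_reserve(buffer, size);
 *	if (event) {
 *		data = ring_buffer_event_data(event);
 *		if (event_is_unwanted(data))
 *			ring_buffer_discard_commit(buffer, event);
 *		else
 *			ring_buffer_unlock_commit(buffer, event);
 *	}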
2463 */
2464void ring_buffer_discard_commit(struct ring_buffer *buffer,
2465 struct ring_buffer_event *event)
2466{
2467 struct ring_buffer_per_cpu *cpu_buffer;
2468 int cpu;
2469
2470 /* The event is discarded regardless */
2471 rb_event_discard(event);
2472
2473 cpu = smp_processor_id();
2474 cpu_buffer = buffer->buffers[cpu];
2475
2476 /*
2477 * This must only be called if the event has not been
2478 * committed yet. Thus we can assume that preemption
2479 * is still disabled.
2480 */
2481 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
2482
2483 rb_decrement_entry(cpu_buffer, event);
2484 if (rb_try_to_discard(cpu_buffer, event))
2485 goto out;
2486
2487 /*
2488	 * The commit is still visible to the reader, so we
2489 * must still update the timestamp.
2490 */
2491 rb_update_write_stamp(cpu_buffer, event);
2492 out:
2493 rb_end_commit(cpu_buffer);
2494
2495 trace_recursive_unlock();
2496
2497 preempt_enable_notrace();
2498
2499}
2500EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
2501
2502/**
2503 * ring_buffer_write - write data to the buffer without reserving
2504 * @buffer: The ring buffer to write to.
2505 * @length: The length of the data being written (excluding the event header)
2506 * @data: The data to write to the buffer.
2507 *
2508 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
2509 * one function. If you already have the data to write to the buffer, it
2510 * may be easier to simply call this function.
2511 *
2512 * Note, like ring_buffer_lock_reserve, the length is the length of the data
2513 * and not the length of the event which would hold the header.
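 *
 * For example, a sketch where buf and len are supplied by the caller:
 *
 *	ret = ring_buffer_write(buffer, len, buf);
 *	if (ret)
 *		pr_debug("ring buffer write failed\n");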
2514 */
2515int ring_buffer_write(struct ring_buffer *buffer,
2516 unsigned long length,
2517 void *data)
2518{
2519 struct ring_buffer_per_cpu *cpu_buffer;
2520 struct ring_buffer_event *event;
2521 void *body;
2522 int ret = -EBUSY;
2523 int cpu;
2524
2525 if (ring_buffer_flags != RB_BUFFERS_ON)
2526 return -EBUSY;
2527
2528 preempt_disable_notrace();
2529
2530 if (atomic_read(&buffer->record_disabled))
2531 goto out;
2532
2533 cpu = raw_smp_processor_id();
2534
2535 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2536 goto out;
2537
2538 cpu_buffer = buffer->buffers[cpu];
2539
2540 if (atomic_read(&cpu_buffer->record_disabled))
2541 goto out;
2542
2543 if (length > BUF_MAX_DATA_SIZE)
2544 goto out;
2545
2546 event = rb_reserve_next_event(buffer, cpu_buffer, length);
2547 if (!event)
2548 goto out;
2549
2550 body = rb_event_data(event);
2551
2552 memcpy(body, data, length);
2553
2554 rb_commit(cpu_buffer, event);
2555
2556 ret = 0;
2557 out:
2558 preempt_enable_notrace();
2559
2560 return ret;
2561}
2562EXPORT_SYMBOL_GPL(ring_buffer_write);
2563
2564static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
2565{
2566 struct buffer_page *reader = cpu_buffer->reader_page;
2567 struct buffer_page *head = rb_set_head_page(cpu_buffer);
2568 struct buffer_page *commit = cpu_buffer->commit_page;
2569
2570 /* In case of error, head will be NULL */
2571 if (unlikely(!head))
2572 return 1;
2573
2574 return reader->read == rb_page_commit(reader) &&
2575 (commit == reader ||
2576 (commit == head &&
2577 head->read == rb_page_commit(commit)));
2578}
2579
2580/**
2581 * ring_buffer_record_disable - stop all writes into the buffer
2582 * @buffer: The ring buffer to stop writes to.
2583 *
2584 * This prevents all writes to the buffer. Any attempt to write
2585 * to the buffer after this will fail and return NULL.
2586 *
2587 * The caller should call synchronize_sched() after this.
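 *
 * A typical sketch for quiescing writers before touching the buffer:
 *
 *	ring_buffer_record_disable(buffer);
 *	synchronize_sched();
 *	... read or manipulate the buffer safely ...
 *	ring_buffer_record_enable(buffer);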
2588 */
2589void ring_buffer_record_disable(struct ring_buffer *buffer)
2590{
2591 atomic_inc(&buffer->record_disabled);
2592}
2593EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
2594
2595/**
2596 * ring_buffer_record_enable - enable writes to the buffer
2597 * @buffer: The ring buffer to enable writes
2598 *
2599 * Note, multiple disables will need the same number of enables
2600 * to truly enable the writing (much like preempt_disable).
2601 */
2602void ring_buffer_record_enable(struct ring_buffer *buffer)
2603{
2604 atomic_dec(&buffer->record_disabled);
2605}
2606EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
2607
2608/**
2609 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
2610 * @buffer: The ring buffer to stop writes to.
2611 * @cpu: The CPU buffer to stop
2612 *
2613 * This prevents all writes to the buffer. Any attempt to write
2614 * to the buffer after this will fail and return NULL.
2615 *
2616 * The caller should call synchronize_sched() after this.
2617 */
2618void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
2619{
2620 struct ring_buffer_per_cpu *cpu_buffer;
2621
2622 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2623 return;
2624
2625 cpu_buffer = buffer->buffers[cpu];
2626 atomic_inc(&cpu_buffer->record_disabled);
2627}
2628EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
2629
2630/**
2631 * ring_buffer_record_enable_cpu - enable writes to the buffer
2632 * @buffer: The ring buffer to enable writes
2633 * @cpu: The CPU to enable.
2634 *
2635 * Note, multiple disables will need the same number of enables
2636 * to truly enable the writing (much like preempt_disable).
2637 */
2638void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
2639{
2640 struct ring_buffer_per_cpu *cpu_buffer;
2641
2642 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2643 return;
2644
2645 cpu_buffer = buffer->buffers[cpu];
2646 atomic_dec(&cpu_buffer->record_disabled);
2647}
2648EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
2649
2650/*
2651 * The total number of entries in the ring buffer is the running counter
2652 * of entries entered into the ring buffer, minus the sum of
2653 * the entries read from the ring buffer and the number of
2654 * entries that were overwritten.
2655 */
2656static inline unsigned long
2657rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
2658{
2659 return local_read(&cpu_buffer->entries) -
2660 (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
2661}
2662
2663/**
2664 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
2665 * @buffer: The ring buffer
2666 * @cpu: The per CPU buffer to get the entries from.
2667 */
2668unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
2669{
2670 struct ring_buffer_per_cpu *cpu_buffer;
2671
2672 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2673 return 0;
2674
2675 cpu_buffer = buffer->buffers[cpu];
2676
2677 return rb_num_of_entries(cpu_buffer);
2678}
2679EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
2680
2681/**
2682 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
2683 * @buffer: The ring buffer
2684 * @cpu: The per CPU buffer to get the number of overruns from
2685 */
2686unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
2687{
2688 struct ring_buffer_per_cpu *cpu_buffer;
2689 unsigned long ret;
2690
2691 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2692 return 0;
2693
2694 cpu_buffer = buffer->buffers[cpu];
2695 ret = local_read(&cpu_buffer->overrun);
2696
2697 return ret;
2698}
2699EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
2700
2701/**
2702 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
2703 * @buffer: The ring buffer
2704 * @cpu: The per CPU buffer to get the number of overruns from
2705 */
2706unsigned long
2707ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
2708{
2709 struct ring_buffer_per_cpu *cpu_buffer;
2710 unsigned long ret;
2711
2712 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2713 return 0;
2714
2715 cpu_buffer = buffer->buffers[cpu];
2716 ret = local_read(&cpu_buffer->commit_overrun);
2717
2718 return ret;
2719}
2720EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
2721
2722/**
2723 * ring_buffer_entries - get the number of entries in a buffer
2724 * @buffer: The ring buffer
2725 *
2726 * Returns the total number of entries in the ring buffer
2727 * (all CPU entries)
2728 */
2729unsigned long ring_buffer_entries(struct ring_buffer *buffer)
2730{
2731 struct ring_buffer_per_cpu *cpu_buffer;
2732 unsigned long entries = 0;
2733 int cpu;
2734
2735 /* if you care about this being correct, lock the buffer */
2736 for_each_buffer_cpu(buffer, cpu) {
2737 cpu_buffer = buffer->buffers[cpu];
2738 entries += rb_num_of_entries(cpu_buffer);
2739 }
2740
2741 return entries;
2742}
2743EXPORT_SYMBOL_GPL(ring_buffer_entries);
2744
2745/**
2746 * ring_buffer_overruns - get the number of overruns in buffer
2747 * @buffer: The ring buffer
2748 *
2749 * Returns the total number of overruns in the ring buffer
2750 * (all CPU entries)
2751 */
2752unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
2753{
2754 struct ring_buffer_per_cpu *cpu_buffer;
2755 unsigned long overruns = 0;
2756 int cpu;
2757
2758 /* if you care about this being correct, lock the buffer */
2759 for_each_buffer_cpu(buffer, cpu) {
2760 cpu_buffer = buffer->buffers[cpu];
2761 overruns += local_read(&cpu_buffer->overrun);
2762 }
2763
2764 return overruns;
2765}
2766EXPORT_SYMBOL_GPL(ring_buffer_overruns);
2767
2768static void rb_iter_reset(struct ring_buffer_iter *iter)
2769{
2770 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2771
2772 /* Iterator usage is expected to have record disabled */
2773 if (list_empty(&cpu_buffer->reader_page->list)) {
2774 iter->head_page = rb_set_head_page(cpu_buffer);
2775 if (unlikely(!iter->head_page))
2776 return;
2777 iter->head = iter->head_page->read;
2778 } else {
2779 iter->head_page = cpu_buffer->reader_page;
2780 iter->head = cpu_buffer->reader_page->read;
2781 }
2782 if (iter->head)
2783 iter->read_stamp = cpu_buffer->read_stamp;
2784 else
2785 iter->read_stamp = iter->head_page->page->time_stamp;
2786 iter->cache_reader_page = cpu_buffer->reader_page;
2787 iter->cache_read = cpu_buffer->read;
2788}
2789
2790/**
2791 * ring_buffer_iter_reset - reset an iterator
2792 * @iter: The iterator to reset
2793 *
2794 * Resets the iterator, so that it will start from the beginning
2795 * again.
2796 */
2797void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
2798{
2799 struct ring_buffer_per_cpu *cpu_buffer;
2800 unsigned long flags;
2801
2802 if (!iter)
2803 return;
2804
2805 cpu_buffer = iter->cpu_buffer;
2806
2807 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2808 rb_iter_reset(iter);
2809 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2810}
2811EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
2812
2813/**
2814 * ring_buffer_iter_empty - check if an iterator has no more to read
2815 * @iter: The iterator to check
2816 */
2817int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
2818{
2819 struct ring_buffer_per_cpu *cpu_buffer;
2820
2821 cpu_buffer = iter->cpu_buffer;
2822
2823 return iter->head_page == cpu_buffer->commit_page &&
2824 iter->head == rb_commit_index(cpu_buffer);
2825}
2826EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
2827
2828static void
2829rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2830 struct ring_buffer_event *event)
2831{
2832 u64 delta;
2833
2834 switch (event->type_len) {
2835 case RINGBUF_TYPE_PADDING:
2836 return;
2837
2838 case RINGBUF_TYPE_TIME_EXTEND:
2839 delta = event->array[0];
2840 delta <<= TS_SHIFT;
2841 delta += event->time_delta;
2842 cpu_buffer->read_stamp += delta;
2843 return;
2844
2845 case RINGBUF_TYPE_TIME_STAMP:
2846 /* FIXME: not implemented */
2847 return;
2848
2849 case RINGBUF_TYPE_DATA:
2850 cpu_buffer->read_stamp += event->time_delta;
2851 return;
2852
2853 default:
2854 BUG();
2855 }
2856 return;
2857}
2858
2859static void
2860rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
2861 struct ring_buffer_event *event)
2862{
2863 u64 delta;
2864
2865 switch (event->type_len) {
2866 case RINGBUF_TYPE_PADDING:
2867 return;
2868
2869 case RINGBUF_TYPE_TIME_EXTEND:
2870 delta = event->array[0];
2871 delta <<= TS_SHIFT;
2872 delta += event->time_delta;
2873 iter->read_stamp += delta;
2874 return;
2875
2876 case RINGBUF_TYPE_TIME_STAMP:
2877 /* FIXME: not implemented */
2878 return;
2879
2880 case RINGBUF_TYPE_DATA:
2881 iter->read_stamp += event->time_delta;
2882 return;
2883
2884 default:
2885 BUG();
2886 }
2887 return;
2888}
2889
2890static struct buffer_page *
2891rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
2892{
2893 struct buffer_page *reader = NULL;
2894 unsigned long overwrite;
2895 unsigned long flags;
2896 int nr_loops = 0;
2897 int ret;
2898
2899 local_irq_save(flags);
2900 arch_spin_lock(&cpu_buffer->lock);
2901
2902 again:
2903 /*
2904 * This should normally only loop twice. But because the
2905 * start of the reader inserts an empty page, it causes
2906 * a case where we will loop three times. There should be no
2907 * reason to loop four times (that I know of).
2908 */
2909 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
2910 reader = NULL;
2911 goto out;
2912 }
2913
2914 reader = cpu_buffer->reader_page;
2915
2916 /* If there's more to read, return this page */
2917 if (cpu_buffer->reader_page->read < rb_page_size(reader))
2918 goto out;
2919
2920 /* Never should we have an index greater than the size */
2921 if (RB_WARN_ON(cpu_buffer,
2922 cpu_buffer->reader_page->read > rb_page_size(reader)))
2923 goto out;
2924
2925 /* check if we caught up to the tail */
2926 reader = NULL;
2927 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
2928 goto out;
2929
2930 /*
2931 * Reset the reader page to size zero.
2932 */
2933 local_set(&cpu_buffer->reader_page->write, 0);
2934 local_set(&cpu_buffer->reader_page->entries, 0);
2935 local_set(&cpu_buffer->reader_page->page->commit, 0);
2936 cpu_buffer->reader_page->real_end = 0;
2937
2938 spin:
2939 /*
2940 * Splice the empty reader page into the list around the head.
2941 */
2942 reader = rb_set_head_page(cpu_buffer);
2943 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
2944 cpu_buffer->reader_page->list.prev = reader->list.prev;
2945
2946 /*
2947 * cpu_buffer->pages just needs to point to the buffer, it
2948	 * has no specific buffer page to point to. Let's move it out
2949 * of our way so we don't accidentally swap it.
2950 */
2951 cpu_buffer->pages = reader->list.prev;
2952
2953 /* The reader page will be pointing to the new head */
2954 rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
2955
2956 /*
2957 * We want to make sure we read the overruns after we set up our
2958 * pointers to the next object. The writer side does a
2959 * cmpxchg to cross pages which acts as the mb on the writer
2960 * side. Note, the reader will constantly fail the swap
2961 * while the writer is updating the pointers, so this
2962 * guarantees that the overwrite recorded here is the one we
2963 * want to compare with the last_overrun.
2964 */
2965 smp_mb();
2966 overwrite = local_read(&(cpu_buffer->overrun));
2967
2968 /*
2969 * Here's the tricky part.
2970 *
2971 * We need to move the pointer past the header page.
2972 * But we can only do that if a writer is not currently
2973 * moving it. The page before the header page has the
2974 * flag bit '1' set if it is pointing to the page we want.
2975	 * But if the writer is in the process of moving it,
2976	 * then it will be '2', or '0' if it has already moved.
2977 */
2978
2979 ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
2980
2981 /*
2982 * If we did not convert it, then we must try again.
2983 */
2984 if (!ret)
2985 goto spin;
2986
2987 /*
2988 * Yeah! We succeeded in replacing the page.
2989 *
2990 * Now make the new head point back to the reader page.
2991 */
2992 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
2993 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
2994
2995 /* Finally update the reader page to the new head */
2996 cpu_buffer->reader_page = reader;
2997 rb_reset_reader_page(cpu_buffer);
2998
2999 if (overwrite != cpu_buffer->last_overrun) {
3000 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
3001 cpu_buffer->last_overrun = overwrite;
3002 }
3003
3004 goto again;
3005
3006 out:
3007 arch_spin_unlock(&cpu_buffer->lock);
3008 local_irq_restore(flags);
3009
3010 return reader;
3011}
3012
3013static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
3014{
3015 struct ring_buffer_event *event;
3016 struct buffer_page *reader;
3017 unsigned length;
3018
3019 reader = rb_get_reader_page(cpu_buffer);
3020
3021 /* This function should not be called when buffer is empty */
3022 if (RB_WARN_ON(cpu_buffer, !reader))
3023 return;
3024
3025 event = rb_reader_event(cpu_buffer);
3026
3027 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
3028 cpu_buffer->read++;
3029
3030 rb_update_read_stamp(cpu_buffer, event);
3031
3032 length = rb_event_length(event);
3033 cpu_buffer->reader_page->read += length;
3034}
3035
3036static void rb_advance_iter(struct ring_buffer_iter *iter)
3037{
3038 struct ring_buffer_per_cpu *cpu_buffer;
3039 struct ring_buffer_event *event;
3040 unsigned length;
3041
3042 cpu_buffer = iter->cpu_buffer;
3043
3044 /*
3045 * Check if we are at the end of the buffer.
3046 */
3047 if (iter->head >= rb_page_size(iter->head_page)) {
3048 /* discarded commits can make the page empty */
3049 if (iter->head_page == cpu_buffer->commit_page)
3050 return;
3051 rb_inc_iter(iter);
3052 return;
3053 }
3054
3055 event = rb_iter_head_event(iter);
3056
3057 length = rb_event_length(event);
3058
3059 /*
3060 * This should not be called to advance the header if we are
3061 * at the tail of the buffer.
3062 */
3063 if (RB_WARN_ON(cpu_buffer,
3064 (iter->head_page == cpu_buffer->commit_page) &&
3065 (iter->head + length > rb_commit_index(cpu_buffer))))
3066 return;
3067
3068 rb_update_iter_read_stamp(iter, event);
3069
3070 iter->head += length;
3071
3072 /* check for end of page padding */
3073 if ((iter->head >= rb_page_size(iter->head_page)) &&
3074 (iter->head_page != cpu_buffer->commit_page))
3075 rb_advance_iter(iter);
3076}
3077
3078static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
3079{
3080 return cpu_buffer->lost_events;
3081}
3082
3083static struct ring_buffer_event *
3084rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
3085 unsigned long *lost_events)
3086{
3087 struct ring_buffer_event *event;
3088 struct buffer_page *reader;
3089 int nr_loops = 0;
3090
3091 again:
3092 /*
3093 * We repeat when a time extend is encountered.
3094 * Since the time extend is always attached to a data event,
3095 * we should never loop more than once.
3096 * (We never hit the following condition more than twice).
3097 */
3098 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3099 return NULL;
3100
3101 reader = rb_get_reader_page(cpu_buffer);
3102 if (!reader)
3103 return NULL;
3104
3105 event = rb_reader_event(cpu_buffer);
3106
3107 switch (event->type_len) {
3108 case RINGBUF_TYPE_PADDING:
3109 if (rb_null_event(event))
3110 RB_WARN_ON(cpu_buffer, 1);
3111 /*
3112 * Because the writer could be discarding every
3113 * event it creates (which would probably be bad)
3114 * if we were to go back to "again" then we may never
3115 * catch up, and will trigger the warn on, or lock
3116 * the box. Return the padding, and we will release
3117 * the current locks, and try again.
3118 */
3119 return event;
3120
3121 case RINGBUF_TYPE_TIME_EXTEND:
3122 /* Internal data, OK to advance */
3123 rb_advance_reader(cpu_buffer);
3124 goto again;
3125
3126 case RINGBUF_TYPE_TIME_STAMP:
3127 /* FIXME: not implemented */
3128 rb_advance_reader(cpu_buffer);
3129 goto again;
3130
3131 case RINGBUF_TYPE_DATA:
3132 if (ts) {
3133 *ts = cpu_buffer->read_stamp + event->time_delta;
3134 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3135 cpu_buffer->cpu, ts);
3136 }
3137 if (lost_events)
3138 *lost_events = rb_lost_events(cpu_buffer);
3139 return event;
3140
3141 default:
3142 BUG();
3143 }
3144
3145 return NULL;
3146}
3147EXPORT_SYMBOL_GPL(ring_buffer_peek);
3148
3149static struct ring_buffer_event *
3150rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3151{
3152 struct ring_buffer *buffer;
3153 struct ring_buffer_per_cpu *cpu_buffer;
3154 struct ring_buffer_event *event;
3155 int nr_loops = 0;
3156
3157 cpu_buffer = iter->cpu_buffer;
3158 buffer = cpu_buffer->buffer;
3159
3160 /*
3161 * Check if someone performed a consuming read to
3162 * the buffer. A consuming read invalidates the iterator
3163 * and we need to reset the iterator in this case.
3164 */
3165 if (unlikely(iter->cache_read != cpu_buffer->read ||
3166 iter->cache_reader_page != cpu_buffer->reader_page))
3167 rb_iter_reset(iter);
3168
3169 again:
3170 if (ring_buffer_iter_empty(iter))
3171 return NULL;
3172
3173 /*
3174 * We repeat when a time extend is encountered.
3175 * Since the time extend is always attached to a data event,
3176 * we should never loop more than once.
3177 * (We never hit the following condition more than twice).
3178 */
3179 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3180 return NULL;
3181
3182 if (rb_per_cpu_empty(cpu_buffer))
3183 return NULL;
3184
3185 if (iter->head >= local_read(&iter->head_page->page->commit)) {
3186 rb_inc_iter(iter);
3187 goto again;
3188 }
3189
3190 event = rb_iter_head_event(iter);
3191
3192 switch (event->type_len) {
3193 case RINGBUF_TYPE_PADDING:
3194 if (rb_null_event(event)) {
3195 rb_inc_iter(iter);
3196 goto again;
3197 }
3198 rb_advance_iter(iter);
3199 return event;
3200
3201 case RINGBUF_TYPE_TIME_EXTEND:
3202 /* Internal data, OK to advance */
3203 rb_advance_iter(iter);
3204 goto again;
3205
3206 case RINGBUF_TYPE_TIME_STAMP:
3207 /* FIXME: not implemented */
3208 rb_advance_iter(iter);
3209 goto again;
3210
3211 case RINGBUF_TYPE_DATA:
3212 if (ts) {
3213 *ts = iter->read_stamp + event->time_delta;
3214 ring_buffer_normalize_time_stamp(buffer,
3215 cpu_buffer->cpu, ts);
3216 }
3217 return event;
3218
3219 default:
3220 BUG();
3221 }
3222
3223 return NULL;
3224}
3225EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
3226
3227static inline int rb_ok_to_lock(void)
3228{
3229 /*
3230 * If an NMI die dumps out the content of the ring buffer
3231 * do not grab locks. We also permanently disable the ring
3232	 * buffer too. A one-time deal is all you get from reading
3233 * the ring buffer from an NMI.
3234 */
3235 if (likely(!in_nmi()))
3236 return 1;
3237
3238 tracing_off_permanent();
3239 return 0;
3240}
3241
3242/**
3243 * ring_buffer_peek - peek at the next event to be read
3244 * @buffer: The ring buffer to read
3245 * @cpu: The cpu to peek at
3246 * @ts: The timestamp counter of this event.
3247 * @lost_events: a variable to store if events were lost (may be NULL)
3248 *
3249 * This will return the event that will be read next, but does
3250 * not consume the data.
3251 */
3252struct ring_buffer_event *
3253ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
3254 unsigned long *lost_events)
3255{
3256 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3257 struct ring_buffer_event *event;
3258 unsigned long flags;
3259 int dolock;
3260
3261 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3262 return NULL;
3263
3264 dolock = rb_ok_to_lock();
3265 again:
3266 local_irq_save(flags);
3267 if (dolock)
3268 spin_lock(&cpu_buffer->reader_lock);
3269 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3270 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3271 rb_advance_reader(cpu_buffer);
3272 if (dolock)
3273 spin_unlock(&cpu_buffer->reader_lock);
3274 local_irq_restore(flags);
3275
3276 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3277 goto again;
3278
3279 return event;
3280}
3281
3282/**
3283 * ring_buffer_iter_peek - peek at the next event to be read
3284 * @iter: The ring buffer iterator
3285 * @ts: The timestamp counter of this event.
3286 *
3287 * This will return the event that will be read next, but does
3288 * not increment the iterator.
3289 */
3290struct ring_buffer_event *
3291ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3292{
3293 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3294 struct ring_buffer_event *event;
3295 unsigned long flags;
3296
3297 again:
3298 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3299 event = rb_iter_peek(iter, ts);
3300 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3301
3302 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3303 goto again;
3304
3305 return event;
3306}
3307
3308/**
3309 * ring_buffer_consume - return an event and consume it
3310 * @buffer: The ring buffer to get the next event from
3311 * @cpu: the cpu to read the buffer from
3312 * @ts: a variable to store the timestamp (may be NULL)
3313 * @lost_events: a variable to store if events were lost (may be NULL)
3314 *
3315 * Returns the next event in the ring buffer, and that event is consumed.
3316 * Meaning, that sequential reads will keep returning a different event,
3317 * and eventually empty the ring buffer if the producer is slower.
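 *
 * A typical consuming-read loop looks roughly like this; drain() is a
 * hypothetical callback belonging to the caller:
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)))
 *		drain(ring_buffer_event_data(event), ts, lost);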
3318 */
3319struct ring_buffer_event *
3320ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
3321 unsigned long *lost_events)
3322{
3323 struct ring_buffer_per_cpu *cpu_buffer;
3324 struct ring_buffer_event *event = NULL;
3325 unsigned long flags;
3326 int dolock;
3327
3328 dolock = rb_ok_to_lock();
3329
3330 again:
3331 /* might be called in atomic */
3332 preempt_disable();
3333
3334 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3335 goto out;
3336
3337 cpu_buffer = buffer->buffers[cpu];
3338 local_irq_save(flags);
3339 if (dolock)
3340 spin_lock(&cpu_buffer->reader_lock);
3341
3342 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3343 if (event) {
3344 cpu_buffer->lost_events = 0;
3345 rb_advance_reader(cpu_buffer);
3346 }
3347
3348 if (dolock)
3349 spin_unlock(&cpu_buffer->reader_lock);
3350 local_irq_restore(flags);
3351
3352 out:
3353 preempt_enable();
3354
3355 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3356 goto again;
3357
3358 return event;
3359}
3360EXPORT_SYMBOL_GPL(ring_buffer_consume);
3361
3362/**
3363 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
3364 * @buffer: The ring buffer to read from
3365 * @cpu: The cpu buffer to iterate over
3366 *
3367 * This performs the initial preparations necessary to iterate
3368 * through the buffer. Memory is allocated, buffer recording
3369 * is disabled, and the iterator pointer is returned to the caller.
3370 *
3371 * Disabling buffer recording prevents the reading from being
3372 * corrupted. This is not a consuming read, so a producer is not
3373 * expected.
3374 *
3375 * After a sequence of ring_buffer_read_prepare calls, the user is
3376 * expected to make at least one call to ring_buffer_prepare_sync.
3377 * Afterwards, ring_buffer_read_start is invoked to get things going
3378 * for real.
3379 *
3380 * This overall must be paired with ring_buffer_read_finish.
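 *
 * A sketch of the full non-consuming read sequence for one cpu;
 * process() stands in for the caller's handling of each event:
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu);
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_read(iter, &ts)))
 *		process(event);
 *	ring_buffer_read_finish(iter);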
3381 */
3382struct ring_buffer_iter *
3383ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
3384{
3385 struct ring_buffer_per_cpu *cpu_buffer;
3386 struct ring_buffer_iter *iter;
3387
3388 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3389 return NULL;
3390
3391 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3392 if (!iter)
3393 return NULL;
3394
3395 cpu_buffer = buffer->buffers[cpu];
3396
3397 iter->cpu_buffer = cpu_buffer;
3398
3399 atomic_inc(&cpu_buffer->record_disabled);
3400
3401 return iter;
3402}
3403EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
3404
3405/**
3406 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
3407 *
3408 * All previously invoked ring_buffer_read_prepare calls to prepare
3409 * iterators will be synchronized. Afterwards, ring_buffer_read_start
3410 * calls on those iterators are allowed.
3411 */
3412void
3413ring_buffer_read_prepare_sync(void)
3414{
3415 synchronize_sched();
3416}
3417EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
3418
3419/**
3420 * ring_buffer_read_start - start a non consuming read of the buffer
3421 * @iter: The iterator returned by ring_buffer_read_prepare
3422 *
3423 * This finalizes the startup of an iteration through the buffer.
3424 * The iterator comes from a call to ring_buffer_read_prepare and
3425 * an intervening ring_buffer_read_prepare_sync must have been
3426 * performed.
3427 *
3428 * Must be paired with ring_buffer_read_finish.
3429 */
3430void
3431ring_buffer_read_start(struct ring_buffer_iter *iter)
3432{
3433 struct ring_buffer_per_cpu *cpu_buffer;
3434 unsigned long flags;
3435
3436 if (!iter)
3437 return;
3438
3439 cpu_buffer = iter->cpu_buffer;
3440
3441 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3442 arch_spin_lock(&cpu_buffer->lock);
3443 rb_iter_reset(iter);
3444 arch_spin_unlock(&cpu_buffer->lock);
3445 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3446}
3447EXPORT_SYMBOL_GPL(ring_buffer_read_start);
3448
3449/**
3450 * ring_buffer_read_finish - finish reading the iterator of the buffer
3451 * @iter: The iterator retrieved by ring_buffer_read_prepare
3452 *
3453 * This re-enables the recording to the buffer, and frees the
3454 * iterator.
3455 */
3456void
3457ring_buffer_read_finish(struct ring_buffer_iter *iter)
3458{
3459 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3460
3461 atomic_dec(&cpu_buffer->record_disabled);
3462 kfree(iter);
3463}
3464EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
3465
3466/**
3467 * ring_buffer_read - read the next item in the ring buffer by the iterator
3468 * @iter: The ring buffer iterator
3469 * @ts: The time stamp of the event read.
3470 *
3471 * This reads the next event in the ring buffer and increments the iterator.
3472 */
3473struct ring_buffer_event *
3474ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
3475{
3476 struct ring_buffer_event *event;
3477 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3478 unsigned long flags;
3479
3480 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3481 again:
3482 event = rb_iter_peek(iter, ts);
3483 if (!event)
3484 goto out;
3485
3486 if (event->type_len == RINGBUF_TYPE_PADDING)
3487 goto again;
3488
3489 rb_advance_iter(iter);
3490 out:
3491 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3492
3493 return event;
3494}
3495EXPORT_SYMBOL_GPL(ring_buffer_read);
3496
3497/**
3498 * ring_buffer_size - return the size of the ring buffer (in bytes)
3499 * @buffer: The ring buffer.
3500 */
3501unsigned long ring_buffer_size(struct ring_buffer *buffer)
3502{
3503 return BUF_PAGE_SIZE * buffer->pages;
3504}
3505EXPORT_SYMBOL_GPL(ring_buffer_size);
3506
3507static void
3508rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
3509{
3510 rb_head_page_deactivate(cpu_buffer);
3511
3512 cpu_buffer->head_page
3513 = list_entry(cpu_buffer->pages, struct buffer_page, list);
3514 local_set(&cpu_buffer->head_page->write, 0);
3515 local_set(&cpu_buffer->head_page->entries, 0);
3516 local_set(&cpu_buffer->head_page->page->commit, 0);
3517
3518 cpu_buffer->head_page->read = 0;
3519
3520 cpu_buffer->tail_page = cpu_buffer->head_page;
3521 cpu_buffer->commit_page = cpu_buffer->head_page;
3522
3523 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
3524 local_set(&cpu_buffer->reader_page->write, 0);
3525 local_set(&cpu_buffer->reader_page->entries, 0);
3526 local_set(&cpu_buffer->reader_page->page->commit, 0);
3527 cpu_buffer->reader_page->read = 0;
3528
3529 local_set(&cpu_buffer->commit_overrun, 0);
3530 local_set(&cpu_buffer->overrun, 0);
3531 local_set(&cpu_buffer->entries, 0);
3532 local_set(&cpu_buffer->committing, 0);
3533 local_set(&cpu_buffer->commits, 0);
3534 cpu_buffer->read = 0;
3535
3536 cpu_buffer->write_stamp = 0;
3537 cpu_buffer->read_stamp = 0;
3538
3539 cpu_buffer->lost_events = 0;
3540 cpu_buffer->last_overrun = 0;
3541
3542 rb_head_page_activate(cpu_buffer);
3543}
3544
3545/**
3546 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
3547 * @buffer: The ring buffer to reset a per cpu buffer of
3548 * @cpu: The CPU buffer to be reset
3549 */
3550void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
3551{
3552 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3553 unsigned long flags;
3554
3555 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3556 return;
3557
3558 atomic_inc(&cpu_buffer->record_disabled);
3559
3560 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3561
3562 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
3563 goto out;
3564
3565 arch_spin_lock(&cpu_buffer->lock);
3566
3567 rb_reset_cpu(cpu_buffer);
3568
3569 arch_spin_unlock(&cpu_buffer->lock);
3570
3571 out:
3572 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3573
3574 atomic_dec(&cpu_buffer->record_disabled);
3575}
3576EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
3577
3578/**
3579 * ring_buffer_reset - reset a ring buffer
3580 * @buffer: The ring buffer to reset all cpu buffers
3581 */
3582void ring_buffer_reset(struct ring_buffer *buffer)
3583{
3584 int cpu;
3585
3586 for_each_buffer_cpu(buffer, cpu)
3587 ring_buffer_reset_cpu(buffer, cpu);
3588}
3589EXPORT_SYMBOL_GPL(ring_buffer_reset);
3590
3591/**
3592 * ring_buffer_empty - is the ring buffer empty?
3593 * @buffer: The ring buffer to test
3594 */
3595int ring_buffer_empty(struct ring_buffer *buffer)
3596{
3597 struct ring_buffer_per_cpu *cpu_buffer;
3598 unsigned long flags;
3599 int dolock;
3600 int cpu;
3601 int ret;
3602
3603 dolock = rb_ok_to_lock();
3604
3605 /* yes this is racy, but if you don't like the race, lock the buffer */
3606 for_each_buffer_cpu(buffer, cpu) {
3607 cpu_buffer = buffer->buffers[cpu];
3608 local_irq_save(flags);
3609 if (dolock)
3610 spin_lock(&cpu_buffer->reader_lock);
3611 ret = rb_per_cpu_empty(cpu_buffer);
3612 if (dolock)
3613 spin_unlock(&cpu_buffer->reader_lock);
3614 local_irq_restore(flags);
3615
3616 if (!ret)
3617 return 0;
3618 }
3619
3620 return 1;
3621}
3622EXPORT_SYMBOL_GPL(ring_buffer_empty);
3623
3624/**
3625 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
3626 * @buffer: The ring buffer
3627 * @cpu: The CPU buffer to test
3628 */
3629int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
3630{
3631 struct ring_buffer_per_cpu *cpu_buffer;
3632 unsigned long flags;
3633 int dolock;
3634 int ret;
3635
3636 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3637 return 1;
3638
3639 dolock = rb_ok_to_lock();
3640
3641 cpu_buffer = buffer->buffers[cpu];
3642 local_irq_save(flags);
3643 if (dolock)
3644 spin_lock(&cpu_buffer->reader_lock);
3645 ret = rb_per_cpu_empty(cpu_buffer);
3646 if (dolock)
3647 spin_unlock(&cpu_buffer->reader_lock);
3648 local_irq_restore(flags);
3649
3650 return ret;
3651}
3652EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
3653
3654#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3655/**
3656 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
3657 * @buffer_a: One buffer to swap with
3658 * @buffer_b: The other buffer to swap with
 * @cpu: the CPU of the per-cpu buffers to swap
3659 *
3660 * This function is useful for tracers that want to take a "snapshot"
3661 * of a CPU buffer and have another backup buffer lying around.
3662 * It is expected that the tracer handles the cpu buffer not being
3663 * used at the moment.
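 *
 * A snapshot sketch; max_buffer is an assumed spare ring buffer owned
 * by the caller and sized like @buffer_a:
 *
 *	if (ring_buffer_swap_cpu(buffer, max_buffer, cpu) == 0)
 *		snapshot_taken = true;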
3664 */
3665int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
3666 struct ring_buffer *buffer_b, int cpu)
3667{
3668 struct ring_buffer_per_cpu *cpu_buffer_a;
3669 struct ring_buffer_per_cpu *cpu_buffer_b;
3670 int ret = -EINVAL;
3671
3672 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
3673 !cpumask_test_cpu(cpu, buffer_b->cpumask))
3674 goto out;
3675
3676 /* At least make sure the two buffers are somewhat the same */
3677 if (buffer_a->pages != buffer_b->pages)
3678 goto out;
3679
3680 ret = -EAGAIN;
3681
3682 if (ring_buffer_flags != RB_BUFFERS_ON)
3683 goto out;
3684
3685 if (atomic_read(&buffer_a->record_disabled))
3686 goto out;
3687
3688 if (atomic_read(&buffer_b->record_disabled))
3689 goto out;
3690
3691 cpu_buffer_a = buffer_a->buffers[cpu];
3692 cpu_buffer_b = buffer_b->buffers[cpu];
3693
3694 if (atomic_read(&cpu_buffer_a->record_disabled))
3695 goto out;
3696
3697 if (atomic_read(&cpu_buffer_b->record_disabled))
3698 goto out;
3699
3700 /*
3701 * We can't do a synchronize_sched here because this
3702 * function can be called in atomic context.
3703 * Normally this will be called from the same CPU as cpu.
3704 * If not it's up to the caller to protect this.
3705 */
3706 atomic_inc(&cpu_buffer_a->record_disabled);
3707 atomic_inc(&cpu_buffer_b->record_disabled);
3708
3709 ret = -EBUSY;
3710 if (local_read(&cpu_buffer_a->committing))
3711 goto out_dec;
3712 if (local_read(&cpu_buffer_b->committing))
3713 goto out_dec;
3714
3715 buffer_a->buffers[cpu] = cpu_buffer_b;
3716 buffer_b->buffers[cpu] = cpu_buffer_a;
3717
3718 cpu_buffer_b->buffer = buffer_a;
3719 cpu_buffer_a->buffer = buffer_b;
3720
3721 ret = 0;
3722
3723out_dec:
3724 atomic_dec(&cpu_buffer_a->record_disabled);
3725 atomic_dec(&cpu_buffer_b->record_disabled);
3726out:
3727 return ret;
3728}
3729EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
3730#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
3731
3732/**
3733 * ring_buffer_alloc_read_page - allocate a page to read from buffer
3734 * @buffer: the buffer to allocate for.
 * @cpu: the cpu buffer to allocate the page for
3735 *
3736 * This function is used in conjunction with ring_buffer_read_page.
3737 * When reading a full page from the ring buffer, these functions
3738 * can be used to speed up the process. The calling function should
3739 * allocate a few pages first with this function. Then when it
3740 * needs to get pages from the ring buffer, it passes the result
3741 * of this function into ring_buffer_read_page, which will swap
3742 * the page that was allocated, with the read page of the buffer.
3743 *
3744 * Returns:
3745 * The page allocated, or NULL on error.
3746 */
3747void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
3748{
3749 struct buffer_data_page *bpage;
3750 struct page *page;
3751
3752 page = alloc_pages_node(cpu_to_node(cpu),
3753 GFP_KERNEL | __GFP_NORETRY, 0);
3754 if (!page)
3755 return NULL;
3756
3757 bpage = page_address(page);
3758
3759 rb_init_page(bpage);
3760
3761 return bpage;
3762}
3763EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
3764
3765/**
3766 * ring_buffer_free_read_page - free an allocated read page
3767 * @buffer: the buffer the page was allocated for
3768 * @data: the page to free
3769 *
3770 * Free a page allocated from ring_buffer_alloc_read_page.
3771 */
3772void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
3773{
3774 free_page((unsigned long)data);
3775}
3776EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
3777
3778/**
3779 * ring_buffer_read_page - extract a page from the ring buffer
3780 * @buffer: buffer to extract from
3781 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
3782 * @len: amount to extract
3783 * @cpu: the cpu of the buffer to extract
3784 * @full: should the extraction only happen when the page is full.
3785 *
3786 * This function will pull out a page from the ring buffer and consume it.
3787 * @data_page must be the address of the variable that was returned
3788 * from ring_buffer_alloc_read_page. This is because the page might be used
3789 * to swap with a page in the ring buffer.
3790 *
3791 * for example:
3792 * rpage = ring_buffer_alloc_read_page(buffer);
3793 * if (!rpage)
3794 * return error;
3795 * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
3796 * if (ret >= 0)
3797 * process_page(rpage, ret);
3798 *
3799 * When @full is set, the function will not succeed unless
3800 * the writer is off the reader page.
3801 *
3802 * Note: it is up to the calling functions to handle sleeps and wakeups.
3803 * The ring buffer can be used anywhere in the kernel and can not
3804 * blindly call wake_up. The layer that uses the ring buffer must be
3805 * responsible for that.
3806 *
3807 * Returns:
3808 * >=0 if data has been transferred, returns the offset of consumed data.
3809 * <0 if no data has been transferred.
3810 */
3811int ring_buffer_read_page(struct ring_buffer *buffer,
3812 void **data_page, size_t len, int cpu, int full)
3813{
3814 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3815 struct ring_buffer_event *event;
3816 struct buffer_data_page *bpage;
3817 struct buffer_page *reader;
3818 unsigned long missed_events;
3819 unsigned long flags;
3820 unsigned int commit;
3821 unsigned int read;
3822 u64 save_timestamp;
3823 int ret = -1;
3824
3825 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3826 goto out;
3827
3828 /*
3829 * If len is not big enough to hold the page header, then
3830 * we can not copy anything.
3831 */
3832 if (len <= BUF_PAGE_HDR_SIZE)
3833 goto out;
3834
3835 len -= BUF_PAGE_HDR_SIZE;
3836
3837 if (!data_page)
3838 goto out;
3839
3840 bpage = *data_page;
3841 if (!bpage)
3842 goto out;
3843
3844 spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3845
3846 reader = rb_get_reader_page(cpu_buffer);
3847 if (!reader)
3848 goto out_unlock;
3849
3850 event = rb_reader_event(cpu_buffer);
3851
3852 read = reader->read;
3853 commit = rb_page_commit(reader);
3854
3855 /* Check if any events were dropped */
3856 missed_events = cpu_buffer->lost_events;
3857
3858 /*
3859 * If this page has been partially read or
3860 * if len is not big enough to read the rest of the page or
3861 * a writer is still on the page, then
3862 * we must copy the data from the page to the buffer.
3863 * Otherwise, we can simply swap the page with the one passed in.
3864 */
3865 if (read || (len < (commit - read)) ||
3866 cpu_buffer->reader_page == cpu_buffer->commit_page) {
3867 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
3868 unsigned int rpos = read;
3869 unsigned int pos = 0;
3870 unsigned int size;
3871
3872 if (full)
3873 goto out_unlock;
3874
3875 if (len > (commit - read))
3876 len = (commit - read);
3877
3878 /* Always keep the time extend and data together */
3879 size = rb_event_ts_length(event);
3880
3881 if (len < size)
3882 goto out_unlock;
3883
3884 /* save the current timestamp, since the user will need it */
3885 save_timestamp = cpu_buffer->read_stamp;
3886
3887 /* Need to copy one event at a time */
3888 do {
3889 /* We need the size of one event, because
3890 * rb_advance_reader only advances by one event,
3891 * whereas rb_event_ts_length may include the size of
3892 * one or two events.
3893 * We have already ensured there's enough space if this
3894 * is a time extend. */
3895 size = rb_event_length(event);
3896 memcpy(bpage->data + pos, rpage->data + rpos, size);
3897
3898 len -= size;
3899
3900 rb_advance_reader(cpu_buffer);
3901 rpos = reader->read;
3902 pos += size;
3903
3904 if (rpos >= commit)
3905 break;
3906
3907 event = rb_reader_event(cpu_buffer);
3908 /* Always keep the time extend and data together */
3909 size = rb_event_ts_length(event);
3910 } while (len >= size);
3911
3912 /* update bpage */
3913 local_set(&bpage->commit, pos);
3914 bpage->time_stamp = save_timestamp;
3915
3916 /* we copied everything to the beginning */
3917 read = 0;
3918 } else {
3919 /* update the entry counter */
3920 cpu_buffer->read += rb_page_entries(reader);
3921
3922 /* swap the pages */
3923 rb_init_page(bpage);
3924 bpage = reader->page;
3925 reader->page = *data_page;
3926 local_set(&reader->write, 0);
3927 local_set(&reader->entries, 0);
3928 reader->read = 0;
3929 *data_page = bpage;
3930
3931 /*
3932 * Use the real_end for the data size,
3933 * This gives us a chance to store the lost events
3934 * on the page.
3935 */
3936 if (reader->real_end)
3937 local_set(&bpage->commit, reader->real_end);
3938 }
3939 ret = read;
3940
3941 cpu_buffer->lost_events = 0;
3942
3943 commit = local_read(&bpage->commit);
3944 /*
3945 * Set a flag in the commit field if we lost events
3946 */
3947 if (missed_events) {
3948 /* If there is room at the end of the page to save the
3949 * count of missed events, then record it there.
3950 */
3951 if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
3952 memcpy(&bpage->data[commit], &missed_events,
3953 sizeof(missed_events));
3954 local_add(RB_MISSED_STORED, &bpage->commit);
3955 commit += sizeof(missed_events);
3956 }
3957 local_add(RB_MISSED_EVENTS, &bpage->commit);
3958 }
3959
3960 /*
3961 * This page may be off to user land. Zero it out here.
3962 */
3963 if (commit < BUF_PAGE_SIZE)
3964 memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
3965
3966 out_unlock:
3967 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3968
3969 out:
3970 return ret;
3971}
3972EXPORT_SYMBOL_GPL(ring_buffer_read_page);
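/*
 * Illustrative sketch (not part of this file's code): how a consumer is
 * expected to drive ring_buffer_read_page().  process_page() is a
 * hypothetical callback; the alloc/free helpers are the ones declared in
 * linux/ring_buffer.h, and their exact argument lists may differ slightly
 * between kernel versions.
 *
 *	void *rpage = ring_buffer_alloc_read_page(buffer, cpu);
 *	int ret;
 *
 *	if (!rpage)
 *		return -ENOMEM;
 *	ret = ring_buffer_read_page(buffer, &rpage, PAGE_SIZE, cpu, 0);
 *	if (ret >= 0)
 *		process_page(rpage, ret);
 *	ring_buffer_free_read_page(buffer, rpage);
 *
 * A return value >= 0 is the offset in the returned page where the valid
 * data starts (0 when a whole page was swapped in); a negative value means
 * nothing was transferred.  Passing full == 0 accepts a partially filled
 * page, as the copy path above shows.
 */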
3973
3974#ifdef CONFIG_TRACING
3975static ssize_t
3976rb_simple_read(struct file *filp, char __user *ubuf,
3977 size_t cnt, loff_t *ppos)
3978{
3979 unsigned long *p = filp->private_data;
3980 char buf[64];
3981 int r;
3982
3983 if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
3984 r = sprintf(buf, "permanently disabled\n");
3985 else
3986 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
3987
3988 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3989}
3990
3991static ssize_t
3992rb_simple_write(struct file *filp, const char __user *ubuf,
3993 size_t cnt, loff_t *ppos)
3994{
3995 unsigned long *p = filp->private_data;
3996 unsigned long val;
3997 int ret;
3998
3999 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4000 if (ret)
4001 return ret;
4002
4003 if (val)
4004 set_bit(RB_BUFFERS_ON_BIT, p);
4005 else
4006 clear_bit(RB_BUFFERS_ON_BIT, p);
4007
4008 (*ppos)++;
4009
4010 return cnt;
4011}
4012
4013static const struct file_operations rb_simple_fops = {
4014 .open = tracing_open_generic,
4015 .read = rb_simple_read,
4016 .write = rb_simple_write,
4017 .llseek = default_llseek,
4018};
4019
4020
4021static __init int rb_init_debugfs(void)
4022{
4023 struct dentry *d_tracer;
4024
4025 d_tracer = tracing_init_dentry();
4026
4027 trace_create_file("tracing_on", 0644, d_tracer,
4028 &ring_buffer_flags, &rb_simple_fops);
4029
4030 return 0;
4031}
4032
4033fs_initcall(rb_init_debugfs);
4034#endif
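/*
 * Usage note (a sketch, not part of this file): the "tracing_on" file
 * created above can be driven from user space.  The path assumes debugfs
 * is mounted at its usual location, /sys/kernel/debug; headers and error
 * handling are omitted.
 *
 *	int fd = open("/sys/kernel/debug/tracing/tracing_on", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "0", 1);	(turn all ring buffers off)
 *		write(fd, "1", 1);	(turn them back on)
 *		close(fd);
 *	}
 *
 * Reading the file yields "0", "1", or "permanently disabled", matching
 * rb_simple_read() above.
 */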
4035
4036#ifdef CONFIG_HOTPLUG_CPU
4037static int rb_cpu_notify(struct notifier_block *self,
4038 unsigned long action, void *hcpu)
4039{
4040 struct ring_buffer *buffer =
4041 container_of(self, struct ring_buffer, cpu_notify);
4042 long cpu = (long)hcpu;
4043
4044 switch (action) {
4045 case CPU_UP_PREPARE:
4046 case CPU_UP_PREPARE_FROZEN:
4047 if (cpumask_test_cpu(cpu, buffer->cpumask))
4048 return NOTIFY_OK;
4049
4050 buffer->buffers[cpu] =
4051 rb_allocate_cpu_buffer(buffer, cpu);
4052 if (!buffer->buffers[cpu]) {
4053 WARN(1, "failed to allocate ring buffer on CPU %ld\n",
4054 cpu);
4055 return NOTIFY_OK;
4056 }
4057 smp_wmb();
4058 cpumask_set_cpu(cpu, buffer->cpumask);
4059 break;
4060 case CPU_DOWN_PREPARE:
4061 case CPU_DOWN_PREPARE_FROZEN:
4062 /*
4063 * Do nothing.
4064 * If we were to free the buffer, then the user would
4065 * lose any trace that was in the buffer.
4066 */
4067 break;
4068 default:
4069 break;
4070 }
4071 return NOTIFY_OK;
4072}
4073#endif
1/*
2 * Generic ring buffer
3 *
4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5 */
6#include <linux/ring_buffer.h>
7#include <linux/trace_clock.h>
8#include <linux/spinlock.h>
9#include <linux/debugfs.h>
10#include <linux/uaccess.h>
11#include <linux/hardirq.h>
12#include <linux/kmemcheck.h>
13#include <linux/module.h>
14#include <linux/percpu.h>
15#include <linux/mutex.h>
16#include <linux/slab.h>
17#include <linux/init.h>
18#include <linux/hash.h>
19#include <linux/list.h>
20#include <linux/cpu.h>
21#include <linux/fs.h>
22
23#include <asm/local.h>
24#include "trace.h"
25
26static void update_pages_handler(struct work_struct *work);
27
28/*
29 * The ring buffer header is special. We must manually up keep it.
30 */
31int ring_buffer_print_entry_header(struct trace_seq *s)
32{
33 int ret;
34
35 ret = trace_seq_printf(s, "# compressed entry header\n");
36 ret = trace_seq_printf(s, "\ttype_len : 5 bits\n");
37 ret = trace_seq_printf(s, "\ttime_delta : 27 bits\n");
38 ret = trace_seq_printf(s, "\tarray : 32 bits\n");
39 ret = trace_seq_printf(s, "\n");
40 ret = trace_seq_printf(s, "\tpadding : type == %d\n",
41 RINGBUF_TYPE_PADDING);
42 ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
43 RINGBUF_TYPE_TIME_EXTEND);
44 ret = trace_seq_printf(s, "\tdata max type_len == %d\n",
45 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
46
47 return ret;
48}
49
50/*
51 * The ring buffer is made up of a list of pages. A separate list of pages is
52 * allocated for each CPU. A writer may only write to a buffer that is
53 * associated with the CPU it is currently executing on. A reader may read
54 * from any per cpu buffer.
55 *
56 * The reader is special. For each per cpu buffer, the reader has its own
57 * reader page. When a reader has read the entire reader page, this reader
58 * page is swapped with another page in the ring buffer.
59 *
60 * Now, as long as the writer is off the reader page, the reader can do what
61 * ever it wants with that page. The writer will never write to that page
62 * again (as long as it is out of the ring buffer).
63 *
64 * Here's some silly ASCII art.
65 *
66 * +------+
67 * |reader| RING BUFFER
68 * |page |
69 * +------+ +---+ +---+ +---+
70 * | |-->| |-->| |
71 * +---+ +---+ +---+
72 * ^ |
73 * | |
74 * +---------------+
75 *
76 *
77 * +------+
78 * |reader| RING BUFFER
79 * |page |------------------v
80 * +------+ +---+ +---+ +---+
81 * | |-->| |-->| |
82 * +---+ +---+ +---+
83 * ^ |
84 * | |
85 * +---------------+
86 *
87 *
88 * +------+
89 * |reader| RING BUFFER
90 * |page |------------------v
91 * +------+ +---+ +---+ +---+
92 * ^ | |-->| |-->| |
93 * | +---+ +---+ +---+
94 * | |
95 * | |
96 * +------------------------------+
97 *
98 *
99 * +------+
100 * |buffer| RING BUFFER
101 * |page |------------------v
102 * +------+ +---+ +---+ +---+
103 * ^ | | | |-->| |
104 * | New +---+ +---+ +---+
105 * | Reader------^ |
106 * | page |
107 * +------------------------------+
108 *
109 *
110 * After we make this swap, the reader can hand this page off to the splice
111 * code and be done with it. It can even allocate a new page if it needs to
112 * and swap that into the ring buffer.
113 *
114 * We will be using cmpxchg soon to make all this lockless.
115 *
116 */
117
118/*
119 * A fast way to enable or disable all ring buffers is to
120 * call tracing_on or tracing_off. Turning off the ring buffers
121 * prevents all ring buffers from being recorded to.
122 * Turning this switch on, makes it OK to write to the
123 * ring buffer, if the ring buffer is enabled itself.
124 *
125 * There's three layers that must be on in order to write
126 * to the ring buffer.
127 *
128 * 1) This global flag must be set.
129 * 2) The ring buffer must be enabled for recording.
130 * 3) The per cpu buffer must be enabled for recording.
131 *
132 * In case of an anomaly, this global flag has a bit set that
133 * will permanently disable all ring buffers.
134 */
135
136/*
137 * Global flag to disable all recording to ring buffers
138 * This has two bits: ON, DISABLED
139 *
140 * ON DISABLED
141 * ---- ----------
142 * 0 0 : ring buffers are off
143 * 1 0 : ring buffers are on
144 * X 1 : ring buffers are permanently disabled
145 */
146
147enum {
148 RB_BUFFERS_ON_BIT = 0,
149 RB_BUFFERS_DISABLED_BIT = 1,
150};
151
152enum {
153 RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT,
154 RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
155};
156
157static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
158
159/* Used for individual buffers (after the counter) */
160#define RB_BUFFER_OFF (1 << 20)
161
162#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
163
164/**
165 * tracing_off_permanent - permanently disable ring buffers
166 *
167 * This function, once called, will disable all ring buffers
168 * permanently.
169 */
170void tracing_off_permanent(void)
171{
172 set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
173}
174
175#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
176#define RB_ALIGNMENT 4U
177#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
178#define RB_EVNT_MIN_SIZE 8U /* two 32bit words */
179
180#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
181# define RB_FORCE_8BYTE_ALIGNMENT 0
182# define RB_ARCH_ALIGNMENT RB_ALIGNMENT
183#else
184# define RB_FORCE_8BYTE_ALIGNMENT 1
185# define RB_ARCH_ALIGNMENT 8U
186#endif
187
188/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
189#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
190
191enum {
192 RB_LEN_TIME_EXTEND = 8,
193 RB_LEN_TIME_STAMP = 16,
194};
195
196#define skip_time_extend(event) \
197 ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
198
199static inline int rb_null_event(struct ring_buffer_event *event)
200{
201 return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
202}
203
204static void rb_event_set_padding(struct ring_buffer_event *event)
205{
206 /* padding has a NULL time_delta */
207 event->type_len = RINGBUF_TYPE_PADDING;
208 event->time_delta = 0;
209}
210
211static unsigned
212rb_event_data_length(struct ring_buffer_event *event)
213{
214 unsigned length;
215
216 if (event->type_len)
217 length = event->type_len * RB_ALIGNMENT;
218 else
219 length = event->array[0];
220 return length + RB_EVNT_HDR_SIZE;
221}
222
223/*
224 * Return the length of the given event. Will return
225 * the length of the time extend if the event is a
226 * time extend.
227 */
228static inline unsigned
229rb_event_length(struct ring_buffer_event *event)
230{
231 switch (event->type_len) {
232 case RINGBUF_TYPE_PADDING:
233 if (rb_null_event(event))
234 /* undefined */
235 return -1;
236 return event->array[0] + RB_EVNT_HDR_SIZE;
237
238 case RINGBUF_TYPE_TIME_EXTEND:
239 return RB_LEN_TIME_EXTEND;
240
241 case RINGBUF_TYPE_TIME_STAMP:
242 return RB_LEN_TIME_STAMP;
243
244 case RINGBUF_TYPE_DATA:
245 return rb_event_data_length(event);
246 default:
247 BUG();
248 }
249 /* not hit */
250 return 0;
251}
252
253/*
254 * Return total length of time extend and data,
255 * or just the event length for all other events.
256 */
257static inline unsigned
258rb_event_ts_length(struct ring_buffer_event *event)
259{
260 unsigned len = 0;
261
262 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
263 /* time extends include the data event after it */
264 len = RB_LEN_TIME_EXTEND;
265 event = skip_time_extend(event);
266 }
267 return len + rb_event_length(event);
268}
269
270/**
271 * ring_buffer_event_length - return the length of the event
272 * @event: the event to get the length of
273 *
274 * Returns the size of the data payload of a data event.
275 * If the event is something other than a data event, it
276 * returns the size of the event itself. The exception is a
277 * TIME EXTEND event, for which it still returns the size of the
278 * data payload of the data event that follows it.
279 */
280unsigned ring_buffer_event_length(struct ring_buffer_event *event)
281{
282 unsigned length;
283
284 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
285 event = skip_time_extend(event);
286
287 length = rb_event_length(event);
288 if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
289 return length;
290 length -= RB_EVNT_HDR_SIZE;
291 if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
292 length -= sizeof(event->array[0]);
293 return length;
294}
295EXPORT_SYMBOL_GPL(ring_buffer_event_length);
296
297/* inline for ring buffer fast paths */
298static void *
299rb_event_data(struct ring_buffer_event *event)
300{
301 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
302 event = skip_time_extend(event);
303 BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
304 /* If length is in len field, then array[0] has the data */
305 if (event->type_len)
306 return (void *)&event->array[0];
307 /* Otherwise length is in array[0] and array[1] has the data */
308 return (void *)&event->array[1];
309}
310
311/**
312 * ring_buffer_event_data - return the data of the event
313 * @event: the event to get the data from
314 */
315void *ring_buffer_event_data(struct ring_buffer_event *event)
316{
317 return rb_event_data(event);
318}
319EXPORT_SYMBOL_GPL(ring_buffer_event_data);
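/*
 * Illustrative sketch (not used by this file): the usual producer sequence
 * built on the helper above.  ring_buffer_lock_reserve() and
 * ring_buffer_unlock_commit() are declared in linux/ring_buffer.h; struct
 * my_entry is a hypothetical payload type.
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return -EBUSY;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;
 *	ring_buffer_unlock_commit(buffer, event);
 *
 * ring_buffer_event_length() above reports the payload size a consumer
 * will see for such an event, regardless of how the length was encoded in
 * type_len or array[0].
 */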
320
321#define for_each_buffer_cpu(buffer, cpu) \
322 for_each_cpu(cpu, buffer->cpumask)
323
324#define TS_SHIFT 27
325#define TS_MASK ((1ULL << TS_SHIFT) - 1)
326#define TS_DELTA_TEST (~TS_MASK)
327
328/* Flag when events were overwritten */
329#define RB_MISSED_EVENTS (1 << 31)
330/* Missed count stored at end */
331#define RB_MISSED_STORED (1 << 30)
332
333struct buffer_data_page {
334 u64 time_stamp; /* page time stamp */
335 local_t commit; /* write committed index */
336 unsigned char data[]; /* data of buffer page */
337};
338
339/*
340 * Note, the buffer_page list must be first. The buffer pages
341 * are allocated aligned to cache lines, which means that each buffer
342 * page starts at the beginning of a cache line, and thus the
343 * least significant bits of its address will be zero. We use this to
344 * add flags in the list struct pointers, to make the ring buffer
345 * lockless.
346 */
347struct buffer_page {
348 struct list_head list; /* list of buffer pages */
349 local_t write; /* index for next write */
350 unsigned read; /* index for next read */
351 local_t entries; /* entries on this page */
352 unsigned long real_end; /* real end of data */
353 struct buffer_data_page *page; /* Actual data page */
354};
355
356/*
357 * The buffer page counters, write and entries, must be reset
358 * atomically when crossing page boundaries. To synchronize this
359 * update, two counters are packed into a single word. One is
360 * the actual counter for the write position or count on the page.
361 *
362 * The other is a count of updaters. Before an update happens,
363 * the updater portion of the word is incremented. This allows
364 * the updater to update the real counter atomically.
365 *
366 * The low 20 bits hold the real counter; the remaining bits hold the updater count.
367 */
368#define RB_WRITE_MASK 0xfffff
369#define RB_WRITE_INTCNT (1 << 20)
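/*
 * Worked example of the packed counter layout described above (a sketch,
 * not code used by the ring buffer): with RB_WRITE_MASK == 0xfffff, the
 * low 20 bits of bpage->write hold the byte index of the next write and
 * the bits above them count writers that interrupted a page transition.
 *
 *	unsigned long w = local_read(&bpage->write);
 *	unsigned long index = w & RB_WRITE_MASK;	(write offset on the page)
 *	unsigned long updaters = w >> 20;		(nested updater count)
 *
 * rb_tail_page_update() below bumps the updater part with
 * local_add_return(RB_WRITE_INTCNT, ...) so that a later local_cmpxchg()
 * only succeeds if no interrupt slipped in between.
 */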
370
371static void rb_init_page(struct buffer_data_page *bpage)
372{
373 local_set(&bpage->commit, 0);
374}
375
376/**
377 * ring_buffer_page_len - the size of data on the page.
378 * @page: The page to read
379 *
380 * Returns the amount of data on the page, including buffer page header.
381 */
382size_t ring_buffer_page_len(void *page)
383{
384 return local_read(&((struct buffer_data_page *)page)->commit)
385 + BUF_PAGE_HDR_SIZE;
386}
387
388/*
389 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
390 * this issue out.
391 */
392static void free_buffer_page(struct buffer_page *bpage)
393{
394 free_page((unsigned long)bpage->page);
395 kfree(bpage);
396}
397
398/*
399 * We need to fit the time_stamp delta into 27 bits.
400 */
401static inline int test_time_stamp(u64 delta)
402{
403 if (delta & TS_DELTA_TEST)
404 return 1;
405 return 0;
406}
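/*
 * Worked example for the 27-bit check above (illustrative only): a delta
 * of 0x12345678 (about 305 ms with a nanosecond clock) has bits set above
 * TS_MASK, so test_time_stamp() returns 1 and the event is preceded by a
 * time extend that carries the delta split in two:
 *
 *	event->time_delta = delta & TS_MASK;	(low 27 bits)
 *	event->array[0]   = delta >> TS_SHIFT;	(remaining high bits)
 *
 * (see rb_add_time_stamp() further down).  Deltas below 2^27, roughly
 * 134 ms at nanosecond resolution, fit directly in the event header.
 */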
407
408#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
409
410/* Max payload is BUF_PAGE_SIZE - header (8 bytes) */
411#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
412
413int ring_buffer_print_page_header(struct trace_seq *s)
414{
415 struct buffer_data_page field;
416 int ret;
417
418 ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
419 "offset:0;\tsize:%u;\tsigned:%u;\n",
420 (unsigned int)sizeof(field.time_stamp),
421 (unsigned int)is_signed_type(u64));
422
423 ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
424 "offset:%u;\tsize:%u;\tsigned:%u;\n",
425 (unsigned int)offsetof(typeof(field), commit),
426 (unsigned int)sizeof(field.commit),
427 (unsigned int)is_signed_type(long));
428
429 ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
430 "offset:%u;\tsize:%u;\tsigned:%u;\n",
431 (unsigned int)offsetof(typeof(field), commit),
432 1,
433 (unsigned int)is_signed_type(long));
434
435 ret = trace_seq_printf(s, "\tfield: char data;\t"
436 "offset:%u;\tsize:%u;\tsigned:%u;\n",
437 (unsigned int)offsetof(typeof(field), data),
438 (unsigned int)BUF_PAGE_SIZE,
439 (unsigned int)is_signed_type(char));
440
441 return ret;
442}
443
444/*
445 * If head_page == tail_page && head == tail, then the buffer is empty.
446 */
447struct ring_buffer_per_cpu {
448 int cpu;
449 atomic_t record_disabled;
450 struct ring_buffer *buffer;
451 raw_spinlock_t reader_lock; /* serialize readers */
452 arch_spinlock_t lock;
453 struct lock_class_key lock_key;
454 unsigned int nr_pages;
455 struct list_head *pages;
456 struct buffer_page *head_page; /* read from head */
457 struct buffer_page *tail_page; /* write to tail */
458 struct buffer_page *commit_page; /* committed pages */
459 struct buffer_page *reader_page;
460 unsigned long lost_events;
461 unsigned long last_overrun;
462 local_t entries_bytes;
463 local_t commit_overrun;
464 local_t overrun;
465 local_t entries;
466 local_t committing;
467 local_t commits;
468 unsigned long read;
469 unsigned long read_bytes;
470 u64 write_stamp;
471 u64 read_stamp;
472 /* ring buffer pages to update, > 0 to add, < 0 to remove */
473 int nr_pages_to_update;
474 struct list_head new_pages; /* new pages to add */
475 struct work_struct update_pages_work;
476 struct completion update_done;
477};
478
479struct ring_buffer {
480 unsigned flags;
481 int cpus;
482 atomic_t record_disabled;
483 atomic_t resize_disabled;
484 cpumask_var_t cpumask;
485
486 struct lock_class_key *reader_lock_key;
487
488 struct mutex mutex;
489
490 struct ring_buffer_per_cpu **buffers;
491
492#ifdef CONFIG_HOTPLUG_CPU
493 struct notifier_block cpu_notify;
494#endif
495 u64 (*clock)(void);
496};
497
498struct ring_buffer_iter {
499 struct ring_buffer_per_cpu *cpu_buffer;
500 unsigned long head;
501 struct buffer_page *head_page;
502 struct buffer_page *cache_reader_page;
503 unsigned long cache_read;
504 u64 read_stamp;
505};
506
507/* buffer may be either ring_buffer or ring_buffer_per_cpu */
508#define RB_WARN_ON(b, cond) \
509 ({ \
510 int _____ret = unlikely(cond); \
511 if (_____ret) { \
512 if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
513 struct ring_buffer_per_cpu *__b = \
514 (void *)b; \
515 atomic_inc(&__b->buffer->record_disabled); \
516 } else \
517 atomic_inc(&b->record_disabled); \
518 WARN_ON(1); \
519 } \
520 _____ret; \
521 })
522
523/* Up this if you want to test the TIME_EXTENTS and normalization */
524#define DEBUG_SHIFT 0
525
526static inline u64 rb_time_stamp(struct ring_buffer *buffer)
527{
528 /* shift to debug/test normalization and TIME_EXTENTS */
529 return buffer->clock() << DEBUG_SHIFT;
530}
531
532u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
533{
534 u64 time;
535
536 preempt_disable_notrace();
537 time = rb_time_stamp(buffer);
538 preempt_enable_no_resched_notrace();
539
540 return time;
541}
542EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
543
544void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
545 int cpu, u64 *ts)
546{
547 /* Just stupid testing the normalize function and deltas */
548 *ts >>= DEBUG_SHIFT;
549}
550EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
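/*
 * Illustrative sketch (not used here): readers pair the two exported
 * helpers above, taking a raw buffer timestamp and normalizing it before
 * reporting it:
 *
 *	u64 ts = ring_buffer_time_stamp(buffer, cpu);
 *	ring_buffer_normalize_time_stamp(buffer, cpu, &ts);
 *
 * With DEBUG_SHIFT at 0 both calls are effectively pass-throughs of
 * buffer->clock(); raising DEBUG_SHIFT exercises the TIME_EXTEND and
 * normalization paths.
 */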
551
552/*
553 * Making the ring buffer lockless makes things tricky.
554 * Writes only happen on the CPU that they are made on, so
555 * writers only need to worry about interrupts. Reads can
556 * happen on any CPU.
557 *
558 * The reader page is always off the ring buffer, but when the
559 * reader finishes with a page, it needs to swap its page with
560 * a new one from the buffer. The reader needs to take from
561 * the head (writes go to the tail). But if a writer is in overwrite
562 * mode and wraps, it must push the head page forward.
563 *
564 * Here lies the problem.
565 *
566 * The reader must be careful to replace only the head page, and
567 * not another one. As described at the top of the file in the
568 * ASCII art, the reader sets its old page to point to the next
569 * page after head. It then sets the page after head to point to
570 * the old reader page. But if the writer moves the head page
571 * during this operation, the reader could end up with the tail.
572 *
573 * We use cmpxchg to help prevent this race. We also do something
574 * special with the page before head. We set the LSB to 1.
575 *
576 * When the writer must push the page forward, it will clear the
577 * bit that points to the head page, move the head, and then set
578 * the bit that points to the new head page.
579 *
580 * We also don't want an interrupt coming in and moving the head
581 * page on another writer. Thus we use the second LSB to catch
582 * that too. Thus:
583 *
584 * head->list->prev->next bit 1 bit 0
585 * ------- -------
586 * Normal page 0 0
587 * Points to head page 0 1
588 * New head page 1 0
589 *
590 * Note we can not trust the prev pointer of the head page, because:
591 *
592 * +----+ +-----+ +-----+
593 * | |------>| T |---X--->| N |
594 * | |<------| | | |
595 * +----+ +-----+ +-----+
596 * ^ ^ |
597 * | +-----+ | |
598 * +----------| R |----------+ |
599 * | |<-----------+
600 * +-----+
601 *
602 * Key: ---X--> HEAD flag set in pointer
603 * T Tail page
604 * R Reader page
605 * N Next page
606 *
607 * (see __rb_reserve_next() to see where this happens)
608 *
609 * What the above shows is that the reader just swapped out
610 * the reader page with a page in the buffer, but before it
611 * could make the new header point back to the new page added
612 * it was preempted by a writer. The writer moved forward onto
613 * the new page added by the reader and is about to move forward
614 * again.
615 *
616 * As you can see, it is legitimate for the previous pointer of
617 * the head (or any page) not to point back to itself, but only
618 * temporarily.
619 */
620
621#define RB_PAGE_NORMAL 0UL
622#define RB_PAGE_HEAD 1UL
623#define RB_PAGE_UPDATE 2UL
624
625
626#define RB_FLAG_MASK 3UL
627
628/* PAGE_MOVED is not part of the mask */
629#define RB_PAGE_MOVED 4UL
630
631/*
632 * rb_list_head - strip the flag bits from a list pointer
633 */
634static struct list_head *rb_list_head(struct list_head *list)
635{
636 unsigned long val = (unsigned long)list;
637
638 return (struct list_head *)(val & ~RB_FLAG_MASK);
639}
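/*
 * Illustrative sketch of the pointer tagging used throughout this file:
 * buffer pages are cache-line aligned, so the two low bits of a ->next
 * pointer are free to carry RB_PAGE_HEAD or RB_PAGE_UPDATE, e.g.:
 *
 *	unsigned long val = (unsigned long)&head->list;
 *
 *	prev->list.next = (struct list_head *)(val | RB_PAGE_HEAD);
 *	flags = (unsigned long)prev->list.next & RB_FLAG_MASK;
 *	page = rb_list_head(prev->list.next);	(tag stripped)
 *
 * rb_set_list_to_head() below does the tagging for real; any code walking
 * the list must strip the tag with rb_list_head() before dereferencing.
 */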
640
641/*
642 * rb_is_head_page - test if the given page is the head page
643 *
644 * Because the reader may move the head_page pointer, we can
645 * not trust what the head page is (it may be pointing to
646 * the reader page). But if the next page is a header page,
647 * its flags will be non zero.
648 */
649static inline int
650rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
651 struct buffer_page *page, struct list_head *list)
652{
653 unsigned long val;
654
655 val = (unsigned long)list->next;
656
657 if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
658 return RB_PAGE_MOVED;
659
660 return val & RB_FLAG_MASK;
661}
662
663/*
664 * rb_is_reader_page
665 *
666 * The unique thing about the reader page is that, if the
667 * writer is ever on it, the previous pointer never points
668 * back to the reader page.
669 */
670static int rb_is_reader_page(struct buffer_page *page)
671{
672 struct list_head *list = page->list.prev;
673
674 return rb_list_head(list->next) != &page->list;
675}
676
677/*
678 * rb_set_list_to_head - set a list_head to be pointing to head.
679 */
680static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
681 struct list_head *list)
682{
683 unsigned long *ptr;
684
685 ptr = (unsigned long *)&list->next;
686 *ptr |= RB_PAGE_HEAD;
687 *ptr &= ~RB_PAGE_UPDATE;
688}
689
690/*
691 * rb_head_page_activate - sets up head page
692 */
693static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
694{
695 struct buffer_page *head;
696
697 head = cpu_buffer->head_page;
698 if (!head)
699 return;
700
701 /*
702 * Set the previous list pointer to have the HEAD flag.
703 */
704 rb_set_list_to_head(cpu_buffer, head->list.prev);
705}
706
707static void rb_list_head_clear(struct list_head *list)
708{
709 unsigned long *ptr = (unsigned long *)&list->next;
710
711 *ptr &= ~RB_FLAG_MASK;
712}
713
714/*
715 * rb_head_page_deactivate - clears head page ptr (for free list)
716 */
717static void
718rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
719{
720 struct list_head *hd;
721
722 /* Go through the whole list and clear any pointers found. */
723 rb_list_head_clear(cpu_buffer->pages);
724
725 list_for_each(hd, cpu_buffer->pages)
726 rb_list_head_clear(hd);
727}
728
729static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
730 struct buffer_page *head,
731 struct buffer_page *prev,
732 int old_flag, int new_flag)
733{
734 struct list_head *list;
735 unsigned long val = (unsigned long)&head->list;
736 unsigned long ret;
737
738 list = &prev->list;
739
740 val &= ~RB_FLAG_MASK;
741
742 ret = cmpxchg((unsigned long *)&list->next,
743 val | old_flag, val | new_flag);
744
745 /* check if the reader took the page */
746 if ((ret & ~RB_FLAG_MASK) != val)
747 return RB_PAGE_MOVED;
748
749 return ret & RB_FLAG_MASK;
750}
751
752static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
753 struct buffer_page *head,
754 struct buffer_page *prev,
755 int old_flag)
756{
757 return rb_head_page_set(cpu_buffer, head, prev,
758 old_flag, RB_PAGE_UPDATE);
759}
760
761static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
762 struct buffer_page *head,
763 struct buffer_page *prev,
764 int old_flag)
765{
766 return rb_head_page_set(cpu_buffer, head, prev,
767 old_flag, RB_PAGE_HEAD);
768}
769
770static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
771 struct buffer_page *head,
772 struct buffer_page *prev,
773 int old_flag)
774{
775 return rb_head_page_set(cpu_buffer, head, prev,
776 old_flag, RB_PAGE_NORMAL);
777}
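/*
 * Illustrative sequence (a sketch of how the three wrappers above are used
 * when the writer runs into the head page; the real code is in
 * rb_handle_head_page() further down):
 *
 *	1. rb_head_page_set_update(cpu_buffer, next, tail, RB_PAGE_HEAD)
 *	   claims the head page by turning its HEAD tag into UPDATE;
 *	2. rb_head_page_set_head(cpu_buffer, new_head, next, RB_PAGE_NORMAL)
 *	   tags the following page as the new head;
 *	3. rb_head_page_set_normal(cpu_buffer, next, tail, RB_PAGE_UPDATE)
 *	   drops the UPDATE tag from the old head page.
 *
 * Each step is a cmpxchg on a tagged ->next pointer, so if a reader or a
 * nested writer got there first, the step simply reports the flag value it
 * found instead of the one that was expected.
 */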
778
779static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
780 struct buffer_page **bpage)
781{
782 struct list_head *p = rb_list_head((*bpage)->list.next);
783
784 *bpage = list_entry(p, struct buffer_page, list);
785}
786
787static struct buffer_page *
788rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
789{
790 struct buffer_page *head;
791 struct buffer_page *page;
792 struct list_head *list;
793 int i;
794
795 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
796 return NULL;
797
798 /* sanity check */
799 list = cpu_buffer->pages;
800 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
801 return NULL;
802
803 page = head = cpu_buffer->head_page;
804 /*
805 * It is possible that the writer moves the head page behind
806 * where we started, and we miss it in one loop.
807 * A second loop should grab the head page, but we'll do
808 * three loops just because I'm paranoid.
809 */
810 for (i = 0; i < 3; i++) {
811 do {
812 if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
813 cpu_buffer->head_page = page;
814 return page;
815 }
816 rb_inc_page(cpu_buffer, &page);
817 } while (page != head);
818 }
819
820 RB_WARN_ON(cpu_buffer, 1);
821
822 return NULL;
823}
824
825static int rb_head_page_replace(struct buffer_page *old,
826 struct buffer_page *new)
827{
828 unsigned long *ptr = (unsigned long *)&old->list.prev->next;
829 unsigned long val;
830 unsigned long ret;
831
832 val = *ptr & ~RB_FLAG_MASK;
833 val |= RB_PAGE_HEAD;
834
835 ret = cmpxchg(ptr, val, (unsigned long)&new->list);
836
837 return ret == val;
838}
839
840/*
841 * rb_tail_page_update - move the tail page forward
842 *
843 * Returns 1 if it moved the tail page, 0 if someone else already did.
844 */
845static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
846 struct buffer_page *tail_page,
847 struct buffer_page *next_page)
848{
849 struct buffer_page *old_tail;
850 unsigned long old_entries;
851 unsigned long old_write;
852 int ret = 0;
853
854 /*
855 * The tail page now needs to be moved forward.
856 *
857 * We need to reset the tail page, but without messing
858 * with possible erasing of data brought in by interrupts
859 * that have moved the tail page and are currently on it.
860 *
861 * We add a counter to the write field to denote this.
862 */
863 old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
864 old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
865
866 /*
867 * Just make sure we have seen our old_write and synchronize
868 * with any interrupts that come in.
869 */
870 barrier();
871
872 /*
873 * If the tail page is still the same as what we think
874 * it is, then it is up to us to update the tail
875 * pointer.
876 */
877 if (tail_page == cpu_buffer->tail_page) {
878 /* Zero the write counter */
879 unsigned long val = old_write & ~RB_WRITE_MASK;
880 unsigned long eval = old_entries & ~RB_WRITE_MASK;
881
882 /*
883 * This will only succeed if an interrupt did
884 * not come in and change it. In which case, we
885 * do not want to modify it.
886 *
887 * We add (void) to let the compiler know that we do not care
888 * about the return value of these functions. We use the
889 * cmpxchg to only update if an interrupt did not already
890 * do it for us. If the cmpxchg fails, we don't care.
891 */
892 (void)local_cmpxchg(&next_page->write, old_write, val);
893 (void)local_cmpxchg(&next_page->entries, old_entries, eval);
894
895 /*
896 * No need to worry about races with clearing out the commit:
897 * it can only increment when a commit takes place. But that
898 * only happens in the outermost nested commit.
899 */
900 local_set(&next_page->page->commit, 0);
901
902 old_tail = cmpxchg(&cpu_buffer->tail_page,
903 tail_page, next_page);
904
905 if (old_tail == tail_page)
906 ret = 1;
907 }
908
909 return ret;
910}
911
912static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
913 struct buffer_page *bpage)
914{
915 unsigned long val = (unsigned long)bpage;
916
917 if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
918 return 1;
919
920 return 0;
921}
922
923/**
924 * rb_check_list - make sure a pointer to a list has the last bits zero
925 */
926static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
927 struct list_head *list)
928{
929 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
930 return 1;
931 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
932 return 1;
933 return 0;
934}
935
936/**
937 * rb_check_pages - integrity check of buffer pages
938 * @cpu_buffer: CPU buffer with pages to test
939 *
940 * As a safety measure we check to make sure the data pages have not
941 * been corrupted.
942 */
943static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
944{
945 struct list_head *head = cpu_buffer->pages;
946 struct buffer_page *bpage, *tmp;
947
948 /* Reset the head page if it exists */
949 if (cpu_buffer->head_page)
950 rb_set_head_page(cpu_buffer);
951
952 rb_head_page_deactivate(cpu_buffer);
953
954 if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
955 return -1;
956 if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
957 return -1;
958
959 if (rb_check_list(cpu_buffer, head))
960 return -1;
961
962 list_for_each_entry_safe(bpage, tmp, head, list) {
963 if (RB_WARN_ON(cpu_buffer,
964 bpage->list.next->prev != &bpage->list))
965 return -1;
966 if (RB_WARN_ON(cpu_buffer,
967 bpage->list.prev->next != &bpage->list))
968 return -1;
969 if (rb_check_list(cpu_buffer, &bpage->list))
970 return -1;
971 }
972
973 rb_head_page_activate(cpu_buffer);
974
975 return 0;
976}
977
978static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
979{
980 int i;
981 struct buffer_page *bpage, *tmp;
982
983 for (i = 0; i < nr_pages; i++) {
984 struct page *page;
985 /*
986 * The __GFP_NORETRY flag makes sure that the allocation fails
987 * gracefully without invoking the OOM killer, so the system
988 * is not destabilized.
989 */
990 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
991 GFP_KERNEL | __GFP_NORETRY,
992 cpu_to_node(cpu));
993 if (!bpage)
994 goto free_pages;
995
996 list_add(&bpage->list, pages);
997
998 page = alloc_pages_node(cpu_to_node(cpu),
999 GFP_KERNEL | __GFP_NORETRY, 0);
1000 if (!page)
1001 goto free_pages;
1002 bpage->page = page_address(page);
1003 rb_init_page(bpage->page);
1004 }
1005
1006 return 0;
1007
1008free_pages:
1009 list_for_each_entry_safe(bpage, tmp, pages, list) {
1010 list_del_init(&bpage->list);
1011 free_buffer_page(bpage);
1012 }
1013
1014 return -ENOMEM;
1015}
1016
1017static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1018 unsigned nr_pages)
1019{
1020 LIST_HEAD(pages);
1021
1022 WARN_ON(!nr_pages);
1023
1024 if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
1025 return -ENOMEM;
1026
1027 /*
1028 * The ring buffer page list is a circular list that does not
1029 * start and end with a list head. All page list items point to
1030 * other pages.
1031 */
1032 cpu_buffer->pages = pages.next;
1033 list_del(&pages);
1034
1035 cpu_buffer->nr_pages = nr_pages;
1036
1037 rb_check_pages(cpu_buffer);
1038
1039 return 0;
1040}
1041
1042static struct ring_buffer_per_cpu *
1043rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
1044{
1045 struct ring_buffer_per_cpu *cpu_buffer;
1046 struct buffer_page *bpage;
1047 struct page *page;
1048 int ret;
1049
1050 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1051 GFP_KERNEL, cpu_to_node(cpu));
1052 if (!cpu_buffer)
1053 return NULL;
1054
1055 cpu_buffer->cpu = cpu;
1056 cpu_buffer->buffer = buffer;
1057 raw_spin_lock_init(&cpu_buffer->reader_lock);
1058 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1059 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1060 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
1061 init_completion(&cpu_buffer->update_done);
1062
1063 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1064 GFP_KERNEL, cpu_to_node(cpu));
1065 if (!bpage)
1066 goto fail_free_buffer;
1067
1068 rb_check_bpage(cpu_buffer, bpage);
1069
1070 cpu_buffer->reader_page = bpage;
1071 page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
1072 if (!page)
1073 goto fail_free_reader;
1074 bpage->page = page_address(page);
1075 rb_init_page(bpage->page);
1076
1077 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1078 INIT_LIST_HEAD(&cpu_buffer->new_pages);
1079
1080 ret = rb_allocate_pages(cpu_buffer, nr_pages);
1081 if (ret < 0)
1082 goto fail_free_reader;
1083
1084 cpu_buffer->head_page
1085 = list_entry(cpu_buffer->pages, struct buffer_page, list);
1086 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1087
1088 rb_head_page_activate(cpu_buffer);
1089
1090 return cpu_buffer;
1091
1092 fail_free_reader:
1093 free_buffer_page(cpu_buffer->reader_page);
1094
1095 fail_free_buffer:
1096 kfree(cpu_buffer);
1097 return NULL;
1098}
1099
1100static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1101{
1102 struct list_head *head = cpu_buffer->pages;
1103 struct buffer_page *bpage, *tmp;
1104
1105 free_buffer_page(cpu_buffer->reader_page);
1106
1107 rb_head_page_deactivate(cpu_buffer);
1108
1109 if (head) {
1110 list_for_each_entry_safe(bpage, tmp, head, list) {
1111 list_del_init(&bpage->list);
1112 free_buffer_page(bpage);
1113 }
1114 bpage = list_entry(head, struct buffer_page, list);
1115 free_buffer_page(bpage);
1116 }
1117
1118 kfree(cpu_buffer);
1119}
1120
1121#ifdef CONFIG_HOTPLUG_CPU
1122static int rb_cpu_notify(struct notifier_block *self,
1123 unsigned long action, void *hcpu);
1124#endif
1125
1126/**
1127 * ring_buffer_alloc - allocate a new ring_buffer
1128 * @size: the size in bytes per cpu that is needed.
1129 * @flags: attributes to set for the ring buffer.
1130 *
1131 * Currently the only flag that is available is the RB_FL_OVERWRITE
1132 * flag. This flag means that the buffer will overwrite old data
1133 * when the buffer wraps. If this flag is not set, the buffer will
1134 * drop data when the tail hits the head.
1135 */
1136struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1137 struct lock_class_key *key)
1138{
1139 struct ring_buffer *buffer;
1140 int bsize;
1141 int cpu, nr_pages;
1142
1143 /* keep it in its own cache line */
1144 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1145 GFP_KERNEL);
1146 if (!buffer)
1147 return NULL;
1148
1149 if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1150 goto fail_free_buffer;
1151
1152 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1153 buffer->flags = flags;
1154 buffer->clock = trace_clock_local;
1155 buffer->reader_lock_key = key;
1156
1157 /* need at least two pages */
1158 if (nr_pages < 2)
1159 nr_pages = 2;
1160
1161 /*
1162 * Without CPU hotplug, if the ring buffer is allocated from an
1163 * early initcall, it will not be notified of secondary CPUs.
1164 * In that case, we need to allocate for all possible CPUs.
1165 */
1166#ifdef CONFIG_HOTPLUG_CPU
1167 get_online_cpus();
1168 cpumask_copy(buffer->cpumask, cpu_online_mask);
1169#else
1170 cpumask_copy(buffer->cpumask, cpu_possible_mask);
1171#endif
1172 buffer->cpus = nr_cpu_ids;
1173
1174 bsize = sizeof(void *) * nr_cpu_ids;
1175 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1176 GFP_KERNEL);
1177 if (!buffer->buffers)
1178 goto fail_free_cpumask;
1179
1180 for_each_buffer_cpu(buffer, cpu) {
1181 buffer->buffers[cpu] =
1182 rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
1183 if (!buffer->buffers[cpu])
1184 goto fail_free_buffers;
1185 }
1186
1187#ifdef CONFIG_HOTPLUG_CPU
1188 buffer->cpu_notify.notifier_call = rb_cpu_notify;
1189 buffer->cpu_notify.priority = 0;
1190 register_cpu_notifier(&buffer->cpu_notify);
1191#endif
1192
1193 put_online_cpus();
1194 mutex_init(&buffer->mutex);
1195
1196 return buffer;
1197
1198 fail_free_buffers:
1199 for_each_buffer_cpu(buffer, cpu) {
1200 if (buffer->buffers[cpu])
1201 rb_free_cpu_buffer(buffer->buffers[cpu]);
1202 }
1203 kfree(buffer->buffers);
1204
1205 fail_free_cpumask:
1206 free_cpumask_var(buffer->cpumask);
1207 put_online_cpus();
1208
1209 fail_free_buffer:
1210 kfree(buffer);
1211 return NULL;
1212}
1213EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
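/*
 * Illustrative sketch (not used by this file): allocating and freeing a
 * buffer through the ring_buffer_alloc() wrapper in linux/ring_buffer.h,
 * which supplies the lock_class_key for us.  The 1 MB size is arbitrary.
 *
 *	struct ring_buffer *rb;
 *
 *	rb = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!rb)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(rb);
 *
 * The size is per CPU and is rounded up to whole pages, with a minimum of
 * two pages as enforced above.
 */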
1214
1215/**
1216 * ring_buffer_free - free a ring buffer.
1217 * @buffer: the buffer to free.
1218 */
1219void
1220ring_buffer_free(struct ring_buffer *buffer)
1221{
1222 int cpu;
1223
1224 get_online_cpus();
1225
1226#ifdef CONFIG_HOTPLUG_CPU
1227 unregister_cpu_notifier(&buffer->cpu_notify);
1228#endif
1229
1230 for_each_buffer_cpu(buffer, cpu)
1231 rb_free_cpu_buffer(buffer->buffers[cpu]);
1232
1233 put_online_cpus();
1234
1235 kfree(buffer->buffers);
1236 free_cpumask_var(buffer->cpumask);
1237
1238 kfree(buffer);
1239}
1240EXPORT_SYMBOL_GPL(ring_buffer_free);
1241
1242void ring_buffer_set_clock(struct ring_buffer *buffer,
1243 u64 (*clock)(void))
1244{
1245 buffer->clock = clock;
1246}
1247
1248static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1249
1250static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1251{
1252 return local_read(&bpage->entries) & RB_WRITE_MASK;
1253}
1254
1255static inline unsigned long rb_page_write(struct buffer_page *bpage)
1256{
1257 return local_read(&bpage->write) & RB_WRITE_MASK;
1258}
1259
1260static int
1261rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
1262{
1263 struct list_head *tail_page, *to_remove, *next_page;
1264 struct buffer_page *to_remove_page, *tmp_iter_page;
1265 struct buffer_page *last_page, *first_page;
1266 unsigned int nr_removed;
1267 unsigned long head_bit;
1268 int page_entries;
1269
1270 head_bit = 0;
1271
1272 raw_spin_lock_irq(&cpu_buffer->reader_lock);
1273 atomic_inc(&cpu_buffer->record_disabled);
1274 /*
1275 * We don't race with the readers since we have acquired the reader
1276 * lock. We also don't race with writers after disabling recording.
1277 * This makes it easy to figure out the first and the last page to be
1278 * removed from the list. We unlink all the pages in between including
1279 * the first and last pages. This is done in a busy loop so that we
1280 * lose the least number of traces.
1281 * The pages are freed after we restart recording and unlock readers.
1282 */
1283 tail_page = &cpu_buffer->tail_page->list;
1284
1285 /*
1286 * The tail page might be on the reader page; in that case we
1287 * remove the next page from the ring buffer instead.
1288 */
1289 if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1290 tail_page = rb_list_head(tail_page->next);
1291 to_remove = tail_page;
1292
1293 /* start of pages to remove */
1294 first_page = list_entry(rb_list_head(to_remove->next),
1295 struct buffer_page, list);
1296
1297 for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
1298 to_remove = rb_list_head(to_remove)->next;
1299 head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
1300 }
1301
1302 next_page = rb_list_head(to_remove)->next;
1303
1304 /*
1305 * Now we remove all pages between tail_page and next_page.
1306 * Make sure that we have head_bit value preserved for the
1307 * next page
1308 */
1309 tail_page->next = (struct list_head *)((unsigned long)next_page |
1310 head_bit);
1311 next_page = rb_list_head(next_page);
1312 next_page->prev = tail_page;
1313
1314 /* make sure pages points to a valid page in the ring buffer */
1315 cpu_buffer->pages = next_page;
1316
1317 /* update head page */
1318 if (head_bit)
1319 cpu_buffer->head_page = list_entry(next_page,
1320 struct buffer_page, list);
1321
1322 /*
1323 * change read pointer to make sure any read iterators reset
1324 * themselves
1325 */
1326 cpu_buffer->read = 0;
1327
1328 /* pages are removed, resume tracing and then free the pages */
1329 atomic_dec(&cpu_buffer->record_disabled);
1330 raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1331
1332 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
1333
1334 /* last buffer page to remove */
1335 last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
1336 list);
1337 tmp_iter_page = first_page;
1338
1339 do {
1340 to_remove_page = tmp_iter_page;
1341 rb_inc_page(cpu_buffer, &tmp_iter_page);
1342
1343 /* update the counters */
1344 page_entries = rb_page_entries(to_remove_page);
1345 if (page_entries) {
1346 /*
1347 * If something was added to this page, it was full
1348 * since it is not the tail page. So we deduct the
1349 * bytes consumed in ring buffer from here.
1350 * Increment overrun to account for the lost events.
1351 */
1352 local_add(page_entries, &cpu_buffer->overrun);
1353 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1354 }
1355
1356 /*
1357 * We have already removed references to this list item, just
1358 * free up the buffer_page and its page
1359 */
1360 free_buffer_page(to_remove_page);
1361 nr_removed--;
1362
1363 } while (to_remove_page != last_page);
1364
1365 RB_WARN_ON(cpu_buffer, nr_removed);
1366
1367 return nr_removed == 0;
1368}
1369
1370static int
1371rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
1372{
1373 struct list_head *pages = &cpu_buffer->new_pages;
1374 int retries, success;
1375
1376 raw_spin_lock_irq(&cpu_buffer->reader_lock);
1377 /*
1378 * We are holding the reader lock, so the reader page won't be swapped
1379 * in the ring buffer. Now we are racing with the writer trying to
1380 * move head page and the tail page.
1381 * We are going to adapt the reader page update process where:
1382 * 1. We first splice the start and end of list of new pages between
1383 * the head page and its previous page.
1384 * 2. We cmpxchg the prev_page->next to point from head page to the
1385 * start of new pages list.
1386 * 3. Finally, we update the head->prev to the end of new list.
1387 *
1388 * We will try this process 10 times, to make sure that we don't keep
1389 * spinning.
1390 */
1391 retries = 10;
1392 success = 0;
1393 while (retries--) {
1394 struct list_head *head_page, *prev_page, *r;
1395 struct list_head *last_page, *first_page;
1396 struct list_head *head_page_with_bit;
1397
1398 head_page = &rb_set_head_page(cpu_buffer)->list;
1399 prev_page = head_page->prev;
1400
1401 first_page = pages->next;
1402 last_page = pages->prev;
1403
1404 head_page_with_bit = (struct list_head *)
1405 ((unsigned long)head_page | RB_PAGE_HEAD);
1406
1407 last_page->next = head_page_with_bit;
1408 first_page->prev = prev_page;
1409
1410 r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
1411
1412 if (r == head_page_with_bit) {
1413 /*
1414 * yay, we replaced the page pointer with our new list;
1415 * now we just have to update the head page's prev
1416 * pointer to point to the end of the list
1417 */
1418 head_page->prev = last_page;
1419 success = 1;
1420 break;
1421 }
1422 }
1423
1424 if (success)
1425 INIT_LIST_HEAD(pages);
1426 /*
1427 * If we weren't successful in adding in new pages, warn and stop
1428 * tracing
1429 */
1430 RB_WARN_ON(cpu_buffer, !success);
1431 raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1432
1433 /* free pages if they weren't inserted */
1434 if (!success) {
1435 struct buffer_page *bpage, *tmp;
1436 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1437 list) {
1438 list_del_init(&bpage->list);
1439 free_buffer_page(bpage);
1440 }
1441 }
1442 return success;
1443}
1444
1445static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
1446{
1447 int success;
1448
1449 if (cpu_buffer->nr_pages_to_update > 0)
1450 success = rb_insert_pages(cpu_buffer);
1451 else
1452 success = rb_remove_pages(cpu_buffer,
1453 -cpu_buffer->nr_pages_to_update);
1454
1455 if (success)
1456 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
1457}
1458
1459static void update_pages_handler(struct work_struct *work)
1460{
1461 struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
1462 struct ring_buffer_per_cpu, update_pages_work);
1463 rb_update_pages(cpu_buffer);
1464 complete(&cpu_buffer->update_done);
1465}
1466
1467/**
1468 * ring_buffer_resize - resize the ring buffer
1469 * @buffer: the buffer to resize.
1470 * @size: the new size in bytes (per CPU buffer).
1471 * @cpu_id: the cpu buffer to resize, or RING_BUFFER_ALL_CPUS to resize all of them.
1472 * Minimum size is 2 * BUF_PAGE_SIZE.
1473 *
1474 * Returns 0 on success and < 0 on failure.
1475 */
1476int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1477 int cpu_id)
1478{
1479 struct ring_buffer_per_cpu *cpu_buffer;
1480 unsigned nr_pages;
1481 int cpu, err = 0;
1482
1483 /*
1484 * Always succeed at resizing a non-existent buffer:
1485 */
1486 if (!buffer)
1487 return size;
1488
1489 /* Make sure the requested buffer exists */
1490 if (cpu_id != RING_BUFFER_ALL_CPUS &&
1491 !cpumask_test_cpu(cpu_id, buffer->cpumask))
1492 return size;
1493
1494 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1495 size *= BUF_PAGE_SIZE;
1496
1497 /* we need a minimum of two pages */
1498 if (size < BUF_PAGE_SIZE * 2)
1499 size = BUF_PAGE_SIZE * 2;
1500
1501 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1502
1503 /*
1504 * Don't succeed if resizing is disabled, as a reader might be
1505 * manipulating the ring buffer and is expecting a sane state while
1506 * this is true.
1507 */
1508 if (atomic_read(&buffer->resize_disabled))
1509 return -EBUSY;
1510
1511 /* prevent another thread from changing buffer sizes */
1512 mutex_lock(&buffer->mutex);
1513
1514 if (cpu_id == RING_BUFFER_ALL_CPUS) {
1515 /* calculate the pages to update */
1516 for_each_buffer_cpu(buffer, cpu) {
1517 cpu_buffer = buffer->buffers[cpu];
1518
1519 cpu_buffer->nr_pages_to_update = nr_pages -
1520 cpu_buffer->nr_pages;
1521 /*
1522 * nothing more to do if we are removing pages or there is no update
1523 */
1524 if (cpu_buffer->nr_pages_to_update <= 0)
1525 continue;
1526 /*
1527 * to add pages, make sure all new pages can be
1528 * allocated without receiving ENOMEM
1529 */
1530 INIT_LIST_HEAD(&cpu_buffer->new_pages);
1531 if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1532 &cpu_buffer->new_pages, cpu)) {
1533 /* not enough memory for new pages */
1534 err = -ENOMEM;
1535 goto out_err;
1536 }
1537 }
1538
1539 get_online_cpus();
1540 /*
1541 * Fire off all the required work handlers
1542 * We can't schedule on offline CPUs, but it's not necessary
1543 * since we can change their buffer sizes without any race.
1544 */
1545 for_each_buffer_cpu(buffer, cpu) {
1546 cpu_buffer = buffer->buffers[cpu];
1547 if (!cpu_buffer->nr_pages_to_update)
1548 continue;
1549
1550 if (cpu_online(cpu))
1551 schedule_work_on(cpu,
1552 &cpu_buffer->update_pages_work);
1553 else
1554 rb_update_pages(cpu_buffer);
1555 }
1556
1557 /* wait for all the updates to complete */
1558 for_each_buffer_cpu(buffer, cpu) {
1559 cpu_buffer = buffer->buffers[cpu];
1560 if (!cpu_buffer->nr_pages_to_update)
1561 continue;
1562
1563 if (cpu_online(cpu))
1564 wait_for_completion(&cpu_buffer->update_done);
1565 cpu_buffer->nr_pages_to_update = 0;
1566 }
1567
1568 put_online_cpus();
1569 } else {
1570 cpu_buffer = buffer->buffers[cpu_id];
1571
1572 if (nr_pages == cpu_buffer->nr_pages)
1573 goto out;
1574
1575 cpu_buffer->nr_pages_to_update = nr_pages -
1576 cpu_buffer->nr_pages;
1577
1578 INIT_LIST_HEAD(&cpu_buffer->new_pages);
1579 if (cpu_buffer->nr_pages_to_update > 0 &&
1580 __rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1581 &cpu_buffer->new_pages, cpu_id)) {
1582 err = -ENOMEM;
1583 goto out_err;
1584 }
1585
1586 get_online_cpus();
1587
1588 if (cpu_online(cpu_id)) {
1589 schedule_work_on(cpu_id,
1590 &cpu_buffer->update_pages_work);
1591 wait_for_completion(&cpu_buffer->update_done);
1592 } else
1593 rb_update_pages(cpu_buffer);
1594
1595 cpu_buffer->nr_pages_to_update = 0;
1596 put_online_cpus();
1597 }
1598
1599 out:
1600 /*
1601 * The ring buffer resize can happen with the ring buffer
1602 * enabled, so that the update disturbs the tracing as little
1603 * as possible. But if the buffer is disabled, we do not need
1604 * to worry about that, and we can take the time to verify
1605 * that the buffer is not corrupt.
1606 */
1607 if (atomic_read(&buffer->record_disabled)) {
1608 atomic_inc(&buffer->record_disabled);
1609 /*
1610 * Even though the buffer was disabled, we must make sure
1611 * that it is truly disabled before calling rb_check_pages.
1612 * There could have been a race between checking
1613 * record_disabled and incrementing it.
1614 */
1615 synchronize_sched();
1616 for_each_buffer_cpu(buffer, cpu) {
1617 cpu_buffer = buffer->buffers[cpu];
1618 rb_check_pages(cpu_buffer);
1619 }
1620 atomic_dec(&buffer->record_disabled);
1621 }
1622
1623 mutex_unlock(&buffer->mutex);
1624 return size;
1625
1626 out_err:
1627 for_each_buffer_cpu(buffer, cpu) {
1628 struct buffer_page *bpage, *tmp;
1629
1630 cpu_buffer = buffer->buffers[cpu];
1631 cpu_buffer->nr_pages_to_update = 0;
1632
1633 if (list_empty(&cpu_buffer->new_pages))
1634 continue;
1635
1636 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1637 list) {
1638 list_del_init(&bpage->list);
1639 free_buffer_page(bpage);
1640 }
1641 }
1642 mutex_unlock(&buffer->mutex);
1643 return err;
1644}
1645EXPORT_SYMBOL_GPL(ring_buffer_resize);
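/*
 * Illustrative sketch (not used by this file): growing every per-CPU
 * buffer and then shrinking only CPU 1.  Sizes are given in units of
 * BUF_PAGE_SIZE purely for clarity; the argument is in bytes and is
 * rounded up to whole pages by the function above.
 *
 *	ret = ring_buffer_resize(buffer, 64 * BUF_PAGE_SIZE,
 *				 RING_BUFFER_ALL_CPUS);
 *	if (ret < 0)
 *		return ret;
 *	ret = ring_buffer_resize(buffer, 8 * BUF_PAGE_SIZE, 1);
 *
 * On success the rounded size is returned, so callers normally just test
 * for a negative value.
 */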
1646
1647void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
1648{
1649 mutex_lock(&buffer->mutex);
1650 if (val)
1651 buffer->flags |= RB_FL_OVERWRITE;
1652 else
1653 buffer->flags &= ~RB_FL_OVERWRITE;
1654 mutex_unlock(&buffer->mutex);
1655}
1656EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
1657
1658static inline void *
1659__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
1660{
1661 return bpage->data + index;
1662}
1663
1664static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
1665{
1666 return bpage->page->data + index;
1667}
1668
1669static inline struct ring_buffer_event *
1670rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
1671{
1672 return __rb_page_index(cpu_buffer->reader_page,
1673 cpu_buffer->reader_page->read);
1674}
1675
1676static inline struct ring_buffer_event *
1677rb_iter_head_event(struct ring_buffer_iter *iter)
1678{
1679 return __rb_page_index(iter->head_page, iter->head);
1680}
1681
1682static inline unsigned rb_page_commit(struct buffer_page *bpage)
1683{
1684 return local_read(&bpage->page->commit);
1685}
1686
1687/* Size is determined by what has been committed */
1688static inline unsigned rb_page_size(struct buffer_page *bpage)
1689{
1690 return rb_page_commit(bpage);
1691}
1692
1693static inline unsigned
1694rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
1695{
1696 return rb_page_commit(cpu_buffer->commit_page);
1697}
1698
1699static inline unsigned
1700rb_event_index(struct ring_buffer_event *event)
1701{
1702 unsigned long addr = (unsigned long)event;
1703
1704 return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
1705}
1706
1707static inline int
1708rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1709 struct ring_buffer_event *event)
1710{
1711 unsigned long addr = (unsigned long)event;
1712 unsigned long index;
1713
1714 index = rb_event_index(event);
1715 addr &= PAGE_MASK;
1716
1717 return cpu_buffer->commit_page->page == (void *)addr &&
1718 rb_commit_index(cpu_buffer) == index;
1719}
1720
1721static void
1722rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1723{
1724 unsigned long max_count;
1725
1726 /*
1727 * We only race with interrupts and NMIs on this CPU.
1728 * If we own the commit event, then we can commit
1729 * all others that interrupted us, since the interruptions
1730 * are in stack format (they finish before they come
1731 * back to us). This allows us to do a simple loop to
1732 * assign the commit to the tail.
1733 */
1734 again:
1735 max_count = cpu_buffer->nr_pages * 100;
1736
1737 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
1738 if (RB_WARN_ON(cpu_buffer, !(--max_count)))
1739 return;
1740 if (RB_WARN_ON(cpu_buffer,
1741 rb_is_reader_page(cpu_buffer->tail_page)))
1742 return;
1743 local_set(&cpu_buffer->commit_page->page->commit,
1744 rb_page_write(cpu_buffer->commit_page));
1745 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1746 cpu_buffer->write_stamp =
1747 cpu_buffer->commit_page->page->time_stamp;
1748 /* add barrier to keep gcc from optimizing too much */
1749 barrier();
1750 }
1751 while (rb_commit_index(cpu_buffer) !=
1752 rb_page_write(cpu_buffer->commit_page)) {
1753
1754 local_set(&cpu_buffer->commit_page->page->commit,
1755 rb_page_write(cpu_buffer->commit_page));
1756 RB_WARN_ON(cpu_buffer,
1757 local_read(&cpu_buffer->commit_page->page->commit) &
1758 ~RB_WRITE_MASK);
1759 barrier();
1760 }
1761
1762 /* again, keep gcc from optimizing */
1763 barrier();
1764
1765 /*
1766 * If an interrupt came in just after the first while loop
1767 * and pushed the tail page forward, we will be left with
1768 * a dangling commit that will never go forward.
1769 */
1770 if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1771 goto again;
1772}
1773
1774static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1775{
1776 cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
1777 cpu_buffer->reader_page->read = 0;
1778}
1779
1780static void rb_inc_iter(struct ring_buffer_iter *iter)
1781{
1782 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1783
1784 /*
1785 * The iterator could be on the reader page (it starts there).
1786 * But the head could have moved, since the reader was
1787 * found. Check for this case and assign the iterator
1788 * to the head page instead of next.
1789 */
1790 if (iter->head_page == cpu_buffer->reader_page)
1791 iter->head_page = rb_set_head_page(cpu_buffer);
1792 else
1793 rb_inc_page(cpu_buffer, &iter->head_page);
1794
1795 iter->read_stamp = iter->head_page->page->time_stamp;
1796 iter->head = 0;
1797}
1798
1799/* Slow path, do not inline */
1800static noinline struct ring_buffer_event *
1801rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
1802{
1803 event->type_len = RINGBUF_TYPE_TIME_EXTEND;
1804
1805 /* Not the first event on the page? */
1806 if (rb_event_index(event)) {
1807 event->time_delta = delta & TS_MASK;
1808 event->array[0] = delta >> TS_SHIFT;
1809 } else {
1810 /* nope, just zero it */
1811 event->time_delta = 0;
1812 event->array[0] = 0;
1813 }
1814
1815 return skip_time_extend(event);
1816}
1817
1818/**
1819 * rb_update_event - update event type and data
1820 * @event: the event to update
1821 * @delta: the time delta to be stored in the event
1822 * @length: the size of the event field in the ring buffer
1823 *
1824 * Update the type and data fields of the event. The length
1825 * is the actual size that is written to the ring buffer,
1826 * and with this, we can determine what to place into the
1827 * data field.
1828 */
1829static void
1830rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
1831 struct ring_buffer_event *event, unsigned length,
1832 int add_timestamp, u64 delta)
1833{
1834 /* Only a commit updates the timestamp */
1835 if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
1836 delta = 0;
1837
1838 /*
1839 * If we need to add a timestamp, then we
1840 * add it to the start of the reserved space.
1841 */
1842 if (unlikely(add_timestamp)) {
1843 event = rb_add_time_stamp(event, delta);
1844 length -= RB_LEN_TIME_EXTEND;
1845 delta = 0;
1846 }
1847
1848 event->time_delta = delta;
1849 length -= RB_EVNT_HDR_SIZE;
1850 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
1851 event->type_len = 0;
1852 event->array[0] = length;
1853 } else
1854 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
1855}
1856
1857/*
1858 * rb_handle_head_page - writer hit the head page
1859 *
1860 * Returns: +1 to retry page
1861 * 0 to continue
1862 * -1 on error
1863 */
1864static int
1865rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
1866 struct buffer_page *tail_page,
1867 struct buffer_page *next_page)
1868{
1869 struct buffer_page *new_head;
1870 int entries;
1871 int type;
1872 int ret;
1873
1874 entries = rb_page_entries(next_page);
1875
1876 /*
1877 * The hard part is here. We need to move the head
1878 * forward, and protect against both readers on
1879 * other CPUs and writers coming in via interrupts.
1880 */
1881 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
1882 RB_PAGE_HEAD);
1883
1884 /*
1885 * type can be one of four:
1886 * NORMAL - an interrupt already moved it for us
1887 * HEAD - we are the first to get here.
1888 * UPDATE - we are the interrupt interrupting
1889 * a current move.
1890 * MOVED - a reader on another CPU moved the next
1891 * pointer to its reader page. Give up
1892 * and try again.
1893 */
1894
1895 switch (type) {
1896 case RB_PAGE_HEAD:
1897 /*
1898 * We changed the head to UPDATE, thus
1899 * it is our responsibility to update
1900 * the counters.
1901 */
1902 local_add(entries, &cpu_buffer->overrun);
1903 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1904
1905 /*
1906 * The entries will be zeroed out when we move the
1907 * tail page.
1908 */
1909
1910 /* still more to do */
1911 break;
1912
1913 case RB_PAGE_UPDATE:
1914 /*
1915 * This is an interrupt that interrupted the
1916 * previous update. Still more to do.
1917 */
1918 break;
1919 case RB_PAGE_NORMAL:
1920 /*
1921 * An interrupt came in before the update
1922 * and processed this for us.
1923 * Nothing left to do.
1924 */
1925 return 1;
1926 case RB_PAGE_MOVED:
1927 /*
1928 * The reader is on another CPU and just did
1929 * a swap with our next_page.
1930 * Try again.
1931 */
1932 return 1;
1933 default:
1934 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
1935 return -1;
1936 }
1937
1938 /*
1939 * Now that we are here, the old head pointer is
1940 * set to UPDATE. This will keep the reader from
1941 * swapping the head page with the reader page.
1942 * The reader (on another CPU) will spin till
1943 * we are finished.
1944 *
1945 * We just need to protect against interrupts
1946 * doing the job. We will set the next pointer
1947 * to HEAD. After that, we set the old pointer
1948 * to NORMAL, but only if it was HEAD before;
1949 * otherwise we are an interrupt, and only
1950 * want the outermost commit to reset it.
1951 */
1952 new_head = next_page;
1953 rb_inc_page(cpu_buffer, &new_head);
1954
1955 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
1956 RB_PAGE_NORMAL);
1957
1958 /*
1959 * Valid returns are:
1960 * HEAD - an interrupt came in and already set it.
1961 * NORMAL - One of two things:
1962 * 1) We really set it.
1963 * 2) A bunch of interrupts came in and moved
1964 * the page forward again.
1965 */
1966 switch (ret) {
1967 case RB_PAGE_HEAD:
1968 case RB_PAGE_NORMAL:
1969 /* OK */
1970 break;
1971 default:
1972 RB_WARN_ON(cpu_buffer, 1);
1973 return -1;
1974 }
1975
1976 /*
1977 * It is possible that an interrupt came in,
1978 * set the head up, then more interrupts came in
1979 * and moved it again. When we get back here,
1980 * the page would have been set to NORMAL but we
1981 * just set it back to HEAD.
1982 *
1983 * How do you detect this? Well, if that happened
1984 * the tail page would have moved.
1985 */
1986 if (ret == RB_PAGE_NORMAL) {
1987 /*
1988 * If the tail had moved past next, then we need
1989 * to reset the pointer.
1990 */
1991 if (cpu_buffer->tail_page != tail_page &&
1992 cpu_buffer->tail_page != next_page)
1993 rb_head_page_set_normal(cpu_buffer, new_head,
1994 next_page,
1995 RB_PAGE_HEAD);
1996 }
1997
1998 /*
1999 * If this was the outermost commit (the one that
2000 * changed the original pointer from HEAD to UPDATE),
2001 * then it is up to us to reset it to NORMAL.
2002 */
2003 if (type == RB_PAGE_HEAD) {
2004 ret = rb_head_page_set_normal(cpu_buffer, next_page,
2005 tail_page,
2006 RB_PAGE_UPDATE);
2007 if (RB_WARN_ON(cpu_buffer,
2008 ret != RB_PAGE_UPDATE))
2009 return -1;
2010 }
2011
2012 return 0;
2013}
2014
2015static unsigned rb_calculate_event_length(unsigned length)
2016{
2017 struct ring_buffer_event event; /* Used only for sizeof array */
2018
2019	/* zero length can cause confusion */
2020 if (!length)
2021 length = 1;
2022
2023 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
2024 length += sizeof(event.array[0]);
2025
2026 length += RB_EVNT_HDR_SIZE;
2027 length = ALIGN(length, RB_ARCH_ALIGNMENT);
2028
2029 return length;
2030}
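
/*
 * Worked example (assuming a 4-byte event header, a 4-byte
 * RB_ARCH_ALIGNMENT, and RB_FORCE_8BYTE_ALIGNMENT not set -- the
 * exact constants are arch and config dependent):
 *
 *	rb_calculate_event_length(3);
 *	  length = 3                         requested payload
 *	  3 <= RB_MAX_SMALL_DATA             no extra array[0] word
 *	  length += RB_EVNT_HDR_SIZE  -> 7   add the header
 *	  ALIGN(7, RB_ARCH_ALIGNMENT) -> 8   round up to the alignment
 *
 * So a 3-byte payload reserves 8 bytes on the page.
 */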
2031
2032static inline void
2033rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2034 struct buffer_page *tail_page,
2035 unsigned long tail, unsigned long length)
2036{
2037 struct ring_buffer_event *event;
2038
2039 /*
2040 * Only the event that crossed the page boundary
2041 * must fill the old tail_page with padding.
2042 */
2043 if (tail >= BUF_PAGE_SIZE) {
2044 /*
2045 * If the page was filled, then we still need
2046 * to update the real_end. Reset it to zero
2047 * and the reader will ignore it.
2048 */
2049 if (tail == BUF_PAGE_SIZE)
2050 tail_page->real_end = 0;
2051
2052 local_sub(length, &tail_page->write);
2053 return;
2054 }
2055
2056 event = __rb_page_index(tail_page, tail);
2057 kmemcheck_annotate_bitfield(event, bitfield);
2058
2059 /* account for padding bytes */
2060 local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2061
2062 /*
2063 * Save the original length to the meta data.
2064	 * This will be used by the reader to update the lost
2065	 * event counter.
2066 */
2067 tail_page->real_end = tail;
2068
2069 /*
2070 * If this event is bigger than the minimum size, then
2071 * we need to be careful that we don't subtract the
2072 * write counter enough to allow another writer to slip
2073 * in on this page.
2074 * We put in a discarded commit instead, to make sure
2075 * that this space is not used again.
2076 *
2077 * If we are less than the minimum size, we don't need to
2078 * worry about it.
2079 */
2080 if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
2081 /* No room for any events */
2082
2083 /* Mark the rest of the page with padding */
2084 rb_event_set_padding(event);
2085
2086 /* Set the write back to the previous setting */
2087 local_sub(length, &tail_page->write);
2088 return;
2089 }
2090
2091 /* Put in a discarded event */
2092 event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
2093 event->type_len = RINGBUF_TYPE_PADDING;
2094 /* time delta must be non zero */
2095 event->time_delta = 1;
2096
2097 /* Set write to end of buffer */
2098 length = (tail + length) - BUF_PAGE_SIZE;
2099 local_sub(length, &tail_page->write);
2100}
2101
2102/*
2103 * This is the slow path, force gcc not to inline it.
2104 */
2105static noinline struct ring_buffer_event *
2106rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2107 unsigned long length, unsigned long tail,
2108 struct buffer_page *tail_page, u64 ts)
2109{
2110 struct buffer_page *commit_page = cpu_buffer->commit_page;
2111 struct ring_buffer *buffer = cpu_buffer->buffer;
2112 struct buffer_page *next_page;
2113 int ret;
2114
2115 next_page = tail_page;
2116
2117 rb_inc_page(cpu_buffer, &next_page);
2118
2119 /*
2120 * If for some reason, we had an interrupt storm that made
2121 * it all the way around the buffer, bail, and warn
2122 * about it.
2123 */
2124 if (unlikely(next_page == commit_page)) {
2125 local_inc(&cpu_buffer->commit_overrun);
2126 goto out_reset;
2127 }
2128
2129 /*
2130 * This is where the fun begins!
2131 *
2132 * We are fighting against races between a reader that
2133 * could be on another CPU trying to swap its reader
2134 * page with the buffer head.
2135 *
2136 * We are also fighting against interrupts coming in and
2137 * moving the head or tail on us as well.
2138 *
2139 * If the next page is the head page then we have filled
2140 * the buffer, unless the commit page is still on the
2141 * reader page.
2142 */
2143 if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
2144
2145 /*
2146 * If the commit is not on the reader page, then
2147 * move the header page.
2148 */
2149 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2150 /*
2151 * If we are not in overwrite mode,
2152 * this is easy, just stop here.
2153 */
2154 if (!(buffer->flags & RB_FL_OVERWRITE))
2155 goto out_reset;
2156
2157 ret = rb_handle_head_page(cpu_buffer,
2158 tail_page,
2159 next_page);
2160 if (ret < 0)
2161 goto out_reset;
2162 if (ret)
2163 goto out_again;
2164 } else {
2165 /*
2166 * We need to be careful here too. The
2167 * commit page could still be on the reader
2168 * page. We could have a small buffer, and
2169 * have filled up the buffer with events
2170 * from interrupts and such, and wrapped.
2171 *
2172			 * Note, if the tail page is also on the
2173 * reader_page, we let it move out.
2174 */
2175 if (unlikely((cpu_buffer->commit_page !=
2176 cpu_buffer->tail_page) &&
2177 (cpu_buffer->commit_page ==
2178 cpu_buffer->reader_page))) {
2179 local_inc(&cpu_buffer->commit_overrun);
2180 goto out_reset;
2181 }
2182 }
2183 }
2184
2185 ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
2186 if (ret) {
2187 /*
2188 * Nested commits always have zero deltas, so
2189 * just reread the time stamp
2190 */
2191 ts = rb_time_stamp(buffer);
2192 next_page->page->time_stamp = ts;
2193 }
2194
2195 out_again:
2196
2197 rb_reset_tail(cpu_buffer, tail_page, tail, length);
2198
2199 /* fail and let the caller try again */
2200 return ERR_PTR(-EAGAIN);
2201
2202 out_reset:
2203 /* reset write */
2204 rb_reset_tail(cpu_buffer, tail_page, tail, length);
2205
2206 return NULL;
2207}
2208
2209static struct ring_buffer_event *
2210__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
2211 unsigned long length, u64 ts,
2212 u64 delta, int add_timestamp)
2213{
2214 struct buffer_page *tail_page;
2215 struct ring_buffer_event *event;
2216 unsigned long tail, write;
2217
2218 /*
2219 * If the time delta since the last event is too big to
2220 * hold in the time field of the event, then we append a
2221 * TIME EXTEND event ahead of the data event.
2222 */
2223 if (unlikely(add_timestamp))
2224 length += RB_LEN_TIME_EXTEND;
2225
2226 tail_page = cpu_buffer->tail_page;
2227 write = local_add_return(length, &tail_page->write);
2228
2229 /* set write to only the index of the write */
2230 write &= RB_WRITE_MASK;
2231 tail = write - length;
2232
2233	/* See if we shot past the end of this buffer page */
2234 if (unlikely(write > BUF_PAGE_SIZE))
2235 return rb_move_tail(cpu_buffer, length, tail,
2236 tail_page, ts);
2237
2238 /* We reserved something on the buffer */
2239
2240 event = __rb_page_index(tail_page, tail);
2241 kmemcheck_annotate_bitfield(event, bitfield);
2242 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
2243
2244 local_inc(&tail_page->entries);
2245
2246 /*
2247 * If this is the first commit on the page, then update
2248 * its timestamp.
2249 */
2250 if (!tail)
2251 tail_page->page->time_stamp = ts;
2252
2253 /* account for these added bytes */
2254 local_add(length, &cpu_buffer->entries_bytes);
2255
2256 return event;
2257}
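
/*
 * The reservation above is lock free: each writer atomically adds its
 * length to tail_page->write and derives its own slot from the return
 * value.  A minimal sketch of the pattern (simplified, not the exact
 * kernel types):
 *
 *	write = local_add_return(length, &page->write) & RB_WRITE_MASK;
 *	tail  = write - length;		// start of our reservation
 *	if (write > BUF_PAGE_SIZE)
 *		// crossed the page: undo/redirect via rb_move_tail()
 *	else
 *		event = page_base + tail;	// our private slot
 *
 * Interrupting writers land after us on the same page (or move the
 * tail page), so no two writers ever share a slot.
 */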
2258
2259static inline int
2260rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2261 struct ring_buffer_event *event)
2262{
2263 unsigned long new_index, old_index;
2264 struct buffer_page *bpage;
2265 unsigned long index;
2266 unsigned long addr;
2267
2268 new_index = rb_event_index(event);
2269 old_index = new_index + rb_event_ts_length(event);
2270 addr = (unsigned long)event;
2271 addr &= PAGE_MASK;
2272
2273 bpage = cpu_buffer->tail_page;
2274
2275 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2276 unsigned long write_mask =
2277 local_read(&bpage->write) & ~RB_WRITE_MASK;
2278 unsigned long event_length = rb_event_length(event);
2279 /*
2280 * This is on the tail page. It is possible that
2281 * a write could come in and move the tail page
2282 * and write to the next page. That is fine
2283 * because we just shorten what is on this page.
2284 */
2285 old_index += write_mask;
2286 new_index += write_mask;
2287 index = local_cmpxchg(&bpage->write, old_index, new_index);
2288 if (index == old_index) {
2289 /* update counters */
2290 local_sub(event_length, &cpu_buffer->entries_bytes);
2291 return 1;
2292 }
2293 }
2294
2295 /* could not discard */
2296 return 0;
2297}
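
/*
 * The discard above works by trying to move the write index back over
 * the event with a single cmpxchg.  In outline (sketch):
 *
 *	old = index_of(event) + event_total_length;   // current write
 *	new = index_of(event);                        // rewind over it
 *	if (local_cmpxchg(&page->write, old, new) == old)
 *		// nothing was appended after us: space is reclaimed
 *	else
 *		// someone wrote behind us: leave the event as padding
 *
 * Note the real code also carries the upper (non-index) bits of
 * page->write in old/new, so the exchange fails if the page itself
 * has been recycled in the meantime.
 */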
2298
2299static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2300{
2301 local_inc(&cpu_buffer->committing);
2302 local_inc(&cpu_buffer->commits);
2303}
2304
2305static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2306{
2307 unsigned long commits;
2308
2309 if (RB_WARN_ON(cpu_buffer,
2310 !local_read(&cpu_buffer->committing)))
2311 return;
2312
2313 again:
2314 commits = local_read(&cpu_buffer->commits);
2315 /* synchronize with interrupts */
2316 barrier();
2317 if (local_read(&cpu_buffer->committing) == 1)
2318 rb_set_commit_to_write(cpu_buffer);
2319
2320 local_dec(&cpu_buffer->committing);
2321
2322 /* synchronize with interrupts */
2323 barrier();
2324
2325 /*
2326 * Need to account for interrupts coming in between the
2327 * updating of the commit page and the clearing of the
2328 * committing counter.
2329 */
2330 if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2331 !local_read(&cpu_buffer->committing)) {
2332 local_inc(&cpu_buffer->committing);
2333 goto again;
2334 }
2335}
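
/*
 * The committing/commits pair implements nested commits.  A rough
 * timeline (sketch) of a write interrupted by another write:
 *
 *	writer:  rb_start_commit()     committing 0 -> 1
 *	  irq:   rb_start_commit()     committing 1 -> 2
 *	  irq:   rb_end_commit()       committing == 2, no push;
 *	                               then committing 2 -> 1
 *	writer:  rb_end_commit()       committing == 1, so
 *	                               rb_set_commit_to_write() pushes
 *	                               the commit page, then 1 -> 0
 *
 * The separate "commits" counter lets the outer commit detect writes
 * that slipped in between its push and the final decrement, in which
 * case it loops and pushes again.
 */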
2336
2337static struct ring_buffer_event *
2338rb_reserve_next_event(struct ring_buffer *buffer,
2339 struct ring_buffer_per_cpu *cpu_buffer,
2340 unsigned long length)
2341{
2342 struct ring_buffer_event *event;
2343 u64 ts, delta;
2344 int nr_loops = 0;
2345 int add_timestamp;
2346 u64 diff;
2347
2348 rb_start_commit(cpu_buffer);
2349
2350#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2351 /*
2352 * Due to the ability to swap a cpu buffer from a buffer
2353 * it is possible it was swapped before we committed.
2354 * (committing stops a swap). We check for it here and
2355 * if it happened, we have to fail the write.
2356 */
2357 barrier();
2358 if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
2359 local_dec(&cpu_buffer->committing);
2360 local_dec(&cpu_buffer->commits);
2361 return NULL;
2362 }
2363#endif
2364
2365 length = rb_calculate_event_length(length);
2366 again:
2367 add_timestamp = 0;
2368 delta = 0;
2369
2370 /*
2371 * We allow for interrupts to reenter here and do a trace.
2372 * If one does, it will cause this original code to loop
2373 * back here. Even with heavy interrupts happening, this
2374 * should only happen a few times in a row. If this happens
2375 * 1000 times in a row, there must be either an interrupt
2376 * storm or we have something buggy.
2377 * Bail!
2378 */
2379 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2380 goto out_fail;
2381
2382 ts = rb_time_stamp(cpu_buffer->buffer);
2383 diff = ts - cpu_buffer->write_stamp;
2384
2385 /* make sure this diff is calculated here */
2386 barrier();
2387
2388 /* Did the write stamp get updated already? */
2389 if (likely(ts >= cpu_buffer->write_stamp)) {
2390 delta = diff;
2391 if (unlikely(test_time_stamp(delta))) {
2392 int local_clock_stable = 1;
2393#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2394 local_clock_stable = sched_clock_stable;
2395#endif
2396 WARN_ONCE(delta > (1ULL << 59),
2397 KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
2398 (unsigned long long)delta,
2399 (unsigned long long)ts,
2400 (unsigned long long)cpu_buffer->write_stamp,
2401 local_clock_stable ? "" :
2402 "If you just came from a suspend/resume,\n"
2403 "please switch to the trace global clock:\n"
2404 " echo global > /sys/kernel/debug/tracing/trace_clock\n");
2405 add_timestamp = 1;
2406 }
2407 }
2408
2409 event = __rb_reserve_next(cpu_buffer, length, ts,
2410 delta, add_timestamp);
2411 if (unlikely(PTR_ERR(event) == -EAGAIN))
2412 goto again;
2413
2414 if (!event)
2415 goto out_fail;
2416
2417 return event;
2418
2419 out_fail:
2420 rb_end_commit(cpu_buffer);
2421 return NULL;
2422}
2423
2424#ifdef CONFIG_TRACING
2425
2426#define TRACE_RECURSIVE_DEPTH 16
2427
2428/* Keep this code out of the fast path cache */
2429static noinline void trace_recursive_fail(void)
2430{
2431 /* Disable all tracing before we do anything else */
2432 tracing_off_permanent();
2433
2434 printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
2435 "HC[%lu]:SC[%lu]:NMI[%lu]\n",
2436 trace_recursion_buffer(),
2437 hardirq_count() >> HARDIRQ_SHIFT,
2438 softirq_count() >> SOFTIRQ_SHIFT,
2439 in_nmi());
2440
2441 WARN_ON_ONCE(1);
2442}
2443
2444static inline int trace_recursive_lock(void)
2445{
2446 trace_recursion_inc();
2447
2448 if (likely(trace_recursion_buffer() < TRACE_RECURSIVE_DEPTH))
2449 return 0;
2450
2451 trace_recursive_fail();
2452
2453 return -1;
2454}
2455
2456static inline void trace_recursive_unlock(void)
2457{
2458 WARN_ON_ONCE(!trace_recursion_buffer());
2459
2460 trace_recursion_dec();
2461}
2462
2463#else
2464
2465#define trace_recursive_lock() (0)
2466#define trace_recursive_unlock() do { } while (0)
2467
2468#endif
2469
2470/**
2471 * ring_buffer_lock_reserve - reserve a part of the buffer
2472 * @buffer: the ring buffer to reserve from
2473 * @length: the length of the data to reserve (excluding event header)
2474 *
2475 * Returns a reserved event on the ring buffer to copy directly to.
2476 * The user of this interface will need to get the body to write into
2477 * and can use the ring_buffer_event_data() interface.
2478 *
2479 * The length is the length of the data needed, not the event length
2480 * which also includes the event header.
2481 *
2482 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
2483 * If NULL is returned, then nothing has been allocated or locked.
2484 */
2485struct ring_buffer_event *
2486ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
2487{
2488 struct ring_buffer_per_cpu *cpu_buffer;
2489 struct ring_buffer_event *event;
2490 int cpu;
2491
2492 if (ring_buffer_flags != RB_BUFFERS_ON)
2493 return NULL;
2494
2495 /* If we are tracing schedule, we don't want to recurse */
2496 preempt_disable_notrace();
2497
2498 if (atomic_read(&buffer->record_disabled))
2499 goto out_nocheck;
2500
2501 if (trace_recursive_lock())
2502 goto out_nocheck;
2503
2504 cpu = raw_smp_processor_id();
2505
2506 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2507 goto out;
2508
2509 cpu_buffer = buffer->buffers[cpu];
2510
2511 if (atomic_read(&cpu_buffer->record_disabled))
2512 goto out;
2513
2514 if (length > BUF_MAX_DATA_SIZE)
2515 goto out;
2516
2517 event = rb_reserve_next_event(buffer, cpu_buffer, length);
2518 if (!event)
2519 goto out;
2520
2521 return event;
2522
2523 out:
2524 trace_recursive_unlock();
2525
2526 out_nocheck:
2527 preempt_enable_notrace();
2528 return NULL;
2529}
2530EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
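
/*
 * Typical usage of the reserve/commit pair (illustrative only; the
 * "struct my_entry" payload is a made-up placeholder):
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return;		// recording off, no room, or recursion
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;	// fill in the reserved space
 *	ring_buffer_unlock_commit(buffer, event);
 *
 * If the event should be dropped instead of committed, call
 * ring_buffer_discard_commit() rather than ring_buffer_unlock_commit().
 */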
2531
2532static void
2533rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2534 struct ring_buffer_event *event)
2535{
2536 u64 delta;
2537
2538 /*
2539	 * The first event in the commit queue updates the
2540 * time stamp.
2541 */
2542 if (rb_event_is_commit(cpu_buffer, event)) {
2543 /*
2544 * A commit event that is first on a page
2545 * updates the write timestamp with the page stamp
2546 */
2547 if (!rb_event_index(event))
2548 cpu_buffer->write_stamp =
2549 cpu_buffer->commit_page->page->time_stamp;
2550 else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
2551 delta = event->array[0];
2552 delta <<= TS_SHIFT;
2553 delta += event->time_delta;
2554 cpu_buffer->write_stamp += delta;
2555 } else
2556 cpu_buffer->write_stamp += event->time_delta;
2557 }
2558}
2559
2560static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2561 struct ring_buffer_event *event)
2562{
2563 local_inc(&cpu_buffer->entries);
2564 rb_update_write_stamp(cpu_buffer, event);
2565 rb_end_commit(cpu_buffer);
2566}
2567
2568/**
2569 * ring_buffer_unlock_commit - commit a reserved event
2570 * @buffer: The buffer to commit to
2571 * @event: The event pointer to commit.
2572 *
2573 * This commits the data to the ring buffer, and releases any locks held.
2574 *
2575 * Must be paired with ring_buffer_lock_reserve.
2576 */
2577int ring_buffer_unlock_commit(struct ring_buffer *buffer,
2578 struct ring_buffer_event *event)
2579{
2580 struct ring_buffer_per_cpu *cpu_buffer;
2581 int cpu = raw_smp_processor_id();
2582
2583 cpu_buffer = buffer->buffers[cpu];
2584
2585 rb_commit(cpu_buffer, event);
2586
2587 trace_recursive_unlock();
2588
2589 preempt_enable_notrace();
2590
2591 return 0;
2592}
2593EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
2594
2595static inline void rb_event_discard(struct ring_buffer_event *event)
2596{
2597 if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
2598 event = skip_time_extend(event);
2599
2600 /* array[0] holds the actual length for the discarded event */
2601 event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2602 event->type_len = RINGBUF_TYPE_PADDING;
2603 /* time delta must be non zero */
2604 if (!event->time_delta)
2605 event->time_delta = 1;
2606}
2607
2608/*
2609 * Decrement the entries to the page that an event is on.
2610 * The event does not even need to exist, only the pointer
2611 * to the page it is on. This may only be called before the commit
2612 * takes place.
2613 */
2614static inline void
2615rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2616 struct ring_buffer_event *event)
2617{
2618 unsigned long addr = (unsigned long)event;
2619 struct buffer_page *bpage = cpu_buffer->commit_page;
2620 struct buffer_page *start;
2621
2622 addr &= PAGE_MASK;
2623
2624 /* Do the likely case first */
2625 if (likely(bpage->page == (void *)addr)) {
2626 local_dec(&bpage->entries);
2627 return;
2628 }
2629
2630 /*
2631 * Because the commit page may be on the reader page we
2632 * start with the next page and check the end loop there.
2633 */
2634 rb_inc_page(cpu_buffer, &bpage);
2635 start = bpage;
2636 do {
2637 if (bpage->page == (void *)addr) {
2638 local_dec(&bpage->entries);
2639 return;
2640 }
2641 rb_inc_page(cpu_buffer, &bpage);
2642 } while (bpage != start);
2643
2644 /* commit not part of this buffer?? */
2645 RB_WARN_ON(cpu_buffer, 1);
2646}
2647
2648/**
2649 * ring_buffer_commit_discard - discard an event that has not been committed
2650 * @buffer: the ring buffer
2651 * @event: non committed event to discard
2652 *
2653 * Sometimes an event that is in the ring buffer needs to be ignored.
2654 * This function lets the user discard an event in the ring buffer
2655 * and then that event will not be read later.
2656 *
2657 * This function only works if it is called before the item has been
2658 * committed. It will try to free the event from the ring buffer
2659 * if another event has not been added behind it.
2660 *
2661 * If another event has been added behind it, it will set the event
2662 * up as discarded, and perform the commit.
2663 *
2664 * If this function is called, do not call ring_buffer_unlock_commit on
2665 * the event.
2666 */
2667void ring_buffer_discard_commit(struct ring_buffer *buffer,
2668 struct ring_buffer_event *event)
2669{
2670 struct ring_buffer_per_cpu *cpu_buffer;
2671 int cpu;
2672
2673 /* The event is discarded regardless */
2674 rb_event_discard(event);
2675
2676 cpu = smp_processor_id();
2677 cpu_buffer = buffer->buffers[cpu];
2678
2679 /*
2680 * This must only be called if the event has not been
2681 * committed yet. Thus we can assume that preemption
2682 * is still disabled.
2683 */
2684 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
2685
2686 rb_decrement_entry(cpu_buffer, event);
2687 if (rb_try_to_discard(cpu_buffer, event))
2688 goto out;
2689
2690 /*
2691 * The commit is still visible by the reader, so we
2692 * must still update the timestamp.
2693 */
2694 rb_update_write_stamp(cpu_buffer, event);
2695 out:
2696 rb_end_commit(cpu_buffer);
2697
2698 trace_recursive_unlock();
2699
2700 preempt_enable_notrace();
2701
2702}
2703EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
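
/*
 * Illustrative use of the discard path (fill_entry() and
 * entry_passes_filter() are hypothetical helpers for the example):
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	fill_entry(entry);
 *	if (!entry_passes_filter(entry))
 *		ring_buffer_discard_commit(buffer, event);
 *	else
 *		ring_buffer_unlock_commit(buffer, event);
 *
 * Either call releases the preemption and recursion protection taken
 * by ring_buffer_lock_reserve(), so exactly one of them must be made.
 */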
2704
2705/**
2706 * ring_buffer_write - write data to the buffer without reserving
2707 * @buffer: The ring buffer to write to.
2708 * @length: The length of the data being written (excluding the event header)
2709 * @data: The data to write to the buffer.
2710 *
2711 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
2712 * one function. If you already have the data to write to the buffer, it
2713 * may be easier to simply call this function.
2714 *
2715 * Note, like ring_buffer_lock_reserve, the length is the length of the data
2716 * and not the length of the event which would hold the header.
2717 */
2718int ring_buffer_write(struct ring_buffer *buffer,
2719 unsigned long length,
2720 void *data)
2721{
2722 struct ring_buffer_per_cpu *cpu_buffer;
2723 struct ring_buffer_event *event;
2724 void *body;
2725 int ret = -EBUSY;
2726 int cpu;
2727
2728 if (ring_buffer_flags != RB_BUFFERS_ON)
2729 return -EBUSY;
2730
2731 preempt_disable_notrace();
2732
2733 if (atomic_read(&buffer->record_disabled))
2734 goto out;
2735
2736 cpu = raw_smp_processor_id();
2737
2738 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2739 goto out;
2740
2741 cpu_buffer = buffer->buffers[cpu];
2742
2743 if (atomic_read(&cpu_buffer->record_disabled))
2744 goto out;
2745
2746 if (length > BUF_MAX_DATA_SIZE)
2747 goto out;
2748
2749 event = rb_reserve_next_event(buffer, cpu_buffer, length);
2750 if (!event)
2751 goto out;
2752
2753 body = rb_event_data(event);
2754
2755 memcpy(body, data, length);
2756
2757 rb_commit(cpu_buffer, event);
2758
2759 ret = 0;
2760 out:
2761 preempt_enable_notrace();
2762
2763 return ret;
2764}
2765EXPORT_SYMBOL_GPL(ring_buffer_write);
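
/*
 * Example of the one-shot write path (sketch; "struct my_data" is a
 * stand-in for data the caller already has assembled):
 *
 *	struct my_data d = { ... };
 *
 *	if (ring_buffer_write(buffer, sizeof(d), &d))
 *		// -EBUSY: recording is off, the cpu has no buffer,
 *		// or the data was too big
 *
 * This is equivalent to reserve + memcpy + commit, but saves the
 * caller from touching the event directly.
 */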
2766
2767static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
2768{
2769 struct buffer_page *reader = cpu_buffer->reader_page;
2770 struct buffer_page *head = rb_set_head_page(cpu_buffer);
2771 struct buffer_page *commit = cpu_buffer->commit_page;
2772
2773 /* In case of error, head will be NULL */
2774 if (unlikely(!head))
2775 return 1;
2776
2777 return reader->read == rb_page_commit(reader) &&
2778 (commit == reader ||
2779 (commit == head &&
2780 head->read == rb_page_commit(commit)));
2781}
2782
2783/**
2784 * ring_buffer_record_disable - stop all writes into the buffer
2785 * @buffer: The ring buffer to stop writes to.
2786 *
2787 * This prevents all writes to the buffer. Any attempt to write
2788 * to the buffer after this will fail and return NULL.
2789 *
2790 * The caller should call synchronize_sched() after this.
2791 */
2792void ring_buffer_record_disable(struct ring_buffer *buffer)
2793{
2794 atomic_inc(&buffer->record_disabled);
2795}
2796EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
2797
2798/**
2799 * ring_buffer_record_enable - enable writes to the buffer
2800 * @buffer: The ring buffer to enable writes
2801 *
2802 * Note, multiple disables will need the same number of enables
2803 * to truly enable the writing (much like preempt_disable).
2804 */
2805void ring_buffer_record_enable(struct ring_buffer *buffer)
2806{
2807 atomic_dec(&buffer->record_disabled);
2808}
2809EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
2810
2811/**
2812 * ring_buffer_record_off - stop all writes into the buffer
2813 * @buffer: The ring buffer to stop writes to.
2814 *
2815 * This prevents all writes to the buffer. Any attempt to write
2816 * to the buffer after this will fail and return NULL.
2817 *
2818 * This is different than ring_buffer_record_disable() as
2819 * it works like an on/off switch, whereas the disable() version
2820 * must be paired with an enable().
2821 */
2822void ring_buffer_record_off(struct ring_buffer *buffer)
2823{
2824 unsigned int rd;
2825 unsigned int new_rd;
2826
2827 do {
2828 rd = atomic_read(&buffer->record_disabled);
2829 new_rd = rd | RB_BUFFER_OFF;
2830 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
2831}
2832EXPORT_SYMBOL_GPL(ring_buffer_record_off);
2833
2834/**
2835 * ring_buffer_record_on - restart writes into the buffer
2836 * @buffer: The ring buffer to start writes to.
2837 *
2838 * This enables all writes to the buffer that was disabled by
2839 * ring_buffer_record_off().
2840 *
2841 * This is different than ring_buffer_record_enable() as
2842 * it works like an on/off switch, whereas the enable() version
2843 * must be paired with a disable().
2844 */
2845void ring_buffer_record_on(struct ring_buffer *buffer)
2846{
2847 unsigned int rd;
2848 unsigned int new_rd;
2849
2850 do {
2851 rd = atomic_read(&buffer->record_disabled);
2852 new_rd = rd & ~RB_BUFFER_OFF;
2853 } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
2854}
2855EXPORT_SYMBOL_GPL(ring_buffer_record_on);
2856
2857/**
2858 * ring_buffer_record_is_on - return true if the ring buffer can write
2859 * @buffer: The ring buffer to see if write is enabled
2860 *
2861 * Returns true if the ring buffer is in a state that it accepts writes.
2862 */
2863int ring_buffer_record_is_on(struct ring_buffer *buffer)
2864{
2865 return !atomic_read(&buffer->record_disabled);
2866}
2867
2868/**
2869 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
2870 * @buffer: The ring buffer to stop writes to.
2871 * @cpu: The CPU buffer to stop
2872 *
2873 * This prevents all writes to the buffer. Any attempt to write
2874 * to the buffer after this will fail and return NULL.
2875 *
2876 * The caller should call synchronize_sched() after this.
2877 */
2878void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
2879{
2880 struct ring_buffer_per_cpu *cpu_buffer;
2881
2882 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2883 return;
2884
2885 cpu_buffer = buffer->buffers[cpu];
2886 atomic_inc(&cpu_buffer->record_disabled);
2887}
2888EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
2889
2890/**
2891 * ring_buffer_record_enable_cpu - enable writes to the buffer
2892 * @buffer: The ring buffer to enable writes
2893 * @cpu: The CPU to enable.
2894 *
2895 * Note, multiple disables will need the same number of enables
2896 * to truly enable the writing (much like preempt_disable).
2897 */
2898void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
2899{
2900 struct ring_buffer_per_cpu *cpu_buffer;
2901
2902 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2903 return;
2904
2905 cpu_buffer = buffer->buffers[cpu];
2906 atomic_dec(&cpu_buffer->record_disabled);
2907}
2908EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
2909
2910/*
2911 * The total entries in the ring buffer is the running counter
2912 * of entries entered into the ring buffer, minus the sum of
2913 * the entries read from the ring buffer and the number of
2914 * entries that were overwritten.
2915 */
2916static inline unsigned long
2917rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
2918{
2919 return local_read(&cpu_buffer->entries) -
2920 (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
2921}
2922
2923/**
2924 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
2925 * @buffer: The ring buffer
2926 * @cpu: The per CPU buffer to read from.
2927 */
2928unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
2929{
2930 unsigned long flags;
2931 struct ring_buffer_per_cpu *cpu_buffer;
2932 struct buffer_page *bpage;
2933 unsigned long ret;
2934
2935 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2936 return 0;
2937
2938 cpu_buffer = buffer->buffers[cpu];
2939 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2940 /*
2941	 * If the tail is on the reader_page, the oldest time stamp is on
2942	 * the reader page.
2943 */
2944 if (cpu_buffer->tail_page == cpu_buffer->reader_page)
2945 bpage = cpu_buffer->reader_page;
2946 else
2947 bpage = rb_set_head_page(cpu_buffer);
2948 ret = bpage->page->time_stamp;
2949 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2950
2951 return ret;
2952}
2953EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
2954
2955/**
2956 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
2957 * @buffer: The ring buffer
2958 * @cpu: The per CPU buffer to read from.
2959 */
2960unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
2961{
2962 struct ring_buffer_per_cpu *cpu_buffer;
2963 unsigned long ret;
2964
2965 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2966 return 0;
2967
2968 cpu_buffer = buffer->buffers[cpu];
2969 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
2970
2971 return ret;
2972}
2973EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
2974
2975/**
2976 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
2977 * @buffer: The ring buffer
2978 * @cpu: The per CPU buffer to get the entries from.
2979 */
2980unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
2981{
2982 struct ring_buffer_per_cpu *cpu_buffer;
2983
2984 if (!cpumask_test_cpu(cpu, buffer->cpumask))
2985 return 0;
2986
2987 cpu_buffer = buffer->buffers[cpu];
2988
2989 return rb_num_of_entries(cpu_buffer);
2990}
2991EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
2992
2993/**
2994 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
2995 * @buffer: The ring buffer
2996 * @cpu: The per CPU buffer to get the number of overruns from
2997 */
2998unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
2999{
3000 struct ring_buffer_per_cpu *cpu_buffer;
3001 unsigned long ret;
3002
3003 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3004 return 0;
3005
3006 cpu_buffer = buffer->buffers[cpu];
3007 ret = local_read(&cpu_buffer->overrun);
3008
3009 return ret;
3010}
3011EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
3012
3013/**
3014 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
3015 * @buffer: The ring buffer
3016 * @cpu: The per CPU buffer to get the number of overruns from
3017 */
3018unsigned long
3019ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
3020{
3021 struct ring_buffer_per_cpu *cpu_buffer;
3022 unsigned long ret;
3023
3024 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3025 return 0;
3026
3027 cpu_buffer = buffer->buffers[cpu];
3028 ret = local_read(&cpu_buffer->commit_overrun);
3029
3030 return ret;
3031}
3032EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
3033
3034/**
3035 * ring_buffer_entries - get the number of entries in a buffer
3036 * @buffer: The ring buffer
3037 *
3038 * Returns the total number of entries in the ring buffer
3039 * (all CPU entries)
3040 */
3041unsigned long ring_buffer_entries(struct ring_buffer *buffer)
3042{
3043 struct ring_buffer_per_cpu *cpu_buffer;
3044 unsigned long entries = 0;
3045 int cpu;
3046
3047 /* if you care about this being correct, lock the buffer */
3048 for_each_buffer_cpu(buffer, cpu) {
3049 cpu_buffer = buffer->buffers[cpu];
3050 entries += rb_num_of_entries(cpu_buffer);
3051 }
3052
3053 return entries;
3054}
3055EXPORT_SYMBOL_GPL(ring_buffer_entries);
3056
3057/**
3058 * ring_buffer_overruns - get the number of overruns in buffer
3059 * @buffer: The ring buffer
3060 *
3061 * Returns the total number of overruns in the ring buffer
3062 * (all CPU entries)
3063 */
3064unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
3065{
3066 struct ring_buffer_per_cpu *cpu_buffer;
3067 unsigned long overruns = 0;
3068 int cpu;
3069
3070 /* if you care about this being correct, lock the buffer */
3071 for_each_buffer_cpu(buffer, cpu) {
3072 cpu_buffer = buffer->buffers[cpu];
3073 overruns += local_read(&cpu_buffer->overrun);
3074 }
3075
3076 return overruns;
3077}
3078EXPORT_SYMBOL_GPL(ring_buffer_overruns);
3079
3080static void rb_iter_reset(struct ring_buffer_iter *iter)
3081{
3082 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3083
3084 /* Iterator usage is expected to have record disabled */
3085 if (list_empty(&cpu_buffer->reader_page->list)) {
3086 iter->head_page = rb_set_head_page(cpu_buffer);
3087 if (unlikely(!iter->head_page))
3088 return;
3089 iter->head = iter->head_page->read;
3090 } else {
3091 iter->head_page = cpu_buffer->reader_page;
3092 iter->head = cpu_buffer->reader_page->read;
3093 }
3094 if (iter->head)
3095 iter->read_stamp = cpu_buffer->read_stamp;
3096 else
3097 iter->read_stamp = iter->head_page->page->time_stamp;
3098 iter->cache_reader_page = cpu_buffer->reader_page;
3099 iter->cache_read = cpu_buffer->read;
3100}
3101
3102/**
3103 * ring_buffer_iter_reset - reset an iterator
3104 * @iter: The iterator to reset
3105 *
3106 * Resets the iterator, so that it will start from the beginning
3107 * again.
3108 */
3109void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
3110{
3111 struct ring_buffer_per_cpu *cpu_buffer;
3112 unsigned long flags;
3113
3114 if (!iter)
3115 return;
3116
3117 cpu_buffer = iter->cpu_buffer;
3118
3119 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3120 rb_iter_reset(iter);
3121 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3122}
3123EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
3124
3125/**
3126 * ring_buffer_iter_empty - check if an iterator has no more to read
3127 * @iter: The iterator to check
3128 */
3129int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
3130{
3131 struct ring_buffer_per_cpu *cpu_buffer;
3132
3133 cpu_buffer = iter->cpu_buffer;
3134
3135 return iter->head_page == cpu_buffer->commit_page &&
3136 iter->head == rb_commit_index(cpu_buffer);
3137}
3138EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
3139
3140static void
3141rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
3142 struct ring_buffer_event *event)
3143{
3144 u64 delta;
3145
3146 switch (event->type_len) {
3147 case RINGBUF_TYPE_PADDING:
3148 return;
3149
3150 case RINGBUF_TYPE_TIME_EXTEND:
3151 delta = event->array[0];
3152 delta <<= TS_SHIFT;
3153 delta += event->time_delta;
3154 cpu_buffer->read_stamp += delta;
3155 return;
3156
3157 case RINGBUF_TYPE_TIME_STAMP:
3158 /* FIXME: not implemented */
3159 return;
3160
3161 case RINGBUF_TYPE_DATA:
3162 cpu_buffer->read_stamp += event->time_delta;
3163 return;
3164
3165 default:
3166 BUG();
3167 }
3168 return;
3169}
3170
3171static void
3172rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
3173 struct ring_buffer_event *event)
3174{
3175 u64 delta;
3176
3177 switch (event->type_len) {
3178 case RINGBUF_TYPE_PADDING:
3179 return;
3180
3181 case RINGBUF_TYPE_TIME_EXTEND:
3182 delta = event->array[0];
3183 delta <<= TS_SHIFT;
3184 delta += event->time_delta;
3185 iter->read_stamp += delta;
3186 return;
3187
3188 case RINGBUF_TYPE_TIME_STAMP:
3189 /* FIXME: not implemented */
3190 return;
3191
3192 case RINGBUF_TYPE_DATA:
3193 iter->read_stamp += event->time_delta;
3194 return;
3195
3196 default:
3197 BUG();
3198 }
3199 return;
3200}
3201
3202static struct buffer_page *
3203rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3204{
3205 struct buffer_page *reader = NULL;
3206 unsigned long overwrite;
3207 unsigned long flags;
3208 int nr_loops = 0;
3209 int ret;
3210
3211 local_irq_save(flags);
3212 arch_spin_lock(&cpu_buffer->lock);
3213
3214 again:
3215 /*
3216 * This should normally only loop twice. But because the
3217 * start of the reader inserts an empty page, it causes
3218 * a case where we will loop three times. There should be no
3219 * reason to loop four times (that I know of).
3220 */
3221 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
3222 reader = NULL;
3223 goto out;
3224 }
3225
3226 reader = cpu_buffer->reader_page;
3227
3228 /* If there's more to read, return this page */
3229 if (cpu_buffer->reader_page->read < rb_page_size(reader))
3230 goto out;
3231
3232 /* Never should we have an index greater than the size */
3233 if (RB_WARN_ON(cpu_buffer,
3234 cpu_buffer->reader_page->read > rb_page_size(reader)))
3235 goto out;
3236
3237 /* check if we caught up to the tail */
3238 reader = NULL;
3239 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
3240 goto out;
3241
3242 /*
3243 * Reset the reader page to size zero.
3244 */
3245 local_set(&cpu_buffer->reader_page->write, 0);
3246 local_set(&cpu_buffer->reader_page->entries, 0);
3247 local_set(&cpu_buffer->reader_page->page->commit, 0);
3248 cpu_buffer->reader_page->real_end = 0;
3249
3250 spin:
3251 /*
3252 * Splice the empty reader page into the list around the head.
3253 */
3254 reader = rb_set_head_page(cpu_buffer);
3255 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
3256 cpu_buffer->reader_page->list.prev = reader->list.prev;
3257
3258 /*
3259 * cpu_buffer->pages just needs to point to the buffer, it
3260	 * has no specific buffer page to point to. Let's move it out
3261 * of our way so we don't accidentally swap it.
3262 */
3263 cpu_buffer->pages = reader->list.prev;
3264
3265 /* The reader page will be pointing to the new head */
3266 rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
3267
3268 /*
3269 * We want to make sure we read the overruns after we set up our
3270 * pointers to the next object. The writer side does a
3271 * cmpxchg to cross pages which acts as the mb on the writer
3272 * side. Note, the reader will constantly fail the swap
3273 * while the writer is updating the pointers, so this
3274 * guarantees that the overwrite recorded here is the one we
3275 * want to compare with the last_overrun.
3276 */
3277 smp_mb();
3278 overwrite = local_read(&(cpu_buffer->overrun));
3279
3280 /*
3281 * Here's the tricky part.
3282 *
3283 * We need to move the pointer past the header page.
3284 * But we can only do that if a writer is not currently
3285 * moving it. The page before the header page has the
3286	 * flag bit '1' set if it is pointing to the page we want,
3287	 * but if the writer is in the process of moving it
3288	 * then it will be '2', or '0' if it has already moved.
3289 */
3290
3291 ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
3292
3293 /*
3294 * If we did not convert it, then we must try again.
3295 */
3296 if (!ret)
3297 goto spin;
3298
3299 /*
3300 * Yeah! We succeeded in replacing the page.
3301 *
3302 * Now make the new head point back to the reader page.
3303 */
3304 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
3305 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
3306
3307 /* Finally update the reader page to the new head */
3308 cpu_buffer->reader_page = reader;
3309 rb_reset_reader_page(cpu_buffer);
3310
3311 if (overwrite != cpu_buffer->last_overrun) {
3312 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
3313 cpu_buffer->last_overrun = overwrite;
3314 }
3315
3316 goto again;
3317
3318 out:
3319 arch_spin_unlock(&cpu_buffer->lock);
3320 local_irq_restore(flags);
3321
3322 return reader;
3323}
3324
3325static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
3326{
3327 struct ring_buffer_event *event;
3328 struct buffer_page *reader;
3329 unsigned length;
3330
3331 reader = rb_get_reader_page(cpu_buffer);
3332
3333 /* This function should not be called when buffer is empty */
3334 if (RB_WARN_ON(cpu_buffer, !reader))
3335 return;
3336
3337 event = rb_reader_event(cpu_buffer);
3338
3339 if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
3340 cpu_buffer->read++;
3341
3342 rb_update_read_stamp(cpu_buffer, event);
3343
3344 length = rb_event_length(event);
3345 cpu_buffer->reader_page->read += length;
3346}
3347
3348static void rb_advance_iter(struct ring_buffer_iter *iter)
3349{
3350 struct ring_buffer_per_cpu *cpu_buffer;
3351 struct ring_buffer_event *event;
3352 unsigned length;
3353
3354 cpu_buffer = iter->cpu_buffer;
3355
3356 /*
3357 * Check if we are at the end of the buffer.
3358 */
3359 if (iter->head >= rb_page_size(iter->head_page)) {
3360 /* discarded commits can make the page empty */
3361 if (iter->head_page == cpu_buffer->commit_page)
3362 return;
3363 rb_inc_iter(iter);
3364 return;
3365 }
3366
3367 event = rb_iter_head_event(iter);
3368
3369 length = rb_event_length(event);
3370
3371 /*
3372 * This should not be called to advance the header if we are
3373 * at the tail of the buffer.
3374 */
3375 if (RB_WARN_ON(cpu_buffer,
3376 (iter->head_page == cpu_buffer->commit_page) &&
3377 (iter->head + length > rb_commit_index(cpu_buffer))))
3378 return;
3379
3380 rb_update_iter_read_stamp(iter, event);
3381
3382 iter->head += length;
3383
3384 /* check for end of page padding */
3385 if ((iter->head >= rb_page_size(iter->head_page)) &&
3386 (iter->head_page != cpu_buffer->commit_page))
3387 rb_advance_iter(iter);
3388}
3389
3390static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
3391{
3392 return cpu_buffer->lost_events;
3393}
3394
3395static struct ring_buffer_event *
3396rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
3397 unsigned long *lost_events)
3398{
3399 struct ring_buffer_event *event;
3400 struct buffer_page *reader;
3401 int nr_loops = 0;
3402
3403 again:
3404 /*
3405 * We repeat when a time extend is encountered.
3406 * Since the time extend is always attached to a data event,
3407 * we should never loop more than once.
3408 * (We never hit the following condition more than twice).
3409 */
3410 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3411 return NULL;
3412
3413 reader = rb_get_reader_page(cpu_buffer);
3414 if (!reader)
3415 return NULL;
3416
3417 event = rb_reader_event(cpu_buffer);
3418
3419 switch (event->type_len) {
3420 case RINGBUF_TYPE_PADDING:
3421 if (rb_null_event(event))
3422 RB_WARN_ON(cpu_buffer, 1);
3423 /*
3424 * Because the writer could be discarding every
3425 * event it creates (which would probably be bad)
3426 * if we were to go back to "again" then we may never
3427 * catch up, and will trigger the warn on, or lock
3428 * the box. Return the padding, and we will release
3429 * the current locks, and try again.
3430 */
3431 return event;
3432
3433 case RINGBUF_TYPE_TIME_EXTEND:
3434 /* Internal data, OK to advance */
3435 rb_advance_reader(cpu_buffer);
3436 goto again;
3437
3438 case RINGBUF_TYPE_TIME_STAMP:
3439 /* FIXME: not implemented */
3440 rb_advance_reader(cpu_buffer);
3441 goto again;
3442
3443 case RINGBUF_TYPE_DATA:
3444 if (ts) {
3445 *ts = cpu_buffer->read_stamp + event->time_delta;
3446 ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3447 cpu_buffer->cpu, ts);
3448 }
3449 if (lost_events)
3450 *lost_events = rb_lost_events(cpu_buffer);
3451 return event;
3452
3453 default:
3454 BUG();
3455 }
3456
3457 return NULL;
3458}
3459EXPORT_SYMBOL_GPL(ring_buffer_peek);
3460
3461static struct ring_buffer_event *
3462rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3463{
3464 struct ring_buffer *buffer;
3465 struct ring_buffer_per_cpu *cpu_buffer;
3466 struct ring_buffer_event *event;
3467 int nr_loops = 0;
3468
3469 cpu_buffer = iter->cpu_buffer;
3470 buffer = cpu_buffer->buffer;
3471
3472 /*
3473 * Check if someone performed a consuming read to
3474 * the buffer. A consuming read invalidates the iterator
3475 * and we need to reset the iterator in this case.
3476 */
3477 if (unlikely(iter->cache_read != cpu_buffer->read ||
3478 iter->cache_reader_page != cpu_buffer->reader_page))
3479 rb_iter_reset(iter);
3480
3481 again:
3482 if (ring_buffer_iter_empty(iter))
3483 return NULL;
3484
3485 /*
3486 * We repeat when a time extend is encountered.
3487 * Since the time extend is always attached to a data event,
3488 * we should never loop more than once.
3489 * (We never hit the following condition more than twice).
3490 */
3491 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3492 return NULL;
3493
3494 if (rb_per_cpu_empty(cpu_buffer))
3495 return NULL;
3496
3497 if (iter->head >= local_read(&iter->head_page->page->commit)) {
3498 rb_inc_iter(iter);
3499 goto again;
3500 }
3501
3502 event = rb_iter_head_event(iter);
3503
3504 switch (event->type_len) {
3505 case RINGBUF_TYPE_PADDING:
3506 if (rb_null_event(event)) {
3507 rb_inc_iter(iter);
3508 goto again;
3509 }
3510 rb_advance_iter(iter);
3511 return event;
3512
3513 case RINGBUF_TYPE_TIME_EXTEND:
3514 /* Internal data, OK to advance */
3515 rb_advance_iter(iter);
3516 goto again;
3517
3518 case RINGBUF_TYPE_TIME_STAMP:
3519 /* FIXME: not implemented */
3520 rb_advance_iter(iter);
3521 goto again;
3522
3523 case RINGBUF_TYPE_DATA:
3524 if (ts) {
3525 *ts = iter->read_stamp + event->time_delta;
3526 ring_buffer_normalize_time_stamp(buffer,
3527 cpu_buffer->cpu, ts);
3528 }
3529 return event;
3530
3531 default:
3532 BUG();
3533 }
3534
3535 return NULL;
3536}
3537EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
3538
3539static inline int rb_ok_to_lock(void)
3540{
3541 /*
3542 * If an NMI die dumps out the content of the ring buffer
3543 * do not grab locks. We also permanently disable the ring
3544 * buffer too. A one time deal is all you get from reading
3545 * the ring buffer from an NMI.
3546 */
3547 if (likely(!in_nmi()))
3548 return 1;
3549
3550 tracing_off_permanent();
3551 return 0;
3552}
3553
3554/**
3555 * ring_buffer_peek - peek at the next event to be read
3556 * @buffer: The ring buffer to read
3557 * @cpu: The cpu to peek at
3558 * @ts: The timestamp counter of this event.
3559 * @lost_events: a variable to store if events were lost (may be NULL)
3560 *
3561 * This will return the event that will be read next, but does
3562 * not consume the data.
3563 */
3564struct ring_buffer_event *
3565ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
3566 unsigned long *lost_events)
3567{
3568 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3569 struct ring_buffer_event *event;
3570 unsigned long flags;
3571 int dolock;
3572
3573 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3574 return NULL;
3575
3576 dolock = rb_ok_to_lock();
3577 again:
3578 local_irq_save(flags);
3579 if (dolock)
3580 raw_spin_lock(&cpu_buffer->reader_lock);
3581 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3582 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3583 rb_advance_reader(cpu_buffer);
3584 if (dolock)
3585 raw_spin_unlock(&cpu_buffer->reader_lock);
3586 local_irq_restore(flags);
3587
3588 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3589 goto again;
3590
3591 return event;
3592}
3593
3594/**
3595 * ring_buffer_iter_peek - peek at the next event to be read
3596 * @iter: The ring buffer iterator
3597 * @ts: The timestamp counter of this event.
3598 *
3599 * This will return the event that will be read next, but does
3600 * not increment the iterator.
3601 */
3602struct ring_buffer_event *
3603ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3604{
3605 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3606 struct ring_buffer_event *event;
3607 unsigned long flags;
3608
3609 again:
3610 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3611 event = rb_iter_peek(iter, ts);
3612 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3613
3614 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3615 goto again;
3616
3617 return event;
3618}
3619
3620/**
3621 * ring_buffer_consume - return an event and consume it
3622 * @buffer: The ring buffer to get the next event from
3623 * @cpu: the cpu to read the buffer from
3624 * @ts: a variable to store the timestamp (may be NULL)
3625 * @lost_events: a variable to store if events were lost (may be NULL)
3626 *
3627 * Returns the next event in the ring buffer, and that event is consumed.
3628 * Meaning that sequential reads will keep returning a different event,
3629 * and eventually empty the ring buffer if the producer is slower.
3630 */
3631struct ring_buffer_event *
3632ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
3633 unsigned long *lost_events)
3634{
3635 struct ring_buffer_per_cpu *cpu_buffer;
3636 struct ring_buffer_event *event = NULL;
3637 unsigned long flags;
3638 int dolock;
3639
3640 dolock = rb_ok_to_lock();
3641
3642 again:
3643 /* might be called in atomic */
3644 preempt_disable();
3645
3646 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3647 goto out;
3648
3649 cpu_buffer = buffer->buffers[cpu];
3650 local_irq_save(flags);
3651 if (dolock)
3652 raw_spin_lock(&cpu_buffer->reader_lock);
3653
3654 event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3655 if (event) {
3656 cpu_buffer->lost_events = 0;
3657 rb_advance_reader(cpu_buffer);
3658 }
3659
3660 if (dolock)
3661 raw_spin_unlock(&cpu_buffer->reader_lock);
3662 local_irq_restore(flags);
3663
3664 out:
3665 preempt_enable();
3666
3667 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3668 goto again;
3669
3670 return event;
3671}
3672EXPORT_SYMBOL_GPL(ring_buffer_consume);
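
/*
 * Example consuming-read loop (sketch; process() is a hypothetical
 * callback standing in for whatever the caller does with the data):
 *
 *	struct ring_buffer_event *event;
 *	unsigned long lost;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
 *		if (lost)
 *			pr_info("lost %lu events\n", lost);
 *		process(ring_buffer_event_data(event),
 *			ring_buffer_event_length(event));
 *	}
 *
 * Each successful call removes the returned event from the per cpu
 * buffer, so the loop drains that cpu's buffer.
 */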
3673
3674/**
3675 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
3676 * @buffer: The ring buffer to read from
3677 * @cpu: The cpu buffer to iterate over
3678 *
3679 * This performs the initial preparations necessary to iterate
3680 * through the buffer. Memory is allocated, buffer recording
3681 * is disabled, and the iterator pointer is returned to the caller.
3682 *
3683 * Disabling buffer recording prevents the reading from being
3684 * corrupted. This is not a consuming read, so a producer is not
3685 * expected.
3686 *
3687 * After a sequence of ring_buffer_read_prepare calls, the user is
3688 * expected to make at least one call to ring_buffer_prepare_sync.
3689 * Afterwards, ring_buffer_read_start is invoked to get things going
3690 * for real.
3691 *
3692 * This overall must be paired with ring_buffer_read_finish.
3693 */
3694struct ring_buffer_iter *
3695ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
3696{
3697 struct ring_buffer_per_cpu *cpu_buffer;
3698 struct ring_buffer_iter *iter;
3699
3700 if (!cpumask_test_cpu(cpu, buffer->cpumask))
3701 return NULL;
3702
3703 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3704 if (!iter)
3705 return NULL;
3706
3707 cpu_buffer = buffer->buffers[cpu];
3708
3709 iter->cpu_buffer = cpu_buffer;
3710
3711 atomic_inc(&buffer->resize_disabled);
3712 atomic_inc(&cpu_buffer->record_disabled);
3713
3714 return iter;
3715}
3716EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
3717
3718/**
3719 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
3720 *
3721 * All previously invoked ring_buffer_read_prepare calls to prepare
3722 * iterators will be synchronized. Afterwards, ring_buffer_read_start
3723 * calls on those iterators are allowed.
3724 */
3725void
3726ring_buffer_read_prepare_sync(void)
3727{
3728 synchronize_sched();
3729}
3730EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
3731
3732/**
3733 * ring_buffer_read_start - start a non consuming read of the buffer
3734 * @iter: The iterator returned by ring_buffer_read_prepare
3735 *
3736 * This finalizes the startup of an iteration through the buffer.
3737 * The iterator comes from a call to ring_buffer_read_prepare and
3738 * an intervening ring_buffer_read_prepare_sync must have been
3739 * performed.
3740 *
3741 * Must be paired with ring_buffer_read_finish.
3742 */
3743void
3744ring_buffer_read_start(struct ring_buffer_iter *iter)
3745{
3746 struct ring_buffer_per_cpu *cpu_buffer;
3747 unsigned long flags;
3748
3749 if (!iter)
3750 return;
3751
3752 cpu_buffer = iter->cpu_buffer;
3753
3754 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3755 arch_spin_lock(&cpu_buffer->lock);
3756 rb_iter_reset(iter);
3757 arch_spin_unlock(&cpu_buffer->lock);
3758 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3759}
3760EXPORT_SYMBOL_GPL(ring_buffer_read_start);
3761
3762/**
3763 * ring_buffer_read_finish - finish reading the iterator of the buffer
3764 * @iter: The iterator retrieved by ring_buffer_read_prepare
3765 *
3766 * This re-enables the recording to the buffer, and frees the
3767 * iterator.
3768 */
3769void
3770ring_buffer_read_finish(struct ring_buffer_iter *iter)
3771{
3772 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3773
3774 /*
3775	 * Ring buffer is disabled from recording; here's a good place
3776 * to check the integrity of the ring buffer.
3777 */
3778 rb_check_pages(cpu_buffer);
3779
3780 atomic_dec(&cpu_buffer->record_disabled);
3781 atomic_dec(&cpu_buffer->buffer->resize_disabled);
3782 kfree(iter);
3783}
3784EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
3785
/**
 * ring_buffer_read - read the next item in the ring buffer by the iterator
 * @iter: The ring buffer iterator
 * @ts: The time stamp of the event read.
 *
 * This reads the next event in the ring buffer and increments the iterator.
 */
struct ring_buffer_event *
ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_event *event;
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 again:
	event = rb_iter_peek(iter, ts);
	if (!event)
		goto out;

	if (event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	rb_advance_iter(iter);
 out:
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_read);

/**
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 * @cpu: The CPU to get ring buffer size from.
 */
unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
{
	/*
	 * Earlier, this method returned
	 *	BUF_PAGE_SIZE * buffer->nr_pages
	 * Since the nr_pages field is now removed, we have converted this to
	 * return the per cpu buffer value.
	 */
	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 0;

	return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
}
EXPORT_SYMBOL_GPL(ring_buffer_size);

static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
{
	rb_head_page_deactivate(cpu_buffer);

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	local_set(&cpu_buffer->head_page->write, 0);
	local_set(&cpu_buffer->head_page->entries, 0);
	local_set(&cpu_buffer->head_page->page->commit, 0);

	cpu_buffer->head_page->read = 0;

	cpu_buffer->tail_page = cpu_buffer->head_page;
	cpu_buffer->commit_page = cpu_buffer->head_page;

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	INIT_LIST_HEAD(&cpu_buffer->new_pages);
	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->entries, 0);
	local_set(&cpu_buffer->reader_page->page->commit, 0);
	cpu_buffer->reader_page->read = 0;

	local_set(&cpu_buffer->commit_overrun, 0);
	local_set(&cpu_buffer->entries_bytes, 0);
	local_set(&cpu_buffer->overrun, 0);
	local_set(&cpu_buffer->entries, 0);
	local_set(&cpu_buffer->committing, 0);
	local_set(&cpu_buffer->commits, 0);
	cpu_buffer->read = 0;
	cpu_buffer->read_bytes = 0;

	cpu_buffer->write_stamp = 0;
	cpu_buffer->read_stamp = 0;

	cpu_buffer->lost_events = 0;
	cpu_buffer->last_overrun = 0;

	rb_head_page_activate(cpu_buffer);
}

/**
 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
 * @buffer: The ring buffer to reset a per cpu buffer of
 * @cpu: The CPU buffer to be reset
 */
void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return;

	atomic_inc(&buffer->resize_disabled);
	atomic_inc(&cpu_buffer->record_disabled);

	/* Make sure all commits have finished */
	synchronize_sched();

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
		goto out;

	arch_spin_lock(&cpu_buffer->lock);

	rb_reset_cpu(cpu_buffer);

	arch_spin_unlock(&cpu_buffer->lock);

 out:
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	atomic_dec(&cpu_buffer->record_disabled);
	atomic_dec(&buffer->resize_disabled);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);

/**
 * ring_buffer_reset - reset a ring buffer
 * @buffer: The ring buffer to reset all cpu buffers
 */
void ring_buffer_reset(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		ring_buffer_reset_cpu(buffer, cpu);
}
EXPORT_SYMBOL_GPL(ring_buffer_reset);

/**
 * ring_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
int ring_buffer_empty(struct ring_buffer *buffer)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;
	int dolock;
	int cpu;
	int ret;

	dolock = rb_ok_to_lock();

	/* yes this is racy, but if you don't like the race, lock the buffer */
	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		local_irq_save(flags);
		if (dolock)
			raw_spin_lock(&cpu_buffer->reader_lock);
		ret = rb_per_cpu_empty(cpu_buffer);
		if (dolock)
			raw_spin_unlock(&cpu_buffer->reader_lock);
		local_irq_restore(flags);

		if (!ret)
			return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty);

/**
 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;
	int dolock;
	int ret;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return 1;

	dolock = rb_ok_to_lock();

	cpu_buffer = buffer->buffers[cpu];
	local_irq_save(flags);
	if (dolock)
		raw_spin_lock(&cpu_buffer->reader_lock);
	ret = rb_per_cpu_empty(cpu_buffer);
	if (dolock)
		raw_spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
/**
 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
 * @buffer_a: One buffer to swap with
 * @buffer_b: The other buffer to swap with
 * @cpu: The CPU buffer to swap
 *
 * This function is useful for tracers that want to take a "snapshot"
 * of a CPU buffer and have another backup buffer lying around.
 * It is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
			 struct ring_buffer *buffer_b, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer_a;
	struct ring_buffer_per_cpu *cpu_buffer_b;
	int ret = -EINVAL;

	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
		goto out;

	cpu_buffer_a = buffer_a->buffers[cpu];
	cpu_buffer_b = buffer_b->buffers[cpu];

	/* At least make sure the two buffers are somewhat the same */
	if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
		goto out;

	ret = -EAGAIN;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		goto out;

	if (atomic_read(&buffer_a->record_disabled))
		goto out;

	if (atomic_read(&buffer_b->record_disabled))
		goto out;

	if (atomic_read(&cpu_buffer_a->record_disabled))
		goto out;

	if (atomic_read(&cpu_buffer_b->record_disabled))
		goto out;

	/*
	 * We can't do a synchronize_sched here because this
	 * function can be called in atomic context.
	 * Normally this will be called from the same CPU as cpu.
	 * If not it's up to the caller to protect this.
	 */
	atomic_inc(&cpu_buffer_a->record_disabled);
	atomic_inc(&cpu_buffer_b->record_disabled);

	ret = -EBUSY;
	if (local_read(&cpu_buffer_a->committing))
		goto out_dec;
	if (local_read(&cpu_buffer_b->committing))
		goto out_dec;

	buffer_a->buffers[cpu] = cpu_buffer_b;
	buffer_b->buffers[cpu] = cpu_buffer_a;

	cpu_buffer_b->buffer = buffer_a;
	cpu_buffer_a->buffer = buffer_b;

	ret = 0;

out_dec:
	atomic_dec(&cpu_buffer_a->record_disabled);
	atomic_dec(&cpu_buffer_b->record_disabled);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */

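/*
 * Illustrative only (requires CONFIG_RING_BUFFER_ALLOW_SWAP): a tracer
 * holding a spare buffer of the same size might snapshot the current
 * CPU's buffer roughly like this; main_buffer and snapshot_buffer are
 * placeholder names, not part of this file:
 *
 *	int cpu = raw_smp_processor_id();
 *	int ret;
 *
 *	ret = ring_buffer_swap_cpu(snapshot_buffer, main_buffer, cpu);
 *	if (ret < 0)
 *		return ret;
 *
 * A negative return (-EINVAL, -EAGAIN or -EBUSY) means the buffers were
 * left untouched; on success the two per-CPU buffers have traded places
 * and the old contents can be read from snapshot_buffer at leisure.
 */
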
/**
 * ring_buffer_alloc_read_page - allocate a page to read from buffer
 * @buffer: the buffer to allocate for.
 * @cpu: the cpu buffer to allocate the page for.
 *
 * This function is used in conjunction with ring_buffer_read_page.
 * When reading a full page from the ring buffer, these functions
 * can be used to speed up the process. The calling function should
 * allocate a few pages first with this function. Then when it
 * needs to get pages from the ring buffer, it passes the result
 * of this function into ring_buffer_read_page, which will swap
 * the allocated page with the read page of the buffer.
 *
 * Returns:
 *  The page allocated, or NULL on error.
 */
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
{
	struct buffer_data_page *bpage;
	struct page *page;

	page = alloc_pages_node(cpu_to_node(cpu),
				GFP_KERNEL | __GFP_NORETRY, 0);
	if (!page)
		return NULL;

	bpage = page_address(page);

	rb_init_page(bpage);

	return bpage;
}
EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);

/**
 * ring_buffer_free_read_page - free an allocated read page
 * @buffer: the buffer the page was allocated for
 * @data: the page to free
 *
 * Free a page allocated from ring_buffer_alloc_read_page.
 */
void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
{
	free_page((unsigned long)data);
}
EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);

/**
 * ring_buffer_read_page - extract a page from the ring buffer
 * @buffer: buffer to extract from
 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
 * @len: amount to extract
 * @cpu: the cpu of the buffer to extract
 * @full: should the extraction only happen when the page is full.
 *
 * This function will pull out a page from the ring buffer and consume it.
 * @data_page must be the address of the variable that was returned
 * from ring_buffer_alloc_read_page. This is because the page might be used
 * to swap with a page in the ring buffer.
 *
 * for example:
 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
 *	if (!rpage)
 *		return error;
 *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
 *	if (ret >= 0)
 *		process_page(rpage, ret);
 *
 * When @full is set, the function will not return data unless
 * the writer is off the reader page.
 *
 * Note: it is up to the calling functions to handle sleeps and wakeups.
 *  The ring buffer can be used anywhere in the kernel and can not
 *  blindly call wake_up. The layer that uses the ring buffer must be
 *  responsible for that.
 *
 * Returns:
 *  >=0 if data has been transferred, returns the offset of consumed data.
 *  <0 if no data has been transferred.
 */
int ring_buffer_read_page(struct ring_buffer *buffer,
			  void **data_page, size_t len, int cpu, int full)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	struct buffer_data_page *bpage;
	struct buffer_page *reader;
	unsigned long missed_events;
	unsigned long flags;
	unsigned int commit;
	unsigned int read;
	u64 save_timestamp;
	int ret = -1;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	/*
	 * If len is not big enough to hold the page header, then
	 * we can not copy anything.
	 */
	if (len <= BUF_PAGE_HDR_SIZE)
		goto out;

	len -= BUF_PAGE_HDR_SIZE;

	if (!data_page)
		goto out;

	bpage = *data_page;
	if (!bpage)
		goto out;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		goto out_unlock;

	event = rb_reader_event(cpu_buffer);

	read = reader->read;
	commit = rb_page_commit(reader);

	/* Check if any events were dropped */
	missed_events = cpu_buffer->lost_events;

	/*
	 * If this page has been partially read or
	 * if len is not big enough to read the rest of the page or
	 * a writer is still on the page, then
	 * we must copy the data from the page to the buffer.
	 * Otherwise, we can simply swap the page with the one passed in.
	 */
	if (read || (len < (commit - read)) ||
	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
		unsigned int rpos = read;
		unsigned int pos = 0;
		unsigned int size;

		/* The caller asked for a full page; a partial copy will not do */
		if (full)
			goto out_unlock;

		if (len > (commit - read))
			len = (commit - read);

		/* Always keep the time extend and data together */
		size = rb_event_ts_length(event);

		if (len < size)
			goto out_unlock;

		/* save the current timestamp, since the user will need it */
		save_timestamp = cpu_buffer->read_stamp;

		/* Need to copy one event at a time */
		do {
			/* We need the size of one event, because
			 * rb_advance_reader only advances by one event,
			 * whereas rb_event_ts_length may include the size of
			 * one or two events.
			 * We have already ensured there's enough space if this
			 * is a time extend. */
			size = rb_event_length(event);
			memcpy(bpage->data + pos, rpage->data + rpos, size);

			len -= size;

			rb_advance_reader(cpu_buffer);
			rpos = reader->read;
			pos += size;

			if (rpos >= commit)
				break;

			event = rb_reader_event(cpu_buffer);
			/* Always keep the time extend and data together */
			size = rb_event_ts_length(event);
		} while (len >= size);

		/* update bpage */
		local_set(&bpage->commit, pos);
		bpage->time_stamp = save_timestamp;

		/* we copied everything to the beginning */
		read = 0;
	} else {
		/* update the entry counter */
		cpu_buffer->read += rb_page_entries(reader);
		cpu_buffer->read_bytes += BUF_PAGE_SIZE;

		/* swap the pages */
		rb_init_page(bpage);
		bpage = reader->page;
		reader->page = *data_page;
		local_set(&reader->write, 0);
		local_set(&reader->entries, 0);
		reader->read = 0;
		*data_page = bpage;

		/*
		 * Use the real_end for the data size.
		 * This gives us a chance to store the lost events
		 * on the page.
		 */
		if (reader->real_end)
			local_set(&bpage->commit, reader->real_end);
	}
	ret = read;

	cpu_buffer->lost_events = 0;

	commit = local_read(&bpage->commit);
	/*
	 * Set a flag in the commit field if we lost events
	 */
	if (missed_events) {
		/* If there is room at the end of the page to save the
		 * missed events, then record it there.
		 */
		if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
			memcpy(&bpage->data[commit], &missed_events,
			       sizeof(missed_events));
			local_add(RB_MISSED_STORED, &bpage->commit);
			commit += sizeof(missed_events);
		}
		local_add(RB_MISSED_EVENTS, &bpage->commit);
	}

	/*
	 * This page may be off to user land. Zero it out here.
	 */
	if (commit < BUF_PAGE_SIZE)
		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);

 out_unlock:
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

 out:
	return ret;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_page);

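/*
 * An illustrative end-to-end sketch of the page-read interface (the
 * process_page() helper and the error value are placeholders, not part
 * of this file):
 *
 *	void *rpage;
 *	int ret;
 *
 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
 *	if (!rpage)
 *		return -ENOMEM;
 *	ret = ring_buffer_read_page(buffer, &rpage, PAGE_SIZE, cpu, 0);
 *	if (ret >= 0)
 *		process_page(rpage, ret);
 *	ring_buffer_free_read_page(buffer, rpage);
 *
 * Note that ring_buffer_read_page() may swap the page behind &rpage, so
 * the page eventually freed need not be the one originally allocated.
 */
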
#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu)
{
	struct ring_buffer *buffer =
		container_of(self, struct ring_buffer, cpu_notify);
	long cpu = (long)hcpu;
	int cpu_i, nr_pages_same;
	unsigned int nr_pages;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (cpumask_test_cpu(cpu, buffer->cpumask))
			return NOTIFY_OK;

		nr_pages = 0;
		nr_pages_same = 1;
		/* check if all cpu sizes are same */
		for_each_buffer_cpu(buffer, cpu_i) {
			/* fill in the size from first enabled cpu */
			if (nr_pages == 0)
				nr_pages = buffer->buffers[cpu_i]->nr_pages;
			if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
				nr_pages_same = 0;
				break;
			}
		}
		/* allocate minimum pages, user can later expand it */
		if (!nr_pages_same)
			nr_pages = 2;
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
		if (!buffer->buffers[cpu]) {
			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
			     cpu);
			return NOTIFY_OK;
		}
		smp_wmb();
		cpumask_set_cpu(cpu, buffer->cpumask);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		/*
		 * Do nothing.
		 *  If we were to free the buffer, then the user would
		 *  lose any trace that was in the buffer.
		 */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
#endif