   1/*
   2 * Generic ring buffer
   3 *
   4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
   5 */
   6#include <linux/ftrace_event.h>
   7#include <linux/ring_buffer.h>
   8#include <linux/trace_clock.h>
   9#include <linux/trace_seq.h>
  10#include <linux/spinlock.h>
  11#include <linux/irq_work.h>
  12#include <linux/debugfs.h>
  13#include <linux/uaccess.h>
  14#include <linux/hardirq.h>
  15#include <linux/kthread.h>	/* for self test */
  16#include <linux/kmemcheck.h>
  17#include <linux/module.h>
  18#include <linux/percpu.h>
  19#include <linux/mutex.h>
  20#include <linux/delay.h>
  21#include <linux/slab.h>
  22#include <linux/init.h>
  23#include <linux/hash.h>
  24#include <linux/list.h>
  25#include <linux/cpu.h>
  26#include <linux/fs.h>
  27
  28#include <asm/local.h>
  29
  30static void update_pages_handler(struct work_struct *work);
  31
  32/*
   33 * The ring buffer header is special. We must manually keep it up to date.
  34 */
  35int ring_buffer_print_entry_header(struct trace_seq *s)
  36{
  37	int ret;
  38
  39	ret = trace_seq_puts(s, "# compressed entry header\n");
  40	ret = trace_seq_puts(s, "\ttype_len    :    5 bits\n");
  41	ret = trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
  42	ret = trace_seq_puts(s, "\tarray       :   32 bits\n");
  43	ret = trace_seq_putc(s, '\n');
  44	ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
  45			       RINGBUF_TYPE_PADDING);
  46	ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
  47			       RINGBUF_TYPE_TIME_EXTEND);
  48	ret = trace_seq_printf(s, "\tdata max type_len  == %d\n",
  49			       RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
  50
  51	return ret;
  52}
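/*
 * Editor's note: a minimal sketch (not part of the kernel source) of how the
 * compressed header printed above could be unpacked from a raw 32-bit word,
 * assuming type_len occupies the low 5 bits and time_delta the remaining 27.
 * The real layout is the bitfield in struct ring_buffer_event; the
 * rb_sketch_* helpers are hypothetical names used only for illustration.
 */
static inline unsigned int rb_sketch_type_len(u32 header)
{
	return header & ((1U << 5) - 1);	/* low 5 bits */
}

static inline unsigned int rb_sketch_time_delta(u32 header)
{
	return header >> 5;			/* remaining 27 bits */
}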
  53
  54/*
  55 * The ring buffer is made up of a list of pages. A separate list of pages is
  56 * allocated for each CPU. A writer may only write to a buffer that is
  57 * associated with the CPU it is currently executing on.  A reader may read
  58 * from any per cpu buffer.
  59 *
  60 * The reader is special. For each per cpu buffer, the reader has its own
  61 * reader page. When a reader has read the entire reader page, this reader
  62 * page is swapped with another page in the ring buffer.
  63 *
  64 * Now, as long as the writer is off the reader page, the reader can do what
  65 * ever it wants with that page. The writer will never write to that page
  66 * again (as long as it is out of the ring buffer).
  67 *
  68 * Here's some silly ASCII art.
  69 *
  70 *   +------+
  71 *   |reader|          RING BUFFER
  72 *   |page  |
  73 *   +------+        +---+   +---+   +---+
  74 *                   |   |-->|   |-->|   |
  75 *                   +---+   +---+   +---+
  76 *                     ^               |
  77 *                     |               |
  78 *                     +---------------+
  79 *
  80 *
  81 *   +------+
  82 *   |reader|          RING BUFFER
  83 *   |page  |------------------v
  84 *   +------+        +---+   +---+   +---+
  85 *                   |   |-->|   |-->|   |
  86 *                   +---+   +---+   +---+
  87 *                     ^               |
  88 *                     |               |
  89 *                     +---------------+
  90 *
  91 *
  92 *   +------+
  93 *   |reader|          RING BUFFER
  94 *   |page  |------------------v
  95 *   +------+        +---+   +---+   +---+
  96 *      ^            |   |-->|   |-->|   |
  97 *      |            +---+   +---+   +---+
  98 *      |                              |
  99 *      |                              |
 100 *      +------------------------------+
 101 *
 102 *
 103 *   +------+
 104 *   |buffer|          RING BUFFER
 105 *   |page  |------------------v
 106 *   +------+        +---+   +---+   +---+
 107 *      ^            |   |   |   |-->|   |
 108 *      |   New      +---+   +---+   +---+
 109 *      |  Reader------^               |
 110 *      |   page                       |
 111 *      +------------------------------+
 112 *
 113 *
 114 * After we make this swap, the reader can hand this page off to the splice
 115 * code and be done with it. It can even allocate a new page if it needs to
 116 * and swap that into the ring buffer.
 117 *
 118 * We will be using cmpxchg soon to make all this lockless.
 119 *
 120 */
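/*
 * Editor's note: a minimal sketch (not part of the kernel source) of the
 * reader-page swap described above, modeled on a plain circular singly
 * linked list. The real swap is lockless and uses cmpxchg on tagged list
 * pointers; struct rb_sketch_page and rb_sketch_swap_reader() are
 * hypothetical names used only for illustration.
 */
struct rb_sketch_page {
	struct rb_sketch_page *next;
};

static inline struct rb_sketch_page *
rb_sketch_swap_reader(struct rb_sketch_page *reader,
		      struct rb_sketch_page *prev_of_head)
{
	struct rb_sketch_page *head = prev_of_head->next;

	reader->next = head->next;	/* reader page takes the head's place */
	prev_of_head->next = reader;	/* and is linked into the ring */
	head->next = NULL;		/* old head page is now outside the ring */
	return head;			/* the caller reads from the old head */
}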
 121
 122/*
 123 * A fast way to enable or disable all ring buffers is to
 124 * call tracing_on or tracing_off. Turning off the ring buffers
 125 * prevents all ring buffers from being recorded to.
  126 * Turning this switch on makes it OK to write to the
 127 * ring buffer, if the ring buffer is enabled itself.
 128 *
  129 * There are three layers that must be on in order to write
 130 * to the ring buffer.
 131 *
 132 * 1) This global flag must be set.
 133 * 2) The ring buffer must be enabled for recording.
 134 * 3) The per cpu buffer must be enabled for recording.
 135 *
 136 * In case of an anomaly, this global flag has a bit set that
  137 * will permanently disable all ring buffers.
 138 */
 139
 140/*
 141 * Global flag to disable all recording to ring buffers
 142 *  This has two bits: ON, DISABLED
 143 *
 144 *  ON   DISABLED
 145 * ---- ----------
 146 *   0      0        : ring buffers are off
 147 *   1      0        : ring buffers are on
 148 *   X      1        : ring buffers are permanently disabled
 149 */
 150
 151enum {
 152	RB_BUFFERS_ON_BIT	= 0,
 153	RB_BUFFERS_DISABLED_BIT	= 1,
 154};
 155
 156enum {
 157	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
 158	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
 159};
 160
 161static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
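/*
 * Editor's note: a minimal sketch (not part of the kernel source) of how
 * the ON/DISABLED bit pair above combines into a "may we record?" test.
 * rb_sketch_recording_allowed() is a hypothetical helper for illustration.
 */
static inline int rb_sketch_recording_allowed(unsigned long flags)
{
	return (flags & RB_BUFFERS_ON) && !(flags & RB_BUFFERS_DISABLED);
}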
 162
 163/* Used for individual buffers (after the counter) */
 164#define RB_BUFFER_OFF		(1 << 20)
 165
 166#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
 167
 168/**
 169 * tracing_off_permanent - permanently disable ring buffers
 170 *
 171 * This function, once called, will disable all ring buffers
 172 * permanently.
 173 */
 174void tracing_off_permanent(void)
 175{
 176	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
 177}
 178
 179#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
 180#define RB_ALIGNMENT		4U
 181#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 182#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */
 183
 184#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
 185# define RB_FORCE_8BYTE_ALIGNMENT	0
 186# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
 187#else
 188# define RB_FORCE_8BYTE_ALIGNMENT	1
 189# define RB_ARCH_ALIGNMENT		8U
 190#endif
 191
 192#define RB_ALIGN_DATA		__aligned(RB_ARCH_ALIGNMENT)
 193
 194/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
 195#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
 196
 197enum {
 198	RB_LEN_TIME_EXTEND = 8,
 199	RB_LEN_TIME_STAMP = 16,
 200};
 201
 202#define skip_time_extend(event) \
 203	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
 204
 205static inline int rb_null_event(struct ring_buffer_event *event)
 206{
 207	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
 208}
 209
 210static void rb_event_set_padding(struct ring_buffer_event *event)
 211{
 212	/* padding has a NULL time_delta */
 213	event->type_len = RINGBUF_TYPE_PADDING;
 214	event->time_delta = 0;
 215}
 216
 217static unsigned
 218rb_event_data_length(struct ring_buffer_event *event)
 219{
 220	unsigned length;
 221
 222	if (event->type_len)
 223		length = event->type_len * RB_ALIGNMENT;
 224	else
 225		length = event->array[0];
 226	return length + RB_EVNT_HDR_SIZE;
 227}
 228
 229/*
 230 * Return the length of the given event. Will return
 231 * the length of the time extend if the event is a
 232 * time extend.
 233 */
 234static inline unsigned
 235rb_event_length(struct ring_buffer_event *event)
 236{
 237	switch (event->type_len) {
 238	case RINGBUF_TYPE_PADDING:
 239		if (rb_null_event(event))
 240			/* undefined */
 241			return -1;
 242		return  event->array[0] + RB_EVNT_HDR_SIZE;
 243
 244	case RINGBUF_TYPE_TIME_EXTEND:
 245		return RB_LEN_TIME_EXTEND;
 246
 247	case RINGBUF_TYPE_TIME_STAMP:
 248		return RB_LEN_TIME_STAMP;
 249
 250	case RINGBUF_TYPE_DATA:
 251		return rb_event_data_length(event);
 252	default:
 253		BUG();
 254	}
 255	/* not hit */
 256	return 0;
 257}
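/*
 * Editor's note: a worked sketch (not part of the kernel source). With
 * RB_ALIGNMENT == 4 and RB_EVNT_HDR_SIZE == 4, a data event with
 * type_len == 3 occupies 3 * 4 + 4 == 16 bytes, while type_len == 0 means
 * the payload length is stored in array[0] instead ("big" data events).
 * rb_sketch_small_event_len() is a hypothetical helper.
 */
static inline unsigned rb_sketch_small_event_len(unsigned type_len)
{
	/* valid only for 1 <= type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX */
	return type_len * RB_ALIGNMENT + RB_EVNT_HDR_SIZE;
}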
 258
 259/*
 260 * Return total length of time extend and data,
 261 *   or just the event length for all other events.
 262 */
 263static inline unsigned
 264rb_event_ts_length(struct ring_buffer_event *event)
 265{
 266	unsigned len = 0;
 267
 268	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
 269		/* time extends include the data event after it */
 270		len = RB_LEN_TIME_EXTEND;
 271		event = skip_time_extend(event);
 272	}
 273	return len + rb_event_length(event);
 274}
 275
 276/**
 277 * ring_buffer_event_length - return the length of the event
 278 * @event: the event to get the length of
 279 *
  280 * Returns the size of the data payload of a data event.
  281 * If the event is something other than a data event, it
  282 * returns the size of the event itself. The exception is
  283 * a TIME_EXTEND event, for which it returns the size of the
  284 * data payload of the data event that follows it.
 285 */
 286unsigned ring_buffer_event_length(struct ring_buffer_event *event)
 287{
 288	unsigned length;
 289
 290	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
 291		event = skip_time_extend(event);
 292
 293	length = rb_event_length(event);
 294	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 295		return length;
 296	length -= RB_EVNT_HDR_SIZE;
 297	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
  298		length -= sizeof(event->array[0]);
 299	return length;
 300}
 301EXPORT_SYMBOL_GPL(ring_buffer_event_length);
 302
 303/* inline for ring buffer fast paths */
 304static void *
 305rb_event_data(struct ring_buffer_event *event)
 306{
 307	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
 308		event = skip_time_extend(event);
 309	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
 310	/* If length is in len field, then array[0] has the data */
 311	if (event->type_len)
 312		return (void *)&event->array[0];
 313	/* Otherwise length is in array[0] and array[1] has the data */
 314	return (void *)&event->array[1];
 315}
 316
 317/**
 318 * ring_buffer_event_data - return the data of the event
 319 * @event: the event to get the data from
 320 */
 321void *ring_buffer_event_data(struct ring_buffer_event *event)
 322{
 323	return rb_event_data(event);
 324}
 325EXPORT_SYMBOL_GPL(ring_buffer_event_data);
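/*
 * Editor's note: a usage sketch (not part of the kernel source) pairing
 * ring_buffer_event_length() with ring_buffer_event_data() to dump an
 * event's payload. rb_sketch_dump_event() is a hypothetical helper;
 * print_hex_dump_bytes() is the standard printk hex-dump helper.
 */
static inline void rb_sketch_dump_event(struct ring_buffer_event *event)
{
	unsigned len = ring_buffer_event_length(event);
	void *payload = ring_buffer_event_data(event);

	print_hex_dump_bytes("rb: ", DUMP_PREFIX_OFFSET, payload, len);
}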
 326
 327#define for_each_buffer_cpu(buffer, cpu)		\
 328	for_each_cpu(cpu, buffer->cpumask)
 329
 330#define TS_SHIFT	27
 331#define TS_MASK		((1ULL << TS_SHIFT) - 1)
 332#define TS_DELTA_TEST	(~TS_MASK)
 333
 334/* Flag when events were overwritten */
 335#define RB_MISSED_EVENTS	(1 << 31)
 336/* Missed count stored at end */
 337#define RB_MISSED_STORED	(1 << 30)
 338
 339struct buffer_data_page {
 340	u64		 time_stamp;	/* page time stamp */
 341	local_t		 commit;	/* write committed index */
 342	unsigned char	 data[] RB_ALIGN_DATA;	/* data of buffer page */
 343};
 344
 345/*
 346 * Note, the buffer_page list must be first. The buffer pages
 347 * are allocated in cache lines, which means that each buffer
 348 * page will be at the beginning of a cache line, and thus
 349 * the least significant bits will be zero. We use this to
 350 * add flags in the list struct pointers, to make the ring buffer
 351 * lockless.
 352 */
 353struct buffer_page {
 354	struct list_head list;		/* list of buffer pages */
 355	local_t		 write;		/* index for next write */
 356	unsigned	 read;		/* index for next read */
 357	local_t		 entries;	/* entries on this page */
 358	unsigned long	 real_end;	/* real end of data */
 359	struct buffer_data_page *page;	/* Actual data page */
 360};
 361
 362/*
 363 * The buffer page counters, write and entries, must be reset
 364 * atomically when crossing page boundaries. To synchronize this
 365 * update, two counters are inserted into the number. One is
 366 * the actual counter for the write position or count on the page.
 367 *
 368 * The other is a counter of updaters. Before an update happens
 369 * the update partition of the counter is incremented. This will
 370 * allow the updater to update the counter atomically.
 371 *
 372 * The counter is 20 bits, and the state data is 12.
 373 */
 374#define RB_WRITE_MASK		0xfffff
 375#define RB_WRITE_INTCNT		(1 << 20)
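/*
 * Editor's note: a minimal sketch (not part of the kernel source) of how
 * the split counter described above is read back: the low 20 bits are the
 * write position, everything above counts in-flight updaters.
 * rb_sketch_write_pos()/rb_sketch_updaters() are hypothetical helpers.
 */
static inline unsigned long rb_sketch_write_pos(unsigned long write)
{
	return write & RB_WRITE_MASK;
}

static inline unsigned long rb_sketch_updaters(unsigned long write)
{
	return write >> 20;
}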
 376
 377static void rb_init_page(struct buffer_data_page *bpage)
 378{
 379	local_set(&bpage->commit, 0);
 380}
 381
 382/**
 383 * ring_buffer_page_len - the size of data on the page.
 384 * @page: The page to read
 385 *
 386 * Returns the amount of data on the page, including buffer page header.
 387 */
 388size_t ring_buffer_page_len(void *page)
 389{
 390	return local_read(&((struct buffer_data_page *)page)->commit)
 391		+ BUF_PAGE_HDR_SIZE;
 392}
 393
 394/*
 395 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 396 * this issue out.
 397 */
 398static void free_buffer_page(struct buffer_page *bpage)
 399{
 400	free_page((unsigned long)bpage->page);
 401	kfree(bpage);
 402}
 403
 404/*
 405 * We need to fit the time_stamp delta into 27 bits.
 406 */
 407static inline int test_time_stamp(u64 delta)
 408{
 409	if (delta & TS_DELTA_TEST)
 410		return 1;
 411	return 0;
 412}
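/*
 * Editor's note: a minimal sketch (not part of the kernel source) of how a
 * delta too large for the 27-bit time_delta field could be split between
 * the event header and a TIME_EXTEND event, mirroring TS_SHIFT/TS_MASK
 * above. rb_sketch_split_delta() is a hypothetical helper.
 */
static inline void rb_sketch_split_delta(u64 delta, u32 *low, u64 *high)
{
	*low  = delta & TS_MASK;	/* fits in the header's 27 bits */
	*high = delta >> TS_SHIFT;	/* carried by the TIME_EXTEND event */
}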
 413
 414#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
 415
  416/* Max payload is BUF_PAGE_SIZE - header (8 bytes) */
 417#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
 418
 419int ring_buffer_print_page_header(struct trace_seq *s)
 420{
 421	struct buffer_data_page field;
 422	int ret;
 423
 424	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
 425			       "offset:0;\tsize:%u;\tsigned:%u;\n",
 426			       (unsigned int)sizeof(field.time_stamp),
 427			       (unsigned int)is_signed_type(u64));
 428
 429	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
 430			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
 431			       (unsigned int)offsetof(typeof(field), commit),
 432			       (unsigned int)sizeof(field.commit),
 433			       (unsigned int)is_signed_type(long));
 434
 435	ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
 436			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
 437			       (unsigned int)offsetof(typeof(field), commit),
 438			       1,
 439			       (unsigned int)is_signed_type(long));
 440
 441	ret = trace_seq_printf(s, "\tfield: char data;\t"
 442			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
 443			       (unsigned int)offsetof(typeof(field), data),
 444			       (unsigned int)BUF_PAGE_SIZE,
 445			       (unsigned int)is_signed_type(char));
 446
 447	return ret;
 448}
 449
 450struct rb_irq_work {
 451	struct irq_work			work;
 452	wait_queue_head_t		waiters;
 453	bool				waiters_pending;
 454};
 455
 456/*
  457 * If head_page == tail_page && head == tail, then the buffer is empty.
 458 */
 459struct ring_buffer_per_cpu {
 460	int				cpu;
 461	atomic_t			record_disabled;
 462	struct ring_buffer		*buffer;
 463	raw_spinlock_t			reader_lock;	/* serialize readers */
 464	arch_spinlock_t			lock;
 465	struct lock_class_key		lock_key;
 466	unsigned int			nr_pages;
 467	struct list_head		*pages;
 468	struct buffer_page		*head_page;	/* read from head */
 469	struct buffer_page		*tail_page;	/* write to tail */
 470	struct buffer_page		*commit_page;	/* committed pages */
 471	struct buffer_page		*reader_page;
 472	unsigned long			lost_events;
 473	unsigned long			last_overrun;
 474	local_t				entries_bytes;
 475	local_t				entries;
 476	local_t				overrun;
 477	local_t				commit_overrun;
 478	local_t				dropped_events;
 479	local_t				committing;
 480	local_t				commits;
 481	unsigned long			read;
 482	unsigned long			read_bytes;
 483	u64				write_stamp;
 484	u64				read_stamp;
 485	/* ring buffer pages to update, > 0 to add, < 0 to remove */
 486	int				nr_pages_to_update;
 487	struct list_head		new_pages; /* new pages to add */
 488	struct work_struct		update_pages_work;
 489	struct completion		update_done;
 490
 491	struct rb_irq_work		irq_work;
 492};
 493
 494struct ring_buffer {
 495	unsigned			flags;
 496	int				cpus;
 497	atomic_t			record_disabled;
 498	atomic_t			resize_disabled;
 499	cpumask_var_t			cpumask;
 500
 501	struct lock_class_key		*reader_lock_key;
 502
 503	struct mutex			mutex;
 504
 505	struct ring_buffer_per_cpu	**buffers;
 506
 507#ifdef CONFIG_HOTPLUG_CPU
 508	struct notifier_block		cpu_notify;
 509#endif
 510	u64				(*clock)(void);
 511
 512	struct rb_irq_work		irq_work;
 513};
 514
 515struct ring_buffer_iter {
 516	struct ring_buffer_per_cpu	*cpu_buffer;
 517	unsigned long			head;
 518	struct buffer_page		*head_page;
 519	struct buffer_page		*cache_reader_page;
 520	unsigned long			cache_read;
 521	u64				read_stamp;
 522};
 523
 524/*
 525 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 526 *
  527 * Called from irq_work context to wake up any task that is blocked on
  528 * the ring buffer waiters queue.
 529 */
 530static void rb_wake_up_waiters(struct irq_work *work)
 531{
 532	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
 533
 534	wake_up_all(&rbwork->waiters);
 535}
 536
 537/**
 538 * ring_buffer_wait - wait for input to the ring buffer
 539 * @buffer: buffer to wait on
 540 * @cpu: the cpu buffer to wait on
 541 *
 542 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 543 * as data is added to any of the @buffer's cpu buffers. Otherwise
 544 * it will wait for data to be added to a specific cpu buffer.
 545 */
 546void ring_buffer_wait(struct ring_buffer *buffer, int cpu)
 547{
 548	struct ring_buffer_per_cpu *cpu_buffer;
 549	DEFINE_WAIT(wait);
 550	struct rb_irq_work *work;
 551
 552	/*
  553	 * Depending on what the caller is waiting for (data in any
  554	 * cpu buffer or in a specific cpu buffer), put the caller on
  555	 * the appropriate wait queue.
 556	 */
 557	if (cpu == RING_BUFFER_ALL_CPUS)
 558		work = &buffer->irq_work;
 559	else {
 560		cpu_buffer = buffer->buffers[cpu];
 561		work = &cpu_buffer->irq_work;
 562	}
 563
 564
 565	prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
 566
 567	/*
 568	 * The events can happen in critical sections where
 569	 * checking a work queue can cause deadlocks.
 570	 * After adding a task to the queue, this flag is set
 571	 * only to notify events to try to wake up the queue
 572	 * using irq_work.
 573	 *
 574	 * We don't clear it even if the buffer is no longer
 575	 * empty. The flag only causes the next event to run
  576 * irq_work to do the work queue wake up. The worst
 577	 * that can happen if we race with !trace_empty() is that
 578	 * an event will cause an irq_work to try to wake up
 579	 * an empty queue.
 580	 *
 581	 * There's no reason to protect this flag either, as
 582	 * the work queue and irq_work logic will do the necessary
 583	 * synchronization for the wake ups. The only thing
 584	 * that is necessary is that the wake up happens after
  585 * a task has been queued. Spurious wake ups are OK.
 586	 */
 587	work->waiters_pending = true;
 588
 589	if ((cpu == RING_BUFFER_ALL_CPUS && ring_buffer_empty(buffer)) ||
 590	    (cpu != RING_BUFFER_ALL_CPUS && ring_buffer_empty_cpu(buffer, cpu)))
 591		schedule();
 592
 593	finish_wait(&work->waiters, &wait);
 594}
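/*
 * Editor's note: a usage sketch (not part of the kernel source). A consumer
 * can block in ring_buffer_wait() and then pull the next event with
 * ring_buffer_consume() from <linux/ring_buffer.h>. rb_sketch_wait_read()
 * is a hypothetical helper; error handling is omitted.
 */
static inline struct ring_buffer_event *
rb_sketch_wait_read(struct ring_buffer *buffer, int cpu)
{
	u64 ts;
	unsigned long lost_events;

	ring_buffer_wait(buffer, cpu);
	return ring_buffer_consume(buffer, cpu, &ts, &lost_events);
}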
 595
 596/**
 597 * ring_buffer_poll_wait - poll on buffer input
 598 * @buffer: buffer to wait on
 599 * @cpu: the cpu buffer to wait on
 600 * @filp: the file descriptor
 601 * @poll_table: The poll descriptor
 602 *
 603 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 604 * as data is added to any of the @buffer's cpu buffers. Otherwise
 605 * it will wait for data to be added to a specific cpu buffer.
 606 *
 607 * Returns POLLIN | POLLRDNORM if data exists in the buffers,
 608 * zero otherwise.
 609 */
 610int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
 611			  struct file *filp, poll_table *poll_table)
 612{
 613	struct ring_buffer_per_cpu *cpu_buffer;
 614	struct rb_irq_work *work;
 615
 616	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
 617	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
 618		return POLLIN | POLLRDNORM;
 619
 620	if (cpu == RING_BUFFER_ALL_CPUS)
 621		work = &buffer->irq_work;
 622	else {
 623		if (!cpumask_test_cpu(cpu, buffer->cpumask))
 624			return -EINVAL;
 625
 626		cpu_buffer = buffer->buffers[cpu];
 627		work = &cpu_buffer->irq_work;
 628	}
 629
 630	work->waiters_pending = true;
 631	poll_wait(filp, &work->waiters, poll_table);
 632
 633	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
 634	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
 635		return POLLIN | POLLRDNORM;
 636	return 0;
 637}
 638
 639/* buffer may be either ring_buffer or ring_buffer_per_cpu */
 640#define RB_WARN_ON(b, cond)						\
 641	({								\
 642		int _____ret = unlikely(cond);				\
 643		if (_____ret) {						\
 644			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
 645				struct ring_buffer_per_cpu *__b =	\
 646					(void *)b;			\
 647				atomic_inc(&__b->buffer->record_disabled); \
 648			} else						\
 649				atomic_inc(&b->record_disabled);	\
 650			WARN_ON(1);					\
 651		}							\
 652		_____ret;						\
 653	})
 654
 655/* Up this if you want to test the TIME_EXTENTS and normalization */
 656#define DEBUG_SHIFT 0
 657
 658static inline u64 rb_time_stamp(struct ring_buffer *buffer)
 659{
 660	/* shift to debug/test normalization and TIME_EXTENTS */
 661	return buffer->clock() << DEBUG_SHIFT;
 662}
 663
 664u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
 665{
 666	u64 time;
 667
 668	preempt_disable_notrace();
 669	time = rb_time_stamp(buffer);
 670	preempt_enable_no_resched_notrace();
 671
 672	return time;
 673}
 674EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
 675
 676void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
 677				      int cpu, u64 *ts)
 678{
 679	/* Just stupid testing the normalize function and deltas */
 680	*ts >>= DEBUG_SHIFT;
 681}
 682EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
 683
 684/*
 685 * Making the ring buffer lockless makes things tricky.
  686 * Writes only happen on the CPU that they are on, and they
  687 * only need to worry about interrupts. Reads, however, can
 688 * happen on any CPU.
 689 *
 690 * The reader page is always off the ring buffer, but when the
 691 * reader finishes with a page, it needs to swap its page with
 692 * a new one from the buffer. The reader needs to take from
 693 * the head (writes go to the tail). But if a writer is in overwrite
 694 * mode and wraps, it must push the head page forward.
 695 *
 696 * Here lies the problem.
 697 *
 698 * The reader must be careful to replace only the head page, and
 699 * not another one. As described at the top of the file in the
 700 * ASCII art, the reader sets its old page to point to the next
 701 * page after head. It then sets the page after head to point to
 702 * the old reader page. But if the writer moves the head page
 703 * during this operation, the reader could end up with the tail.
 704 *
 705 * We use cmpxchg to help prevent this race. We also do something
 706 * special with the page before head. We set the LSB to 1.
 707 *
 708 * When the writer must push the page forward, it will clear the
 709 * bit that points to the head page, move the head, and then set
 710 * the bit that points to the new head page.
 711 *
 712 * We also don't want an interrupt coming in and moving the head
 713 * page on another writer. Thus we use the second LSB to catch
 714 * that too. Thus:
 715 *
 716 * head->list->prev->next        bit 1          bit 0
 717 *                              -------        -------
 718 * Normal page                     0              0
 719 * Points to head page             0              1
 720 * New head page                   1              0
 721 *
 722 * Note we can not trust the prev pointer of the head page, because:
 723 *
 724 * +----+       +-----+        +-----+
 725 * |    |------>|  T  |---X--->|  N  |
 726 * |    |<------|     |        |     |
 727 * +----+       +-----+        +-----+
 728 *   ^                           ^ |
 729 *   |          +-----+          | |
 730 *   +----------|  R  |----------+ |
 731 *              |     |<-----------+
 732 *              +-----+
 733 *
 734 * Key:  ---X-->  HEAD flag set in pointer
 735 *         T      Tail page
 736 *         R      Reader page
 737 *         N      Next page
 738 *
 739 * (see __rb_reserve_next() to see where this happens)
 740 *
 741 *  What the above shows is that the reader just swapped out
 742 *  the reader page with a page in the buffer, but before it
 743 *  could make the new header point back to the new page added
 744 *  it was preempted by a writer. The writer moved forward onto
 745 *  the new page added by the reader and is about to move forward
 746 *  again.
 747 *
  748 *  As you can see, it is legitimate for the previous pointer of
  749 *  the head (or any page) not to point back to itself, but only
  750 *  temporarily.
 751 */
 752
 753#define RB_PAGE_NORMAL		0UL
 754#define RB_PAGE_HEAD		1UL
 755#define RB_PAGE_UPDATE		2UL
 756
 757
 758#define RB_FLAG_MASK		3UL
 759
 760/* PAGE_MOVED is not part of the mask */
 761#define RB_PAGE_MOVED		4UL
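/*
 * Editor's note: a minimal sketch (not part of the kernel source) of the
 * pointer tagging described above: the flag bits live in the low bits of
 * the ->next pointer of the page that precedes the head page.
 * rb_sketch_tag_ptr()/rb_sketch_ptr_flags() are hypothetical; the real
 * helpers (rb_list_head(), rb_is_head_page(), ...) follow below.
 */
static inline struct list_head *rb_sketch_tag_ptr(struct list_head *list,
						  unsigned long flag)
{
	return (struct list_head *)((unsigned long)list | flag);
}

static inline unsigned long rb_sketch_ptr_flags(struct list_head *list)
{
	return (unsigned long)list & RB_FLAG_MASK;
}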
 762
 763/*
 764 * rb_list_head - remove any bit
 765 */
 766static struct list_head *rb_list_head(struct list_head *list)
 767{
 768	unsigned long val = (unsigned long)list;
 769
 770	return (struct list_head *)(val & ~RB_FLAG_MASK);
 771}
 772
 773/*
 774 * rb_is_head_page - test if the given page is the head page
 775 *
 776 * Because the reader may move the head_page pointer, we can
 777 * not trust what the head page is (it may be pointing to
 778 * the reader page). But if the next page is a header page,
 779 * its flags will be non zero.
 780 */
 781static inline int
 782rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
 783		struct buffer_page *page, struct list_head *list)
 784{
 785	unsigned long val;
 786
 787	val = (unsigned long)list->next;
 788
 789	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
 790		return RB_PAGE_MOVED;
 791
 792	return val & RB_FLAG_MASK;
 793}
 794
 795/*
 796 * rb_is_reader_page
 797 *
  798 * The unique thing about the reader page is that, if the
 799 * writer is ever on it, the previous pointer never points
 800 * back to the reader page.
 801 */
 802static int rb_is_reader_page(struct buffer_page *page)
 803{
 804	struct list_head *list = page->list.prev;
 805
 806	return rb_list_head(list->next) != &page->list;
 807}
 808
 809/*
 810 * rb_set_list_to_head - set a list_head to be pointing to head.
 811 */
 812static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
 813				struct list_head *list)
 814{
 815	unsigned long *ptr;
 816
 817	ptr = (unsigned long *)&list->next;
 818	*ptr |= RB_PAGE_HEAD;
 819	*ptr &= ~RB_PAGE_UPDATE;
 820}
 821
 822/*
 823 * rb_head_page_activate - sets up head page
 824 */
 825static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
 826{
 827	struct buffer_page *head;
 828
 829	head = cpu_buffer->head_page;
 830	if (!head)
 831		return;
 832
 833	/*
 834	 * Set the previous list pointer to have the HEAD flag.
 835	 */
 836	rb_set_list_to_head(cpu_buffer, head->list.prev);
 837}
 838
 839static void rb_list_head_clear(struct list_head *list)
 840{
 841	unsigned long *ptr = (unsigned long *)&list->next;
 842
 843	*ptr &= ~RB_FLAG_MASK;
 844}
 845
 846/*
  847 * rb_head_page_deactivate - clears head page ptr (for free list)
 848 */
 849static void
 850rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
 851{
 852	struct list_head *hd;
 853
 854	/* Go through the whole list and clear any pointers found. */
 855	rb_list_head_clear(cpu_buffer->pages);
 856
 857	list_for_each(hd, cpu_buffer->pages)
 858		rb_list_head_clear(hd);
 859}
 860
 861static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
 862			    struct buffer_page *head,
 863			    struct buffer_page *prev,
 864			    int old_flag, int new_flag)
 865{
 866	struct list_head *list;
 867	unsigned long val = (unsigned long)&head->list;
 868	unsigned long ret;
 869
 870	list = &prev->list;
 871
 872	val &= ~RB_FLAG_MASK;
 873
 874	ret = cmpxchg((unsigned long *)&list->next,
 875		      val | old_flag, val | new_flag);
 876
 877	/* check if the reader took the page */
 878	if ((ret & ~RB_FLAG_MASK) != val)
 879		return RB_PAGE_MOVED;
 880
 881	return ret & RB_FLAG_MASK;
 882}
 883
 884static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
 885				   struct buffer_page *head,
 886				   struct buffer_page *prev,
 887				   int old_flag)
 888{
 889	return rb_head_page_set(cpu_buffer, head, prev,
 890				old_flag, RB_PAGE_UPDATE);
 891}
 892
 893static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
 894				 struct buffer_page *head,
 895				 struct buffer_page *prev,
 896				 int old_flag)
 897{
 898	return rb_head_page_set(cpu_buffer, head, prev,
 899				old_flag, RB_PAGE_HEAD);
 900}
 901
 902static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
 903				   struct buffer_page *head,
 904				   struct buffer_page *prev,
 905				   int old_flag)
 906{
 907	return rb_head_page_set(cpu_buffer, head, prev,
 908				old_flag, RB_PAGE_NORMAL);
 909}
 910
 911static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
 912			       struct buffer_page **bpage)
 913{
 914	struct list_head *p = rb_list_head((*bpage)->list.next);
 915
 916	*bpage = list_entry(p, struct buffer_page, list);
 917}
 918
 919static struct buffer_page *
 920rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
 921{
 922	struct buffer_page *head;
 923	struct buffer_page *page;
 924	struct list_head *list;
 925	int i;
 926
 927	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
 928		return NULL;
 929
 930	/* sanity check */
 931	list = cpu_buffer->pages;
 932	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
 933		return NULL;
 934
 935	page = head = cpu_buffer->head_page;
 936	/*
  937	 * It is possible that the writer moves the head page behind
  938	 * where we started, and we miss it in one loop.
  939	 * A second loop should grab the head page, but we'll do
 940	 * three loops just because I'm paranoid.
 941	 */
 942	for (i = 0; i < 3; i++) {
 943		do {
 944			if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
 945				cpu_buffer->head_page = page;
 946				return page;
 947			}
 948			rb_inc_page(cpu_buffer, &page);
 949		} while (page != head);
 950	}
 951
 952	RB_WARN_ON(cpu_buffer, 1);
 953
 954	return NULL;
 955}
 956
 957static int rb_head_page_replace(struct buffer_page *old,
 958				struct buffer_page *new)
 959{
 960	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
 961	unsigned long val;
 962	unsigned long ret;
 963
 964	val = *ptr & ~RB_FLAG_MASK;
 965	val |= RB_PAGE_HEAD;
 966
 967	ret = cmpxchg(ptr, val, (unsigned long)&new->list);
 968
 969	return ret == val;
 970}
 971
 972/*
 973 * rb_tail_page_update - move the tail page forward
 974 *
 975 * Returns 1 if moved tail page, 0 if someone else did.
 976 */
 977static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
 978			       struct buffer_page *tail_page,
 979			       struct buffer_page *next_page)
 980{
 981	struct buffer_page *old_tail;
 982	unsigned long old_entries;
 983	unsigned long old_write;
 984	int ret = 0;
 985
 986	/*
 987	 * The tail page now needs to be moved forward.
 988	 *
 989	 * We need to reset the tail page, but without messing
 990	 * with possible erasing of data brought in by interrupts
 991	 * that have moved the tail page and are currently on it.
 992	 *
 993	 * We add a counter to the write field to denote this.
 994	 */
 995	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
 996	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
 997
 998	/*
 999	 * Just make sure we have seen our old_write and synchronize
1000	 * with any interrupts that come in.
1001	 */
1002	barrier();
1003
1004	/*
1005	 * If the tail page is still the same as what we think
1006	 * it is, then it is up to us to update the tail
1007	 * pointer.
1008	 */
1009	if (tail_page == cpu_buffer->tail_page) {
1010		/* Zero the write counter */
1011		unsigned long val = old_write & ~RB_WRITE_MASK;
1012		unsigned long eval = old_entries & ~RB_WRITE_MASK;
1013
1014		/*
1015		 * This will only succeed if an interrupt did
 1016		 * not come in and change it, in which case we
 1017		 * do not want to modify it.
1018		 *
1019		 * We add (void) to let the compiler know that we do not care
1020		 * about the return value of these functions. We use the
1021		 * cmpxchg to only update if an interrupt did not already
1022		 * do it for us. If the cmpxchg fails, we don't care.
1023		 */
1024		(void)local_cmpxchg(&next_page->write, old_write, val);
1025		(void)local_cmpxchg(&next_page->entries, old_entries, eval);
1026
1027		/*
 1028		 * No need to worry about races with clearing out the commit:
 1029		 * it can only increment when a commit takes place, and that
 1030		 * only happens in the outermost nested commit.
1031		 */
1032		local_set(&next_page->page->commit, 0);
1033
1034		old_tail = cmpxchg(&cpu_buffer->tail_page,
1035				   tail_page, next_page);
1036
1037		if (old_tail == tail_page)
1038			ret = 1;
1039	}
1040
1041	return ret;
1042}
1043
1044static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
1045			  struct buffer_page *bpage)
1046{
1047	unsigned long val = (unsigned long)bpage;
1048
1049	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
1050		return 1;
1051
1052	return 0;
1053}
1054
1055/**
1056 * rb_check_list - make sure a pointer to a list has the last bits zero
1057 */
1058static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
1059			 struct list_head *list)
1060{
1061	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
1062		return 1;
1063	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
1064		return 1;
1065	return 0;
1066}
1067
1068/**
1069 * rb_check_pages - integrity check of buffer pages
1070 * @cpu_buffer: CPU buffer with pages to test
1071 *
1072 * As a safety measure we check to make sure the data pages have not
1073 * been corrupted.
1074 */
1075static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
1076{
1077	struct list_head *head = cpu_buffer->pages;
1078	struct buffer_page *bpage, *tmp;
1079
1080	/* Reset the head page if it exists */
1081	if (cpu_buffer->head_page)
1082		rb_set_head_page(cpu_buffer);
1083
1084	rb_head_page_deactivate(cpu_buffer);
1085
1086	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
1087		return -1;
1088	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
1089		return -1;
1090
1091	if (rb_check_list(cpu_buffer, head))
1092		return -1;
1093
1094	list_for_each_entry_safe(bpage, tmp, head, list) {
1095		if (RB_WARN_ON(cpu_buffer,
1096			       bpage->list.next->prev != &bpage->list))
1097			return -1;
1098		if (RB_WARN_ON(cpu_buffer,
1099			       bpage->list.prev->next != &bpage->list))
1100			return -1;
1101		if (rb_check_list(cpu_buffer, &bpage->list))
1102			return -1;
1103	}
1104
1105	rb_head_page_activate(cpu_buffer);
1106
1107	return 0;
1108}
1109
1110static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
1111{
1112	int i;
1113	struct buffer_page *bpage, *tmp;
1114
1115	for (i = 0; i < nr_pages; i++) {
1116		struct page *page;
1117		/*
 1118		 * The __GFP_NORETRY flag makes sure that the allocation fails
 1119		 * gracefully without invoking the oom-killer, so the system is
 1120		 * not destabilized.
1121		 */
1122		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1123				    GFP_KERNEL | __GFP_NORETRY,
1124				    cpu_to_node(cpu));
1125		if (!bpage)
1126			goto free_pages;
1127
1128		list_add(&bpage->list, pages);
1129
1130		page = alloc_pages_node(cpu_to_node(cpu),
1131					GFP_KERNEL | __GFP_NORETRY, 0);
1132		if (!page)
1133			goto free_pages;
1134		bpage->page = page_address(page);
1135		rb_init_page(bpage->page);
1136	}
1137
1138	return 0;
1139
1140free_pages:
1141	list_for_each_entry_safe(bpage, tmp, pages, list) {
1142		list_del_init(&bpage->list);
1143		free_buffer_page(bpage);
1144	}
1145
1146	return -ENOMEM;
1147}
1148
1149static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1150			     unsigned nr_pages)
1151{
1152	LIST_HEAD(pages);
1153
1154	WARN_ON(!nr_pages);
1155
1156	if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
1157		return -ENOMEM;
1158
1159	/*
1160	 * The ring buffer page list is a circular list that does not
1161	 * start and end with a list head. All page list items point to
1162	 * other pages.
1163	 */
1164	cpu_buffer->pages = pages.next;
1165	list_del(&pages);
1166
1167	cpu_buffer->nr_pages = nr_pages;
1168
1169	rb_check_pages(cpu_buffer);
1170
1171	return 0;
1172}
1173
1174static struct ring_buffer_per_cpu *
1175rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
1176{
1177	struct ring_buffer_per_cpu *cpu_buffer;
1178	struct buffer_page *bpage;
1179	struct page *page;
1180	int ret;
1181
1182	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1183				  GFP_KERNEL, cpu_to_node(cpu));
1184	if (!cpu_buffer)
1185		return NULL;
1186
1187	cpu_buffer->cpu = cpu;
1188	cpu_buffer->buffer = buffer;
1189	raw_spin_lock_init(&cpu_buffer->reader_lock);
1190	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1191	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1192	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
1193	init_completion(&cpu_buffer->update_done);
1194	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
1195	init_waitqueue_head(&cpu_buffer->irq_work.waiters);
1196
1197	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1198			    GFP_KERNEL, cpu_to_node(cpu));
1199	if (!bpage)
1200		goto fail_free_buffer;
1201
1202	rb_check_bpage(cpu_buffer, bpage);
1203
1204	cpu_buffer->reader_page = bpage;
1205	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
1206	if (!page)
1207		goto fail_free_reader;
1208	bpage->page = page_address(page);
1209	rb_init_page(bpage->page);
1210
1211	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1212	INIT_LIST_HEAD(&cpu_buffer->new_pages);
1213
1214	ret = rb_allocate_pages(cpu_buffer, nr_pages);
1215	if (ret < 0)
1216		goto fail_free_reader;
1217
1218	cpu_buffer->head_page
1219		= list_entry(cpu_buffer->pages, struct buffer_page, list);
1220	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1221
1222	rb_head_page_activate(cpu_buffer);
1223
1224	return cpu_buffer;
1225
1226 fail_free_reader:
1227	free_buffer_page(cpu_buffer->reader_page);
1228
1229 fail_free_buffer:
1230	kfree(cpu_buffer);
1231	return NULL;
1232}
1233
1234static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1235{
1236	struct list_head *head = cpu_buffer->pages;
1237	struct buffer_page *bpage, *tmp;
1238
1239	free_buffer_page(cpu_buffer->reader_page);
1240
1241	rb_head_page_deactivate(cpu_buffer);
1242
1243	if (head) {
1244		list_for_each_entry_safe(bpage, tmp, head, list) {
1245			list_del_init(&bpage->list);
1246			free_buffer_page(bpage);
1247		}
1248		bpage = list_entry(head, struct buffer_page, list);
1249		free_buffer_page(bpage);
1250	}
1251
1252	kfree(cpu_buffer);
1253}
1254
1255#ifdef CONFIG_HOTPLUG_CPU
1256static int rb_cpu_notify(struct notifier_block *self,
1257			 unsigned long action, void *hcpu);
1258#endif
1259
1260/**
1261 * __ring_buffer_alloc - allocate a new ring_buffer
1262 * @size: the size in bytes per cpu that is needed.
1263 * @flags: attributes to set for the ring buffer.
1264 *
1265 * Currently the only flag that is available is the RB_FL_OVERWRITE
1266 * flag. This flag means that the buffer will overwrite old data
1267 * when the buffer wraps. If this flag is not set, the buffer will
1268 * drop data when the tail hits the head.
1269 */
1270struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1271					struct lock_class_key *key)
1272{
1273	struct ring_buffer *buffer;
1274	int bsize;
1275	int cpu, nr_pages;
1276
1277	/* keep it in its own cache line */
1278	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1279			 GFP_KERNEL);
1280	if (!buffer)
1281		return NULL;
1282
1283	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1284		goto fail_free_buffer;
1285
1286	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1287	buffer->flags = flags;
1288	buffer->clock = trace_clock_local;
1289	buffer->reader_lock_key = key;
1290
1291	init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
1292	init_waitqueue_head(&buffer->irq_work.waiters);
1293
1294	/* need at least two pages */
1295	if (nr_pages < 2)
1296		nr_pages = 2;
1297
1298	/*
 1299	 * In the non-hotplug CPU case, if the ring buffer is allocated
 1300	 * in an early initcall, it will not be notified of secondary CPUs.
 1301	 * In that case, we need to allocate for all possible CPUs.
1302	 */
1303#ifdef CONFIG_HOTPLUG_CPU
1304	cpu_notifier_register_begin();
1305	cpumask_copy(buffer->cpumask, cpu_online_mask);
1306#else
1307	cpumask_copy(buffer->cpumask, cpu_possible_mask);
1308#endif
1309	buffer->cpus = nr_cpu_ids;
1310
1311	bsize = sizeof(void *) * nr_cpu_ids;
1312	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1313				  GFP_KERNEL);
1314	if (!buffer->buffers)
1315		goto fail_free_cpumask;
1316
1317	for_each_buffer_cpu(buffer, cpu) {
1318		buffer->buffers[cpu] =
1319			rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
1320		if (!buffer->buffers[cpu])
1321			goto fail_free_buffers;
1322	}
1323
1324#ifdef CONFIG_HOTPLUG_CPU
1325	buffer->cpu_notify.notifier_call = rb_cpu_notify;
1326	buffer->cpu_notify.priority = 0;
1327	__register_cpu_notifier(&buffer->cpu_notify);
1328	cpu_notifier_register_done();
1329#endif
1330
1331	mutex_init(&buffer->mutex);
1332
1333	return buffer;
1334
1335 fail_free_buffers:
1336	for_each_buffer_cpu(buffer, cpu) {
1337		if (buffer->buffers[cpu])
1338			rb_free_cpu_buffer(buffer->buffers[cpu]);
1339	}
1340	kfree(buffer->buffers);
1341
1342 fail_free_cpumask:
1343	free_cpumask_var(buffer->cpumask);
1344#ifdef CONFIG_HOTPLUG_CPU
1345	cpu_notifier_register_done();
1346#endif
1347
1348 fail_free_buffer:
1349	kfree(buffer);
1350	return NULL;
1351}
1352EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
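/*
 * Editor's note: a usage sketch (not part of the kernel source). Callers
 * normally go through the ring_buffer_alloc() wrapper from
 * <linux/ring_buffer.h>, which supplies the lock_class_key for the
 * function above. rb_sketch_alloc() is a hypothetical helper; the size is
 * rounded up to full pages and at least two pages are allocated per CPU.
 */
static inline struct ring_buffer *rb_sketch_alloc(void)
{
	/* overwrite the oldest data when the buffer wraps */
	return ring_buffer_alloc(8 * BUF_PAGE_SIZE, RB_FL_OVERWRITE);
}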
1353
1354/**
1355 * ring_buffer_free - free a ring buffer.
1356 * @buffer: the buffer to free.
1357 */
1358void
1359ring_buffer_free(struct ring_buffer *buffer)
1360{
1361	int cpu;
1362
1363#ifdef CONFIG_HOTPLUG_CPU
1364	cpu_notifier_register_begin();
1365	__unregister_cpu_notifier(&buffer->cpu_notify);
1366#endif
1367
1368	for_each_buffer_cpu(buffer, cpu)
1369		rb_free_cpu_buffer(buffer->buffers[cpu]);
1370
1371#ifdef CONFIG_HOTPLUG_CPU
1372	cpu_notifier_register_done();
1373#endif
1374
1375	kfree(buffer->buffers);
1376	free_cpumask_var(buffer->cpumask);
1377
1378	kfree(buffer);
1379}
1380EXPORT_SYMBOL_GPL(ring_buffer_free);
1381
1382void ring_buffer_set_clock(struct ring_buffer *buffer,
1383			   u64 (*clock)(void))
1384{
1385	buffer->clock = clock;
1386}
1387
1388static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1389
1390static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1391{
1392	return local_read(&bpage->entries) & RB_WRITE_MASK;
1393}
1394
1395static inline unsigned long rb_page_write(struct buffer_page *bpage)
1396{
1397	return local_read(&bpage->write) & RB_WRITE_MASK;
1398}
1399
1400static int
1401rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
1402{
1403	struct list_head *tail_page, *to_remove, *next_page;
1404	struct buffer_page *to_remove_page, *tmp_iter_page;
1405	struct buffer_page *last_page, *first_page;
1406	unsigned int nr_removed;
1407	unsigned long head_bit;
1408	int page_entries;
1409
1410	head_bit = 0;
1411
1412	raw_spin_lock_irq(&cpu_buffer->reader_lock);
1413	atomic_inc(&cpu_buffer->record_disabled);
1414	/*
1415	 * We don't race with the readers since we have acquired the reader
1416	 * lock. We also don't race with writers after disabling recording.
1417	 * This makes it easy to figure out the first and the last page to be
1418	 * removed from the list. We unlink all the pages in between including
1419	 * the first and last pages. This is done in a busy loop so that we
1420	 * lose the least number of traces.
1421	 * The pages are freed after we restart recording and unlock readers.
1422	 */
1423	tail_page = &cpu_buffer->tail_page->list;
1424
1425	/*
1426	 * tail page might be on reader page, we remove the next page
1427	 * from the ring buffer
1428	 */
1429	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1430		tail_page = rb_list_head(tail_page->next);
1431	to_remove = tail_page;
1432
1433	/* start of pages to remove */
1434	first_page = list_entry(rb_list_head(to_remove->next),
1435				struct buffer_page, list);
1436
1437	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
1438		to_remove = rb_list_head(to_remove)->next;
1439		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
1440	}
1441
1442	next_page = rb_list_head(to_remove)->next;
1443
1444	/*
1445	 * Now we remove all pages between tail_page and next_page.
 1446	 * Make sure the head_bit value is preserved for the
 1447	 * next page.
1448	 */
1449	tail_page->next = (struct list_head *)((unsigned long)next_page |
1450						head_bit);
1451	next_page = rb_list_head(next_page);
1452	next_page->prev = tail_page;
1453
1454	/* make sure pages points to a valid page in the ring buffer */
1455	cpu_buffer->pages = next_page;
1456
1457	/* update head page */
1458	if (head_bit)
1459		cpu_buffer->head_page = list_entry(next_page,
1460						struct buffer_page, list);
1461
1462	/*
1463	 * change read pointer to make sure any read iterators reset
1464	 * themselves
1465	 */
1466	cpu_buffer->read = 0;
1467
1468	/* pages are removed, resume tracing and then free the pages */
1469	atomic_dec(&cpu_buffer->record_disabled);
1470	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1471
1472	RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
1473
1474	/* last buffer page to remove */
1475	last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
1476				list);
1477	tmp_iter_page = first_page;
1478
1479	do {
1480		to_remove_page = tmp_iter_page;
1481		rb_inc_page(cpu_buffer, &tmp_iter_page);
1482
1483		/* update the counters */
1484		page_entries = rb_page_entries(to_remove_page);
1485		if (page_entries) {
1486			/*
1487			 * If something was added to this page, it was full
1488			 * since it is not the tail page. So we deduct the
1489			 * bytes consumed in ring buffer from here.
1490			 * Increment overrun to account for the lost events.
1491			 */
1492			local_add(page_entries, &cpu_buffer->overrun);
1493			local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1494		}
1495
1496		/*
1497		 * We have already removed references to this list item, just
1498		 * free up the buffer_page and its page
1499		 */
1500		free_buffer_page(to_remove_page);
1501		nr_removed--;
1502
1503	} while (to_remove_page != last_page);
1504
1505	RB_WARN_ON(cpu_buffer, nr_removed);
1506
1507	return nr_removed == 0;
1508}
1509
1510static int
1511rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
1512{
1513	struct list_head *pages = &cpu_buffer->new_pages;
1514	int retries, success;
1515
1516	raw_spin_lock_irq(&cpu_buffer->reader_lock);
1517	/*
1518	 * We are holding the reader lock, so the reader page won't be swapped
1519	 * in the ring buffer. Now we are racing with the writer trying to
1520	 * move head page and the tail page.
1521	 * We are going to adapt the reader page update process where:
1522	 * 1. We first splice the start and end of list of new pages between
1523	 *    the head page and its previous page.
1524	 * 2. We cmpxchg the prev_page->next to point from head page to the
1525	 *    start of new pages list.
1526	 * 3. Finally, we update the head->prev to the end of new list.
1527	 *
1528	 * We will try this process 10 times, to make sure that we don't keep
1529	 * spinning.
1530	 */
1531	retries = 10;
1532	success = 0;
1533	while (retries--) {
1534		struct list_head *head_page, *prev_page, *r;
1535		struct list_head *last_page, *first_page;
1536		struct list_head *head_page_with_bit;
1537
1538		head_page = &rb_set_head_page(cpu_buffer)->list;
1539		if (!head_page)
1540			break;
1541		prev_page = head_page->prev;
1542
1543		first_page = pages->next;
1544		last_page  = pages->prev;
1545
1546		head_page_with_bit = (struct list_head *)
1547				     ((unsigned long)head_page | RB_PAGE_HEAD);
1548
1549		last_page->next = head_page_with_bit;
1550		first_page->prev = prev_page;
1551
1552		r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
1553
1554		if (r == head_page_with_bit) {
1555			/*
1556			 * yay, we replaced the page pointer to our new list,
 1557			 * now we just have to update the head page's prev
 1558			 * pointer to point to the end of the list
1559			 */
1560			head_page->prev = last_page;
1561			success = 1;
1562			break;
1563		}
1564	}
1565
1566	if (success)
1567		INIT_LIST_HEAD(pages);
1568	/*
 1569	 * If we weren't successful in adding the new pages, warn and stop
1570	 * tracing
1571	 */
1572	RB_WARN_ON(cpu_buffer, !success);
1573	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1574
1575	/* free pages if they weren't inserted */
1576	if (!success) {
1577		struct buffer_page *bpage, *tmp;
1578		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1579					 list) {
1580			list_del_init(&bpage->list);
1581			free_buffer_page(bpage);
1582		}
1583	}
1584	return success;
1585}
1586
1587static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
1588{
1589	int success;
1590
1591	if (cpu_buffer->nr_pages_to_update > 0)
1592		success = rb_insert_pages(cpu_buffer);
1593	else
1594		success = rb_remove_pages(cpu_buffer,
1595					-cpu_buffer->nr_pages_to_update);
1596
1597	if (success)
1598		cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
1599}
1600
1601static void update_pages_handler(struct work_struct *work)
1602{
1603	struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
1604			struct ring_buffer_per_cpu, update_pages_work);
1605	rb_update_pages(cpu_buffer);
1606	complete(&cpu_buffer->update_done);
1607}
1608
1609/**
1610 * ring_buffer_resize - resize the ring buffer
1611 * @buffer: the buffer to resize.
1612 * @size: the new size.
1613 * @cpu_id: the cpu buffer to resize
1614 *
1615 * Minimum size is 2 * BUF_PAGE_SIZE.
1616 *
1617 * Returns 0 on success and < 0 on failure.
1618 */
1619int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1620			int cpu_id)
1621{
1622	struct ring_buffer_per_cpu *cpu_buffer;
1623	unsigned nr_pages;
1624	int cpu, err = 0;
1625
1626	/*
1627	 * Always succeed at resizing a non-existent buffer:
1628	 */
1629	if (!buffer)
1630		return size;
1631
1632	/* Make sure the requested buffer exists */
1633	if (cpu_id != RING_BUFFER_ALL_CPUS &&
1634	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
1635		return size;
1636
1637	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1638	size *= BUF_PAGE_SIZE;
1639
1640	/* we need a minimum of two pages */
1641	if (size < BUF_PAGE_SIZE * 2)
1642		size = BUF_PAGE_SIZE * 2;
1643
1644	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1645
1646	/*
1647	 * Don't succeed if resizing is disabled, as a reader might be
1648	 * manipulating the ring buffer and is expecting a sane state while
1649	 * this is true.
1650	 */
1651	if (atomic_read(&buffer->resize_disabled))
1652		return -EBUSY;
1653
1654	/* prevent another thread from changing buffer sizes */
1655	mutex_lock(&buffer->mutex);
1656
1657	if (cpu_id == RING_BUFFER_ALL_CPUS) {
1658		/* calculate the pages to update */
1659		for_each_buffer_cpu(buffer, cpu) {
1660			cpu_buffer = buffer->buffers[cpu];
1661
1662			cpu_buffer->nr_pages_to_update = nr_pages -
1663							cpu_buffer->nr_pages;
1664			/*
 1665			 * Nothing more to do when removing pages or when there is no update
1666			 */
1667			if (cpu_buffer->nr_pages_to_update <= 0)
1668				continue;
1669			/*
1670			 * to add pages, make sure all new pages can be
1671			 * allocated without receiving ENOMEM
1672			 */
1673			INIT_LIST_HEAD(&cpu_buffer->new_pages);
1674			if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1675						&cpu_buffer->new_pages, cpu)) {
1676				/* not enough memory for new pages */
1677				err = -ENOMEM;
1678				goto out_err;
1679			}
1680		}
1681
1682		get_online_cpus();
1683		/*
1684		 * Fire off all the required work handlers
1685		 * We can't schedule on offline CPUs, but it's not necessary
1686		 * since we can change their buffer sizes without any race.
1687		 */
1688		for_each_buffer_cpu(buffer, cpu) {
1689			cpu_buffer = buffer->buffers[cpu];
1690			if (!cpu_buffer->nr_pages_to_update)
1691				continue;
1692
1693			/* The update must run on the CPU that is being updated. */
1694			preempt_disable();
1695			if (cpu == smp_processor_id() || !cpu_online(cpu)) {
1696				rb_update_pages(cpu_buffer);
1697				cpu_buffer->nr_pages_to_update = 0;
1698			} else {
1699				/*
1700				 * Can not disable preemption for schedule_work_on()
1701				 * on PREEMPT_RT.
1702				 */
1703				preempt_enable();
1704				schedule_work_on(cpu,
1705						&cpu_buffer->update_pages_work);
1706				preempt_disable();
1707			}
1708			preempt_enable();
1709		}
1710
1711		/* wait for all the updates to complete */
1712		for_each_buffer_cpu(buffer, cpu) {
1713			cpu_buffer = buffer->buffers[cpu];
1714			if (!cpu_buffer->nr_pages_to_update)
1715				continue;
1716
1717			if (cpu_online(cpu))
1718				wait_for_completion(&cpu_buffer->update_done);
1719			cpu_buffer->nr_pages_to_update = 0;
1720		}
1721
1722		put_online_cpus();
1723	} else {
 1724		/* Make sure this CPU has been initialized */
1725		if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
1726			goto out;
1727
1728		cpu_buffer = buffer->buffers[cpu_id];
1729
1730		if (nr_pages == cpu_buffer->nr_pages)
1731			goto out;
1732
1733		cpu_buffer->nr_pages_to_update = nr_pages -
1734						cpu_buffer->nr_pages;
1735
1736		INIT_LIST_HEAD(&cpu_buffer->new_pages);
1737		if (cpu_buffer->nr_pages_to_update > 0 &&
1738			__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1739					    &cpu_buffer->new_pages, cpu_id)) {
1740			err = -ENOMEM;
1741			goto out_err;
1742		}
1743
1744		get_online_cpus();
1745
1746		preempt_disable();
1747		/* The update must run on the CPU that is being updated. */
1748		if (cpu_id == smp_processor_id() || !cpu_online(cpu_id))
1749			rb_update_pages(cpu_buffer);
1750		else {
1751			/*
1752			 * Can not disable preemption for schedule_work_on()
1753			 * on PREEMPT_RT.
1754			 */
1755			preempt_enable();
1756			schedule_work_on(cpu_id,
1757					 &cpu_buffer->update_pages_work);
1758			wait_for_completion(&cpu_buffer->update_done);
1759			preempt_disable();
1760		}
1761		preempt_enable();
1762
1763		cpu_buffer->nr_pages_to_update = 0;
1764		put_online_cpus();
1765	}
1766
1767 out:
1768	/*
1769	 * The ring buffer resize can happen with the ring buffer
1770	 * enabled, so that the update disturbs the tracing as little
1771	 * as possible. But if the buffer is disabled, we do not need
1772	 * to worry about that, and we can take the time to verify
1773	 * that the buffer is not corrupt.
1774	 */
1775	if (atomic_read(&buffer->record_disabled)) {
1776		atomic_inc(&buffer->record_disabled);
1777		/*
1778		 * Even though the buffer was disabled, we must make sure
1779		 * that it is truly disabled before calling rb_check_pages.
1780		 * There could have been a race between checking
1781		 * record_disable and incrementing it.
1782		 */
1783		synchronize_sched();
1784		for_each_buffer_cpu(buffer, cpu) {
1785			cpu_buffer = buffer->buffers[cpu];
1786			rb_check_pages(cpu_buffer);
1787		}
1788		atomic_dec(&buffer->record_disabled);
1789	}
1790
1791	mutex_unlock(&buffer->mutex);
1792	return size;
1793
1794 out_err:
1795	for_each_buffer_cpu(buffer, cpu) {
1796		struct buffer_page *bpage, *tmp;
1797
1798		cpu_buffer = buffer->buffers[cpu];
1799		cpu_buffer->nr_pages_to_update = 0;
1800
1801		if (list_empty(&cpu_buffer->new_pages))
1802			continue;
1803
1804		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1805					list) {
1806			list_del_init(&bpage->list);
1807			free_buffer_page(bpage);
1808		}
1809	}
1810	mutex_unlock(&buffer->mutex);
1811	return err;
1812}
1813EXPORT_SYMBOL_GPL(ring_buffer_resize);
1814
1815void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
1816{
1817	mutex_lock(&buffer->mutex);
1818	if (val)
1819		buffer->flags |= RB_FL_OVERWRITE;
1820	else
1821		buffer->flags &= ~RB_FL_OVERWRITE;
1822	mutex_unlock(&buffer->mutex);
1823}
1824EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
1825
1826static inline void *
1827__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
1828{
1829	return bpage->data + index;
1830}
1831
1832static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
1833{
1834	return bpage->page->data + index;
1835}
1836
1837static inline struct ring_buffer_event *
1838rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
1839{
1840	return __rb_page_index(cpu_buffer->reader_page,
1841			       cpu_buffer->reader_page->read);
1842}
1843
1844static inline struct ring_buffer_event *
1845rb_iter_head_event(struct ring_buffer_iter *iter)
1846{
1847	return __rb_page_index(iter->head_page, iter->head);
1848}
1849
1850static inline unsigned rb_page_commit(struct buffer_page *bpage)
1851{
1852	return local_read(&bpage->page->commit);
1853}
1854
1855/* Size is determined by what has been committed */
1856static inline unsigned rb_page_size(struct buffer_page *bpage)
1857{
1858	return rb_page_commit(bpage);
1859}
1860
1861static inline unsigned
1862rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
1863{
1864	return rb_page_commit(cpu_buffer->commit_page);
1865}
1866
1867static inline unsigned
1868rb_event_index(struct ring_buffer_event *event)
1869{
1870	unsigned long addr = (unsigned long)event;
1871
1872	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
1873}
1874
1875static inline int
1876rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1877		   struct ring_buffer_event *event)
1878{
1879	unsigned long addr = (unsigned long)event;
1880	unsigned long index;
1881
1882	index = rb_event_index(event);
1883	addr &= PAGE_MASK;
1884
1885	return cpu_buffer->commit_page->page == (void *)addr &&
1886		rb_commit_index(cpu_buffer) == index;
1887}
1888
1889static void
1890rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1891{
1892	unsigned long max_count;
1893
1894	/*
1895	 * We only race with interrupts and NMIs on this CPU.
1896	 * If we own the commit event, then we can commit
1897	 * all others that interrupted us, since the interruptions
1898	 * are in stack format (they finish before they come
1899	 * back to us). This allows us to do a simple loop to
1900	 * assign the commit to the tail.
1901	 */
1902 again:
1903	max_count = cpu_buffer->nr_pages * 100;
1904
1905	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
1906		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
1907			return;
1908		if (RB_WARN_ON(cpu_buffer,
1909			       rb_is_reader_page(cpu_buffer->tail_page)))
1910			return;
1911		local_set(&cpu_buffer->commit_page->page->commit,
1912			  rb_page_write(cpu_buffer->commit_page));
1913		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1914		cpu_buffer->write_stamp =
1915			cpu_buffer->commit_page->page->time_stamp;
1916		/* add barrier to keep gcc from optimizing too much */
1917		barrier();
1918	}
1919	while (rb_commit_index(cpu_buffer) !=
1920	       rb_page_write(cpu_buffer->commit_page)) {
1921
1922		local_set(&cpu_buffer->commit_page->page->commit,
1923			  rb_page_write(cpu_buffer->commit_page));
1924		RB_WARN_ON(cpu_buffer,
1925			   local_read(&cpu_buffer->commit_page->page->commit) &
1926			   ~RB_WRITE_MASK);
1927		barrier();
1928	}
1929
1930	/* again, keep gcc from optimizing */
1931	barrier();
1932
1933	/*
1934	 * If an interrupt came in just after the first while loop
1935	 * and pushed the tail page forward, we will be left with
1936	 * a dangling commit that will never go forward.
1937	 */
1938	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1939		goto again;
1940}
1941
1942static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1943{
1944	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
1945	cpu_buffer->reader_page->read = 0;
1946}
1947
1948static void rb_inc_iter(struct ring_buffer_iter *iter)
1949{
1950	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1951
1952	/*
1953	 * The iterator could be on the reader page (it starts there).
1954	 * But the head could have moved, since the reader was
1955	 * found. Check for this case and assign the iterator
1956	 * to the head page instead of next.
1957	 */
1958	if (iter->head_page == cpu_buffer->reader_page)
1959		iter->head_page = rb_set_head_page(cpu_buffer);
1960	else
1961		rb_inc_page(cpu_buffer, &iter->head_page);
1962
1963	iter->read_stamp = iter->head_page->page->time_stamp;
1964	iter->head = 0;
1965}
1966
1967/* Slow path, do not inline */
1968static noinline struct ring_buffer_event *
1969rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
1970{
1971	event->type_len = RINGBUF_TYPE_TIME_EXTEND;
1972
1973	/* Not the first event on the page? */
1974	if (rb_event_index(event)) {
1975		event->time_delta = delta & TS_MASK;
1976		event->array[0] = delta >> TS_SHIFT;
1977	} else {
1978		/* nope, just zero it */
1979		event->time_delta = 0;
1980		event->array[0] = 0;
1981	}
1982
1983	return skip_time_extend(event);
1984}
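/*
 * Editor's note: a hedged worked example of the split above, assuming
 * TS_SHIFT == 27 and TS_MASK == (1ULL << 27) - 1 as used elsewhere in
 * this file.  A delta of 0x100000005 ns does not fit in 27 bits, so
 * rb_add_time_stamp() stores the low bits (5) in time_delta and the
 * high bits (0x100000005 >> 27 == 32) in array[0]; the read side
 * rebuilds the value as (array[0] << TS_SHIFT) + time_delta.
 */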
1985
1986/**
1987 * rb_update_event - update event type and data
1988 * @event: the event to update
1989 * @type: the type of event
1990 * @length: the size of the event field in the ring buffer
1991 *
1992 * Update the type and data fields of the event. The length
1993 * is the actual size that is written to the ring buffer,
1994 * and with this, we can determine what to place into the
1995 * data field.
1996 */
1997static void
1998rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
1999		struct ring_buffer_event *event, unsigned length,
2000		int add_timestamp, u64 delta)
2001{
2002	/* Only a commit updates the timestamp */
2003	if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
2004		delta = 0;
2005
2006	/*
2007	 * If we need to add a timestamp, then we
2008	 * add it to the start of the reserved space.
2009	 */
2010	if (unlikely(add_timestamp)) {
2011		event = rb_add_time_stamp(event, delta);
2012		length -= RB_LEN_TIME_EXTEND;
2013		delta = 0;
2014	}
2015
2016	event->time_delta = delta;
2017	length -= RB_EVNT_HDR_SIZE;
2018	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
2019		event->type_len = 0;
2020		event->array[0] = length;
2021	} else
2022		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
2023}
2024
2025/*
2026 * rb_handle_head_page - writer hit the head page
2027 *
2028 * Returns: +1 to retry page
2029 *           0 to continue
2030 *          -1 on error
2031 */
2032static int
2033rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
2034		    struct buffer_page *tail_page,
2035		    struct buffer_page *next_page)
2036{
2037	struct buffer_page *new_head;
2038	int entries;
2039	int type;
2040	int ret;
2041
2042	entries = rb_page_entries(next_page);
2043
2044	/*
2045	 * The hard part is here. We need to move the head
2046	 * forward, and protect against both readers on
2047	 * other CPUs and writers coming in via interrupts.
2048	 */
2049	type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
2050				       RB_PAGE_HEAD);
2051
2052	/*
2053	 * type can be one of four:
2054	 *  NORMAL - an interrupt already moved it for us
2055	 *  HEAD   - we are the first to get here.
2056	 *  UPDATE - we are the interrupt interrupting
2057	 *           a current move.
2058	 *  MOVED  - a reader on another CPU moved the next
2059	 *           pointer to its reader page. Give up
2060	 *           and try again.
2061	 */
2062
2063	switch (type) {
2064	case RB_PAGE_HEAD:
2065		/*
2066		 * We changed the head to UPDATE, thus
2067		 * it is our responsibility to update
2068		 * the counters.
2069		 */
2070		local_add(entries, &cpu_buffer->overrun);
2071		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
2072
2073		/*
2074		 * The entries will be zeroed out when we move the
2075		 * tail page.
2076		 */
2077
2078		/* still more to do */
2079		break;
2080
2081	case RB_PAGE_UPDATE:
2082		/*
2083		 * This is an interrupt that interrupted the
2084		 * previous update. Still more to do.
2085		 */
2086		break;
2087	case RB_PAGE_NORMAL:
2088		/*
2089		 * An interrupt came in before the update
2090		 * and processed this for us.
2091		 * Nothing left to do.
2092		 */
2093		return 1;
2094	case RB_PAGE_MOVED:
2095		/*
2096		 * The reader is on another CPU and just did
2097		 * a swap with our next_page.
2098		 * Try again.
2099		 */
2100		return 1;
2101	default:
2102		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
2103		return -1;
2104	}
2105
2106	/*
2107	 * Now that we are here, the old head pointer is
2108	 * set to UPDATE. This will keep the reader from
2109	 * swapping the head page with the reader page.
2110	 * The reader (on another CPU) will spin till
2111	 * we are finished.
2112	 *
2113	 * We just need to protect against interrupts
2114	 * doing the job. We will set the next pointer
2115	 * to HEAD. After that, we set the old pointer
2116	 * to NORMAL, but only if it was HEAD before.
2117	 * Otherwise we are an interrupt, and only
2118	 * want the outermost commit to reset it.
2119	 */
2120	new_head = next_page;
2121	rb_inc_page(cpu_buffer, &new_head);
2122
2123	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
2124				    RB_PAGE_NORMAL);
2125
2126	/*
2127	 * Valid returns are:
2128	 *  HEAD   - an interrupt came in and already set it.
2129	 *  NORMAL - One of two things:
2130	 *            1) We really set it.
2131	 *            2) A bunch of interrupts came in and moved
2132	 *               the page forward again.
2133	 */
2134	switch (ret) {
2135	case RB_PAGE_HEAD:
2136	case RB_PAGE_NORMAL:
2137		/* OK */
2138		break;
2139	default:
2140		RB_WARN_ON(cpu_buffer, 1);
2141		return -1;
2142	}
2143
2144	/*
2145	 * It is possible that an interrupt came in,
2146	 * set the head up, then more interrupts came in
2147	 * and moved it again. When we get back here,
2148	 * the page would have been set to NORMAL but we
2149	 * just set it back to HEAD.
2150	 *
2151	 * How do you detect this? Well, if that happened
2152	 * the tail page would have moved.
2153	 */
2154	if (ret == RB_PAGE_NORMAL) {
2155		/*
2156		 * If the tail had moved past next, then we need
2157		 * to reset the pointer.
2158		 */
2159		if (cpu_buffer->tail_page != tail_page &&
2160		    cpu_buffer->tail_page != next_page)
2161			rb_head_page_set_normal(cpu_buffer, new_head,
2162						next_page,
2163						RB_PAGE_HEAD);
2164	}
2165
2166	/*
2167	 * If this was the outer most commit (the one that
2168	 * changed the original pointer from HEAD to UPDATE),
2169	 * then it is up to us to reset it to NORMAL.
2170	 */
2171	if (type == RB_PAGE_HEAD) {
2172		ret = rb_head_page_set_normal(cpu_buffer, next_page,
2173					      tail_page,
2174					      RB_PAGE_UPDATE);
2175		if (RB_WARN_ON(cpu_buffer,
2176			       ret != RB_PAGE_UPDATE))
2177			return -1;
2178	}
2179
2180	return 0;
2181}
2182
2183static unsigned rb_calculate_event_length(unsigned length)
2184{
2185	struct ring_buffer_event event; /* Used only for sizeof array */
2186
2187	/* zero length can cause confusions */
2188	if (!length)
2189		length = 1;
2190
2191	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
2192		length += sizeof(event.array[0]);
2193
2194	length += RB_EVNT_HDR_SIZE;
2195	length = ALIGN(length, RB_ARCH_ALIGNMENT);
2196
2197	return length;
2198}
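/*
 * Editor's note: a worked example of the calculation above, assuming
 * the common 4-byte-aligned configuration (RB_EVNT_HDR_SIZE == 4,
 * RB_ARCH_ALIGNMENT == 4, RB_FORCE_8BYTE_ALIGNMENT == 0 and
 * RB_MAX_SMALL_DATA == 112 -- check the #defines near the top of this
 * file for the values on a given arch):
 *
 *	rb_calculate_event_length(10)  == ALIGN(10 + 4, 4)      == 16
 *	rb_calculate_event_length(200) == ALIGN(200 + 4 + 4, 4) == 208
 *
 * The 200-byte request exceeds RB_MAX_SMALL_DATA, so an extra
 * array[0] word is reserved to hold the length explicitly.
 */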
2199
2200static inline void
2201rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2202	      struct buffer_page *tail_page,
2203	      unsigned long tail, unsigned long length)
2204{
2205	struct ring_buffer_event *event;
2206
2207	/*
2208	 * Only the event that crossed the page boundary
2209	 * must fill the old tail_page with padding.
2210	 */
2211	if (tail >= BUF_PAGE_SIZE) {
2212		/*
2213		 * If the page was filled, then we still need
2214		 * to update the real_end. Reset it to zero
2215		 * and the reader will ignore it.
2216		 */
2217		if (tail == BUF_PAGE_SIZE)
2218			tail_page->real_end = 0;
2219
2220		local_sub(length, &tail_page->write);
2221		return;
2222	}
2223
2224	event = __rb_page_index(tail_page, tail);
2225	kmemcheck_annotate_bitfield(event, bitfield);
2226
2227	/* account for padding bytes */
2228	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2229
2230	/*
2231	 * Save the original length to the meta data.
2232	 * This will be used by the reader to add lost event
2233	 * counter.
2234	 */
2235	tail_page->real_end = tail;
2236
2237	/*
2238	 * If this event is bigger than the minimum size, then
2239	 * we need to be careful that we don't subtract the
2240	 * write counter enough to allow another writer to slip
2241	 * in on this page.
2242	 * We put in a discarded commit instead, to make sure
2243	 * that this space is not used again.
2244	 *
2245	 * If we are less than the minimum size, we don't need to
2246	 * worry about it.
2247	 */
2248	if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
2249		/* No room for any events */
2250
2251		/* Mark the rest of the page with padding */
2252		rb_event_set_padding(event);
2253
2254		/* Set the write back to the previous setting */
2255		local_sub(length, &tail_page->write);
2256		return;
2257	}
2258
2259	/* Put in a discarded event */
2260	event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
2261	event->type_len = RINGBUF_TYPE_PADDING;
2262	/* time delta must be non zero */
2263	event->time_delta = 1;
2264
2265	/* Set write to end of buffer */
2266	length = (tail + length) - BUF_PAGE_SIZE;
2267	local_sub(length, &tail_page->write);
2268}
2269
2270/*
2271 * This is the slow path, force gcc not to inline it.
2272 */
2273static noinline struct ring_buffer_event *
2274rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2275	     unsigned long length, unsigned long tail,
2276	     struct buffer_page *tail_page, u64 ts)
2277{
2278	struct buffer_page *commit_page = cpu_buffer->commit_page;
2279	struct ring_buffer *buffer = cpu_buffer->buffer;
2280	struct buffer_page *next_page;
2281	int ret;
2282
2283	next_page = tail_page;
2284
2285	rb_inc_page(cpu_buffer, &next_page);
2286
2287	/*
2288	 * If for some reason, we had an interrupt storm that made
2289	 * it all the way around the buffer, bail, and warn
2290	 * about it.
2291	 */
2292	if (unlikely(next_page == commit_page)) {
2293		local_inc(&cpu_buffer->commit_overrun);
2294		goto out_reset;
2295	}
2296
2297	/*
2298	 * This is where the fun begins!
2299	 *
2300	 * We are fighting against races between a reader that
2301	 * could be on another CPU trying to swap its reader
2302	 * page with the buffer head.
2303	 *
2304	 * We are also fighting against interrupts coming in and
2305	 * moving the head or tail on us as well.
2306	 *
2307	 * If the next page is the head page then we have filled
2308	 * the buffer, unless the commit page is still on the
2309	 * reader page.
2310	 */
2311	if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
2312
2313		/*
2314		 * If the commit is not on the reader page, then
2315		 * move the header page.
2316		 */
2317		if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2318			/*
2319			 * If we are not in overwrite mode,
2320			 * this is easy, just stop here.
2321			 */
2322			if (!(buffer->flags & RB_FL_OVERWRITE)) {
2323				local_inc(&cpu_buffer->dropped_events);
2324				goto out_reset;
2325			}
2326
2327			ret = rb_handle_head_page(cpu_buffer,
2328						  tail_page,
2329						  next_page);
2330			if (ret < 0)
2331				goto out_reset;
2332			if (ret)
2333				goto out_again;
2334		} else {
2335			/*
2336			 * We need to be careful here too. The
2337			 * commit page could still be on the reader
2338			 * page. We could have a small buffer, and
2339			 * have filled up the buffer with events
2340			 * from interrupts and such, and wrapped.
2341			 *
2342			 * Note, if the tail page is also on the
2343			 * reader_page, we let it move out.
2344			 */
2345			if (unlikely((cpu_buffer->commit_page !=
2346				      cpu_buffer->tail_page) &&
2347				     (cpu_buffer->commit_page ==
2348				      cpu_buffer->reader_page))) {
2349				local_inc(&cpu_buffer->commit_overrun);
2350				goto out_reset;
2351			}
2352		}
2353	}
2354
2355	ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
2356	if (ret) {
2357		/*
2358		 * Nested commits always have zero deltas, so
2359		 * just reread the time stamp
2360		 */
2361		ts = rb_time_stamp(buffer);
2362		next_page->page->time_stamp = ts;
2363	}
2364
2365 out_again:
2366
2367	rb_reset_tail(cpu_buffer, tail_page, tail, length);
2368
2369	/* fail and let the caller try again */
2370	return ERR_PTR(-EAGAIN);
2371
2372 out_reset:
2373	/* reset write */
2374	rb_reset_tail(cpu_buffer, tail_page, tail, length);
2375
2376	return NULL;
2377}
2378
2379static struct ring_buffer_event *
2380__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
2381		  unsigned long length, u64 ts,
2382		  u64 delta, int add_timestamp)
2383{
2384	struct buffer_page *tail_page;
2385	struct ring_buffer_event *event;
2386	unsigned long tail, write;
2387
2388	/*
2389	 * If the time delta since the last event is too big to
2390	 * hold in the time field of the event, then we append a
2391	 * TIME EXTEND event ahead of the data event.
2392	 */
2393	if (unlikely(add_timestamp))
2394		length += RB_LEN_TIME_EXTEND;
2395
2396	tail_page = cpu_buffer->tail_page;
2397	write = local_add_return(length, &tail_page->write);
2398
2399	/* set write to only the index of the write */
2400	write &= RB_WRITE_MASK;
2401	tail = write - length;
2402
2403	/*
2404	 * If this is the first commit on the page, then it has the same
2405	 * timestamp as the page itself.
2406	 */
2407	if (!tail)
2408		delta = 0;
2409
2410	/* See if we shot past the end of this buffer page */
2411	if (unlikely(write > BUF_PAGE_SIZE))
2412		return rb_move_tail(cpu_buffer, length, tail,
2413				    tail_page, ts);
2414
2415	/* We reserved something on the buffer */
2416
2417	event = __rb_page_index(tail_page, tail);
2418	kmemcheck_annotate_bitfield(event, bitfield);
2419	rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
2420
2421	local_inc(&tail_page->entries);
2422
2423	/*
2424	 * If this is the first commit on the page, then update
2425	 * its timestamp.
2426	 */
2427	if (!tail)
2428		tail_page->page->time_stamp = ts;
2429
2430	/* account for these added bytes */
2431	local_add(length, &cpu_buffer->entries_bytes);
2432
2433	return event;
2434}
2435
2436static inline int
2437rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2438		  struct ring_buffer_event *event)
2439{
2440	unsigned long new_index, old_index;
2441	struct buffer_page *bpage;
2442	unsigned long index;
2443	unsigned long addr;
2444
2445	new_index = rb_event_index(event);
2446	old_index = new_index + rb_event_ts_length(event);
2447	addr = (unsigned long)event;
2448	addr &= PAGE_MASK;
2449
2450	bpage = cpu_buffer->tail_page;
2451
2452	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2453		unsigned long write_mask =
2454			local_read(&bpage->write) & ~RB_WRITE_MASK;
2455		unsigned long event_length = rb_event_length(event);
2456		/*
2457		 * This is on the tail page. It is possible that
2458		 * a write could come in and move the tail page
2459		 * and write to the next page. That is fine
2460		 * because we just shorten what is on this page.
2461		 */
2462		old_index += write_mask;
2463		new_index += write_mask;
2464		index = local_cmpxchg(&bpage->write, old_index, new_index);
2465		if (index == old_index) {
2466			/* update counters */
2467			local_sub(event_length, &cpu_buffer->entries_bytes);
2468			return 1;
2469		}
2470	}
2471
2472	/* could not discard */
2473	return 0;
2474}
2475
2476static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2477{
2478	local_inc(&cpu_buffer->committing);
2479	local_inc(&cpu_buffer->commits);
2480}
2481
2482static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2483{
2484	unsigned long commits;
2485
2486	if (RB_WARN_ON(cpu_buffer,
2487		       !local_read(&cpu_buffer->committing)))
2488		return;
2489
2490 again:
2491	commits = local_read(&cpu_buffer->commits);
2492	/* synchronize with interrupts */
2493	barrier();
2494	if (local_read(&cpu_buffer->committing) == 1)
2495		rb_set_commit_to_write(cpu_buffer);
2496
2497	local_dec(&cpu_buffer->committing);
2498
2499	/* synchronize with interrupts */
2500	barrier();
2501
2502	/*
2503	 * Need to account for interrupts coming in between the
2504	 * updating of the commit page and the clearing of the
2505	 * committing counter.
2506	 */
2507	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2508	    !local_read(&cpu_buffer->committing)) {
2509		local_inc(&cpu_buffer->committing);
2510		goto again;
2511	}
2512}
2513
2514static struct ring_buffer_event *
2515rb_reserve_next_event(struct ring_buffer *buffer,
2516		      struct ring_buffer_per_cpu *cpu_buffer,
2517		      unsigned long length)
2518{
2519	struct ring_buffer_event *event;
2520	u64 ts, delta;
2521	int nr_loops = 0;
2522	int add_timestamp;
2523	u64 diff;
2524
2525	rb_start_commit(cpu_buffer);
2526
2527#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2528	/*
2529	 * Due to the ability to swap a cpu buffer from a buffer
2530	 * it is possible it was swapped before we committed.
2531	 * (committing stops a swap). We check for it here and
2532	 * if it happened, we have to fail the write.
2533	 */
2534	barrier();
2535	if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
2536		local_dec(&cpu_buffer->committing);
2537		local_dec(&cpu_buffer->commits);
2538		return NULL;
2539	}
2540#endif
2541
2542	length = rb_calculate_event_length(length);
2543 again:
2544	add_timestamp = 0;
2545	delta = 0;
2546
2547	/*
2548	 * We allow for interrupts to reenter here and do a trace.
2549	 * If one does, it will cause this original code to loop
2550	 * back here. Even with heavy interrupts happening, this
2551	 * should only happen a few times in a row. If this happens
2552	 * 1000 times in a row, there must be either an interrupt
2553	 * storm or we have something buggy.
2554	 * Bail!
2555	 */
2556	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2557		goto out_fail;
2558
2559	ts = rb_time_stamp(cpu_buffer->buffer);
2560	diff = ts - cpu_buffer->write_stamp;
2561
2562	/* make sure this diff is calculated here */
2563	barrier();
2564
2565	/* Did the write stamp get updated already? */
2566	if (likely(ts >= cpu_buffer->write_stamp)) {
2567		delta = diff;
2568		if (unlikely(test_time_stamp(delta))) {
2569			int local_clock_stable = 1;
2570#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2571			local_clock_stable = sched_clock_stable();
2572#endif
2573			WARN_ONCE(delta > (1ULL << 59),
2574				  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
2575				  (unsigned long long)delta,
2576				  (unsigned long long)ts,
2577				  (unsigned long long)cpu_buffer->write_stamp,
2578				  local_clock_stable ? "" :
2579				  "If you just came from a suspend/resume,\n"
2580				  "please switch to the trace global clock:\n"
2581				  "  echo global > /sys/kernel/debug/tracing/trace_clock\n");
2582			add_timestamp = 1;
2583		}
2584	}
2585
2586	event = __rb_reserve_next(cpu_buffer, length, ts,
2587				  delta, add_timestamp);
2588	if (unlikely(PTR_ERR(event) == -EAGAIN))
2589		goto again;
2590
2591	if (!event)
2592		goto out_fail;
2593
2594	return event;
2595
2596 out_fail:
2597	rb_end_commit(cpu_buffer);
2598	return NULL;
2599}
2600
2601#ifdef CONFIG_TRACING
2602
2603/*
2604 * The lock and unlock are done within a preempt disable section.
2605 * The current_context per_cpu variable can only be modified
2606 * by the current task between lock and unlock. But it can
2607 * be modified more than once via an interrupt. To pass this
2608 * information from the lock to the unlock without having to
2609 * access the 'in_interrupt()' functions again (which do show
2610 * a bit of overhead in something as critical as function tracing),
2611 * we use a bitmask trick.
2612 *
2613 *  bit 0 =  NMI context
2614 *  bit 1 =  IRQ context
2615 *  bit 2 =  SoftIRQ context
2616 *  bit 3 =  normal context.
2617 *
2618 * This works because this is the order of contexts that can
2619 * preempt other contexts. A SoftIRQ never preempts an IRQ
2620 * context.
2621 *
2622 * When the context is determined, the corresponding bit is
2623 * checked and set (if it was set, then a recursion of that context
2624 * happened).
2625 *
2626 * On unlock, we need to clear this bit. To do so, just subtract
2627 * 1 from the current_context and AND it to itself.
2628 *
2629 * (binary)
2630 *  101 - 1 = 100
2631 *  101 & 100 = 100 (clearing bit zero)
2632 *
2633 *  1010 - 1 = 1001
2634 *  1010 & 1001 = 1000 (clearing bit 1)
2635 *
2636 * The least significant bit can be cleared this way, and it
2637 * just so happens that it is the same bit corresponding to
2638 * the current context.
2639 */
2640static DEFINE_PER_CPU(unsigned int, current_context);
2641
2642static __always_inline int trace_recursive_lock(void)
2643{
2644	unsigned int val = this_cpu_read(current_context);
2645	int bit;
2646
2647	if (in_interrupt()) {
2648		if (in_nmi())
2649			bit = 0;
2650		else if (in_irq())
2651			bit = 1;
2652		else
2653			bit = 2;
2654	} else
2655		bit = 3;
2656
2657	if (unlikely(val & (1 << bit)))
2658		return 1;
2659
2660	val |= (1 << bit);
2661	this_cpu_write(current_context, val);
2662
2663	return 0;
2664}
2665
2666static __always_inline void trace_recursive_unlock(void)
2667{
2668	unsigned int val = this_cpu_read(current_context);
2669
2670	val--;
2671	val &= this_cpu_read(current_context);
2672	this_cpu_write(current_context, val);
2673}
2674
2675#else
2676
2677#define trace_recursive_lock()		(0)
2678#define trace_recursive_unlock()	do { } while (0)
2679
2680#endif
2681
2682/**
2683 * ring_buffer_lock_reserve - reserve a part of the buffer
2684 * @buffer: the ring buffer to reserve from
2685 * @length: the length of the data to reserve (excluding event header)
2686 *
2687 * Returns a reserved event on the ring buffer to copy directly to.
2688 * The user of this interface will need to get the body to write into
2689 * and can use the ring_buffer_event_data() interface.
2690 *
2691 * The length is the length of the data needed, not the event length
2692 * which also includes the event header.
2693 *
2694 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
2695 * If NULL is returned, then nothing has been allocated or locked.
2696 */
2697struct ring_buffer_event *
2698ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
2699{
2700	struct ring_buffer_per_cpu *cpu_buffer;
2701	struct ring_buffer_event *event;
2702	int cpu;
2703
2704	if (ring_buffer_flags != RB_BUFFERS_ON)
2705		return NULL;
2706
2707	/* If we are tracing schedule, we don't want to recurse */
2708	preempt_disable_notrace();
2709
2710	if (atomic_read(&buffer->record_disabled))
2711		goto out_nocheck;
2712
2713	if (trace_recursive_lock())
2714		goto out_nocheck;
2715
2716	cpu = raw_smp_processor_id();
2717
2718	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2719		goto out;
2720
2721	cpu_buffer = buffer->buffers[cpu];
2722
2723	if (atomic_read(&cpu_buffer->record_disabled))
2724		goto out;
2725
2726	if (length > BUF_MAX_DATA_SIZE)
2727		goto out;
2728
2729	event = rb_reserve_next_event(buffer, cpu_buffer, length);
2730	if (!event)
2731		goto out;
2732
2733	return event;
2734
2735 out:
2736	trace_recursive_unlock();
2737
2738 out_nocheck:
2739	preempt_enable_notrace();
2740	return NULL;
2741}
2742EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
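/*
 * Editor's note: a minimal caller-side sketch of the reserve/commit
 * pairing described above (not part of this file; "struct my_entry"
 * and "my_buffer" are hypothetical). If the reserve fails, recording
 * is disabled or there was no room, and nothing needs to be undone:
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = ring_buffer_lock_reserve(my_buffer, sizeof(*entry));
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;
 *	ring_buffer_unlock_commit(my_buffer, event);
 */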
2743
2744static void
2745rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2746		      struct ring_buffer_event *event)
2747{
2748	u64 delta;
2749
2750	/*
2751	 * The first event in the commit queue updates the
2752	 * time stamp.
2753	 */
2754	if (rb_event_is_commit(cpu_buffer, event)) {
2755		/*
2756		 * A commit event that is first on a page
2757		 * updates the write timestamp with the page stamp
2758		 */
2759		if (!rb_event_index(event))
2760			cpu_buffer->write_stamp =
2761				cpu_buffer->commit_page->page->time_stamp;
2762		else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
2763			delta = event->array[0];
2764			delta <<= TS_SHIFT;
2765			delta += event->time_delta;
2766			cpu_buffer->write_stamp += delta;
2767		} else
2768			cpu_buffer->write_stamp += event->time_delta;
2769	}
2770}
2771
2772static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2773		      struct ring_buffer_event *event)
2774{
2775	local_inc(&cpu_buffer->entries);
2776	rb_update_write_stamp(cpu_buffer, event);
2777	rb_end_commit(cpu_buffer);
2778}
2779
2780static __always_inline void
2781rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
2782{
2783	if (buffer->irq_work.waiters_pending) {
2784		buffer->irq_work.waiters_pending = false;
2785		/* irq_work_queue() supplies its own memory barriers */
2786		irq_work_queue(&buffer->irq_work.work);
2787	}
2788
2789	if (cpu_buffer->irq_work.waiters_pending) {
2790		cpu_buffer->irq_work.waiters_pending = false;
2791		/* irq_work_queue() supplies its own memory barriers */
2792		irq_work_queue(&cpu_buffer->irq_work.work);
2793	}
2794}
2795
2796/**
2797 * ring_buffer_unlock_commit - commit a reserved event
2798 * @buffer: The buffer to commit to
2799 * @event: The event pointer to commit.
2800 *
2801 * This commits the data to the ring buffer, and releases any locks held.
2802 *
2803 * Must be paired with ring_buffer_lock_reserve.
2804 */
2805int ring_buffer_unlock_commit(struct ring_buffer *buffer,
2806			      struct ring_buffer_event *event)
2807{
2808	struct ring_buffer_per_cpu *cpu_buffer;
2809	int cpu = raw_smp_processor_id();
2810
2811	cpu_buffer = buffer->buffers[cpu];
2812
2813	rb_commit(cpu_buffer, event);
2814
2815	rb_wakeups(buffer, cpu_buffer);
2816
2817	trace_recursive_unlock();
2818
2819	preempt_enable_notrace();
2820
2821	return 0;
2822}
2823EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
2824
2825static inline void rb_event_discard(struct ring_buffer_event *event)
2826{
2827	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
2828		event = skip_time_extend(event);
2829
2830	/* array[0] holds the actual length for the discarded event */
2831	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2832	event->type_len = RINGBUF_TYPE_PADDING;
2833	/* time delta must be non zero */
2834	if (!event->time_delta)
2835		event->time_delta = 1;
2836}
2837
2838/*
2839 * Decrement the entries to the page that an event is on.
2840 * The event does not even need to exist, only the pointer
2841 * to the page it is on. This may only be called before the commit
2842 * takes place.
2843 */
2844static inline void
2845rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2846		   struct ring_buffer_event *event)
2847{
2848	unsigned long addr = (unsigned long)event;
2849	struct buffer_page *bpage = cpu_buffer->commit_page;
2850	struct buffer_page *start;
2851
2852	addr &= PAGE_MASK;
2853
2854	/* Do the likely case first */
2855	if (likely(bpage->page == (void *)addr)) {
2856		local_dec(&bpage->entries);
2857		return;
2858	}
2859
2860	/*
2861	 * Because the commit page may be on the reader page we
2862	 * start with the next page and check the end loop there.
2863	 */
2864	rb_inc_page(cpu_buffer, &bpage);
2865	start = bpage;
2866	do {
2867		if (bpage->page == (void *)addr) {
2868			local_dec(&bpage->entries);
2869			return;
2870		}
2871		rb_inc_page(cpu_buffer, &bpage);
2872	} while (bpage != start);
2873
2874	/* commit not part of this buffer?? */
2875	RB_WARN_ON(cpu_buffer, 1);
2876}
2877
2878/**
2879 * ring_buffer_commit_discard - discard an event that has not been committed
2880 * @buffer: the ring buffer
2881 * @event: non committed event to discard
2882 *
2883 * Sometimes an event that is in the ring buffer needs to be ignored.
2884 * This function lets the user discard an event in the ring buffer
2885 * and then that event will not be read later.
2886 *
2887 * This function only works if it is called before the item has been
2888 * committed. It will try to free the event from the ring buffer
2889 * if another event has not been added behind it.
2890 *
2891 * If another event has been added behind it, it will set the event
2892 * up as discarded, and perform the commit.
2893 *
2894 * If this function is called, do not call ring_buffer_unlock_commit on
2895 * the event.
2896 */
2897void ring_buffer_discard_commit(struct ring_buffer *buffer,
2898				struct ring_buffer_event *event)
2899{
2900	struct ring_buffer_per_cpu *cpu_buffer;
2901	int cpu;
2902
2903	/* The event is discarded regardless */
2904	rb_event_discard(event);
2905
2906	cpu = smp_processor_id();
2907	cpu_buffer = buffer->buffers[cpu];
2908
2909	/*
2910	 * This must only be called if the event has not been
2911	 * committed yet. Thus we can assume that preemption
2912	 * is still disabled.
2913	 */
2914	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
2915
2916	rb_decrement_entry(cpu_buffer, event);
2917	if (rb_try_to_discard(cpu_buffer, event))
2918		goto out;
2919
2920	/*
2921	 * The commit is still visible to the reader, so we
2922	 * must still update the timestamp.
2923	 */
2924	rb_update_write_stamp(cpu_buffer, event);
2925 out:
2926	rb_end_commit(cpu_buffer);
2927
2928	trace_recursive_unlock();
2929
2930	preempt_enable_notrace();
2931
2932}
2933EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
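/*
 * Editor's note: a hedged sketch of backing out of a reservation with
 * the function above (caller-side code, not part of this file;
 * fill_entry() is a hypothetical helper that may decide the event is
 * not wanted after all):
 *
 *	event = ring_buffer_lock_reserve(my_buffer, sizeof(*entry));
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	if (fill_entry(entry))
 *		ring_buffer_unlock_commit(my_buffer, event);
 *	else
 *		ring_buffer_discard_commit(my_buffer, event);
 */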
2934
2935/**
2936 * ring_buffer_write - write data to the buffer without reserving
2937 * @buffer: The ring buffer to write to.
2938 * @length: The length of the data being written (excluding the event header)
2939 * @data: The data to write to the buffer.
2940 *
2941 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
2942 * one function. If you already have the data to write to the buffer, it
2943 * may be easier to simply call this function.
2944 *
2945 * Note, like ring_buffer_lock_reserve, the length is the length of the data
2946 * and not the length of the event which would hold the header.
2947 */
2948int ring_buffer_write(struct ring_buffer *buffer,
2949		      unsigned long length,
2950		      void *data)
2951{
2952	struct ring_buffer_per_cpu *cpu_buffer;
2953	struct ring_buffer_event *event;
2954	void *body;
2955	int ret = -EBUSY;
2956	int cpu;
2957
2958	if (ring_buffer_flags != RB_BUFFERS_ON)
2959		return -EBUSY;
2960
2961	preempt_disable_notrace();
2962
2963	if (atomic_read(&buffer->record_disabled))
2964		goto out;
2965
2966	cpu = raw_smp_processor_id();
2967
2968	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2969		goto out;
2970
2971	cpu_buffer = buffer->buffers[cpu];
2972
2973	if (atomic_read(&cpu_buffer->record_disabled))
2974		goto out;
2975
2976	if (length > BUF_MAX_DATA_SIZE)
2977		goto out;
2978
2979	event = rb_reserve_next_event(buffer, cpu_buffer, length);
2980	if (!event)
2981		goto out;
2982
2983	body = rb_event_data(event);
2984
2985	memcpy(body, data, length);
2986
2987	rb_commit(cpu_buffer, event);
2988
2989	rb_wakeups(buffer, cpu_buffer);
2990
2991	ret = 0;
2992 out:
2993	preempt_enable_notrace();
2994
2995	return ret;
2996}
2997EXPORT_SYMBOL_GPL(ring_buffer_write);
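/*
 * Editor's note: a minimal sketch of the one-shot path above when the
 * payload is already assembled (caller-side code, not part of this
 * file). ring_buffer_write() returns 0 on success and a negative
 * error if the buffer is disabled or the reserve fails:
 *
 *	char msg[] = "hello";
 *
 *	if (ring_buffer_write(my_buffer, sizeof(msg), msg))
 *		pr_debug("ring buffer write failed\n");
 */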
2998
2999static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
3000{
3001	struct buffer_page *reader = cpu_buffer->reader_page;
3002	struct buffer_page *head = rb_set_head_page(cpu_buffer);
3003	struct buffer_page *commit = cpu_buffer->commit_page;
3004
3005	/* In case of error, head will be NULL */
3006	if (unlikely(!head))
3007		return 1;
3008
3009	return reader->read == rb_page_commit(reader) &&
3010		(commit == reader ||
3011		 (commit == head &&
3012		  head->read == rb_page_commit(commit)));
3013}
3014
3015/**
3016 * ring_buffer_record_disable - stop all writes into the buffer
3017 * @buffer: The ring buffer to stop writes to.
3018 *
3019 * This prevents all writes to the buffer. Any attempt to write
3020 * to the buffer after this will fail and return NULL.
3021 *
3022 * The caller should call synchronize_sched() after this.
3023 */
3024void ring_buffer_record_disable(struct ring_buffer *buffer)
3025{
3026	atomic_inc(&buffer->record_disabled);
3027}
3028EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
3029
3030/**
3031 * ring_buffer_record_enable - enable writes to the buffer
3032 * @buffer: The ring buffer to enable writes
3033 *
3034 * Note, multiple disables will need the same number of enables
3035 * to truly enable the writing (much like preempt_disable).
3036 */
3037void ring_buffer_record_enable(struct ring_buffer *buffer)
3038{
3039	atomic_dec(&buffer->record_disabled);
3040}
3041EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
3042
3043/**
3044 * ring_buffer_record_off - stop all writes into the buffer
3045 * @buffer: The ring buffer to stop writes to.
3046 *
3047 * This prevents all writes to the buffer. Any attempt to write
3048 * to the buffer after this will fail and return NULL.
3049 *
3050 * This is different than ring_buffer_record_disable() as
3051 * it works like an on/off switch, whereas the disable() version
3052 * must be paired with an enable().
3053 */
3054void ring_buffer_record_off(struct ring_buffer *buffer)
3055{
3056	unsigned int rd;
3057	unsigned int new_rd;
3058
3059	do {
3060		rd = atomic_read(&buffer->record_disabled);
3061		new_rd = rd | RB_BUFFER_OFF;
3062	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3063}
3064EXPORT_SYMBOL_GPL(ring_buffer_record_off);
3065
3066/**
3067 * ring_buffer_record_on - restart writes into the buffer
3068 * @buffer: The ring buffer to start writes to.
3069 *
3070 * This enables all writes to the buffer that was disabled by
3071 * ring_buffer_record_off().
3072 *
3073 * This is different than ring_buffer_record_enable() as
3074 * it works like an on/off switch, whereas the enable() version
3075 * must be paired with a disable().
3076 */
3077void ring_buffer_record_on(struct ring_buffer *buffer)
3078{
3079	unsigned int rd;
3080	unsigned int new_rd;
3081
3082	do {
3083		rd = atomic_read(&buffer->record_disabled);
3084		new_rd = rd & ~RB_BUFFER_OFF;
3085	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3086}
3087EXPORT_SYMBOL_GPL(ring_buffer_record_on);
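/*
 * Editor's note: an illustration of how the on/off switch above
 * differs from the nesting disable/enable counter (caller-side
 * sketch, not part of this file, and assuming no other holder of the
 * disable counter):
 *
 *	ring_buffer_record_disable(buffer);
 *	ring_buffer_record_disable(buffer);
 *	ring_buffer_record_enable(buffer);	// writes still blocked
 *	ring_buffer_record_enable(buffer);	// writes allowed again
 *
 *	ring_buffer_record_off(buffer);
 *	ring_buffer_record_off(buffer);
 *	ring_buffer_record_on(buffer);		// writes allowed again
 */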
3088
3089/**
3090 * ring_buffer_record_is_on - return true if the ring buffer can write
3091 * @buffer: The ring buffer to see if write is enabled
3092 *
3093 * Returns true if the ring buffer is in a state that it accepts writes.
3094 */
3095int ring_buffer_record_is_on(struct ring_buffer *buffer)
3096{
3097	return !atomic_read(&buffer->record_disabled);
3098}
3099
3100/**
3101 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
3102 * @buffer: The ring buffer to stop writes to.
3103 * @cpu: The CPU buffer to stop
3104 *
3105 * This prevents all writes to the buffer. Any attempt to write
3106 * to the buffer after this will fail and return NULL.
3107 *
3108 * The caller should call synchronize_sched() after this.
3109 */
3110void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
3111{
3112	struct ring_buffer_per_cpu *cpu_buffer;
3113
3114	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3115		return;
3116
3117	cpu_buffer = buffer->buffers[cpu];
3118	atomic_inc(&cpu_buffer->record_disabled);
3119}
3120EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
3121
3122/**
3123 * ring_buffer_record_enable_cpu - enable writes to the buffer
3124 * @buffer: The ring buffer to enable writes
3125 * @cpu: The CPU to enable.
3126 *
3127 * Note, multiple disables will need the same number of enables
3128 * to truly enable the writing (much like preempt_disable).
3129 */
3130void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
3131{
3132	struct ring_buffer_per_cpu *cpu_buffer;
3133
3134	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3135		return;
3136
3137	cpu_buffer = buffer->buffers[cpu];
3138	atomic_dec(&cpu_buffer->record_disabled);
3139}
3140EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
3141
3142/*
3143 * The total entries in the ring buffer is the running counter
3144 * of entries entered into the ring buffer, minus the sum of
3145 * the entries read from the ring buffer and the number of
3146 * entries that were overwritten.
3147 */
3148static inline unsigned long
3149rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
3150{
3151	return local_read(&cpu_buffer->entries) -
3152		(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
3153}
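/*
 * Editor's note: a quick worked example of the accounting above, with
 * made-up numbers: after 1000 events have been written, 300 of them
 * overwritten by the writer wrapping and 200 consumed by a reader,
 * rb_num_of_entries() reports 1000 - (300 + 200) = 500.
 */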
3154
3155/**
3156 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
3157 * @buffer: The ring buffer
3158 * @cpu: The per CPU buffer to read from.
3159 */
3160u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
3161{
3162	unsigned long flags;
3163	struct ring_buffer_per_cpu *cpu_buffer;
3164	struct buffer_page *bpage;
3165	u64 ret = 0;
3166
3167	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3168		return 0;
3169
3170	cpu_buffer = buffer->buffers[cpu];
3171	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3172	/*
3173	 * if the tail is on the reader_page, the oldest time stamp is on the reader
3174	 * page
3175	 */
3176	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
3177		bpage = cpu_buffer->reader_page;
3178	else
3179		bpage = rb_set_head_page(cpu_buffer);
3180	if (bpage)
3181		ret = bpage->page->time_stamp;
3182	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3183
3184	return ret;
3185}
3186EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
3187
3188/**
3189 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
3190 * @buffer: The ring buffer
3191 * @cpu: The per CPU buffer to read from.
3192 */
3193unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
3194{
3195	struct ring_buffer_per_cpu *cpu_buffer;
3196	unsigned long ret;
3197
3198	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3199		return 0;
3200
3201	cpu_buffer = buffer->buffers[cpu];
3202	ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
3203
3204	return ret;
3205}
3206EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
3207
3208/**
3209 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
3210 * @buffer: The ring buffer
3211 * @cpu: The per CPU buffer to get the entries from.
3212 */
3213unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
3214{
3215	struct ring_buffer_per_cpu *cpu_buffer;
3216
3217	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3218		return 0;
3219
3220	cpu_buffer = buffer->buffers[cpu];
3221
3222	return rb_num_of_entries(cpu_buffer);
3223}
3224EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
3225
3226/**
3227 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
3228 * buffer wrapping around (only if RB_FL_OVERWRITE is on).
3229 * @buffer: The ring buffer
3230 * @cpu: The per CPU buffer to get the number of overruns from
3231 */
3232unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
3233{
3234	struct ring_buffer_per_cpu *cpu_buffer;
3235	unsigned long ret;
3236
3237	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3238		return 0;
3239
3240	cpu_buffer = buffer->buffers[cpu];
3241	ret = local_read(&cpu_buffer->overrun);
3242
3243	return ret;
3244}
3245EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
3246
3247/**
3248 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
3249 * commits failing due to the buffer wrapping around while there are uncommitted
3250 * events, such as during an interrupt storm.
3251 * @buffer: The ring buffer
3252 * @cpu: The per CPU buffer to get the number of overruns from
3253 */
3254unsigned long
3255ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
3256{
3257	struct ring_buffer_per_cpu *cpu_buffer;
3258	unsigned long ret;
3259
3260	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3261		return 0;
3262
3263	cpu_buffer = buffer->buffers[cpu];
3264	ret = local_read(&cpu_buffer->commit_overrun);
3265
3266	return ret;
3267}
3268EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
3269
3270/**
3271 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
3272 * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
3273 * @buffer: The ring buffer
3274 * @cpu: The per CPU buffer to get the number of overruns from
3275 */
3276unsigned long
3277ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
3278{
3279	struct ring_buffer_per_cpu *cpu_buffer;
3280	unsigned long ret;
3281
3282	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3283		return 0;
3284
3285	cpu_buffer = buffer->buffers[cpu];
3286	ret = local_read(&cpu_buffer->dropped_events);
3287
3288	return ret;
3289}
3290EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
3291
3292/**
3293 * ring_buffer_read_events_cpu - get the number of events successfully read
3294 * @buffer: The ring buffer
3295 * @cpu: The per CPU buffer to get the number of events read
3296 */
3297unsigned long
3298ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
3299{
3300	struct ring_buffer_per_cpu *cpu_buffer;
3301
3302	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3303		return 0;
3304
3305	cpu_buffer = buffer->buffers[cpu];
3306	return cpu_buffer->read;
3307}
3308EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
3309
3310/**
3311 * ring_buffer_entries - get the number of entries in a buffer
3312 * @buffer: The ring buffer
3313 *
3314 * Returns the total number of entries in the ring buffer
3315 * (all CPU entries)
3316 */
3317unsigned long ring_buffer_entries(struct ring_buffer *buffer)
3318{
3319	struct ring_buffer_per_cpu *cpu_buffer;
3320	unsigned long entries = 0;
3321	int cpu;
3322
3323	/* if you care about this being correct, lock the buffer */
3324	for_each_buffer_cpu(buffer, cpu) {
3325		cpu_buffer = buffer->buffers[cpu];
3326		entries += rb_num_of_entries(cpu_buffer);
3327	}
3328
3329	return entries;
3330}
3331EXPORT_SYMBOL_GPL(ring_buffer_entries);
3332
3333/**
3334 * ring_buffer_overruns - get the number of overruns in buffer
3335 * @buffer: The ring buffer
3336 *
3337 * Returns the total number of overruns in the ring buffer
3338 * (all CPU entries)
3339 */
3340unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
3341{
3342	struct ring_buffer_per_cpu *cpu_buffer;
3343	unsigned long overruns = 0;
3344	int cpu;
3345
3346	/* if you care about this being correct, lock the buffer */
3347	for_each_buffer_cpu(buffer, cpu) {
3348		cpu_buffer = buffer->buffers[cpu];
3349		overruns += local_read(&cpu_buffer->overrun);
3350	}
3351
3352	return overruns;
3353}
3354EXPORT_SYMBOL_GPL(ring_buffer_overruns);
3355
3356static void rb_iter_reset(struct ring_buffer_iter *iter)
3357{
3358	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3359
3360	/* Iterator usage is expected to have record disabled */
3361	if (list_empty(&cpu_buffer->reader_page->list)) {
3362		iter->head_page = rb_set_head_page(cpu_buffer);
3363		if (unlikely(!iter->head_page))
3364			return;
3365		iter->head = iter->head_page->read;
3366	} else {
3367		iter->head_page = cpu_buffer->reader_page;
3368		iter->head = cpu_buffer->reader_page->read;
3369	}
3370	if (iter->head)
3371		iter->read_stamp = cpu_buffer->read_stamp;
3372	else
3373		iter->read_stamp = iter->head_page->page->time_stamp;
3374	iter->cache_reader_page = cpu_buffer->reader_page;
3375	iter->cache_read = cpu_buffer->read;
3376}
3377
3378/**
3379 * ring_buffer_iter_reset - reset an iterator
3380 * @iter: The iterator to reset
3381 *
3382 * Resets the iterator, so that it will start from the beginning
3383 * again.
3384 */
3385void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
3386{
3387	struct ring_buffer_per_cpu *cpu_buffer;
3388	unsigned long flags;
3389
3390	if (!iter)
3391		return;
3392
3393	cpu_buffer = iter->cpu_buffer;
3394
3395	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3396	rb_iter_reset(iter);
3397	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3398}
3399EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
3400
3401/**
3402 * ring_buffer_iter_empty - check if an iterator has no more to read
3403 * @iter: The iterator to check
3404 */
3405int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
3406{
3407	struct ring_buffer_per_cpu *cpu_buffer;
3408
3409	cpu_buffer = iter->cpu_buffer;
3410
3411	return iter->head_page == cpu_buffer->commit_page &&
3412		iter->head == rb_commit_index(cpu_buffer);
3413}
3414EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
3415
3416static void
3417rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
3418		     struct ring_buffer_event *event)
3419{
3420	u64 delta;
3421
3422	switch (event->type_len) {
3423	case RINGBUF_TYPE_PADDING:
3424		return;
3425
3426	case RINGBUF_TYPE_TIME_EXTEND:
3427		delta = event->array[0];
3428		delta <<= TS_SHIFT;
3429		delta += event->time_delta;
3430		cpu_buffer->read_stamp += delta;
3431		return;
3432
3433	case RINGBUF_TYPE_TIME_STAMP:
3434		/* FIXME: not implemented */
3435		return;
3436
3437	case RINGBUF_TYPE_DATA:
3438		cpu_buffer->read_stamp += event->time_delta;
3439		return;
3440
3441	default:
3442		BUG();
3443	}
3444	return;
3445}
3446
3447static void
3448rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
3449			  struct ring_buffer_event *event)
3450{
3451	u64 delta;
3452
3453	switch (event->type_len) {
3454	case RINGBUF_TYPE_PADDING:
3455		return;
3456
3457	case RINGBUF_TYPE_TIME_EXTEND:
3458		delta = event->array[0];
3459		delta <<= TS_SHIFT;
3460		delta += event->time_delta;
3461		iter->read_stamp += delta;
3462		return;
3463
3464	case RINGBUF_TYPE_TIME_STAMP:
3465		/* FIXME: not implemented */
3466		return;
3467
3468	case RINGBUF_TYPE_DATA:
3469		iter->read_stamp += event->time_delta;
3470		return;
3471
3472	default:
3473		BUG();
3474	}
3475	return;
3476}
3477
3478static struct buffer_page *
3479rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3480{
3481	struct buffer_page *reader = NULL;
3482	unsigned long overwrite;
3483	unsigned long flags;
3484	int nr_loops = 0;
3485	int ret;
3486
3487	local_irq_save(flags);
3488	arch_spin_lock(&cpu_buffer->lock);
3489
3490 again:
3491	/*
3492	 * This should normally only loop twice. But because the
3493	 * start of the reader inserts an empty page, it causes
3494	 * a case where we will loop three times. There should be no
3495	 * reason to loop four times (that I know of).
3496	 */
3497	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
3498		reader = NULL;
3499		goto out;
3500	}
3501
3502	reader = cpu_buffer->reader_page;
3503
3504	/* If there's more to read, return this page */
3505	if (cpu_buffer->reader_page->read < rb_page_size(reader))
3506		goto out;
3507
3508	/* Never should we have an index greater than the size */
3509	if (RB_WARN_ON(cpu_buffer,
3510		       cpu_buffer->reader_page->read > rb_page_size(reader)))
3511		goto out;
3512
3513	/* check if we caught up to the tail */
3514	reader = NULL;
3515	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
3516		goto out;
3517
3518	/* Don't bother swapping if the ring buffer is empty */
3519	if (rb_num_of_entries(cpu_buffer) == 0)
3520		goto out;
3521
3522	/*
3523	 * Reset the reader page to size zero.
3524	 */
3525	local_set(&cpu_buffer->reader_page->write, 0);
3526	local_set(&cpu_buffer->reader_page->entries, 0);
3527	local_set(&cpu_buffer->reader_page->page->commit, 0);
3528	cpu_buffer->reader_page->real_end = 0;
3529
3530 spin:
3531	/*
3532	 * Splice the empty reader page into the list around the head.
3533	 */
3534	reader = rb_set_head_page(cpu_buffer);
3535	if (!reader)
3536		goto out;
3537	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
3538	cpu_buffer->reader_page->list.prev = reader->list.prev;
3539
3540	/*
3541	 * cpu_buffer->pages just needs to point to the buffer; it
3542	 * has no specific buffer page to point to. Let's move it out
3543	 *  of our way so we don't accidentally swap it.
3544	 */
3545	cpu_buffer->pages = reader->list.prev;
3546
3547	/* The reader page will be pointing to the new head */
3548	rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
3549
3550	/*
3551	 * We want to make sure we read the overruns after we set up our
3552	 * pointers to the next object. The writer side does a
3553	 * cmpxchg to cross pages which acts as the mb on the writer
3554	 * side. Note, the reader will constantly fail the swap
3555	 * while the writer is updating the pointers, so this
3556	 * guarantees that the overwrite recorded here is the one we
3557	 * want to compare with the last_overrun.
3558	 */
3559	smp_mb();
3560	overwrite = local_read(&(cpu_buffer->overrun));
3561
3562	/*
3563	 * Here's the tricky part.
3564	 *
3565	 * We need to move the pointer past the header page.
3566	 * But we can only do that if a writer is not currently
3567	 * moving it. The page before the header page has the
3568	 * flag bit '1' set if it is pointing to the page we want.
3569	 * But if the writer is in the process of moving it,
3570	 * then it will be '2' or already moved '0'.
3571	 */
3572
3573	ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
3574
3575	/*
3576	 * If we did not convert it, then we must try again.
3577	 */
3578	if (!ret)
3579		goto spin;
3580
3581	/*
3582	 * Yeah! We succeeded in replacing the page.
3583	 *
3584	 * Now make the new head point back to the reader page.
3585	 */
3586	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
3587	rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
3588
3589	/* Finally update the reader page to the new head */
3590	cpu_buffer->reader_page = reader;
3591	rb_reset_reader_page(cpu_buffer);
3592
3593	if (overwrite != cpu_buffer->last_overrun) {
3594		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
3595		cpu_buffer->last_overrun = overwrite;
3596	}
3597
3598	goto again;
3599
3600 out:
3601	arch_spin_unlock(&cpu_buffer->lock);
3602	local_irq_restore(flags);
3603
3604	return reader;
3605}
3606
3607static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
3608{
3609	struct ring_buffer_event *event;
3610	struct buffer_page *reader;
3611	unsigned length;
3612
3613	reader = rb_get_reader_page(cpu_buffer);
3614
3615	/* This function should not be called when buffer is empty */
3616	if (RB_WARN_ON(cpu_buffer, !reader))
3617		return;
3618
3619	event = rb_reader_event(cpu_buffer);
3620
3621	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
3622		cpu_buffer->read++;
3623
3624	rb_update_read_stamp(cpu_buffer, event);
3625
3626	length = rb_event_length(event);
3627	cpu_buffer->reader_page->read += length;
3628}
3629
3630static void rb_advance_iter(struct ring_buffer_iter *iter)
3631{
3632	struct ring_buffer_per_cpu *cpu_buffer;
3633	struct ring_buffer_event *event;
3634	unsigned length;
3635
3636	cpu_buffer = iter->cpu_buffer;
3637
3638	/*
3639	 * Check if we are at the end of the buffer.
3640	 */
3641	if (iter->head >= rb_page_size(iter->head_page)) {
3642		/* discarded commits can make the page empty */
3643		if (iter->head_page == cpu_buffer->commit_page)
3644			return;
3645		rb_inc_iter(iter);
3646		return;
3647	}
3648
3649	event = rb_iter_head_event(iter);
3650
3651	length = rb_event_length(event);
3652
3653	/*
3654	 * This should not be called to advance the header if we are
3655	 * at the tail of the buffer.
3656	 */
3657	if (RB_WARN_ON(cpu_buffer,
3658		       (iter->head_page == cpu_buffer->commit_page) &&
3659		       (iter->head + length > rb_commit_index(cpu_buffer))))
3660		return;
3661
3662	rb_update_iter_read_stamp(iter, event);
3663
3664	iter->head += length;
3665
3666	/* check for end of page padding */
3667	if ((iter->head >= rb_page_size(iter->head_page)) &&
3668	    (iter->head_page != cpu_buffer->commit_page))
3669		rb_inc_iter(iter);
3670}
3671
3672static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
3673{
3674	return cpu_buffer->lost_events;
3675}
3676
3677static struct ring_buffer_event *
3678rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
3679	       unsigned long *lost_events)
3680{
3681	struct ring_buffer_event *event;
3682	struct buffer_page *reader;
3683	int nr_loops = 0;
3684
3685 again:
3686	/*
3687	 * We repeat when a time extend is encountered.
3688	 * Since the time extend is always attached to a data event,
3689	 * we should never loop more than once.
3690	 * (We never hit the following condition more than twice).
3691	 */
3692	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3693		return NULL;
3694
3695	reader = rb_get_reader_page(cpu_buffer);
3696	if (!reader)
3697		return NULL;
3698
3699	event = rb_reader_event(cpu_buffer);
3700
3701	switch (event->type_len) {
3702	case RINGBUF_TYPE_PADDING:
3703		if (rb_null_event(event))
3704			RB_WARN_ON(cpu_buffer, 1);
3705		/*
3706		 * Because the writer could be discarding every
3707		 * event it creates (which would probably be bad)
3708		 * if we were to go back to "again" then we may never
3709		 * catch up, and will trigger the warn on, or lock
3710		 * the box. Return the padding, and we will release
3711		 * the current locks, and try again.
3712		 */
3713		return event;
3714
3715	case RINGBUF_TYPE_TIME_EXTEND:
3716		/* Internal data, OK to advance */
3717		rb_advance_reader(cpu_buffer);
3718		goto again;
3719
3720	case RINGBUF_TYPE_TIME_STAMP:
3721		/* FIXME: not implemented */
3722		rb_advance_reader(cpu_buffer);
3723		goto again;
3724
3725	case RINGBUF_TYPE_DATA:
3726		if (ts) {
3727			*ts = cpu_buffer->read_stamp + event->time_delta;
3728			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3729							 cpu_buffer->cpu, ts);
3730		}
3731		if (lost_events)
3732			*lost_events = rb_lost_events(cpu_buffer);
3733		return event;
3734
3735	default:
3736		BUG();
3737	}
3738
3739	return NULL;
3740}
3741EXPORT_SYMBOL_GPL(ring_buffer_peek);
3742
3743static struct ring_buffer_event *
3744rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3745{
3746	struct ring_buffer *buffer;
3747	struct ring_buffer_per_cpu *cpu_buffer;
3748	struct ring_buffer_event *event;
3749	int nr_loops = 0;
3750
3751	cpu_buffer = iter->cpu_buffer;
3752	buffer = cpu_buffer->buffer;
3753
3754	/*
3755	 * Check if someone performed a consuming read to
3756	 * the buffer. A consuming read invalidates the iterator
3757	 * and we need to reset the iterator in this case.
3758	 */
3759	if (unlikely(iter->cache_read != cpu_buffer->read ||
3760		     iter->cache_reader_page != cpu_buffer->reader_page))
3761		rb_iter_reset(iter);
3762
3763 again:
3764	if (ring_buffer_iter_empty(iter))
3765		return NULL;
3766
3767	/*
3768	 * We repeat when a time extend is encountered.
3769	 * Since the time extend is always attached to a data event,
3770	 * we should never loop more than once.
3771	 * (We never hit the following condition more than twice).
3772	 */
3773	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3774		return NULL;
3775
3776	if (rb_per_cpu_empty(cpu_buffer))
3777		return NULL;
3778
3779	if (iter->head >= local_read(&iter->head_page->page->commit)) {
3780		rb_inc_iter(iter);
3781		goto again;
3782	}
3783
3784	event = rb_iter_head_event(iter);
3785
3786	switch (event->type_len) {
3787	case RINGBUF_TYPE_PADDING:
3788		if (rb_null_event(event)) {
3789			rb_inc_iter(iter);
3790			goto again;
3791		}
3792		rb_advance_iter(iter);
3793		return event;
3794
3795	case RINGBUF_TYPE_TIME_EXTEND:
3796		/* Internal data, OK to advance */
3797		rb_advance_iter(iter);
3798		goto again;
3799
3800	case RINGBUF_TYPE_TIME_STAMP:
3801		/* FIXME: not implemented */
3802		rb_advance_iter(iter);
3803		goto again;
3804
3805	case RINGBUF_TYPE_DATA:
3806		if (ts) {
3807			*ts = iter->read_stamp + event->time_delta;
3808			ring_buffer_normalize_time_stamp(buffer,
3809							 cpu_buffer->cpu, ts);
3810		}
3811		return event;
3812
3813	default:
3814		BUG();
3815	}
3816
3817	return NULL;
3818}
3819EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
3820
3821static inline int rb_ok_to_lock(void)
3822{
3823	/*
3824	 * If an NMI die dumps out the content of the ring buffer
3825	 * do not grab locks. We also permanently disable the ring
3826	 * buffer. A one time deal is all you get from reading
3827	 * the ring buffer from an NMI.
3828	 */
3829	if (likely(!in_nmi()))
3830		return 1;
3831
3832	tracing_off_permanent();
3833	return 0;
3834}
3835
3836/**
3837 * ring_buffer_peek - peek at the next event to be read
3838 * @buffer: The ring buffer to read
3839 * @cpu: The cpu to peek at
3840 * @ts: The timestamp counter of this event.
3841 * @lost_events: a variable to store if events were lost (may be NULL)
3842 *
3843 * This will return the event that will be read next, but does
3844 * not consume the data.
3845 */
3846struct ring_buffer_event *
3847ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
3848		 unsigned long *lost_events)
3849{
3850	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3851	struct ring_buffer_event *event;
3852	unsigned long flags;
3853	int dolock;
3854
3855	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3856		return NULL;
3857
3858	dolock = rb_ok_to_lock();
3859 again:
3860	local_irq_save(flags);
3861	if (dolock)
3862		raw_spin_lock(&cpu_buffer->reader_lock);
3863	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3864	if (event && event->type_len == RINGBUF_TYPE_PADDING)
3865		rb_advance_reader(cpu_buffer);
3866	if (dolock)
3867		raw_spin_unlock(&cpu_buffer->reader_lock);
3868	local_irq_restore(flags);
3869
3870	if (event && event->type_len == RINGBUF_TYPE_PADDING)
3871		goto again;
3872
3873	return event;
3874}
3875
3876/**
3877 * ring_buffer_iter_peek - peek at the next event to be read
3878 * @iter: The ring buffer iterator
3879 * @ts: The timestamp counter of this event.
3880 *
3881 * This will return the event that will be read next, but does
3882 * not increment the iterator.
3883 */
3884struct ring_buffer_event *
3885ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3886{
3887	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3888	struct ring_buffer_event *event;
3889	unsigned long flags;
3890
3891 again:
3892	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3893	event = rb_iter_peek(iter, ts);
3894	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3895
3896	if (event && event->type_len == RINGBUF_TYPE_PADDING)
3897		goto again;
3898
3899	return event;
3900}
3901
3902/**
3903 * ring_buffer_consume - return an event and consume it
3904 * @buffer: The ring buffer to get the next event from
3905 * @cpu: the cpu to read the buffer from
3906 * @ts: a variable to store the timestamp (may be NULL)
3907 * @lost_events: a variable to store if events were lost (may be NULL)
3908 *
3909 * Returns the next event in the ring buffer, and that event is consumed.
3910 * Meaning that sequential reads will keep returning a different event,
3911 * and eventually empty the ring buffer if the producer is slower.
3912 */
3913struct ring_buffer_event *
3914ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
3915		    unsigned long *lost_events)
3916{
3917	struct ring_buffer_per_cpu *cpu_buffer;
3918	struct ring_buffer_event *event = NULL;
3919	unsigned long flags;
3920	int dolock;
3921
3922	dolock = rb_ok_to_lock();
3923
3924 again:
3925	/* might be called in atomic */
3926	preempt_disable();
3927
3928	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3929		goto out;
3930
3931	cpu_buffer = buffer->buffers[cpu];
3932	local_irq_save(flags);
3933	if (dolock)
3934		raw_spin_lock(&cpu_buffer->reader_lock);
3935
3936	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3937	if (event) {
3938		cpu_buffer->lost_events = 0;
3939		rb_advance_reader(cpu_buffer);
3940	}
3941
3942	if (dolock)
3943		raw_spin_unlock(&cpu_buffer->reader_lock);
3944	local_irq_restore(flags);
3945
3946 out:
3947	preempt_enable();
3948
3949	if (event && event->type_len == RINGBUF_TYPE_PADDING)
3950		goto again;
3951
3952	return event;
3953}
3954EXPORT_SYMBOL_GPL(ring_buffer_consume);
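/*
 * Illustrative usage sketch (not part of the original file): a minimal
 * consuming-read loop built only on the calls documented above.  The
 * helper name and the pr_info() reporting are made up for illustration;
 * it assumes @buffer was allocated with ring_buffer_alloc() and @cpu is
 * a valid, online CPU.  Each ring_buffer_consume() call returns the next
 * unread event and consumes it, so the loop drains the per-cpu buffer.
 */
static void example_drain_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	unsigned long lost;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
		unsigned length = ring_buffer_event_length(event);
		void *payload = ring_buffer_event_data(event);

		/* @lost counts events overwritten since the last read */
		pr_info("cpu %d: %u byte event at %llu, %lu lost\n",
			cpu, length, (unsigned long long)ts, lost);
		(void)payload;	/* layout of the payload is producer defined */
	}
}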
3955
3956/**
3957 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
3958 * @buffer: The ring buffer to read from
3959 * @cpu: The cpu buffer to iterate over
3960 *
3961 * This performs the initial preparations necessary to iterate
3962 * through the buffer.  Memory is allocated, buffer recording
3963 * is disabled, and the iterator pointer is returned to the caller.
3964 *
3965 * Disabling buffer recording prevents the reading from being
3966 * corrupted. This is not a consuming read, so a producer is not
3967 * expected.
3968 *
3969 * After a sequence of ring_buffer_read_prepare calls, the user is
3970 * expected to make at least one call to ring_buffer_read_prepare_sync.
3971 * Afterwards, ring_buffer_read_start is invoked to get things going
3972 * for real.
3973 *
3974 * This overall must be paired with ring_buffer_read_finish.
3975 */
3976struct ring_buffer_iter *
3977ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
3978{
3979	struct ring_buffer_per_cpu *cpu_buffer;
3980	struct ring_buffer_iter *iter;
3981
3982	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3983		return NULL;
3984
3985	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3986	if (!iter)
3987		return NULL;
3988
3989	cpu_buffer = buffer->buffers[cpu];
3990
3991	iter->cpu_buffer = cpu_buffer;
3992
3993	atomic_inc(&buffer->resize_disabled);
3994	atomic_inc(&cpu_buffer->record_disabled);
3995
3996	return iter;
3997}
3998EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
3999
4000/**
4001 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
4002 *
4003 * All previously invoked ring_buffer_read_prepare calls to prepare
4004 * iterators will be synchronized.  Afterwards, ring_buffer_read_start
4005 * calls on those iterators are allowed.
4006 */
4007void
4008ring_buffer_read_prepare_sync(void)
4009{
4010	synchronize_sched();
4011}
4012EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
4013
4014/**
4015 * ring_buffer_read_start - start a non consuming read of the buffer
4016 * @iter: The iterator returned by ring_buffer_read_prepare
4017 *
4018 * This finalizes the startup of an iteration through the buffer.
4019 * The iterator comes from a call to ring_buffer_read_prepare and
4020 * an intervening ring_buffer_read_prepare_sync must have been
4021 * performed.
4022 *
4023 * Must be paired with ring_buffer_read_finish.
4024 */
4025void
4026ring_buffer_read_start(struct ring_buffer_iter *iter)
4027{
4028	struct ring_buffer_per_cpu *cpu_buffer;
4029	unsigned long flags;
4030
4031	if (!iter)
4032		return;
4033
4034	cpu_buffer = iter->cpu_buffer;
4035
4036	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4037	arch_spin_lock(&cpu_buffer->lock);
4038	rb_iter_reset(iter);
4039	arch_spin_unlock(&cpu_buffer->lock);
4040	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4041}
4042EXPORT_SYMBOL_GPL(ring_buffer_read_start);
4043
4044/**
4045 * ring_buffer_read_finish - finish reading the iterator of the buffer
4046 * @iter: The iterator retrieved by ring_buffer_read_prepare
4047 *
4048 * This re-enables the recording to the buffer, and frees the
4049 * iterator.
4050 */
4051void
4052ring_buffer_read_finish(struct ring_buffer_iter *iter)
4053{
4054	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4055	unsigned long flags;
4056
4057	/*
4058	 * Ring buffer is disabled from recording, here's a good place
4059	 * to check the integrity of the ring buffer.
4060	 * Must prevent readers from trying to read, as the check
4061	 * clears the HEAD page and readers require it.
4062	 */
4063	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4064	rb_check_pages(cpu_buffer);
4065	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4066
4067	atomic_dec(&cpu_buffer->record_disabled);
4068	atomic_dec(&cpu_buffer->buffer->resize_disabled);
4069	kfree(iter);
4070}
4071EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
4072
4073/**
4074 * ring_buffer_read - read the next item in the ring buffer by the iterator
4075 * @iter: The ring buffer iterator
4076 * @ts: The time stamp of the event read.
4077 *
4078 * This reads the next event in the ring buffer and increments the iterator.
4079 */
4080struct ring_buffer_event *
4081ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
4082{
4083	struct ring_buffer_event *event;
4084	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4085	unsigned long flags;
4086
4087	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4088 again:
4089	event = rb_iter_peek(iter, ts);
4090	if (!event)
4091		goto out;
4092
4093	if (event->type_len == RINGBUF_TYPE_PADDING)
4094		goto again;
4095
4096	rb_advance_iter(iter);
4097 out:
4098	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4099
4100	return event;
4101}
4102EXPORT_SYMBOL_GPL(ring_buffer_read);
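/*
 * Illustrative usage sketch (not part of the original file): the full
 * non-consuming read sequence described in the kernel-doc above --
 * ring_buffer_read_prepare(), ring_buffer_read_prepare_sync(),
 * ring_buffer_read_start(), then ring_buffer_read() in a loop, and
 * finally ring_buffer_read_finish() to re-enable recording.  The helper
 * name is hypothetical; a valid @buffer and @cpu are assumed.
 */
static void example_iterate_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_prepare(buffer, cpu);
	if (!iter)
		return;

	/* wait for writers already inside the buffer to finish */
	ring_buffer_read_prepare_sync();
	ring_buffer_read_start(iter);

	while ((event = ring_buffer_read(iter, &ts)))
		pr_info("cpu %d: event of %u bytes at %llu\n", cpu,
			ring_buffer_event_length(event),
			(unsigned long long)ts);

	/* frees the iterator and re-enables recording on this cpu */
	ring_buffer_read_finish(iter);
}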
4103
4104/**
4105 * ring_buffer_size - return the size of the ring buffer (in bytes)
4106 * @buffer: The ring buffer.
 * @cpu: The per CPU buffer to get the size of.
4107 */
4108unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
4109{
4110	/*
4111	 * Earlier, this method returned
4112	 *	BUF_PAGE_SIZE * buffer->nr_pages
4113	 * Since the nr_pages field is now removed, we have converted this to
4114	 * return the per cpu buffer value.
4115	 */
4116	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4117		return 0;
4118
4119	return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
4120}
4121EXPORT_SYMBOL_GPL(ring_buffer_size);
4122
4123static void
4124rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
4125{
4126	rb_head_page_deactivate(cpu_buffer);
4127
4128	cpu_buffer->head_page
4129		= list_entry(cpu_buffer->pages, struct buffer_page, list);
4130	local_set(&cpu_buffer->head_page->write, 0);
4131	local_set(&cpu_buffer->head_page->entries, 0);
4132	local_set(&cpu_buffer->head_page->page->commit, 0);
4133
4134	cpu_buffer->head_page->read = 0;
4135
4136	cpu_buffer->tail_page = cpu_buffer->head_page;
4137	cpu_buffer->commit_page = cpu_buffer->head_page;
4138
4139	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
4140	INIT_LIST_HEAD(&cpu_buffer->new_pages);
4141	local_set(&cpu_buffer->reader_page->write, 0);
4142	local_set(&cpu_buffer->reader_page->entries, 0);
4143	local_set(&cpu_buffer->reader_page->page->commit, 0);
4144	cpu_buffer->reader_page->read = 0;
4145
4146	local_set(&cpu_buffer->entries_bytes, 0);
4147	local_set(&cpu_buffer->overrun, 0);
4148	local_set(&cpu_buffer->commit_overrun, 0);
4149	local_set(&cpu_buffer->dropped_events, 0);
4150	local_set(&cpu_buffer->entries, 0);
4151	local_set(&cpu_buffer->committing, 0);
4152	local_set(&cpu_buffer->commits, 0);
4153	cpu_buffer->read = 0;
4154	cpu_buffer->read_bytes = 0;
4155
4156	cpu_buffer->write_stamp = 0;
4157	cpu_buffer->read_stamp = 0;
4158
4159	cpu_buffer->lost_events = 0;
4160	cpu_buffer->last_overrun = 0;
4161
4162	rb_head_page_activate(cpu_buffer);
4163}
4164
4165/**
4166 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
4167 * @buffer: The ring buffer to reset a per cpu buffer of
4168 * @cpu: The CPU buffer to be reset
4169 */
4170void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
4171{
4172	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4173	unsigned long flags;
4174
4175	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4176		return;
4177
4178	atomic_inc(&buffer->resize_disabled);
4179	atomic_inc(&cpu_buffer->record_disabled);
4180
4181	/* Make sure all commits have finished */
4182	synchronize_sched();
4183
4184	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4185
4186	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
4187		goto out;
4188
4189	arch_spin_lock(&cpu_buffer->lock);
4190
4191	rb_reset_cpu(cpu_buffer);
4192
4193	arch_spin_unlock(&cpu_buffer->lock);
4194
4195 out:
4196	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4197
4198	atomic_dec(&cpu_buffer->record_disabled);
4199	atomic_dec(&buffer->resize_disabled);
4200}
4201EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
4202
4203/**
4204 * ring_buffer_reset - reset a ring buffer
4205 * @buffer: The ring buffer to reset all cpu buffers
4206 */
4207void ring_buffer_reset(struct ring_buffer *buffer)
4208{
4209	int cpu;
4210
4211	for_each_buffer_cpu(buffer, cpu)
4212		ring_buffer_reset_cpu(buffer, cpu);
4213}
4214EXPORT_SYMBOL_GPL(ring_buffer_reset);
4215
4216/**
4217 * ring_buffer_empty - is the ring buffer empty?
4218 * @buffer: The ring buffer to test
4219 */
4220int ring_buffer_empty(struct ring_buffer *buffer)
4221{
4222	struct ring_buffer_per_cpu *cpu_buffer;
4223	unsigned long flags;
4224	int dolock;
4225	int cpu;
4226	int ret;
4227
4228	dolock = rb_ok_to_lock();
4229
4230	/* yes this is racy, but if you don't like the race, lock the buffer */
4231	for_each_buffer_cpu(buffer, cpu) {
4232		cpu_buffer = buffer->buffers[cpu];
4233		local_irq_save(flags);
4234		if (dolock)
4235			raw_spin_lock(&cpu_buffer->reader_lock);
4236		ret = rb_per_cpu_empty(cpu_buffer);
4237		if (dolock)
4238			raw_spin_unlock(&cpu_buffer->reader_lock);
4239		local_irq_restore(flags);
4240
4241		if (!ret)
4242			return 0;
4243	}
4244
4245	return 1;
4246}
4247EXPORT_SYMBOL_GPL(ring_buffer_empty);
4248
4249/**
4250 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
4251 * @buffer: The ring buffer
4252 * @cpu: The CPU buffer to test
4253 */
4254int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
4255{
4256	struct ring_buffer_per_cpu *cpu_buffer;
4257	unsigned long flags;
4258	int dolock;
4259	int ret;
4260
4261	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4262		return 1;
4263
4264	dolock = rb_ok_to_lock();
4265
4266	cpu_buffer = buffer->buffers[cpu];
4267	local_irq_save(flags);
4268	if (dolock)
4269		raw_spin_lock(&cpu_buffer->reader_lock);
4270	ret = rb_per_cpu_empty(cpu_buffer);
4271	if (dolock)
4272		raw_spin_unlock(&cpu_buffer->reader_lock);
4273	local_irq_restore(flags);
4274
4275	return ret;
4276}
4277EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
4278
4279#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4280/**
4281 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
4282 * @buffer_a: One buffer to swap with
4283 * @buffer_b: The other buffer to swap with
 * @cpu: the CPU of the buffers to swap
4284 *
4285 * This function is useful for tracers that want to take a "snapshot"
4286 * of a CPU buffer and have another backup buffer lying around.
4287 * It is expected that the tracer handles the cpu buffer not being
4288 * used at the moment.
4289 */
4290int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
4291			 struct ring_buffer *buffer_b, int cpu)
4292{
4293	struct ring_buffer_per_cpu *cpu_buffer_a;
4294	struct ring_buffer_per_cpu *cpu_buffer_b;
4295	int ret = -EINVAL;
4296
4297	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
4298	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
4299		goto out;
4300
4301	cpu_buffer_a = buffer_a->buffers[cpu];
4302	cpu_buffer_b = buffer_b->buffers[cpu];
4303
4304	/* At least make sure the two buffers are somewhat the same */
4305	if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
4306		goto out;
4307
4308	ret = -EAGAIN;
4309
4310	if (ring_buffer_flags != RB_BUFFERS_ON)
4311		goto out;
4312
4313	if (atomic_read(&buffer_a->record_disabled))
4314		goto out;
4315
4316	if (atomic_read(&buffer_b->record_disabled))
4317		goto out;
4318
4319	if (atomic_read(&cpu_buffer_a->record_disabled))
4320		goto out;
4321
4322	if (atomic_read(&cpu_buffer_b->record_disabled))
4323		goto out;
4324
4325	/*
4326	 * We can't do a synchronize_sched here because this
4327	 * function can be called in atomic context.
4328	 * Normally this will be called from the same CPU as cpu.
4329	 * If not it's up to the caller to protect this.
4330	 */
4331	atomic_inc(&cpu_buffer_a->record_disabled);
4332	atomic_inc(&cpu_buffer_b->record_disabled);
4333
4334	ret = -EBUSY;
4335	if (local_read(&cpu_buffer_a->committing))
4336		goto out_dec;
4337	if (local_read(&cpu_buffer_b->committing))
4338		goto out_dec;
4339
4340	buffer_a->buffers[cpu] = cpu_buffer_b;
4341	buffer_b->buffers[cpu] = cpu_buffer_a;
4342
4343	cpu_buffer_b->buffer = buffer_a;
4344	cpu_buffer_a->buffer = buffer_b;
4345
4346	ret = 0;
4347
4348out_dec:
4349	atomic_dec(&cpu_buffer_a->record_disabled);
4350	atomic_dec(&cpu_buffer_b->record_disabled);
4351out:
4352	return ret;
4353}
4354EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
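/*
 * Illustrative usage sketch (not part of the original file): the
 * "snapshot" pattern hinted at in the kernel-doc above.  @live and
 * @snap are assumed to be two buffers created with the same number of
 * pages, both still recording (the swap bails out with -EAGAIN when
 * recording is disabled).  The helper name is hypothetical.
 */
static void example_snapshot_and_dump(struct ring_buffer *live,
				      struct ring_buffer *snap, int cpu)
{
	struct ring_buffer_event *event;

	if (ring_buffer_swap_cpu(live, snap, cpu))
		return;	/* sizes differ, recording disabled or busy */

	/* @snap now holds what @live had recorded for @cpu */
	while ((event = ring_buffer_consume(snap, cpu, NULL, NULL)))
		pr_info("snapshot: event of %u bytes\n",
			ring_buffer_event_length(event));
}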
4355#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
4356
4357/**
4358 * ring_buffer_alloc_read_page - allocate a page to read from buffer
4359 * @buffer: the buffer to allocate for.
4360 * @cpu: the cpu buffer to allocate.
4361 *
4362 * This function is used in conjunction with ring_buffer_read_page.
4363 * When reading a full page from the ring buffer, these functions
4364 * can be used to speed up the process. The calling function should
4365 * allocate a few pages first with this function. Then when it
4366 * needs to get pages from the ring buffer, it passes the result
4367 * of this function into ring_buffer_read_page, which will swap
4368 * the page that was allocated, with the read page of the buffer.
4369 *
4370 * Returns:
4371 *  The page allocated, or NULL on error.
4372 */
4373void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
4374{
4375	struct buffer_data_page *bpage;
4376	struct page *page;
4377
4378	page = alloc_pages_node(cpu_to_node(cpu),
4379				GFP_KERNEL | __GFP_NORETRY, 0);
4380	if (!page)
4381		return NULL;
4382
4383	bpage = page_address(page);
4384
4385	rb_init_page(bpage);
4386
4387	return bpage;
4388}
4389EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
4390
4391/**
4392 * ring_buffer_free_read_page - free an allocated read page
4393 * @buffer: the buffer the page was allocated for
4394 * @data: the page to free
4395 *
4396 * Free a page allocated from ring_buffer_alloc_read_page.
4397 */
4398void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
4399{
4400	free_page((unsigned long)data);
4401}
4402EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
4403
4404/**
4405 * ring_buffer_read_page - extract a page from the ring buffer
4406 * @buffer: buffer to extract from
4407 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
4408 * @len: amount to extract
4409 * @cpu: the cpu of the buffer to extract
4410 * @full: should the extraction only happen when the page is full.
4411 *
4412 * This function will pull out a page from the ring buffer and consume it.
4413 * @data_page must be the address of the variable that was returned
4414 * from ring_buffer_alloc_read_page. This is because the page might be used
4415 * to swap with a page in the ring buffer.
4416 *
4417 * for example:
4418 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
4419 *	if (!rpage)
4420 *		return error;
4421 *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
4422 *	if (ret >= 0)
4423 *		process_page(rpage, ret);
4424 *
4425 * When @full is set, the function will not return the data unless
4426 * the writer is off the reader page.
4427 *
4428 * Note: it is up to the calling functions to handle sleeps and wakeups.
4429 *  The ring buffer can be used anywhere in the kernel and can not
4430 *  blindly call wake_up. The layer that uses the ring buffer must be
4431 *  responsible for that.
4432 *
4433 * Returns:
4434 *  >=0 if data has been transferred, returns the offset of consumed data.
4435 *  <0 if no data has been transferred.
4436 */
4437int ring_buffer_read_page(struct ring_buffer *buffer,
4438			  void **data_page, size_t len, int cpu, int full)
4439{
4440	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4441	struct ring_buffer_event *event;
4442	struct buffer_data_page *bpage;
4443	struct buffer_page *reader;
4444	unsigned long missed_events;
4445	unsigned long flags;
4446	unsigned int commit;
4447	unsigned int read;
4448	u64 save_timestamp;
4449	int ret = -1;
4450
4451	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4452		goto out;
4453
4454	/*
4455	 * If len is not big enough to hold the page header, then
4456	 * we can not copy anything.
4457	 */
4458	if (len <= BUF_PAGE_HDR_SIZE)
4459		goto out;
4460
4461	len -= BUF_PAGE_HDR_SIZE;
4462
4463	if (!data_page)
4464		goto out;
4465
4466	bpage = *data_page;
4467	if (!bpage)
4468		goto out;
4469
4470	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4471
4472	reader = rb_get_reader_page(cpu_buffer);
4473	if (!reader)
4474		goto out_unlock;
4475
4476	event = rb_reader_event(cpu_buffer);
4477
4478	read = reader->read;
4479	commit = rb_page_commit(reader);
4480
4481	/* Check if any events were dropped */
4482	missed_events = cpu_buffer->lost_events;
4483
4484	/*
4485	 * If this page has been partially read or
4486	 * if len is not big enough to read the rest of the page or
4487	 * a writer is still on the page, then
4488	 * we must copy the data from the page to the buffer.
4489	 * Otherwise, we can simply swap the page with the one passed in.
4490	 */
4491	if (read || (len < (commit - read)) ||
4492	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
4493		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
4494		unsigned int rpos = read;
4495		unsigned int pos = 0;
4496		unsigned int size;
4497
4498		if (full)
4499			goto out_unlock;
4500
4501		if (len > (commit - read))
4502			len = (commit - read);
4503
4504		/* Always keep the time extend and data together */
4505		size = rb_event_ts_length(event);
4506
4507		if (len < size)
4508			goto out_unlock;
4509
4510		/* save the current timestamp, since the user will need it */
4511		save_timestamp = cpu_buffer->read_stamp;
4512
4513		/* Need to copy one event at a time */
4514		do {
4515			/* We need the size of one event, because
4516			 * rb_advance_reader only advances by one event,
4517			 * whereas rb_event_ts_length may include the size of
4518			 * one or two events.
4519			 * We have already ensured there's enough space if this
4520			 * is a time extend. */
4521			size = rb_event_length(event);
4522			memcpy(bpage->data + pos, rpage->data + rpos, size);
4523
4524			len -= size;
4525
4526			rb_advance_reader(cpu_buffer);
4527			rpos = reader->read;
4528			pos += size;
4529
4530			if (rpos >= commit)
4531				break;
4532
4533			event = rb_reader_event(cpu_buffer);
4534			/* Always keep the time extend and data together */
4535			size = rb_event_ts_length(event);
4536		} while (len >= size);
4537
4538		/* update bpage */
4539		local_set(&bpage->commit, pos);
4540		bpage->time_stamp = save_timestamp;
4541
4542		/* we copied everything to the beginning */
4543		read = 0;
4544	} else {
4545		/* update the entry counter */
4546		cpu_buffer->read += rb_page_entries(reader);
4547		cpu_buffer->read_bytes += BUF_PAGE_SIZE;
4548
4549		/* swap the pages */
4550		rb_init_page(bpage);
4551		bpage = reader->page;
4552		reader->page = *data_page;
4553		local_set(&reader->write, 0);
4554		local_set(&reader->entries, 0);
4555		reader->read = 0;
4556		*data_page = bpage;
4557
4558		/*
4559		 * Use the real_end for the data size,
4560		 * This gives us a chance to store the lost events
4561		 * on the page.
4562		 */
4563		if (reader->real_end)
4564			local_set(&bpage->commit, reader->real_end);
4565	}
4566	ret = read;
4567
4568	cpu_buffer->lost_events = 0;
4569
4570	commit = local_read(&bpage->commit);
4571	/*
4572	 * Set a flag in the commit field if we lost events
4573	 */
4574	if (missed_events) {
4575		/* If there is room at the end of the page to save the
4576		 * missed events, then record it there.
4577		 */
4578		if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
4579			memcpy(&bpage->data[commit], &missed_events,
4580			       sizeof(missed_events));
4581			local_add(RB_MISSED_STORED, &bpage->commit);
4582			commit += sizeof(missed_events);
4583		}
4584		local_add(RB_MISSED_EVENTS, &bpage->commit);
4585	}
4586
4587	/*
4588	 * This page may be off to user land. Zero it out here.
4589	 */
4590	if (commit < BUF_PAGE_SIZE)
4591		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
4592
4593 out_unlock:
4594	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4595
4596 out:
4597	return ret;
4598}
4599EXPORT_SYMBOL_GPL(ring_buffer_read_page);
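/*
 * Illustrative usage sketch (not part of the original file): one full
 * page read as described in the kernel-doc above.  PAGE_SIZE is passed
 * as @len so the reader page may be swapped out rather than copied, and
 * @full == 0 also accepts a partially filled page.  The helper name is
 * hypothetical; a valid @buffer and @cpu are assumed.
 */
static void example_read_one_page(struct ring_buffer *buffer, int cpu)
{
	void *page;
	int ret;

	page = ring_buffer_alloc_read_page(buffer, cpu);
	if (!page)
		return;

	ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0);
	if (ret >= 0) {
		/*
		 * @page now holds a buffer_data_page: time_stamp, commit
		 * (possibly tagged with the RB_MISSED_* flags), and the
		 * event data, with the first unread event at offset @ret.
		 */
	}

	ring_buffer_free_read_page(buffer, page);
}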
4600
4601#ifdef CONFIG_HOTPLUG_CPU
4602static int rb_cpu_notify(struct notifier_block *self,
4603			 unsigned long action, void *hcpu)
4604{
4605	struct ring_buffer *buffer =
4606		container_of(self, struct ring_buffer, cpu_notify);
4607	long cpu = (long)hcpu;
4608	int cpu_i, nr_pages_same;
4609	unsigned int nr_pages;
4610
4611	switch (action) {
4612	case CPU_UP_PREPARE:
4613	case CPU_UP_PREPARE_FROZEN:
4614		if (cpumask_test_cpu(cpu, buffer->cpumask))
4615			return NOTIFY_OK;
4616
4617		nr_pages = 0;
4618		nr_pages_same = 1;
4619		/* check if all cpu sizes are same */
4620		for_each_buffer_cpu(buffer, cpu_i) {
4621			/* fill in the size from first enabled cpu */
4622			if (nr_pages == 0)
4623				nr_pages = buffer->buffers[cpu_i]->nr_pages;
4624			if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
4625				nr_pages_same = 0;
4626				break;
4627			}
4628		}
4629		/* allocate minimum pages, user can later expand it */
4630		if (!nr_pages_same)
4631			nr_pages = 2;
4632		buffer->buffers[cpu] =
4633			rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
4634		if (!buffer->buffers[cpu]) {
4635			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
4636			     cpu);
4637			return NOTIFY_OK;
4638		}
4639		smp_wmb();
4640		cpumask_set_cpu(cpu, buffer->cpumask);
4641		break;
4642	case CPU_DOWN_PREPARE:
4643	case CPU_DOWN_PREPARE_FROZEN:
4644		/*
4645		 * Do nothing.
4646		 *  If we were to free the buffer, then the user would
4647		 *  lose any trace that was in the buffer.
4648		 */
4649		break;
4650	default:
4651		break;
4652	}
4653	return NOTIFY_OK;
4654}
4655#endif
4656
4657#ifdef CONFIG_RING_BUFFER_STARTUP_TEST
4658/*
4659 * This is a basic integrity check of the ring buffer.
4660 * Late in the boot cycle this test will run when configured in.
4661 * It will kick off a thread per CPU that will go into a loop
4662 * writing to the per cpu ring buffer various sizes of data.
4663 * Some of the data will be large items, some small.
4664 *
4665 * Another thread is created that goes into a spin, sending out
4666 * IPIs to the other CPUs to also write into the ring buffer.
4667 * This is to test the nesting ability of the buffer.
4668 *
4669 * Basic stats are recorded and reported. If something in the
4670 * ring buffer should happen that's not expected, a big warning
4671 * is displayed and all ring buffers are disabled.
4672 */
4673static struct task_struct *rb_threads[NR_CPUS] __initdata;
4674
4675struct rb_test_data {
4676	struct ring_buffer	*buffer;
4677	unsigned long		events;
4678	unsigned long		bytes_written;
4679	unsigned long		bytes_alloc;
4680	unsigned long		bytes_dropped;
4681	unsigned long		events_nested;
4682	unsigned long		bytes_written_nested;
4683	unsigned long		bytes_alloc_nested;
4684	unsigned long		bytes_dropped_nested;
4685	int			min_size_nested;
4686	int			max_size_nested;
4687	int			max_size;
4688	int			min_size;
4689	int			cpu;
4690	int			cnt;
4691};
4692
4693static struct rb_test_data rb_data[NR_CPUS] __initdata;
4694
4695/* 1 meg per cpu */
4696#define RB_TEST_BUFFER_SIZE	1048576
4697
4698static char rb_string[] __initdata =
4699	"abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
4700	"?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
4701	"!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
4702
4703static bool rb_test_started __initdata;
4704
4705struct rb_item {
4706	int size;
4707	char str[];
4708};
4709
4710static __init int rb_write_something(struct rb_test_data *data, bool nested)
4711{
4712	struct ring_buffer_event *event;
4713	struct rb_item *item;
4714	bool started;
4715	int event_len;
4716	int size;
4717	int len;
4718	int cnt;
4719
4720	/* Have nested writes different than what is written */
4721	cnt = data->cnt + (nested ? 27 : 0);
4722
4723	/* Multiply cnt by ~e, to make some unique increment */
4724	size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
4725
4726	len = size + sizeof(struct rb_item);
4727
4728	started = rb_test_started;
4729	/* read rb_test_started before checking buffer enabled */
4730	smp_rmb();
4731
4732	event = ring_buffer_lock_reserve(data->buffer, len);
4733	if (!event) {
4734		/* Ignore dropped events before test starts. */
4735		if (started) {
4736			if (nested)
4737				data->bytes_dropped_nested += len;
4738			else
4739				data->bytes_dropped += len;
4740		}
4741		return len;
4742	}
4743
4744	event_len = ring_buffer_event_length(event);
4745
4746	if (RB_WARN_ON(data->buffer, event_len < len))
4747		goto out;
4748
4749	item = ring_buffer_event_data(event);
4750	item->size = size;
4751	memcpy(item->str, rb_string, size);
4752
4753	if (nested) {
4754		data->bytes_alloc_nested += event_len;
4755		data->bytes_written_nested += len;
4756		data->events_nested++;
4757		if (!data->min_size_nested || len < data->min_size_nested)
4758			data->min_size_nested = len;
4759		if (len > data->max_size_nested)
4760			data->max_size_nested = len;
4761	} else {
4762		data->bytes_alloc += event_len;
4763		data->bytes_written += len;
4764		data->events++;
4765		if (!data->min_size || len < data->min_size)
4766			data->min_size = len;
4767		if (len > data->max_size)
4768			data->max_size = len;
4769	}
4770
4771 out:
4772	ring_buffer_unlock_commit(data->buffer, event);
4773
4774	return 0;
4775}
4776
4777static __init int rb_test(void *arg)
4778{
4779	struct rb_test_data *data = arg;
4780
4781	while (!kthread_should_stop()) {
4782		rb_write_something(data, false);
4783		data->cnt++;
4784
4785		set_current_state(TASK_INTERRUPTIBLE);
4786		/* Now sleep between a min of 100-300us and a max of 1ms */
4787		usleep_range(((data->cnt % 3) + 1) * 100, 1000);
4788	}
4789
4790	return 0;
4791}
4792
4793static __init void rb_ipi(void *ignore)
4794{
4795	struct rb_test_data *data;
4796	int cpu = smp_processor_id();
4797
4798	data = &rb_data[cpu];
4799	rb_write_something(data, true);
4800}
4801
4802static __init int rb_hammer_test(void *arg)
4803{
4804	while (!kthread_should_stop()) {
4805
4806		/* Send an IPI to all cpus to write data! */
4807		smp_call_function(rb_ipi, NULL, 1);
4808		/* No sleep, but for non preempt, let others run */
4809		schedule();
4810	}
4811
4812	return 0;
4813}
4814
4815static __init int test_ringbuffer(void)
4816{
4817	struct task_struct *rb_hammer;
4818	struct ring_buffer *buffer;
4819	int cpu;
4820	int ret = 0;
4821
4822	pr_info("Running ring buffer tests...\n");
4823
4824	buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
4825	if (WARN_ON(!buffer))
4826		return 0;
4827
4828	/* Disable buffer so that threads can't write to it yet */
4829	ring_buffer_record_off(buffer);
4830
4831	for_each_online_cpu(cpu) {
4832		rb_data[cpu].buffer = buffer;
4833		rb_data[cpu].cpu = cpu;
4834		rb_data[cpu].cnt = cpu;
4835		rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
4836						 "rbtester/%d", cpu);
4837		if (WARN_ON(!rb_threads[cpu])) {
4838			pr_cont("FAILED\n");
4839			ret = -1;
4840			goto out_free;
4841		}
4842
4843		kthread_bind(rb_threads[cpu], cpu);
4844 		wake_up_process(rb_threads[cpu]);
4845	}
4846
4847	/* Now create the rb hammer! */
4848	rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
4849	if (WARN_ON(!rb_hammer)) {
4850		pr_cont("FAILED\n");
4851		ret = -1;
4852		goto out_free;
4853	}
4854
4855	ring_buffer_record_on(buffer);
4856	/*
4857	 * Show buffer is enabled before setting rb_test_started.
4858	 * Yes there's a small race window where events could be
4859	 * dropped and the thread won't catch it. But when a ring
4860	 * buffer gets enabled, there will always be some kind of
4861	 * delay before other CPUs see it. Thus, we don't care about
4862	 * those dropped events. We care about events dropped after
4863	 * the threads see that the buffer is active.
4864	 */
4865	smp_wmb();
4866	rb_test_started = true;
4867
4868	set_current_state(TASK_INTERRUPTIBLE);
4869	/* Just run for 10 seconds */
4870	schedule_timeout(10 * HZ);
4871
4872	kthread_stop(rb_hammer);
4873
4874 out_free:
4875	for_each_online_cpu(cpu) {
4876		if (!rb_threads[cpu])
4877			break;
4878		kthread_stop(rb_threads[cpu]);
4879	}
4880	if (ret) {
4881		ring_buffer_free(buffer);
4882		return ret;
4883	}
4884
4885	/* Report! */
4886	pr_info("finished\n");
4887	for_each_online_cpu(cpu) {
4888		struct ring_buffer_event *event;
4889		struct rb_test_data *data = &rb_data[cpu];
4890		struct rb_item *item;
4891		unsigned long total_events;
4892		unsigned long total_dropped;
4893		unsigned long total_written;
4894		unsigned long total_alloc;
4895		unsigned long total_read = 0;
4896		unsigned long total_size = 0;
4897		unsigned long total_len = 0;
4898		unsigned long total_lost = 0;
4899		unsigned long lost;
4900		int big_event_size;
4901		int small_event_size;
4902
4903		ret = -1;
4904
4905		total_events = data->events + data->events_nested;
4906		total_written = data->bytes_written + data->bytes_written_nested;
4907		total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
4908		total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
4909
4910		big_event_size = data->max_size + data->max_size_nested;
4911		small_event_size = data->min_size + data->min_size_nested;
4912
4913		pr_info("CPU %d:\n", cpu);
4914		pr_info("              events:    %ld\n", total_events);
4915		pr_info("       dropped bytes:    %ld\n", total_dropped);
4916		pr_info("       alloced bytes:    %ld\n", total_alloc);
4917		pr_info("       written bytes:    %ld\n", total_written);
4918		pr_info("       biggest event:    %d\n", big_event_size);
4919		pr_info("      smallest event:    %d\n", small_event_size);
4920
4921		if (RB_WARN_ON(buffer, total_dropped))
4922			break;
4923
4924		ret = 0;
4925
4926		while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
4927			total_lost += lost;
4928			item = ring_buffer_event_data(event);
4929			total_len += ring_buffer_event_length(event);
4930			total_size += item->size + sizeof(struct rb_item);
4931			if (memcmp(&item->str[0], rb_string, item->size) != 0) {
4932				pr_info("FAILED!\n");
4933				pr_info("buffer had: %.*s\n", item->size, item->str);
4934				pr_info("expected:   %.*s\n", item->size, rb_string);
4935				RB_WARN_ON(buffer, 1);
4936				ret = -1;
4937				break;
4938			}
4939			total_read++;
4940		}
4941		if (ret)
4942			break;
4943
4944		ret = -1;
4945
4946		pr_info("         read events:   %ld\n", total_read);
4947		pr_info("         lost events:   %ld\n", total_lost);
4948		pr_info("        total events:   %ld\n", total_lost + total_read);
4949		pr_info("  recorded len bytes:   %ld\n", total_len);
4950		pr_info(" recorded size bytes:   %ld\n", total_size);
4951		if (total_lost)
4952			pr_info(" With dropped events, record len and size may not match\n"
4953				" alloced and written from above\n");
4954		if (!total_lost) {
4955			if (RB_WARN_ON(buffer, total_len != total_alloc ||
4956				       total_size != total_written))
4957				break;
4958		}
4959		if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
4960			break;
4961
4962		ret = 0;
4963	}
4964	if (!ret)
4965		pr_info("Ring buffer PASSED!\n");
4966
4967	ring_buffer_free(buffer);
4968	return 0;
4969}
4970
4971late_initcall(test_ringbuffer);
4972#endif /* CONFIG_RING_BUFFER_STARTUP_TEST */
 530#define RB_WARN_ON(b, cond)						\
 531	({								\
 532		int _____ret = unlikely(cond);				\
 533		if (_____ret) {						\
 534			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
 535				struct ring_buffer_per_cpu *__b =	\
 536					(void *)b;			\
 537				atomic_inc(&__b->buffer->record_disabled); \
 538			} else						\
 539				atomic_inc(&b->record_disabled);	\
 540			WARN_ON(1);					\
 541		}							\
 542		_____ret;						\
 543	})
 544
 545/* Up this if you want to test the TIME_EXTENTS and normalization */
 546#define DEBUG_SHIFT 0
 547
 548static inline u64 rb_time_stamp(struct ring_buffer *buffer)
 549{
 550	/* shift to debug/test normalization and TIME_EXTENTS */
 551	return buffer->clock() << DEBUG_SHIFT;
 552}
 553
 554u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
 555{
 556	u64 time;
 557
 558	preempt_disable_notrace();
 559	time = rb_time_stamp(buffer);
 560	preempt_enable_no_resched_notrace();
 561
 562	return time;
 563}
 564EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
 565
 566void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
 567				      int cpu, u64 *ts)
 568{
 569	/* Just stupid testing the normalize function and deltas */
 570	*ts >>= DEBUG_SHIFT;
 571}
 572EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
 573
 574/*
 575 * Making the ring buffer lockless makes things tricky.
 576 * Although writes only happen on the CPU that they are on,
 577 * and they only need to worry about interrupts. Reads can
 578 * happen on any CPU.
 579 *
 580 * The reader page is always off the ring buffer, but when the
 581 * reader finishes with a page, it needs to swap its page with
 582 * a new one from the buffer. The reader needs to take from
 583 * the head (writes go to the tail). But if a writer is in overwrite
 584 * mode and wraps, it must push the head page forward.
 585 *
 586 * Here lies the problem.
 587 *
 588 * The reader must be careful to replace only the head page, and
 589 * not another one. As described at the top of the file in the
 590 * ASCII art, the reader sets its old page to point to the next
 591 * page after head. It then sets the page after head to point to
 592 * the old reader page. But if the writer moves the head page
 593 * during this operation, the reader could end up with the tail.
 594 *
 595 * We use cmpxchg to help prevent this race. We also do something
 596 * special with the page before head. We set the LSB to 1.
 597 *
 598 * When the writer must push the page forward, it will clear the
 599 * bit that points to the head page, move the head, and then set
 600 * the bit that points to the new head page.
 601 *
 602 * We also don't want an interrupt coming in and moving the head
 603 * page on another writer. Thus we use the second LSB to catch
 604 * that too. Thus:
 605 *
 606 * head->list->prev->next        bit 1          bit 0
 607 *                              -------        -------
 608 * Normal page                     0              0
 609 * Points to head page             0              1
 610 * New head page                   1              0
 611 *
 612 * Note we can not trust the prev pointer of the head page, because:
 613 *
 614 * +----+       +-----+        +-----+
 615 * |    |------>|  T  |---X--->|  N  |
 616 * |    |<------|     |        |     |
 617 * +----+       +-----+        +-----+
 618 *   ^                           ^ |
 619 *   |          +-----+          | |
 620 *   +----------|  R  |----------+ |
 621 *              |     |<-----------+
 622 *              +-----+
 623 *
 624 * Key:  ---X-->  HEAD flag set in pointer
 625 *         T      Tail page
 626 *         R      Reader page
 627 *         N      Next page
 628 *
 629 * (see __rb_reserve_next() to see where this happens)
 630 *
 631 *  What the above shows is that the reader just swapped out
 632 *  the reader page with a page in the buffer, but before it
 633 *  could make the new header point back to the new page added
 634 *  it was preempted by a writer. The writer moved forward onto
 635 *  the new page added by the reader and is about to move forward
 636 *  again.
 637 *
 638 *  You can see, it is legitimate for the previous pointer of
 639 *  the head (or any page) not to point back to itself. But only
 640 *  temporarily.
 641 */
 642
 643#define RB_PAGE_NORMAL		0UL
 644#define RB_PAGE_HEAD		1UL
 645#define RB_PAGE_UPDATE		2UL
 646
 647
 648#define RB_FLAG_MASK		3UL
 649
 650/* PAGE_MOVED is not part of the mask */
 651#define RB_PAGE_MOVED		4UL
 652
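/*
 * Illustrative sketch, not part of the original source: buffer pages are
 * at least pointer aligned, so the two low bits of a ->next pointer are
 * free to carry the NORMAL/HEAD/UPDATE state described above, and masking
 * with ~RB_FLAG_MASK recovers the real address. The stand-alone user-space
 * program below (hypothetical names, not kernel code) demonstrates the
 * same tag-and-mask idea:
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	struct node { struct node *next; };
 *
 *	int main(void)
 *	{
 *		struct node a, b;
 *
 *		a.next = (struct node *)((uintptr_t)&b | 1UL);	// tag as HEAD
 *		printf("flags=%lu real=%p\n",
 *		       (unsigned long)((uintptr_t)a.next & 3UL),
 *		       (void *)((uintptr_t)a.next & ~(uintptr_t)3UL));
 *		return 0;
 *	}
 */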
 653/*
 654 * rb_list_head - remove any bit
 655 */
 656static struct list_head *rb_list_head(struct list_head *list)
 657{
 658	unsigned long val = (unsigned long)list;
 659
 660	return (struct list_head *)(val & ~RB_FLAG_MASK);
 661}
 662
 663/*
 664 * rb_is_head_page - test if the given page is the head page
 665 *
 666 * Because the reader may move the head_page pointer, we can
 667 * not trust what the head page is (it may be pointing to
 668 * the reader page). But if the next page is a header page,
 669 * its flags will be non zero.
 670 */
 671static inline int
 672rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
 673		struct buffer_page *page, struct list_head *list)
 674{
 675	unsigned long val;
 676
 677	val = (unsigned long)list->next;
 678
 679	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
 680		return RB_PAGE_MOVED;
 681
 682	return val & RB_FLAG_MASK;
 683}
 684
 685/*
 686 * rb_is_reader_page
 687 *
 688 * The unique thing about the reader page is that, if the
 689 * writer is ever on it, the previous pointer never points
 690 * back to the reader page.
 691 */
 692static int rb_is_reader_page(struct buffer_page *page)
 693{
 694	struct list_head *list = page->list.prev;
 695
 696	return rb_list_head(list->next) != &page->list;
 697}
 698
 699/*
 700 * rb_set_list_to_head - set a list_head to be pointing to head.
 701 */
 702static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
 703				struct list_head *list)
 704{
 705	unsigned long *ptr;
 706
 707	ptr = (unsigned long *)&list->next;
 708	*ptr |= RB_PAGE_HEAD;
 709	*ptr &= ~RB_PAGE_UPDATE;
 710}
 711
 712/*
 713 * rb_head_page_activate - sets up head page
 714 */
 715static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
 716{
 717	struct buffer_page *head;
 718
 719	head = cpu_buffer->head_page;
 720	if (!head)
 721		return;
 722
 723	/*
 724	 * Set the previous list pointer to have the HEAD flag.
 725	 */
 726	rb_set_list_to_head(cpu_buffer, head->list.prev);
 727}
 728
 729static void rb_list_head_clear(struct list_head *list)
 730{
 731	unsigned long *ptr = (unsigned long *)&list->next;
 732
 733	*ptr &= ~RB_FLAG_MASK;
 734}
 735
 736/*
 737 * rb_head_page_deactivate - clears head page ptr (for free list)
 738 */
 739static void
 740rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
 741{
 742	struct list_head *hd;
 743
 744	/* Go through the whole list and clear any pointers found. */
 745	rb_list_head_clear(cpu_buffer->pages);
 746
 747	list_for_each(hd, cpu_buffer->pages)
 748		rb_list_head_clear(hd);
 749}
 750
 751static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
 752			    struct buffer_page *head,
 753			    struct buffer_page *prev,
 754			    int old_flag, int new_flag)
 755{
 756	struct list_head *list;
 757	unsigned long val = (unsigned long)&head->list;
 758	unsigned long ret;
 759
 760	list = &prev->list;
 761
 762	val &= ~RB_FLAG_MASK;
 763
 764	ret = cmpxchg((unsigned long *)&list->next,
 765		      val | old_flag, val | new_flag);
 766
 767	/* check if the reader took the page */
 768	if ((ret & ~RB_FLAG_MASK) != val)
 769		return RB_PAGE_MOVED;
 770
 771	return ret & RB_FLAG_MASK;
 772}
 773
 774static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
 775				   struct buffer_page *head,
 776				   struct buffer_page *prev,
 777				   int old_flag)
 778{
 779	return rb_head_page_set(cpu_buffer, head, prev,
 780				old_flag, RB_PAGE_UPDATE);
 781}
 782
 783static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
 784				 struct buffer_page *head,
 785				 struct buffer_page *prev,
 786				 int old_flag)
 787{
 788	return rb_head_page_set(cpu_buffer, head, prev,
 789				old_flag, RB_PAGE_HEAD);
 790}
 791
 792static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
 793				   struct buffer_page *head,
 794				   struct buffer_page *prev,
 795				   int old_flag)
 796{
 797	return rb_head_page_set(cpu_buffer, head, prev,
 798				old_flag, RB_PAGE_NORMAL);
 799}
 800
 801static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
 802			       struct buffer_page **bpage)
 803{
 804	struct list_head *p = rb_list_head((*bpage)->list.next);
 805
 806	*bpage = list_entry(p, struct buffer_page, list);
 807}
 808
 809static struct buffer_page *
 810rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
 811{
 812	struct buffer_page *head;
 813	struct buffer_page *page;
 814	struct list_head *list;
 815	int i;
 816
 817	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
 818		return NULL;
 819
 820	/* sanity check */
 821	list = cpu_buffer->pages;
 822	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
 823		return NULL;
 824
 825	page = head = cpu_buffer->head_page;
 826	/*
 827	 * It is possible that the writer moves the header behind
 828	 * where we started, and we miss in one loop.
 829	 * A second loop should grab the header, but we'll do
 830	 * three loops just because I'm paranoid.
 831	 */
 832	for (i = 0; i < 3; i++) {
 833		do {
 834			if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
 835				cpu_buffer->head_page = page;
 836				return page;
 837			}
 838			rb_inc_page(cpu_buffer, &page);
 839		} while (page != head);
 840	}
 841
 842	RB_WARN_ON(cpu_buffer, 1);
 843
 844	return NULL;
 845}
 846
 847static int rb_head_page_replace(struct buffer_page *old,
 848				struct buffer_page *new)
 849{
 850	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
 851	unsigned long val;
 852	unsigned long ret;
 853
 854	val = *ptr & ~RB_FLAG_MASK;
 855	val |= RB_PAGE_HEAD;
 856
 857	ret = cmpxchg(ptr, val, (unsigned long)&new->list);
 858
 859	return ret == val;
 860}
 861
 862/*
 863 * rb_tail_page_update - move the tail page forward
 864 *
 865 * Returns 1 if moved tail page, 0 if someone else did.
 866 */
 867static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
 868			       struct buffer_page *tail_page,
 869			       struct buffer_page *next_page)
 870{
 871	struct buffer_page *old_tail;
 872	unsigned long old_entries;
 873	unsigned long old_write;
 874	int ret = 0;
 875
 876	/*
 877	 * The tail page now needs to be moved forward.
 878	 *
 879	 * We need to reset the tail page, but without messing
 880	 * with possible erasing of data brought in by interrupts
 881	 * that have moved the tail page and are currently on it.
 882	 *
 883	 * We add a counter to the write field to denote this.
 884	 */
 885	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
 886	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
 887
 888	/*
 889	 * Just make sure we have seen our old_write and synchronize
 890	 * with any interrupts that come in.
 891	 */
 892	barrier();
 893
 894	/*
 895	 * If the tail page is still the same as what we think
 896	 * it is, then it is up to us to update the tail
 897	 * pointer.
 898	 */
 899	if (tail_page == cpu_buffer->tail_page) {
 900		/* Zero the write counter */
 901		unsigned long val = old_write & ~RB_WRITE_MASK;
 902		unsigned long eval = old_entries & ~RB_WRITE_MASK;
 903
 904		/*
 905		 * This will only succeed if an interrupt did
 906		 * not come in and change it. In which case, we
 907		 * do not want to modify it.
 908		 *
 909		 * We add (void) to let the compiler know that we do not care
 910		 * about the return value of these functions. We use the
 911		 * cmpxchg to only update if an interrupt did not already
 912		 * do it for us. If the cmpxchg fails, we don't care.
 913		 */
 914		(void)local_cmpxchg(&next_page->write, old_write, val);
 915		(void)local_cmpxchg(&next_page->entries, old_entries, eval);
 916
 917		/*
 918		 * No need to worry about races with clearing out the commit.
 919		 * It can only increment when a commit takes place. But that
 920		 * only happens in the outermost nested commit.
 921		 */
 922		local_set(&next_page->page->commit, 0);
 923
 924		old_tail = cmpxchg(&cpu_buffer->tail_page,
 925				   tail_page, next_page);
 926
 927		if (old_tail == tail_page)
 928			ret = 1;
 929	}
 930
 931	return ret;
 932}
 933
 934static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
 935			  struct buffer_page *bpage)
 936{
 937	unsigned long val = (unsigned long)bpage;
 938
 939	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
 940		return 1;
 941
 942	return 0;
 943}
 944
 945/**
 946 * rb_check_list - make sure a pointer to a list has the last bits zero
 947 */
 948static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
 949			 struct list_head *list)
 950{
 951	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
 952		return 1;
 953	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
 954		return 1;
 955	return 0;
 956}
 957
 958/**
 959 * rb_check_pages - integrity check of buffer pages
 960 * @cpu_buffer: CPU buffer with pages to test
 961 *
 962 * As a safety measure we check to make sure the data pages have not
 963 * been corrupted.
 964 */
 965static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
 966{
 967	struct list_head *head = cpu_buffer->pages;
 968	struct buffer_page *bpage, *tmp;
 969
 970	rb_head_page_deactivate(cpu_buffer);
 971
 972	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
 973		return -1;
 974	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
 975		return -1;
 976
 977	if (rb_check_list(cpu_buffer, head))
 978		return -1;
 979
 980	list_for_each_entry_safe(bpage, tmp, head, list) {
 981		if (RB_WARN_ON(cpu_buffer,
 982			       bpage->list.next->prev != &bpage->list))
 983			return -1;
 984		if (RB_WARN_ON(cpu_buffer,
 985			       bpage->list.prev->next != &bpage->list))
 986			return -1;
 987		if (rb_check_list(cpu_buffer, &bpage->list))
 988			return -1;
 989	}
 990
 991	rb_head_page_activate(cpu_buffer);
 992
 993	return 0;
 994}
 995
 996static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 997			     unsigned nr_pages)
 998{
 999	struct buffer_page *bpage, *tmp;
1000	LIST_HEAD(pages);
1001	unsigned i;
1002
1003	WARN_ON(!nr_pages);
1004
1005	for (i = 0; i < nr_pages; i++) {
1006		struct page *page;
1007		/*
1008		 * __GFP_NORETRY flag makes sure that the allocation fails
1009		 * gracefully without invoking oom-killer and the system is
1010		 * not destabilized.
1011		 */
1012		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1013				    GFP_KERNEL | __GFP_NORETRY,
1014				    cpu_to_node(cpu_buffer->cpu));
1015		if (!bpage)
1016			goto free_pages;
1017
1018		rb_check_bpage(cpu_buffer, bpage);
1019
1020		list_add(&bpage->list, &pages);
1021
1022		page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
1023					GFP_KERNEL | __GFP_NORETRY, 0);
1024		if (!page)
1025			goto free_pages;
1026		bpage->page = page_address(page);
1027		rb_init_page(bpage->page);
1028	}
1029
1030	/*
1031	 * The ring buffer page list is a circular list that does not
1032	 * start and end with a list head. All page list items point to
1033	 * other pages.
1034	 */
1035	cpu_buffer->pages = pages.next;
1036	list_del(&pages);
1037
1038	rb_check_pages(cpu_buffer);
1039
1040	return 0;
1041
1042 free_pages:
1043	list_for_each_entry_safe(bpage, tmp, &pages, list) {
1044		list_del_init(&bpage->list);
1045		free_buffer_page(bpage);
1046	}
1047	return -ENOMEM;
1048}
1049
1050static struct ring_buffer_per_cpu *
1051rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
1052{
1053	struct ring_buffer_per_cpu *cpu_buffer;
1054	struct buffer_page *bpage;
1055	struct page *page;
1056	int ret;
1057
1058	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1059				  GFP_KERNEL, cpu_to_node(cpu));
1060	if (!cpu_buffer)
1061		return NULL;
1062
1063	cpu_buffer->cpu = cpu;
1064	cpu_buffer->buffer = buffer;
1065	spin_lock_init(&cpu_buffer->reader_lock);
1066	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1067	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1068
1069	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1070			    GFP_KERNEL, cpu_to_node(cpu));
1071	if (!bpage)
1072		goto fail_free_buffer;
1073
1074	rb_check_bpage(cpu_buffer, bpage);
1075
1076	cpu_buffer->reader_page = bpage;
1077	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
1078	if (!page)
1079		goto fail_free_reader;
1080	bpage->page = page_address(page);
1081	rb_init_page(bpage->page);
1082
1083	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1084
1085	ret = rb_allocate_pages(cpu_buffer, buffer->pages);
1086	if (ret < 0)
1087		goto fail_free_reader;
1088
1089	cpu_buffer->head_page
1090		= list_entry(cpu_buffer->pages, struct buffer_page, list);
1091	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1092
1093	rb_head_page_activate(cpu_buffer);
1094
1095	return cpu_buffer;
1096
1097 fail_free_reader:
1098	free_buffer_page(cpu_buffer->reader_page);
1099
1100 fail_free_buffer:
1101	kfree(cpu_buffer);
1102	return NULL;
1103}
1104
1105static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1106{
1107	struct list_head *head = cpu_buffer->pages;
1108	struct buffer_page *bpage, *tmp;
1109
1110	free_buffer_page(cpu_buffer->reader_page);
1111
1112	rb_head_page_deactivate(cpu_buffer);
1113
1114	if (head) {
1115		list_for_each_entry_safe(bpage, tmp, head, list) {
1116			list_del_init(&bpage->list);
1117			free_buffer_page(bpage);
1118		}
1119		bpage = list_entry(head, struct buffer_page, list);
1120		free_buffer_page(bpage);
1121	}
1122
1123	kfree(cpu_buffer);
1124}
1125
1126#ifdef CONFIG_HOTPLUG_CPU
1127static int rb_cpu_notify(struct notifier_block *self,
1128			 unsigned long action, void *hcpu);
1129#endif
1130
1131/**
1132 * ring_buffer_alloc - allocate a new ring_buffer
1133 * @size: the size in bytes per cpu that is needed.
1134 * @flags: attributes to set for the ring buffer.
1135 *
1136 * Currently the only flag that is available is the RB_FL_OVERWRITE
1137 * flag. This flag means that the buffer will overwrite old data
1138 * when the buffer wraps. If this flag is not set, the buffer will
1139 * drop data when the tail hits the head.
1140 */
1141struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1142					struct lock_class_key *key)
1143{
1144	struct ring_buffer *buffer;
1145	int bsize;
1146	int cpu;
1147
1148	/* keep it in its own cache line */
1149	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1150			 GFP_KERNEL);
1151	if (!buffer)
1152		return NULL;
1153
1154	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1155		goto fail_free_buffer;
1156
1157	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1158	buffer->flags = flags;
1159	buffer->clock = trace_clock_local;
1160	buffer->reader_lock_key = key;
1161
1162	/* need at least two pages */
1163	if (buffer->pages < 2)
1164		buffer->pages = 2;
1165
1166	/*
1167	 * If CPU hotplug is not configured and the ring buffer is allocated
1168	 * in an early initcall, it will not be notified of secondary cpus.
1169	 * In that case, we need to allocate for all possible cpus.
1170	 */
1171#ifdef CONFIG_HOTPLUG_CPU
1172	get_online_cpus();
1173	cpumask_copy(buffer->cpumask, cpu_online_mask);
1174#else
1175	cpumask_copy(buffer->cpumask, cpu_possible_mask);
1176#endif
1177	buffer->cpus = nr_cpu_ids;
1178
1179	bsize = sizeof(void *) * nr_cpu_ids;
1180	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1181				  GFP_KERNEL);
1182	if (!buffer->buffers)
1183		goto fail_free_cpumask;
1184
1185	for_each_buffer_cpu(buffer, cpu) {
1186		buffer->buffers[cpu] =
1187			rb_allocate_cpu_buffer(buffer, cpu);
1188		if (!buffer->buffers[cpu])
1189			goto fail_free_buffers;
1190	}
1191
1192#ifdef CONFIG_HOTPLUG_CPU
1193	buffer->cpu_notify.notifier_call = rb_cpu_notify;
1194	buffer->cpu_notify.priority = 0;
1195	register_cpu_notifier(&buffer->cpu_notify);
1196#endif
1197
1198	put_online_cpus();
1199	mutex_init(&buffer->mutex);
1200
1201	return buffer;
1202
1203 fail_free_buffers:
1204	for_each_buffer_cpu(buffer, cpu) {
1205		if (buffer->buffers[cpu])
1206			rb_free_cpu_buffer(buffer->buffers[cpu]);
1207	}
1208	kfree(buffer->buffers);
1209
1210 fail_free_cpumask:
1211	free_cpumask_var(buffer->cpumask);
1212	put_online_cpus();
1213
1214 fail_free_buffer:
1215	kfree(buffer);
1216	return NULL;
1217}
1218EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
1219
1220/**
1221 * ring_buffer_free - free a ring buffer.
1222 * @buffer: the buffer to free.
1223 */
1224void
1225ring_buffer_free(struct ring_buffer *buffer)
1226{
1227	int cpu;
1228
1229	get_online_cpus();
1230
1231#ifdef CONFIG_HOTPLUG_CPU
1232	unregister_cpu_notifier(&buffer->cpu_notify);
1233#endif
1234
1235	for_each_buffer_cpu(buffer, cpu)
1236		rb_free_cpu_buffer(buffer->buffers[cpu]);
1237
1238	put_online_cpus();
1239
1240	kfree(buffer->buffers);
1241	free_cpumask_var(buffer->cpumask);
1242
1243	kfree(buffer);
1244}
1245EXPORT_SYMBOL_GPL(ring_buffer_free);
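/*
 * Illustrative usage sketch, not part of the original source: allocate a
 * buffer with roughly 64KB per CPU in overwrite mode and tear it down
 * again. ring_buffer_alloc() is the convenience wrapper around
 * __ring_buffer_alloc() provided by <linux/ring_buffer.h>; the function
 * name below is hypothetical.
 */
static int __maybe_unused rb_example_setup(void)
{
	struct ring_buffer *rb;

	rb = ring_buffer_alloc(64 * 1024, RB_FL_OVERWRITE);
	if (!rb)
		return -ENOMEM;

	/* ... reserve, commit and read events here ... */

	ring_buffer_free(rb);
	return 0;
}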
1246
1247void ring_buffer_set_clock(struct ring_buffer *buffer,
1248			   u64 (*clock)(void))
1249{
1250	buffer->clock = clock;
1251}
1252
1253static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1254
1255static void
1256rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
1257{
1258	struct buffer_page *bpage;
1259	struct list_head *p;
1260	unsigned i;
1261
1262	spin_lock_irq(&cpu_buffer->reader_lock);
1263	rb_head_page_deactivate(cpu_buffer);
1264
1265	for (i = 0; i < nr_pages; i++) {
1266		if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
1267			goto out;
1268		p = cpu_buffer->pages->next;
1269		bpage = list_entry(p, struct buffer_page, list);
1270		list_del_init(&bpage->list);
1271		free_buffer_page(bpage);
1272	}
1273	if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
1274		goto out;
1275
1276	rb_reset_cpu(cpu_buffer);
1277	rb_check_pages(cpu_buffer);
1278
1279out:
1280	spin_unlock_irq(&cpu_buffer->reader_lock);
1281}
1282
1283static void
1284rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
1285		struct list_head *pages, unsigned nr_pages)
1286{
1287	struct buffer_page *bpage;
1288	struct list_head *p;
1289	unsigned i;
1290
1291	spin_lock_irq(&cpu_buffer->reader_lock);
1292	rb_head_page_deactivate(cpu_buffer);
1293
1294	for (i = 0; i < nr_pages; i++) {
1295		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
1296			goto out;
1297		p = pages->next;
1298		bpage = list_entry(p, struct buffer_page, list);
1299		list_del_init(&bpage->list);
1300		list_add_tail(&bpage->list, cpu_buffer->pages);
1301	}
1302	rb_reset_cpu(cpu_buffer);
1303	rb_check_pages(cpu_buffer);
1304
1305out:
1306	spin_unlock_irq(&cpu_buffer->reader_lock);
1307}
1308
1309/**
1310 * ring_buffer_resize - resize the ring buffer
1311 * @buffer: the buffer to resize.
1312 * @size: the new size.
1313 *
1314 * Minimum size is 2 * BUF_PAGE_SIZE.
1315 *
1316 * Returns -1 on failure.
1317 */
1318int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
1319{
1320	struct ring_buffer_per_cpu *cpu_buffer;
1321	unsigned nr_pages, rm_pages, new_pages;
1322	struct buffer_page *bpage, *tmp;
1323	unsigned long buffer_size;
1324	LIST_HEAD(pages);
1325	int i, cpu;
1326
1327	/*
1328	 * Always succeed at resizing a non-existent buffer:
1329	 */
1330	if (!buffer)
1331		return size;
1332
1333	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1334	size *= BUF_PAGE_SIZE;
1335	buffer_size = buffer->pages * BUF_PAGE_SIZE;
1336
1337	/* we need a minimum of two pages */
1338	if (size < BUF_PAGE_SIZE * 2)
1339		size = BUF_PAGE_SIZE * 2;
1340
1341	if (size == buffer_size)
1342		return size;
1343
1344	atomic_inc(&buffer->record_disabled);
1345
1346	/* Make sure all writers are done with this buffer. */
1347	synchronize_sched();
1348
1349	mutex_lock(&buffer->mutex);
1350	get_online_cpus();
1351
1352	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1353
1354	if (size < buffer_size) {
1355
1356		/* easy case, just free pages */
1357		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages))
1358			goto out_fail;
1359
1360		rm_pages = buffer->pages - nr_pages;
1361
1362		for_each_buffer_cpu(buffer, cpu) {
1363			cpu_buffer = buffer->buffers[cpu];
1364			rb_remove_pages(cpu_buffer, rm_pages);
1365		}
1366		goto out;
1367	}
1368
1369	/*
1370	 * This is a bit more difficult. We only want to add pages
1371	 * when we can allocate enough for all CPUs. We do this
1372	 * by allocating all the pages and storing them on a local
1373	 * link list. If we succeed in our allocation, then we
1374	 * add these pages to the cpu_buffers. Otherwise we just free
1375	 * them all and return -ENOMEM;
1376	 */
1377	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages))
1378		goto out_fail;
1379
1380	new_pages = nr_pages - buffer->pages;
1381
1382	for_each_buffer_cpu(buffer, cpu) {
1383		for (i = 0; i < new_pages; i++) {
1384			struct page *page;
1385			/*
1386			 * __GFP_NORETRY flag makes sure that the allocation
1387			 * fails gracefully without invoking oom-killer and
1388			 * the system is not destabilized.
1389			 */
1390			bpage = kzalloc_node(ALIGN(sizeof(*bpage),
1391						  cache_line_size()),
1392					    GFP_KERNEL | __GFP_NORETRY,
1393					    cpu_to_node(cpu));
1394			if (!bpage)
1395				goto free_pages;
1396			list_add(&bpage->list, &pages);
1397			page = alloc_pages_node(cpu_to_node(cpu),
1398						GFP_KERNEL | __GFP_NORETRY, 0);
1399			if (!page)
1400				goto free_pages;
1401			bpage->page = page_address(page);
1402			rb_init_page(bpage->page);
1403		}
1404	}
1405
1406	for_each_buffer_cpu(buffer, cpu) {
1407		cpu_buffer = buffer->buffers[cpu];
1408		rb_insert_pages(cpu_buffer, &pages, new_pages);
1409	}
1410
1411	if (RB_WARN_ON(buffer, !list_empty(&pages)))
1412		goto out_fail;
1413
1414 out:
1415	buffer->pages = nr_pages;
1416	put_online_cpus();
1417	mutex_unlock(&buffer->mutex);
1418
1419	atomic_dec(&buffer->record_disabled);
1420
1421	return size;
1422
1423 free_pages:
1424	list_for_each_entry_safe(bpage, tmp, &pages, list) {
1425		list_del_init(&bpage->list);
1426		free_buffer_page(bpage);
1427	}
1428	put_online_cpus();
1429	mutex_unlock(&buffer->mutex);
1430	atomic_dec(&buffer->record_disabled);
1431	return -ENOMEM;
1432
1433	/*
1434	 * Something went totally wrong, and we are too paranoid
1435	 * to even clean up the mess.
1436	 */
1437 out_fail:
1438	put_online_cpus();
1439	mutex_unlock(&buffer->mutex);
1440	atomic_dec(&buffer->record_disabled);
1441	return -1;
1442}
1443EXPORT_SYMBOL_GPL(ring_buffer_resize);
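/*
 * Illustrative usage sketch, not part of the original source: grow an
 * existing buffer to about 1MB per CPU. On success the rounded size is
 * returned, while -1 (or -ENOMEM from the allocation path) signals
 * failure. The function name is hypothetical.
 */
static int __maybe_unused rb_example_grow(struct ring_buffer *rb)
{
	int ret = ring_buffer_resize(rb, 1024 * 1024);

	return ret < 0 ? ret : 0;
}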
1444
1445void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
1446{
1447	mutex_lock(&buffer->mutex);
1448	if (val)
1449		buffer->flags |= RB_FL_OVERWRITE;
1450	else
1451		buffer->flags &= ~RB_FL_OVERWRITE;
1452	mutex_unlock(&buffer->mutex);
1453}
1454EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
1455
1456static inline void *
1457__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
1458{
1459	return bpage->data + index;
1460}
1461
1462static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
1463{
1464	return bpage->page->data + index;
1465}
1466
1467static inline struct ring_buffer_event *
1468rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
1469{
1470	return __rb_page_index(cpu_buffer->reader_page,
1471			       cpu_buffer->reader_page->read);
1472}
1473
1474static inline struct ring_buffer_event *
1475rb_iter_head_event(struct ring_buffer_iter *iter)
1476{
1477	return __rb_page_index(iter->head_page, iter->head);
1478}
1479
1480static inline unsigned long rb_page_write(struct buffer_page *bpage)
1481{
1482	return local_read(&bpage->write) & RB_WRITE_MASK;
1483}
1484
1485static inline unsigned rb_page_commit(struct buffer_page *bpage)
1486{
1487	return local_read(&bpage->page->commit);
1488}
1489
1490static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1491{
1492	return local_read(&bpage->entries) & RB_WRITE_MASK;
1493}
1494
1495/* Size is determined by what has been committed */
1496static inline unsigned rb_page_size(struct buffer_page *bpage)
1497{
1498	return rb_page_commit(bpage);
1499}
1500
1501static inline unsigned
1502rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
1503{
1504	return rb_page_commit(cpu_buffer->commit_page);
1505}
1506
1507static inline unsigned
1508rb_event_index(struct ring_buffer_event *event)
1509{
1510	unsigned long addr = (unsigned long)event;
1511
1512	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
1513}
1514
1515static inline int
1516rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1517		   struct ring_buffer_event *event)
1518{
1519	unsigned long addr = (unsigned long)event;
1520	unsigned long index;
1521
1522	index = rb_event_index(event);
1523	addr &= PAGE_MASK;
1524
1525	return cpu_buffer->commit_page->page == (void *)addr &&
1526		rb_commit_index(cpu_buffer) == index;
1527}
1528
1529static void
1530rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1531{
1532	unsigned long max_count;
1533
1534	/*
1535	 * We only race with interrupts and NMIs on this CPU.
1536	 * If we own the commit event, then we can commit
1537	 * all others that interrupted us, since the interruptions
1538	 * are in stack format (they finish before they come
1539	 * back to us). This allows us to do a simple loop to
1540	 * assign the commit to the tail.
1541	 */
1542 again:
1543	max_count = cpu_buffer->buffer->pages * 100;
1544
1545	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
1546		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
1547			return;
1548		if (RB_WARN_ON(cpu_buffer,
1549			       rb_is_reader_page(cpu_buffer->tail_page)))
1550			return;
1551		local_set(&cpu_buffer->commit_page->page->commit,
1552			  rb_page_write(cpu_buffer->commit_page));
1553		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1554		cpu_buffer->write_stamp =
1555			cpu_buffer->commit_page->page->time_stamp;
1556		/* add barrier to keep gcc from optimizing too much */
1557		barrier();
1558	}
1559	while (rb_commit_index(cpu_buffer) !=
1560	       rb_page_write(cpu_buffer->commit_page)) {
1561
1562		local_set(&cpu_buffer->commit_page->page->commit,
1563			  rb_page_write(cpu_buffer->commit_page));
1564		RB_WARN_ON(cpu_buffer,
1565			   local_read(&cpu_buffer->commit_page->page->commit) &
1566			   ~RB_WRITE_MASK);
1567		barrier();
1568	}
1569
1570	/* again, keep gcc from optimizing */
1571	barrier();
1572
1573	/*
1574	 * If an interrupt came in just after the first while loop
1575	 * and pushed the tail page forward, we will be left with
1576	 * a dangling commit that will never go forward.
1577	 */
1578	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1579		goto again;
1580}
1581
1582static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1583{
1584	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
1585	cpu_buffer->reader_page->read = 0;
1586}
1587
1588static void rb_inc_iter(struct ring_buffer_iter *iter)
1589{
1590	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1591
1592	/*
1593	 * The iterator could be on the reader page (it starts there).
1594	 * But the head could have moved, since the reader was
1595	 * found. Check for this case and assign the iterator
1596	 * to the head page instead of next.
1597	 */
1598	if (iter->head_page == cpu_buffer->reader_page)
1599		iter->head_page = rb_set_head_page(cpu_buffer);
1600	else
1601		rb_inc_page(cpu_buffer, &iter->head_page);
1602
1603	iter->read_stamp = iter->head_page->page->time_stamp;
1604	iter->head = 0;
1605}
1606
1607/* Slow path, do not inline */
1608static noinline struct ring_buffer_event *
1609rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
1610{
1611	event->type_len = RINGBUF_TYPE_TIME_EXTEND;
1612
1613	/* Not the first event on the page? */
1614	if (rb_event_index(event)) {
1615		event->time_delta = delta & TS_MASK;
1616		event->array[0] = delta >> TS_SHIFT;
1617	} else {
1618		/* nope, just zero it */
1619		event->time_delta = 0;
1620		event->array[0] = 0;
1621	}
1622
1623	return skip_time_extend(event);
1624}
1625
1626/**
1627 * ring_buffer_update_event - update event type and data
1628 * @event: the event to update
1629 * @type: the type of event
1630 * @length: the size of the event field in the ring buffer
1631 *
1632 * Update the type and data fields of the event. The length
1633 * is the actual size that is written to the ring buffer,
1634 * and with this, we can determine what to place into the
1635 * data field.
1636 */
1637static void
1638rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
1639		struct ring_buffer_event *event, unsigned length,
1640		int add_timestamp, u64 delta)
1641{
1642	/* Only a commit updates the timestamp */
1643	if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
1644		delta = 0;
1645
1646	/*
1647	 * If we need to add a timestamp, then we
1648	 * add it to the start of the reserved space.
1649	 */
1650	if (unlikely(add_timestamp)) {
1651		event = rb_add_time_stamp(event, delta);
1652		length -= RB_LEN_TIME_EXTEND;
1653		delta = 0;
1654	}
1655
1656	event->time_delta = delta;
1657	length -= RB_EVNT_HDR_SIZE;
1658	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
1659		event->type_len = 0;
1660		event->array[0] = length;
1661	} else
1662		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
1663}
1664
1665/*
1666 * rb_handle_head_page - writer hit the head page
1667 *
1668 * Returns: +1 to retry page
1669 *           0 to continue
1670 *          -1 on error
1671 */
1672static int
1673rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
1674		    struct buffer_page *tail_page,
1675		    struct buffer_page *next_page)
1676{
1677	struct buffer_page *new_head;
1678	int entries;
1679	int type;
1680	int ret;
1681
1682	entries = rb_page_entries(next_page);
1683
1684	/*
1685	 * The hard part is here. We need to move the head
1686	 * forward, and protect against both readers on
1687	 * other CPUs and writers coming in via interrupts.
1688	 */
1689	type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
1690				       RB_PAGE_HEAD);
1691
1692	/*
1693	 * type can be one of four:
1694	 *  NORMAL - an interrupt already moved it for us
1695	 *  HEAD   - we are the first to get here.
1696	 *  UPDATE - we are the interrupt interrupting
1697	 *           a current move.
1698	 *  MOVED  - a reader on another CPU moved the next
1699	 *           pointer to its reader page. Give up
1700	 *           and try again.
1701	 */
1702
1703	switch (type) {
1704	case RB_PAGE_HEAD:
1705		/*
1706		 * We changed the head to UPDATE, thus
1707		 * it is our responsibility to update
1708		 * the counters.
1709		 */
1710		local_add(entries, &cpu_buffer->overrun);
1711
1712		/*
1713		 * The entries will be zeroed out when we move the
1714		 * tail page.
1715		 */
1716
1717		/* still more to do */
1718		break;
1719
1720	case RB_PAGE_UPDATE:
1721		/*
1722		 * This is an interrupt that interrupted the
1723		 * previous update. Still more to do.
1724		 */
1725		break;
1726	case RB_PAGE_NORMAL:
1727		/*
1728		 * An interrupt came in before the update
1729		 * and processed this for us.
1730		 * Nothing left to do.
1731		 */
1732		return 1;
1733	case RB_PAGE_MOVED:
1734		/*
1735		 * The reader is on another CPU and just did
1736		 * a swap with our next_page.
1737		 * Try again.
1738		 */
1739		return 1;
1740	default:
1741		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
1742		return -1;
1743	}
1744
1745	/*
1746	 * Now that we are here, the old head pointer is
1747	 * set to UPDATE. This will keep the reader from
1748	 * swapping the head page with the reader page.
1749	 * The reader (on another CPU) will spin till
1750	 * we are finished.
1751	 *
1752	 * We just need to protect against interrupts
1753	 * doing the job. We will set the next pointer
1754	 * to HEAD. After that, we set the old pointer
1755	 * to NORMAL, but only if it was HEAD before.
1756	 * Otherwise we are an interrupt, and only
1757	 * want the outermost commit to reset it.
1758	 */
1759	new_head = next_page;
1760	rb_inc_page(cpu_buffer, &new_head);
1761
1762	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
1763				    RB_PAGE_NORMAL);
1764
1765	/*
1766	 * Valid returns are:
1767	 *  HEAD   - an interrupt came in and already set it.
1768	 *  NORMAL - One of two things:
1769	 *            1) We really set it.
1770	 *            2) A bunch of interrupts came in and moved
1771	 *               the page forward again.
1772	 */
1773	switch (ret) {
1774	case RB_PAGE_HEAD:
1775	case RB_PAGE_NORMAL:
1776		/* OK */
1777		break;
1778	default:
1779		RB_WARN_ON(cpu_buffer, 1);
1780		return -1;
1781	}
1782
1783	/*
1784	 * It is possible that an interrupt came in,
1785	 * set the head up, then more interrupts came in
1786	 * and moved it again. When we get back here,
1787	 * the page would have been set to NORMAL but we
1788	 * just set it back to HEAD.
1789	 *
1790	 * How do you detect this? Well, if that happened
1791	 * the tail page would have moved.
1792	 */
1793	if (ret == RB_PAGE_NORMAL) {
1794		/*
1795		 * If the tail had moved past next, then we need
1796		 * to reset the pointer.
1797		 */
1798		if (cpu_buffer->tail_page != tail_page &&
1799		    cpu_buffer->tail_page != next_page)
1800			rb_head_page_set_normal(cpu_buffer, new_head,
1801						next_page,
1802						RB_PAGE_HEAD);
1803	}
1804
1805	/*
1806	 * If this was the outer most commit (the one that
1807	 * changed the original pointer from HEAD to UPDATE),
1808	 * then it is up to us to reset it to NORMAL.
1809	 */
1810	if (type == RB_PAGE_HEAD) {
1811		ret = rb_head_page_set_normal(cpu_buffer, next_page,
1812					      tail_page,
1813					      RB_PAGE_UPDATE);
1814		if (RB_WARN_ON(cpu_buffer,
1815			       ret != RB_PAGE_UPDATE))
1816			return -1;
1817	}
1818
1819	return 0;
1820}
1821
1822static unsigned rb_calculate_event_length(unsigned length)
1823{
1824	struct ring_buffer_event event; /* Used only for sizeof array */
1825
1826	/* zero length can cause confusion */
1827	if (!length)
1828		length = 1;
1829
1830	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
1831		length += sizeof(event.array[0]);
1832
1833	length += RB_EVNT_HDR_SIZE;
1834	length = ALIGN(length, RB_ARCH_ALIGNMENT);
1835
1836	return length;
1837}
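/*
 * Worked example, not part of the original source. Assuming the common
 * configuration where RB_EVNT_HDR_SIZE is 4, RB_ALIGNMENT is 4 and
 * RB_FORCE_8BYTE_ALIGNMENT is 0: a request to reserve 10 bytes of data is
 * small enough to be encoded in type_len, so no extra array[0] slot is
 * added; 10 bytes of data plus the 4 byte header gives 14, which
 * ALIGN(14, 4) rounds up to 16 bytes actually consumed in the page.
 */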
1838
1839static inline void
1840rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
1841	      struct buffer_page *tail_page,
1842	      unsigned long tail, unsigned long length)
1843{
1844	struct ring_buffer_event *event;
1845
1846	/*
1847	 * Only the event that crossed the page boundary
1848	 * must fill the old tail_page with padding.
1849	 */
1850	if (tail >= BUF_PAGE_SIZE) {
1851		/*
1852		 * If the page was filled, then we still need
1853		 * to update the real_end. Reset it to zero
1854		 * and the reader will ignore it.
1855		 */
1856		if (tail == BUF_PAGE_SIZE)
1857			tail_page->real_end = 0;
1858
1859		local_sub(length, &tail_page->write);
1860		return;
1861	}
1862
1863	event = __rb_page_index(tail_page, tail);
1864	kmemcheck_annotate_bitfield(event, bitfield);
1865
1866	/*
1867	 * Save the original length to the meta data.
1868	 * This will be used by the reader to add lost event
1869	 * counter.
1870	 */
1871	tail_page->real_end = tail;
1872
1873	/*
1874	 * If this event is bigger than the minimum size, then
1875	 * we need to be careful that we don't subtract the
1876	 * write counter enough to allow another writer to slip
1877	 * in on this page.
1878	 * We put in a discarded commit instead, to make sure
1879	 * that this space is not used again.
1880	 *
1881	 * If we are less than the minimum size, we don't need to
1882	 * worry about it.
1883	 */
1884	if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
1885		/* No room for any events */
1886
1887		/* Mark the rest of the page with padding */
1888		rb_event_set_padding(event);
1889
1890		/* Set the write back to the previous setting */
1891		local_sub(length, &tail_page->write);
1892		return;
1893	}
1894
1895	/* Put in a discarded event */
1896	event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
1897	event->type_len = RINGBUF_TYPE_PADDING;
1898	/* time delta must be non zero */
1899	event->time_delta = 1;
1900
1901	/* Set write to end of buffer */
1902	length = (tail + length) - BUF_PAGE_SIZE;
1903	local_sub(length, &tail_page->write);
1904}
1905
1906/*
1907 * This is the slow path, force gcc not to inline it.
1908 */
1909static noinline struct ring_buffer_event *
1910rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
1911	     unsigned long length, unsigned long tail,
1912	     struct buffer_page *tail_page, u64 ts)
1913{
1914	struct buffer_page *commit_page = cpu_buffer->commit_page;
1915	struct ring_buffer *buffer = cpu_buffer->buffer;
1916	struct buffer_page *next_page;
1917	int ret;
1918
1919	next_page = tail_page;
1920
1921	rb_inc_page(cpu_buffer, &next_page);
1922
1923	/*
1924	 * If for some reason, we had an interrupt storm that made
1925	 * it all the way around the buffer, bail, and warn
1926	 * about it.
1927	 */
1928	if (unlikely(next_page == commit_page)) {
1929		local_inc(&cpu_buffer->commit_overrun);
1930		goto out_reset;
1931	}
1932
1933	/*
1934	 * This is where the fun begins!
1935	 *
1936	 * We are fighting against races between a reader that
1937	 * could be on another CPU trying to swap its reader
1938	 * page with the buffer head.
1939	 *
1940	 * We are also fighting against interrupts coming in and
1941	 * moving the head or tail on us as well.
1942	 *
1943	 * If the next page is the head page then we have filled
1944	 * the buffer, unless the commit page is still on the
1945	 * reader page.
1946	 */
1947	if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
1948
1949		/*
1950		 * If the commit is not on the reader page, then
1951		 * move the header page.
1952		 */
1953		if (!rb_is_reader_page(cpu_buffer->commit_page)) {
1954			/*
1955			 * If we are not in overwrite mode,
1956			 * this is easy, just stop here.
1957			 */
1958			if (!(buffer->flags & RB_FL_OVERWRITE))
1959				goto out_reset;
1960
1961			ret = rb_handle_head_page(cpu_buffer,
1962						  tail_page,
1963						  next_page);
1964			if (ret < 0)
1965				goto out_reset;
1966			if (ret)
1967				goto out_again;
1968		} else {
1969			/*
1970			 * We need to be careful here too. The
1971			 * commit page could still be on the reader
1972			 * page. We could have a small buffer, and
1973			 * have filled up the buffer with events
1974			 * from interrupts and such, and wrapped.
1975			 *
1976			 * Note, if the tail page is also on the
1977			 * reader_page, we let it move out.
1978			 */
1979			if (unlikely((cpu_buffer->commit_page !=
1980				      cpu_buffer->tail_page) &&
1981				     (cpu_buffer->commit_page ==
1982				      cpu_buffer->reader_page))) {
1983				local_inc(&cpu_buffer->commit_overrun);
1984				goto out_reset;
1985			}
1986		}
1987	}
1988
1989	ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
1990	if (ret) {
1991		/*
1992		 * Nested commits always have zero deltas, so
1993		 * just reread the time stamp
1994		 */
1995		ts = rb_time_stamp(buffer);
1996		next_page->page->time_stamp = ts;
1997	}
1998
1999 out_again:
2000
2001	rb_reset_tail(cpu_buffer, tail_page, tail, length);
2002
2003	/* fail and let the caller try again */
2004	return ERR_PTR(-EAGAIN);
2005
2006 out_reset:
2007	/* reset write */
2008	rb_reset_tail(cpu_buffer, tail_page, tail, length);
2009
2010	return NULL;
2011}
2012
2013static struct ring_buffer_event *
2014__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
2015		  unsigned long length, u64 ts,
2016		  u64 delta, int add_timestamp)
2017{
2018	struct buffer_page *tail_page;
2019	struct ring_buffer_event *event;
2020	unsigned long tail, write;
2021
2022	/*
2023	 * If the time delta since the last event is too big to
2024	 * hold in the time field of the event, then we append a
2025	 * TIME EXTEND event ahead of the data event.
2026	 */
2027	if (unlikely(add_timestamp))
2028		length += RB_LEN_TIME_EXTEND;
2029
2030	tail_page = cpu_buffer->tail_page;
2031	write = local_add_return(length, &tail_page->write);
2032
2033	/* set write to only the index of the write */
2034	write &= RB_WRITE_MASK;
2035	tail = write - length;
2036
2037	/* See if we shot past the end of this buffer page */
2038	if (unlikely(write > BUF_PAGE_SIZE))
2039		return rb_move_tail(cpu_buffer, length, tail,
2040				    tail_page, ts);
2041
2042	/* We reserved something on the buffer */
2043
2044	event = __rb_page_index(tail_page, tail);
2045	kmemcheck_annotate_bitfield(event, bitfield);
2046	rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
2047
2048	local_inc(&tail_page->entries);
2049
2050	/*
2051	 * If this is the first commit on the page, then update
2052	 * its timestamp.
2053	 */
2054	if (!tail)
2055		tail_page->page->time_stamp = ts;
2056
2057	return event;
2058}
2059
2060static inline int
2061rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2062		  struct ring_buffer_event *event)
2063{
2064	unsigned long new_index, old_index;
2065	struct buffer_page *bpage;
2066	unsigned long index;
2067	unsigned long addr;
2068
2069	new_index = rb_event_index(event);
2070	old_index = new_index + rb_event_ts_length(event);
2071	addr = (unsigned long)event;
2072	addr &= PAGE_MASK;
2073
2074	bpage = cpu_buffer->tail_page;
2075
2076	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2077		unsigned long write_mask =
2078			local_read(&bpage->write) & ~RB_WRITE_MASK;
2079		/*
2080		 * This is on the tail page. It is possible that
2081		 * a write could come in and move the tail page
2082		 * and write to the next page. That is fine
2083		 * because we just shorten what is on this page.
2084		 */
2085		old_index += write_mask;
2086		new_index += write_mask;
2087		index = local_cmpxchg(&bpage->write, old_index, new_index);
2088		if (index == old_index)
2089			return 1;
2090	}
2091
2092	/* could not discard */
2093	return 0;
2094}
2095
2096static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2097{
2098	local_inc(&cpu_buffer->committing);
2099	local_inc(&cpu_buffer->commits);
2100}
2101
2102static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2103{
2104	unsigned long commits;
2105
2106	if (RB_WARN_ON(cpu_buffer,
2107		       !local_read(&cpu_buffer->committing)))
2108		return;
2109
2110 again:
2111	commits = local_read(&cpu_buffer->commits);
2112	/* synchronize with interrupts */
2113	barrier();
2114	if (local_read(&cpu_buffer->committing) == 1)
2115		rb_set_commit_to_write(cpu_buffer);
2116
2117	local_dec(&cpu_buffer->committing);
2118
2119	/* synchronize with interrupts */
2120	barrier();
2121
2122	/*
2123	 * Need to account for interrupts coming in between the
2124	 * updating of the commit page and the clearing of the
2125	 * committing counter.
2126	 */
2127	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2128	    !local_read(&cpu_buffer->committing)) {
2129		local_inc(&cpu_buffer->committing);
2130		goto again;
2131	}
2132}
2133
2134static struct ring_buffer_event *
2135rb_reserve_next_event(struct ring_buffer *buffer,
2136		      struct ring_buffer_per_cpu *cpu_buffer,
2137		      unsigned long length)
2138{
2139	struct ring_buffer_event *event;
2140	u64 ts, delta;
2141	int nr_loops = 0;
2142	int add_timestamp;
2143	u64 diff;
2144
2145	rb_start_commit(cpu_buffer);
2146
2147#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2148	/*
2149	 * Due to the ability to swap a cpu buffer from a buffer
2150	 * it is possible it was swapped before we committed.
2151	 * (committing stops a swap). We check for it here and
2152	 * if it happened, we have to fail the write.
2153	 */
2154	barrier();
2155	if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
2156		local_dec(&cpu_buffer->committing);
2157		local_dec(&cpu_buffer->commits);
2158		return NULL;
2159	}
2160#endif
2161
2162	length = rb_calculate_event_length(length);
2163 again:
2164	add_timestamp = 0;
2165	delta = 0;
2166
2167	/*
2168	 * We allow for interrupts to reenter here and do a trace.
2169	 * If one does, it will cause this original code to loop
2170	 * back here. Even with heavy interrupts happening, this
2171	 * should only happen a few times in a row. If this happens
2172	 * 1000 times in a row, there must be either an interrupt
2173	 * storm or we have something buggy.
2174	 * Bail!
2175	 */
2176	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2177		goto out_fail;
2178
2179	ts = rb_time_stamp(cpu_buffer->buffer);
2180	diff = ts - cpu_buffer->write_stamp;
2181
2182	/* make sure this diff is calculated here */
2183	barrier();
2184
2185	/* Did the write stamp get updated already? */
2186	if (likely(ts >= cpu_buffer->write_stamp)) {
2187		delta = diff;
2188		if (unlikely(test_time_stamp(delta))) {
2189			int local_clock_stable = 1;
2190#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2191			local_clock_stable = sched_clock_stable;
2192#endif
2193			WARN_ONCE(delta > (1ULL << 59),
2194				  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
2195				  (unsigned long long)delta,
2196				  (unsigned long long)ts,
2197				  (unsigned long long)cpu_buffer->write_stamp,
2198				  local_clock_stable ? "" :
2199				  "If you just came from a suspend/resume,\n"
2200				  "please switch to the trace global clock:\n"
2201				  "  echo global > /sys/kernel/debug/tracing/trace_clock\n");
2202			add_timestamp = 1;
2203		}
2204	}
2205
2206	event = __rb_reserve_next(cpu_buffer, length, ts,
2207				  delta, add_timestamp);
2208	if (unlikely(PTR_ERR(event) == -EAGAIN))
2209		goto again;
2210
2211	if (!event)
2212		goto out_fail;
2213
2214	return event;
2215
2216 out_fail:
2217	rb_end_commit(cpu_buffer);
2218	return NULL;
2219}
2220
2221#ifdef CONFIG_TRACING
2222
2223#define TRACE_RECURSIVE_DEPTH 16
2224
2225/* Keep this code out of the fast path cache */
2226static noinline void trace_recursive_fail(void)
2227{
2228	/* Disable all tracing before we do anything else */
2229	tracing_off_permanent();
2230
2231	printk_once(KERN_WARNING "Tracing recursion: depth[%ld]:"
2232		    "HC[%lu]:SC[%lu]:NMI[%lu]\n",
2233		    trace_recursion_buffer(),
2234		    hardirq_count() >> HARDIRQ_SHIFT,
2235		    softirq_count() >> SOFTIRQ_SHIFT,
2236		    in_nmi());
2237
2238	WARN_ON_ONCE(1);
2239}
2240
2241static inline int trace_recursive_lock(void)
2242{
2243	trace_recursion_inc();
2244
2245	if (likely(trace_recursion_buffer() < TRACE_RECURSIVE_DEPTH))
2246		return 0;
2247
2248	trace_recursive_fail();
2249
2250	return -1;
2251}
2252
2253static inline void trace_recursive_unlock(void)
2254{
2255	WARN_ON_ONCE(!trace_recursion_buffer());
2256
2257	trace_recursion_dec();
2258}
2259
2260#else
2261
2262#define trace_recursive_lock()		(0)
2263#define trace_recursive_unlock()	do { } while (0)
2264
2265#endif
2266
2267/**
2268 * ring_buffer_lock_reserve - reserve a part of the buffer
2269 * @buffer: the ring buffer to reserve from
2270 * @length: the length of the data to reserve (excluding event header)
2271 *
2272 * Returns a reserved event on the ring buffer to copy directly to.
2273 * The user of this interface will need to get the body to write into
2274 * and can use the ring_buffer_event_data() interface.
2275 *
2276 * The length is the length of the data needed, not the event length
2277 * which also includes the event header.
2278 *
2279 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
2280 * If NULL is returned, then nothing has been allocated or locked.
2281 */
2282struct ring_buffer_event *
2283ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
2284{
2285	struct ring_buffer_per_cpu *cpu_buffer;
2286	struct ring_buffer_event *event;
2287	int cpu;
2288
2289	if (ring_buffer_flags != RB_BUFFERS_ON)
2290		return NULL;
2291
2292	/* If we are tracing schedule, we don't want to recurse */
2293	preempt_disable_notrace();
2294
2295	if (atomic_read(&buffer->record_disabled))
2296		goto out_nocheck;
2297
2298	if (trace_recursive_lock())
2299		goto out_nocheck;
2300
2301	cpu = raw_smp_processor_id();
2302
2303	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2304		goto out;
2305
2306	cpu_buffer = buffer->buffers[cpu];
2307
2308	if (atomic_read(&cpu_buffer->record_disabled))
2309		goto out;
2310
2311	if (length > BUF_MAX_DATA_SIZE)
2312		goto out;
2313
2314	event = rb_reserve_next_event(buffer, cpu_buffer, length);
2315	if (!event)
2316		goto out;
2317
2318	return event;
2319
2320 out:
2321	trace_recursive_unlock();
2322
2323 out_nocheck:
2324	preempt_enable_notrace();
2325	return NULL;
2326}
2327EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
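/*
 * Illustrative usage sketch, not part of the original source: the usual
 * reserve/fill/commit sequence. The record layout and function name are
 * hypothetical; ring_buffer_event_data() comes from <linux/ring_buffer.h>.
 */
struct rb_example_rec {
	u32	id;
	u64	value;
};

static int __maybe_unused rb_example_record(struct ring_buffer *buffer,
					    u32 id, u64 value)
{
	struct ring_buffer_event *event;
	struct rb_example_rec *rec;

	event = ring_buffer_lock_reserve(buffer, sizeof(*rec));
	if (!event)
		return -EBUSY;

	rec = ring_buffer_event_data(event);
	rec->id = id;
	rec->value = value;

	return ring_buffer_unlock_commit(buffer, event);
}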
2328
2329static void
2330rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2331		      struct ring_buffer_event *event)
2332{
2333	u64 delta;
2334
2335	/*
2336	 * The event first in the commit queue updates the
2337	 * time stamp.
2338	 */
2339	if (rb_event_is_commit(cpu_buffer, event)) {
2340		/*
2341		 * A commit event that is first on a page
2342		 * updates the write timestamp with the page stamp
2343		 */
2344		if (!rb_event_index(event))
2345			cpu_buffer->write_stamp =
2346				cpu_buffer->commit_page->page->time_stamp;
2347		else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
2348			delta = event->array[0];
2349			delta <<= TS_SHIFT;
2350			delta += event->time_delta;
2351			cpu_buffer->write_stamp += delta;
2352		} else
2353			cpu_buffer->write_stamp += event->time_delta;
2354	}
2355}
2356
2357static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2358		      struct ring_buffer_event *event)
2359{
2360	local_inc(&cpu_buffer->entries);
2361	rb_update_write_stamp(cpu_buffer, event);
2362	rb_end_commit(cpu_buffer);
2363}
2364
2365/**
2366 * ring_buffer_unlock_commit - commit a reserved event
2367 * @buffer: The buffer to commit to
2368 * @event: The event pointer to commit.
2369 *
2370 * This commits the data to the ring buffer, and releases any locks held.
2371 *
2372 * Must be paired with ring_buffer_lock_reserve.
2373 */
2374int ring_buffer_unlock_commit(struct ring_buffer *buffer,
2375			      struct ring_buffer_event *event)
2376{
2377	struct ring_buffer_per_cpu *cpu_buffer;
2378	int cpu = raw_smp_processor_id();
2379
2380	cpu_buffer = buffer->buffers[cpu];
2381
2382	rb_commit(cpu_buffer, event);
2383
2384	trace_recursive_unlock();
2385
2386	preempt_enable_notrace();
2387
2388	return 0;
2389}
2390EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
2391
2392static inline void rb_event_discard(struct ring_buffer_event *event)
2393{
2394	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
2395		event = skip_time_extend(event);
2396
2397	/* array[0] holds the actual length for the discarded event */
2398	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2399	event->type_len = RINGBUF_TYPE_PADDING;
2400	/* time delta must be non zero */
2401	if (!event->time_delta)
2402		event->time_delta = 1;
2403}
2404
2405/*
2406 * Decrement the entries to the page that an event is on.
2407 * The event does not even need to exist, only the pointer
2408 * to the page it is on. This may only be called before the commit
2409 * takes place.
2410 */
2411static inline void
2412rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2413		   struct ring_buffer_event *event)
2414{
2415	unsigned long addr = (unsigned long)event;
2416	struct buffer_page *bpage = cpu_buffer->commit_page;
2417	struct buffer_page *start;
2418
2419	addr &= PAGE_MASK;
2420
2421	/* Do the likely case first */
2422	if (likely(bpage->page == (void *)addr)) {
2423		local_dec(&bpage->entries);
2424		return;
2425	}
2426
2427	/*
2428	 * Because the commit page may be on the reader page we
2429	 * start with the next page and check the end loop there.
2430	 */
2431	rb_inc_page(cpu_buffer, &bpage);
2432	start = bpage;
2433	do {
2434		if (bpage->page == (void *)addr) {
2435			local_dec(&bpage->entries);
2436			return;
2437		}
2438		rb_inc_page(cpu_buffer, &bpage);
2439	} while (bpage != start);
2440
2441	/* commit not part of this buffer?? */
2442	RB_WARN_ON(cpu_buffer, 1);
2443}
2444
2445/**
2446 * ring_buffer_commit_discard - discard an event that has not been committed
2447 * @buffer: the ring buffer
2448 * @event: non committed event to discard
2449 *
2450 * Sometimes an event that is in the ring buffer needs to be ignored.
2451 * This function lets the user discard an event in the ring buffer
2452 * and then that event will not be read later.
2453 *
2454 * This function only works if it is called before the item has been
2455 * committed. It will try to free the event from the ring buffer
2456 * if another event has not been added behind it.
2457 *
2458 * If another event has been added behind it, it will set the event
2459 * up as discarded, and perform the commit.
2460 *
2461 * If this function is called, do not call ring_buffer_unlock_commit on
2462 * the event.
2463 */
2464void ring_buffer_discard_commit(struct ring_buffer *buffer,
2465				struct ring_buffer_event *event)
2466{
2467	struct ring_buffer_per_cpu *cpu_buffer;
2468	int cpu;
2469
2470	/* The event is discarded regardless */
2471	rb_event_discard(event);
2472
2473	cpu = smp_processor_id();
2474	cpu_buffer = buffer->buffers[cpu];
2475
2476	/*
2477	 * This must only be called if the event has not been
2478	 * committed yet. Thus we can assume that preemption
2479	 * is still disabled.
2480	 */
2481	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
2482
2483	rb_decrement_entry(cpu_buffer, event);
2484	if (rb_try_to_discard(cpu_buffer, event))
2485		goto out;
2486
2487	/*
2488	 * The commit is still visible by the reader, so we
2489	 * must still update the timestamp.
2490	 */
2491	rb_update_write_stamp(cpu_buffer, event);
2492 out:
2493	rb_end_commit(cpu_buffer);
2494
2495	trace_recursive_unlock();
2496
2497	preempt_enable_notrace();
2498
2499}
2500EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
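/*
 * Illustrative sketch (not part of the original file): discarding a
 * reserved event instead of committing it, for example after a filter
 * decides the data is unwanted. "struct filtered_entry" and
 * "filter_keep()" are made up for the example.
 */
struct filtered_entry {
	unsigned long value;
};

static bool filter_keep(struct filtered_entry *entry)
{
	return entry->value != 0;	/* stand-in for a real filter */
}

static void sample_write_filtered(struct ring_buffer *buffer, unsigned long value)
{
	struct ring_buffer_event *event;
	struct filtered_entry *entry;

	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->value = value;

	if (filter_keep(entry))
		ring_buffer_unlock_commit(buffer, event);
	else
		/* Do not call ring_buffer_unlock_commit() after this */
		ring_buffer_discard_commit(buffer, event);
}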
2501
2502/**
2503 * ring_buffer_write - write data to the buffer without reserving
2504 * @buffer: The ring buffer to write to.
2505 * @length: The length of the data being written (excluding the event header)
2506 * @data: The data to write to the buffer.
2507 *
2508 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
2509 * one function. If you already have the data to write to the buffer, it
2510 * may be easier to simply call this function.
2511 *
2512 * Note, like ring_buffer_lock_reserve, the length is the length of the data
2513 * and not the length of the event which would hold the header.
2514 */
2515int ring_buffer_write(struct ring_buffer *buffer,
2516			unsigned long length,
2517			void *data)
2518{
2519	struct ring_buffer_per_cpu *cpu_buffer;
2520	struct ring_buffer_event *event;
2521	void *body;
2522	int ret = -EBUSY;
2523	int cpu;
2524
2525	if (ring_buffer_flags != RB_BUFFERS_ON)
2526		return -EBUSY;
2527
2528	preempt_disable_notrace();
2529
2530	if (atomic_read(&buffer->record_disabled))
2531		goto out;
2532
2533	cpu = raw_smp_processor_id();
2534
2535	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2536		goto out;
2537
2538	cpu_buffer = buffer->buffers[cpu];
2539
2540	if (atomic_read(&cpu_buffer->record_disabled))
2541		goto out;
2542
2543	if (length > BUF_MAX_DATA_SIZE)
2544		goto out;
2545
2546	event = rb_reserve_next_event(buffer, cpu_buffer, length);
2547	if (!event)
2548		goto out;
2549
2550	body = rb_event_data(event);
2551
2552	memcpy(body, data, length);
2553
2554	rb_commit(cpu_buffer, event);
2555
2556	ret = 0;
2557 out:
2558	preempt_enable_notrace();
2559
2560	return ret;
2561}
2562EXPORT_SYMBOL_GPL(ring_buffer_write);
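/*
 * Illustrative sketch (not part of the original file): writing an
 * already prepared blob with ring_buffer_write() instead of the
 * reserve/commit pair above.
 */
static int sample_write_blob(struct ring_buffer *buffer,
			     const void *blob, unsigned long len)
{
	/* len is the payload size only; the event header is added internally */
	return ring_buffer_write(buffer, len, (void *)blob);
}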
2563
2564static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
2565{
2566	struct buffer_page *reader = cpu_buffer->reader_page;
2567	struct buffer_page *head = rb_set_head_page(cpu_buffer);
2568	struct buffer_page *commit = cpu_buffer->commit_page;
2569
2570	/* In case of error, head will be NULL */
2571	if (unlikely(!head))
2572		return 1;
2573
2574	return reader->read == rb_page_commit(reader) &&
2575		(commit == reader ||
2576		 (commit == head &&
2577		  head->read == rb_page_commit(commit)));
2578}
2579
2580/**
2581 * ring_buffer_record_disable - stop all writes into the buffer
2582 * @buffer: The ring buffer to stop writes to.
2583 *
2584 * This prevents all writes to the buffer. Any attempt to write
2585 * to the buffer after this will fail and return NULL.
2586 *
2587 * The caller should call synchronize_sched() after this.
2588 */
2589void ring_buffer_record_disable(struct ring_buffer *buffer)
2590{
2591	atomic_inc(&buffer->record_disabled);
2592}
2593EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
2594
2595/**
2596 * ring_buffer_record_enable - enable writes to the buffer
2597 * @buffer: The ring buffer to enable writes
2598 *
2599 * Note, multiple disables will need the same number of enables
2600 * to truly enable the writing (much like preempt_disable).
2601 */
2602void ring_buffer_record_enable(struct ring_buffer *buffer)
2603{
2604	atomic_dec(&buffer->record_disabled);
2605}
2606EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
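/*
 * Illustrative sketch (not part of the original file): pausing all
 * writers while inspecting the buffer, following the synchronize_sched()
 * advice in the kerneldoc above.
 */
static void sample_pause_and_inspect(struct ring_buffer *buffer)
{
	ring_buffer_record_disable(buffer);
	/* Wait for writers already inside the reserve/commit path */
	synchronize_sched();

	pr_info("entries=%lu overruns=%lu\n",
		ring_buffer_entries(buffer), ring_buffer_overruns(buffer));

	ring_buffer_record_enable(buffer);
}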
2607
2608/**
2609 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
2610 * @buffer: The ring buffer to stop writes to.
2611 * @cpu: The CPU buffer to stop
2612 *
2613 * This prevents all writes to the buffer. Any attempt to write
2614 * to the buffer after this will fail and return NULL.
2615 *
2616 * The caller should call synchronize_sched() after this.
2617 */
2618void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
2619{
2620	struct ring_buffer_per_cpu *cpu_buffer;
2621
2622	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2623		return;
2624
2625	cpu_buffer = buffer->buffers[cpu];
2626	atomic_inc(&cpu_buffer->record_disabled);
2627}
2628EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
2629
2630/**
2631 * ring_buffer_record_enable_cpu - enable writes to the buffer
2632 * @buffer: The ring buffer to enable writes
2633 * @cpu: The CPU to enable.
2634 *
2635 * Note, multiple disables will need the same number of enables
2636 * to truly enable the writing (much like preempt_disable).
2637 */
2638void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
2639{
2640	struct ring_buffer_per_cpu *cpu_buffer;
2641
2642	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2643		return;
2644
2645	cpu_buffer = buffer->buffers[cpu];
2646	atomic_dec(&cpu_buffer->record_disabled);
2647}
2648EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
2649
2650/*
2651 * The total entries in the ring buffer is the running counter
2652 * of entries entered into the ring buffer, minus the sum of
2653 * the entries read from the ring buffer and the number of
2654 * entries that were overwritten.
2655 */
2656static inline unsigned long
2657rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
2658{
2659	return local_read(&cpu_buffer->entries) -
2660		(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
2661}
2662
2663/**
2664 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
2665 * @buffer: The ring buffer
2666 * @cpu: The per CPU buffer to get the entries from.
2667 */
2668unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
2669{
2670	struct ring_buffer_per_cpu *cpu_buffer;
2671
2672	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2673		return 0;
2674
2675	cpu_buffer = buffer->buffers[cpu];
2676
2677	return rb_num_of_entries(cpu_buffer);
2678}
2679EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
2680
2681/**
2682 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
2683 * @buffer: The ring buffer
2684 * @cpu: The per CPU buffer to get the number of overruns from
2685 */
2686unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
2687{
2688	struct ring_buffer_per_cpu *cpu_buffer;
2689	unsigned long ret;
2690
2691	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2692		return 0;
2693
2694	cpu_buffer = buffer->buffers[cpu];
2695	ret = local_read(&cpu_buffer->overrun);
2696
2697	return ret;
2698}
2699EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
2700
2701/**
2702 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by commits
2703 * @buffer: The ring buffer
2704 * @cpu: The per CPU buffer to get the number of overruns from
2705 */
2706unsigned long
2707ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
2708{
2709	struct ring_buffer_per_cpu *cpu_buffer;
2710	unsigned long ret;
2711
2712	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2713		return 0;
2714
2715	cpu_buffer = buffer->buffers[cpu];
2716	ret = local_read(&cpu_buffer->commit_overrun);
2717
2718	return ret;
2719}
2720EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
2721
2722/**
2723 * ring_buffer_entries - get the number of entries in a buffer
2724 * @buffer: The ring buffer
2725 *
2726 * Returns the total number of entries in the ring buffer
2727 * (all CPU entries)
2728 */
2729unsigned long ring_buffer_entries(struct ring_buffer *buffer)
2730{
2731	struct ring_buffer_per_cpu *cpu_buffer;
2732	unsigned long entries = 0;
2733	int cpu;
2734
2735	/* if you care about this being correct, lock the buffer */
2736	for_each_buffer_cpu(buffer, cpu) {
2737		cpu_buffer = buffer->buffers[cpu];
2738		entries += rb_num_of_entries(cpu_buffer);
2739	}
2740
2741	return entries;
2742}
2743EXPORT_SYMBOL_GPL(ring_buffer_entries);
2744
2745/**
2746 * ring_buffer_overruns - get the number of overruns in buffer
2747 * @buffer: The ring buffer
2748 *
2749 * Returns the total number of overruns in the ring buffer
2750 * (all CPU entries)
2751 */
2752unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
2753{
2754	struct ring_buffer_per_cpu *cpu_buffer;
2755	unsigned long overruns = 0;
2756	int cpu;
2757
2758	/* if you care about this being correct, lock the buffer */
2759	for_each_buffer_cpu(buffer, cpu) {
2760		cpu_buffer = buffer->buffers[cpu];
2761		overruns += local_read(&cpu_buffer->overrun);
2762	}
2763
2764	return overruns;
2765}
2766EXPORT_SYMBOL_GPL(ring_buffer_overruns);
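/*
 * Illustrative sketch (not part of the original file): dumping the
 * per-cpu entry and overrun counters exported above.
 */
static void sample_dump_stats(struct ring_buffer *buffer)
{
	int cpu;

	for_each_online_cpu(cpu)
		pr_info("cpu%d: entries=%lu overruns=%lu commit_overruns=%lu\n",
			cpu,
			ring_buffer_entries_cpu(buffer, cpu),
			ring_buffer_overrun_cpu(buffer, cpu),
			ring_buffer_commit_overrun_cpu(buffer, cpu));
}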
2767
2768static void rb_iter_reset(struct ring_buffer_iter *iter)
2769{
2770	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2771
2772	/* Iterator usage is expected to have record disabled */
2773	if (list_empty(&cpu_buffer->reader_page->list)) {
2774		iter->head_page = rb_set_head_page(cpu_buffer);
2775		if (unlikely(!iter->head_page))
2776			return;
2777		iter->head = iter->head_page->read;
2778	} else {
2779		iter->head_page = cpu_buffer->reader_page;
2780		iter->head = cpu_buffer->reader_page->read;
2781	}
2782	if (iter->head)
2783		iter->read_stamp = cpu_buffer->read_stamp;
2784	else
2785		iter->read_stamp = iter->head_page->page->time_stamp;
2786	iter->cache_reader_page = cpu_buffer->reader_page;
2787	iter->cache_read = cpu_buffer->read;
2788}
2789
2790/**
2791 * ring_buffer_iter_reset - reset an iterator
2792 * @iter: The iterator to reset
2793 *
2794 * Resets the iterator, so that it will start from the beginning
2795 * again.
2796 */
2797void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
2798{
2799	struct ring_buffer_per_cpu *cpu_buffer;
2800	unsigned long flags;
2801
2802	if (!iter)
2803		return;
2804
2805	cpu_buffer = iter->cpu_buffer;
2806
2807	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2808	rb_iter_reset(iter);
2809	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2810}
2811EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
2812
2813/**
2814 * ring_buffer_iter_empty - check if an iterator has no more to read
2815 * @iter: The iterator to check
2816 */
2817int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
2818{
2819	struct ring_buffer_per_cpu *cpu_buffer;
2820
2821	cpu_buffer = iter->cpu_buffer;
2822
2823	return iter->head_page == cpu_buffer->commit_page &&
2824		iter->head == rb_commit_index(cpu_buffer);
2825}
2826EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
2827
2828static void
2829rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2830		     struct ring_buffer_event *event)
2831{
2832	u64 delta;
2833
2834	switch (event->type_len) {
2835	case RINGBUF_TYPE_PADDING:
2836		return;
2837
2838	case RINGBUF_TYPE_TIME_EXTEND:
2839		delta = event->array[0];
2840		delta <<= TS_SHIFT;
2841		delta += event->time_delta;
2842		cpu_buffer->read_stamp += delta;
2843		return;
2844
2845	case RINGBUF_TYPE_TIME_STAMP:
2846		/* FIXME: not implemented */
2847		return;
2848
2849	case RINGBUF_TYPE_DATA:
2850		cpu_buffer->read_stamp += event->time_delta;
2851		return;
2852
2853	default:
2854		BUG();
2855	}
2856	return;
2857}
2858
2859static void
2860rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
2861			  struct ring_buffer_event *event)
2862{
2863	u64 delta;
2864
2865	switch (event->type_len) {
2866	case RINGBUF_TYPE_PADDING:
2867		return;
2868
2869	case RINGBUF_TYPE_TIME_EXTEND:
2870		delta = event->array[0];
2871		delta <<= TS_SHIFT;
2872		delta += event->time_delta;
2873		iter->read_stamp += delta;
2874		return;
2875
2876	case RINGBUF_TYPE_TIME_STAMP:
2877		/* FIXME: not implemented */
2878		return;
2879
2880	case RINGBUF_TYPE_DATA:
2881		iter->read_stamp += event->time_delta;
2882		return;
2883
2884	default:
2885		BUG();
2886	}
2887	return;
2888}
2889
2890static struct buffer_page *
2891rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
2892{
2893	struct buffer_page *reader = NULL;
2894	unsigned long overwrite;
2895	unsigned long flags;
2896	int nr_loops = 0;
2897	int ret;
2898
2899	local_irq_save(flags);
2900	arch_spin_lock(&cpu_buffer->lock);
2901
2902 again:
2903	/*
2904	 * This should normally only loop twice. But because the
2905	 * start of the reader inserts an empty page, it causes
2906	 * a case where we will loop three times. There should be no
2907	 * reason to loop four times (that I know of).
2908	 */
2909	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
2910		reader = NULL;
2911		goto out;
2912	}
2913
2914	reader = cpu_buffer->reader_page;
2915
2916	/* If there's more to read, return this page */
2917	if (cpu_buffer->reader_page->read < rb_page_size(reader))
2918		goto out;
2919
2920	/* Never should we have an index greater than the size */
2921	if (RB_WARN_ON(cpu_buffer,
2922		       cpu_buffer->reader_page->read > rb_page_size(reader)))
2923		goto out;
2924
2925	/* check if we caught up to the tail */
2926	reader = NULL;
2927	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
2928		goto out;
2929
2930	/*
2931	 * Reset the reader page to size zero.
2932	 */
2933	local_set(&cpu_buffer->reader_page->write, 0);
2934	local_set(&cpu_buffer->reader_page->entries, 0);
2935	local_set(&cpu_buffer->reader_page->page->commit, 0);
2936	cpu_buffer->reader_page->real_end = 0;
2937
2938 spin:
2939	/*
2940	 * Splice the empty reader page into the list around the head.
2941	 */
2942	reader = rb_set_head_page(cpu_buffer);
2943	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
2944	cpu_buffer->reader_page->list.prev = reader->list.prev;
2945
2946	/*
2947	 * cpu_buffer->pages just needs to point to the buffer, it
2948	 *  has no specific buffer page to point to. Let's move it out
2949	 *  of our way so we don't accidentally swap it.
2950	 */
2951	cpu_buffer->pages = reader->list.prev;
2952
2953	/* The reader page will be pointing to the new head */
2954	rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
2955
2956	/*
2957	 * We want to make sure we read the overruns after we set up our
2958	 * pointers to the next object. The writer side does a
2959	 * cmpxchg to cross pages which acts as the mb on the writer
2960	 * side. Note, the reader will constantly fail the swap
2961	 * while the writer is updating the pointers, so this
2962	 * guarantees that the overwrite recorded here is the one we
2963	 * want to compare with the last_overrun.
2964	 */
2965	smp_mb();
2966	overwrite = local_read(&(cpu_buffer->overrun));
2967
2968	/*
2969	 * Here's the tricky part.
2970	 *
2971	 * We need to move the pointer past the header page.
2972	 * But we can only do that if a writer is not currently
2973	 * moving it. The page before the header page has the
2974	 * flag bit '1' set if it is pointing to the page we want.
2975	 * But if the writer is in the process of moving it,
2976	 * then it will be '2', or '0' if it has already moved.
2977	 */
2978
2979	ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
2980
2981	/*
2982	 * If we did not convert it, then we must try again.
2983	 */
2984	if (!ret)
2985		goto spin;
2986
2987	/*
2988	 * Yeah! We succeeded in replacing the page.
2989	 *
2990	 * Now make the new head point back to the reader page.
2991	 */
2992	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
2993	rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
2994
2995	/* Finally update the reader page to the new head */
2996	cpu_buffer->reader_page = reader;
2997	rb_reset_reader_page(cpu_buffer);
2998
2999	if (overwrite != cpu_buffer->last_overrun) {
3000		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
3001		cpu_buffer->last_overrun = overwrite;
3002	}
3003
3004	goto again;
3005
3006 out:
3007	arch_spin_unlock(&cpu_buffer->lock);
3008	local_irq_restore(flags);
3009
3010	return reader;
3011}
3012
3013static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
3014{
3015	struct ring_buffer_event *event;
3016	struct buffer_page *reader;
3017	unsigned length;
3018
3019	reader = rb_get_reader_page(cpu_buffer);
3020
3021	/* This function should not be called when buffer is empty */
3022	if (RB_WARN_ON(cpu_buffer, !reader))
3023		return;
3024
3025	event = rb_reader_event(cpu_buffer);
3026
3027	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
3028		cpu_buffer->read++;
3029
3030	rb_update_read_stamp(cpu_buffer, event);
3031
3032	length = rb_event_length(event);
3033	cpu_buffer->reader_page->read += length;
3034}
3035
3036static void rb_advance_iter(struct ring_buffer_iter *iter)
3037{
3038	struct ring_buffer_per_cpu *cpu_buffer;
3039	struct ring_buffer_event *event;
3040	unsigned length;
3041
3042	cpu_buffer = iter->cpu_buffer;
3043
3044	/*
3045	 * Check if we are at the end of the buffer.
3046	 */
3047	if (iter->head >= rb_page_size(iter->head_page)) {
3048		/* discarded commits can make the page empty */
3049		if (iter->head_page == cpu_buffer->commit_page)
3050			return;
3051		rb_inc_iter(iter);
3052		return;
3053	}
3054
3055	event = rb_iter_head_event(iter);
3056
3057	length = rb_event_length(event);
3058
3059	/*
3060	 * This should not be called to advance the header if we are
3061	 * at the tail of the buffer.
3062	 */
3063	if (RB_WARN_ON(cpu_buffer,
3064		       (iter->head_page == cpu_buffer->commit_page) &&
3065		       (iter->head + length > rb_commit_index(cpu_buffer))))
3066		return;
3067
3068	rb_update_iter_read_stamp(iter, event);
3069
3070	iter->head += length;
3071
3072	/* check for end of page padding */
3073	if ((iter->head >= rb_page_size(iter->head_page)) &&
3074	    (iter->head_page != cpu_buffer->commit_page))
3075		rb_advance_iter(iter);
3076}
3077
3078static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
3079{
3080	return cpu_buffer->lost_events;
3081}
3082
3083static struct ring_buffer_event *
3084rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
3085	       unsigned long *lost_events)
3086{
3087	struct ring_buffer_event *event;
3088	struct buffer_page *reader;
3089	int nr_loops = 0;
3090
3091 again:
3092	/*
3093	 * We repeat when a time extend is encountered.
3094	 * Since the time extend is always attached to a data event,
3095	 * we should never loop more than once.
3096	 * (We never hit the following condition more than twice).
3097	 */
3098	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3099		return NULL;
3100
3101	reader = rb_get_reader_page(cpu_buffer);
3102	if (!reader)
3103		return NULL;
3104
3105	event = rb_reader_event(cpu_buffer);
3106
3107	switch (event->type_len) {
3108	case RINGBUF_TYPE_PADDING:
3109		if (rb_null_event(event))
3110			RB_WARN_ON(cpu_buffer, 1);
3111		/*
3112		 * Because the writer could be discarding every
3113		 * event it creates (which would probably be bad)
3114		 * if we were to go back to "again" then we may never
3115		 * catch up, and will trigger the warn on, or lock
3116		 * the box. Return the padding, and we will release
3117		 * the current locks, and try again.
3118		 */
3119		return event;
3120
3121	case RINGBUF_TYPE_TIME_EXTEND:
3122		/* Internal data, OK to advance */
3123		rb_advance_reader(cpu_buffer);
3124		goto again;
3125
3126	case RINGBUF_TYPE_TIME_STAMP:
3127		/* FIXME: not implemented */
3128		rb_advance_reader(cpu_buffer);
3129		goto again;
3130
3131	case RINGBUF_TYPE_DATA:
3132		if (ts) {
3133			*ts = cpu_buffer->read_stamp + event->time_delta;
3134			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3135							 cpu_buffer->cpu, ts);
3136		}
3137		if (lost_events)
3138			*lost_events = rb_lost_events(cpu_buffer);
3139		return event;
3140
3141	default:
3142		BUG();
3143	}
3144
3145	return NULL;
3146}
3147EXPORT_SYMBOL_GPL(ring_buffer_peek);
3148
3149static struct ring_buffer_event *
3150rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3151{
3152	struct ring_buffer *buffer;
3153	struct ring_buffer_per_cpu *cpu_buffer;
3154	struct ring_buffer_event *event;
3155	int nr_loops = 0;
3156
3157	cpu_buffer = iter->cpu_buffer;
3158	buffer = cpu_buffer->buffer;
3159
3160	/*
3161	 * Check if someone performed a consuming read to
3162	 * the buffer. A consuming read invalidates the iterator
3163	 * and we need to reset the iterator in this case.
3164	 */
3165	if (unlikely(iter->cache_read != cpu_buffer->read ||
3166		     iter->cache_reader_page != cpu_buffer->reader_page))
3167		rb_iter_reset(iter);
3168
3169 again:
3170	if (ring_buffer_iter_empty(iter))
3171		return NULL;
3172
3173	/*
3174	 * We repeat when a time extend is encountered.
3175	 * Since the time extend is always attached to a data event,
3176	 * we should never loop more than once.
3177	 * (We never hit the following condition more than twice).
3178	 */
3179	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3180		return NULL;
3181
3182	if (rb_per_cpu_empty(cpu_buffer))
3183		return NULL;
3184
3185	if (iter->head >= local_read(&iter->head_page->page->commit)) {
3186		rb_inc_iter(iter);
3187		goto again;
3188	}
3189
3190	event = rb_iter_head_event(iter);
3191
3192	switch (event->type_len) {
3193	case RINGBUF_TYPE_PADDING:
3194		if (rb_null_event(event)) {
3195			rb_inc_iter(iter);
3196			goto again;
3197		}
3198		rb_advance_iter(iter);
3199		return event;
3200
3201	case RINGBUF_TYPE_TIME_EXTEND:
3202		/* Internal data, OK to advance */
3203		rb_advance_iter(iter);
3204		goto again;
3205
3206	case RINGBUF_TYPE_TIME_STAMP:
3207		/* FIXME: not implemented */
3208		rb_advance_iter(iter);
3209		goto again;
3210
3211	case RINGBUF_TYPE_DATA:
3212		if (ts) {
3213			*ts = iter->read_stamp + event->time_delta;
3214			ring_buffer_normalize_time_stamp(buffer,
3215							 cpu_buffer->cpu, ts);
3216		}
3217		return event;
3218
3219	default:
3220		BUG();
3221	}
3222
3223	return NULL;
3224}
3225EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
3226
3227static inline int rb_ok_to_lock(void)
3228{
3229	/*
3230	 * If an NMI die dump is printing out the content of the ring
3231	 * buffer, do not grab locks. We also permanently disable the
3232	 * ring buffer. A one-time deal is all you get from reading
3233	 * the ring buffer from an NMI.
3234	 */
3235	if (likely(!in_nmi()))
3236		return 1;
3237
3238	tracing_off_permanent();
3239	return 0;
3240}
3241
3242/**
3243 * ring_buffer_peek - peek at the next event to be read
3244 * @buffer: The ring buffer to read
3245 * @cpu: The cpu to peek at
3246 * @ts: The timestamp counter of this event.
3247 * @lost_events: a variable to store if events were lost (may be NULL)
3248 *
3249 * This will return the event that will be read next, but does
3250 * not consume the data.
3251 */
3252struct ring_buffer_event *
3253ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
3254		 unsigned long *lost_events)
3255{
3256	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3257	struct ring_buffer_event *event;
3258	unsigned long flags;
3259	int dolock;
3260
3261	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3262		return NULL;
3263
3264	dolock = rb_ok_to_lock();
3265 again:
3266	local_irq_save(flags);
3267	if (dolock)
3268		spin_lock(&cpu_buffer->reader_lock);
3269	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3270	if (event && event->type_len == RINGBUF_TYPE_PADDING)
3271		rb_advance_reader(cpu_buffer);
3272	if (dolock)
3273		spin_unlock(&cpu_buffer->reader_lock);
3274	local_irq_restore(flags);
3275
3276	if (event && event->type_len == RINGBUF_TYPE_PADDING)
3277		goto again;
3278
3279	return event;
3280}
3281
3282/**
3283 * ring_buffer_iter_peek - peek at the next event to be read
3284 * @iter: The ring buffer iterator
3285 * @ts: The timestamp counter of this event.
3286 *
3287 * This will return the event that will be read next, but does
3288 * not increment the iterator.
3289 */
3290struct ring_buffer_event *
3291ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3292{
3293	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3294	struct ring_buffer_event *event;
3295	unsigned long flags;
3296
3297 again:
3298	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3299	event = rb_iter_peek(iter, ts);
3300	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3301
3302	if (event && event->type_len == RINGBUF_TYPE_PADDING)
3303		goto again;
3304
3305	return event;
3306}
3307
3308/**
3309 * ring_buffer_consume - return an event and consume it
3310 * @buffer: The ring buffer to get the next event from
3311 * @cpu: the cpu to read the buffer from
3312 * @ts: a variable to store the timestamp (may be NULL)
3313 * @lost_events: a variable to store if events were lost (may be NULL)
3314 *
3315 * Returns the next event in the ring buffer, and that event is consumed.
3316 * Meaning that sequential reads will keep returning a different event,
3317 * and eventually empty the ring buffer if the producer is slower.
3318 */
3319struct ring_buffer_event *
3320ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
3321		    unsigned long *lost_events)
3322{
3323	struct ring_buffer_per_cpu *cpu_buffer;
3324	struct ring_buffer_event *event = NULL;
3325	unsigned long flags;
3326	int dolock;
3327
3328	dolock = rb_ok_to_lock();
3329
3330 again:
3331	/* might be called in atomic */
3332	preempt_disable();
3333
3334	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3335		goto out;
3336
3337	cpu_buffer = buffer->buffers[cpu];
3338	local_irq_save(flags);
3339	if (dolock)
3340		spin_lock(&cpu_buffer->reader_lock);
3341
3342	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3343	if (event) {
3344		cpu_buffer->lost_events = 0;
3345		rb_advance_reader(cpu_buffer);
3346	}
3347
3348	if (dolock)
3349		spin_unlock(&cpu_buffer->reader_lock);
3350	local_irq_restore(flags);
3351
3352 out:
3353	preempt_enable();
3354
3355	if (event && event->type_len == RINGBUF_TYPE_PADDING)
3356		goto again;
3357
3358	return event;
3359}
3360EXPORT_SYMBOL_GPL(ring_buffer_consume);
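/*
 * Illustrative sketch (not part of the original file): draining one
 * cpu's buffer with consuming reads. "handle_entry" is a hypothetical
 * callback; ring_buffer_event_data() returns the payload that was
 * written at reserve time.
 */
static void sample_drain_cpu(struct ring_buffer *buffer, int cpu,
			     void (*handle_entry)(void *data, u64 ts))
{
	struct ring_buffer_event *event;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts, NULL)) != NULL)
		handle_entry(ring_buffer_event_data(event), ts);
}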
3361
3362/**
3363 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
3364 * @buffer: The ring buffer to read from
3365 * @cpu: The cpu buffer to iterate over
3366 *
3367 * This performs the initial preparations necessary to iterate
3368 * through the buffer.  Memory is allocated, buffer recording
3369 * is disabled, and the iterator pointer is returned to the caller.
3370 *
3371 * Disabling buffer recording prevents the reading from being
3372 * corrupted. This is not a consuming read, so a producer is not
3373 * expected.
3374 *
3375 * After a sequence of ring_buffer_read_prepare calls, the user is
3376 * expected to make at least one call to ring_buffer_prepare_sync.
3377 * Afterwards, ring_buffer_read_start is invoked to get things going
3378 * for real.
3379 *
3380 * This overall must be paired with ring_buffer_read_finish.
3381 */
3382struct ring_buffer_iter *
3383ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
3384{
3385	struct ring_buffer_per_cpu *cpu_buffer;
3386	struct ring_buffer_iter *iter;
3387
3388	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3389		return NULL;
3390
3391	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3392	if (!iter)
3393		return NULL;
3394
3395	cpu_buffer = buffer->buffers[cpu];
3396
3397	iter->cpu_buffer = cpu_buffer;
3398
3399	atomic_inc(&cpu_buffer->record_disabled);
3400
3401	return iter;
3402}
3403EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
3404
3405/**
3406 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
3407 *
3408 * All previously invoked ring_buffer_read_prepare calls to prepare
3409 * iterators will be synchronized.  Afterwards, ring_buffer_read_start
3410 * calls on those iterators are allowed.
3411 */
3412void
3413ring_buffer_read_prepare_sync(void)
3414{
3415	synchronize_sched();
3416}
3417EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
3418
3419/**
3420 * ring_buffer_read_start - start a non consuming read of the buffer
3421 * @iter: The iterator returned by ring_buffer_read_prepare
3422 *
3423 * This finalizes the startup of an iteration through the buffer.
3424 * The iterator comes from a call to ring_buffer_read_prepare and
3425 * an intervening ring_buffer_read_prepare_sync must have been
3426 * performed.
3427 *
3428 * Must be paired with ring_buffer_read_finish.
3429 */
3430void
3431ring_buffer_read_start(struct ring_buffer_iter *iter)
3432{
3433	struct ring_buffer_per_cpu *cpu_buffer;
3434	unsigned long flags;
3435
3436	if (!iter)
3437		return;
3438
3439	cpu_buffer = iter->cpu_buffer;
3440
3441	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3442	arch_spin_lock(&cpu_buffer->lock);
3443	rb_iter_reset(iter);
3444	arch_spin_unlock(&cpu_buffer->lock);
3445	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3446}
3447EXPORT_SYMBOL_GPL(ring_buffer_read_start);
3448
3449/**
3450 * ring_buffer_read_finish - finish reading the iterator of the buffer
3451 * @iter: The iterator retrieved by ring_buffer_read_prepare
3452 *
3453 * This re-enables the recording to the buffer, and frees the
3454 * iterator.
3455 */
3456void
3457ring_buffer_read_finish(struct ring_buffer_iter *iter)
3458{
3459	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3460
3461	atomic_dec(&cpu_buffer->record_disabled);
3462	kfree(iter);
3463}
3464EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
3465
3466/**
3467 * ring_buffer_read - read the next item in the ring buffer by the iterator
3468 * @iter: The ring buffer iterator
3469 * @ts: The time stamp of the event read.
3470 *
3471 * This reads the next event in the ring buffer and increments the iterator.
3472 */
3473struct ring_buffer_event *
3474ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
3475{
3476	struct ring_buffer_event *event;
3477	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3478	unsigned long flags;
3479
3480	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3481 again:
3482	event = rb_iter_peek(iter, ts);
3483	if (!event)
3484		goto out;
3485
3486	if (event->type_len == RINGBUF_TYPE_PADDING)
3487		goto again;
3488
3489	rb_advance_iter(iter);
3490 out:
3491	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3492
3493	return event;
3494}
3495EXPORT_SYMBOL_GPL(ring_buffer_read);
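/*
 * Illustrative sketch (not part of the original file): the full
 * non-consuming iteration sequence described in the kerneldoc above:
 * prepare, sync, start, read, finish.
 */
static void sample_iterate_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_prepare(buffer, cpu);
	if (!iter)
		return;

	/* Wait for in-flight writers before starting to iterate */
	ring_buffer_read_prepare_sync();
	ring_buffer_read_start(iter);

	while ((event = ring_buffer_read(iter, &ts)) != NULL)
		pr_info("event at %llu, length %u\n",
			(unsigned long long)ts, ring_buffer_event_length(event));

	/* Re-enables recording and frees the iterator */
	ring_buffer_read_finish(iter);
}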
3496
3497/**
3498 * ring_buffer_size - return the size of the ring buffer (in bytes)
3499 * @buffer: The ring buffer.
3500 */
3501unsigned long ring_buffer_size(struct ring_buffer *buffer)
3502{
3503	return BUF_PAGE_SIZE * buffer->pages;
3504}
3505EXPORT_SYMBOL_GPL(ring_buffer_size);
3506
3507static void
3508rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
3509{
3510	rb_head_page_deactivate(cpu_buffer);
3511
3512	cpu_buffer->head_page
3513		= list_entry(cpu_buffer->pages, struct buffer_page, list);
3514	local_set(&cpu_buffer->head_page->write, 0);
3515	local_set(&cpu_buffer->head_page->entries, 0);
3516	local_set(&cpu_buffer->head_page->page->commit, 0);
3517
3518	cpu_buffer->head_page->read = 0;
3519
3520	cpu_buffer->tail_page = cpu_buffer->head_page;
3521	cpu_buffer->commit_page = cpu_buffer->head_page;
3522
3523	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
3524	local_set(&cpu_buffer->reader_page->write, 0);
3525	local_set(&cpu_buffer->reader_page->entries, 0);
3526	local_set(&cpu_buffer->reader_page->page->commit, 0);
3527	cpu_buffer->reader_page->read = 0;
3528
3529	local_set(&cpu_buffer->commit_overrun, 0);
3530	local_set(&cpu_buffer->overrun, 0);
3531	local_set(&cpu_buffer->entries, 0);
3532	local_set(&cpu_buffer->committing, 0);
3533	local_set(&cpu_buffer->commits, 0);
3534	cpu_buffer->read = 0;
3535
3536	cpu_buffer->write_stamp = 0;
3537	cpu_buffer->read_stamp = 0;
3538
3539	cpu_buffer->lost_events = 0;
3540	cpu_buffer->last_overrun = 0;
3541
3542	rb_head_page_activate(cpu_buffer);
3543}
3544
3545/**
3546 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
3547 * @buffer: The ring buffer to reset a per cpu buffer of
3548 * @cpu: The CPU buffer to be reset
3549 */
3550void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
3551{
3552	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3553	unsigned long flags;
3554
3555	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3556		return;
3557
3558	atomic_inc(&cpu_buffer->record_disabled);
3559
3560	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3561
3562	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
3563		goto out;
3564
3565	arch_spin_lock(&cpu_buffer->lock);
3566
3567	rb_reset_cpu(cpu_buffer);
3568
3569	arch_spin_unlock(&cpu_buffer->lock);
3570
3571 out:
3572	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3573
3574	atomic_dec(&cpu_buffer->record_disabled);
3575}
3576EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
3577
3578/**
3579 * ring_buffer_reset - reset a ring buffer
3580 * @buffer: The ring buffer to reset all cpu buffers
3581 */
3582void ring_buffer_reset(struct ring_buffer *buffer)
3583{
3584	int cpu;
3585
3586	for_each_buffer_cpu(buffer, cpu)
3587		ring_buffer_reset_cpu(buffer, cpu);
3588}
3589EXPORT_SYMBOL_GPL(ring_buffer_reset);
3590
3591/**
3592 * ring_buffer_empty - is the ring buffer empty?
3593 * @buffer: The ring buffer to test
3594 */
3595int ring_buffer_empty(struct ring_buffer *buffer)
3596{
3597	struct ring_buffer_per_cpu *cpu_buffer;
3598	unsigned long flags;
3599	int dolock;
3600	int cpu;
3601	int ret;
3602
3603	dolock = rb_ok_to_lock();
3604
3605	/* yes this is racy, but if you don't like the race, lock the buffer */
3606	for_each_buffer_cpu(buffer, cpu) {
3607		cpu_buffer = buffer->buffers[cpu];
3608		local_irq_save(flags);
3609		if (dolock)
3610			spin_lock(&cpu_buffer->reader_lock);
3611		ret = rb_per_cpu_empty(cpu_buffer);
3612		if (dolock)
3613			spin_unlock(&cpu_buffer->reader_lock);
3614		local_irq_restore(flags);
3615
3616		if (!ret)
3617			return 0;
3618	}
3619
3620	return 1;
3621}
3622EXPORT_SYMBOL_GPL(ring_buffer_empty);
3623
3624/**
3625 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
3626 * @buffer: The ring buffer
3627 * @cpu: The CPU buffer to test
3628 */
3629int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
3630{
3631	struct ring_buffer_per_cpu *cpu_buffer;
3632	unsigned long flags;
3633	int dolock;
3634	int ret;
3635
3636	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3637		return 1;
3638
3639	dolock = rb_ok_to_lock();
3640
3641	cpu_buffer = buffer->buffers[cpu];
3642	local_irq_save(flags);
3643	if (dolock)
3644		spin_lock(&cpu_buffer->reader_lock);
3645	ret = rb_per_cpu_empty(cpu_buffer);
3646	if (dolock)
3647		spin_unlock(&cpu_buffer->reader_lock);
3648	local_irq_restore(flags);
3649
3650	return ret;
3651}
3652EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
3653
3654#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3655/**
3656 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
3657 * @buffer_a: One buffer to swap with
3658 * @buffer_b: The other buffer to swap with
3659 *
3660 * This function is useful for tracers that want to take a "snapshot"
3661 * of a CPU buffer and have another backup buffer lying around.
3662 * It is expected that the tracer handles the cpu buffer not being
3663 * used at the moment.
3664 */
3665int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
3666			 struct ring_buffer *buffer_b, int cpu)
3667{
3668	struct ring_buffer_per_cpu *cpu_buffer_a;
3669	struct ring_buffer_per_cpu *cpu_buffer_b;
3670	int ret = -EINVAL;
3671
3672	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
3673	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
3674		goto out;
3675
3676	/* At least make sure the two buffers are somewhat the same */
3677	if (buffer_a->pages != buffer_b->pages)
3678		goto out;
3679
3680	ret = -EAGAIN;
3681
3682	if (ring_buffer_flags != RB_BUFFERS_ON)
3683		goto out;
3684
3685	if (atomic_read(&buffer_a->record_disabled))
3686		goto out;
3687
3688	if (atomic_read(&buffer_b->record_disabled))
3689		goto out;
3690
3691	cpu_buffer_a = buffer_a->buffers[cpu];
3692	cpu_buffer_b = buffer_b->buffers[cpu];
3693
3694	if (atomic_read(&cpu_buffer_a->record_disabled))
3695		goto out;
3696
3697	if (atomic_read(&cpu_buffer_b->record_disabled))
3698		goto out;
3699
3700	/*
3701	 * We can't do a synchronize_sched here because this
3702	 * function can be called in atomic context.
3703	 * Normally this will be called from the same CPU as cpu.
3704	 * If not it's up to the caller to protect this.
3705	 */
3706	atomic_inc(&cpu_buffer_a->record_disabled);
3707	atomic_inc(&cpu_buffer_b->record_disabled);
3708
3709	ret = -EBUSY;
3710	if (local_read(&cpu_buffer_a->committing))
3711		goto out_dec;
3712	if (local_read(&cpu_buffer_b->committing))
3713		goto out_dec;
3714
3715	buffer_a->buffers[cpu] = cpu_buffer_b;
3716	buffer_b->buffers[cpu] = cpu_buffer_a;
3717
3718	cpu_buffer_b->buffer = buffer_a;
3719	cpu_buffer_a->buffer = buffer_b;
3720
3721	ret = 0;
3722
3723out_dec:
3724	atomic_dec(&cpu_buffer_a->record_disabled);
3725	atomic_dec(&cpu_buffer_b->record_disabled);
3726out:
3727	return ret;
3728}
3729EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
3730#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
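/*
 * Illustrative sketch (not part of the original file): the "snapshot"
 * pattern mentioned in the kerneldoc above, swapping the live cpu buffer
 * with a spare one so the snapshot can be read at leisure. Only useful
 * when CONFIG_RING_BUFFER_ALLOW_SWAP is enabled.
 */
static int sample_snapshot_cpu(struct ring_buffer *live,
			       struct ring_buffer *spare, int cpu)
{
	int ret;

	ret = ring_buffer_swap_cpu(live, spare, cpu);
	if (ret)
		return ret;	/* -EINVAL, -EAGAIN or -EBUSY */

	/* The old live data now sits in "spare" and can be read from there */
	return 0;
}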
3731
3732/**
3733 * ring_buffer_alloc_read_page - allocate a page to read from buffer
3734 * @buffer: the buffer to allocate for.
 * @cpu: the cpu buffer to allocate a read page for
3735 *
3736 * This function is used in conjunction with ring_buffer_read_page.
3737 * When reading a full page from the ring buffer, these functions
3738 * can be used to speed up the process. The calling function should
3739 * allocate a few pages first with this function. Then when it
3740 * needs to get pages from the ring buffer, it passes the result
3741 * of this function into ring_buffer_read_page, which will swap
3742 * the page that was allocated, with the read page of the buffer.
3743 *
3744 * Returns:
3745 *  The page allocated, or NULL on error.
3746 */
3747void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
3748{
3749	struct buffer_data_page *bpage;
3750	struct page *page;
3751
3752	page = alloc_pages_node(cpu_to_node(cpu),
3753				GFP_KERNEL | __GFP_NORETRY, 0);
3754	if (!page)
3755		return NULL;
3756
3757	bpage = page_address(page);
3758
3759	rb_init_page(bpage);
3760
3761	return bpage;
3762}
3763EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
3764
3765/**
3766 * ring_buffer_free_read_page - free an allocated read page
3767 * @buffer: the buffer the page was allocated for
3768 * @data: the page to free
3769 *
3770 * Free a page allocated from ring_buffer_alloc_read_page.
3771 */
3772void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
3773{
3774	free_page((unsigned long)data);
3775}
3776EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
3777
3778/**
3779 * ring_buffer_read_page - extract a page from the ring buffer
3780 * @buffer: buffer to extract from
3781 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
3782 * @len: amount to extract
3783 * @cpu: the cpu of the buffer to extract
3784 * @full: should the extraction only happen when the page is full.
3785 *
3786 * This function will pull out a page from the ring buffer and consume it.
3787 * @data_page must be the address of the variable that was returned
3788 * from ring_buffer_alloc_read_page. This is because the page might be used
3789 * to swap with a page in the ring buffer.
3790 *
3791 * for example:
3792 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
3793 *	if (!rpage)
3794 *		return error;
3795 *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
3796 *	if (ret >= 0)
3797 *		process_page(rpage, ret);
3798 *
3799 * When @full is set, the function will not return any data unless
3800 * the writer is off the reader page.
3801 *
3802 * Note: it is up to the calling functions to handle sleeps and wakeups.
3803 *  The ring buffer can be used anywhere in the kernel and can not
3804 *  blindly call wake_up. The layer that uses the ring buffer must be
3805 *  responsible for that.
3806 *
3807 * Returns:
3808 *  >=0 if data has been transferred, returns the offset of consumed data.
3809 *  <0 if no data has been transferred.
3810 */
3811int ring_buffer_read_page(struct ring_buffer *buffer,
3812			  void **data_page, size_t len, int cpu, int full)
3813{
3814	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3815	struct ring_buffer_event *event;
3816	struct buffer_data_page *bpage;
3817	struct buffer_page *reader;
3818	unsigned long missed_events;
3819	unsigned long flags;
3820	unsigned int commit;
3821	unsigned int read;
3822	u64 save_timestamp;
3823	int ret = -1;
3824
3825	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3826		goto out;
3827
3828	/*
3829	 * If len is not big enough to hold the page header, then
3830	 * we can not copy anything.
3831	 */
3832	if (len <= BUF_PAGE_HDR_SIZE)
3833		goto out;
3834
3835	len -= BUF_PAGE_HDR_SIZE;
3836
3837	if (!data_page)
3838		goto out;
3839
3840	bpage = *data_page;
3841	if (!bpage)
3842		goto out;
3843
3844	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3845
3846	reader = rb_get_reader_page(cpu_buffer);
3847	if (!reader)
3848		goto out_unlock;
3849
3850	event = rb_reader_event(cpu_buffer);
3851
3852	read = reader->read;
3853	commit = rb_page_commit(reader);
3854
3855	/* Check if any events were dropped */
3856	missed_events = cpu_buffer->lost_events;
3857
3858	/*
3859	 * If this page has been partially read or
3860	 * if len is not big enough to read the rest of the page or
3861	 * a writer is still on the page, then
3862	 * we must copy the data from the page to the buffer.
3863	 * Otherwise, we can simply swap the page with the one passed in.
3864	 */
3865	if (read || (len < (commit - read)) ||
3866	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
3867		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
3868		unsigned int rpos = read;
3869		unsigned int pos = 0;
3870		unsigned int size;
3871
3872		if (full)
3873			goto out_unlock;
3874
3875		if (len > (commit - read))
3876			len = (commit - read);
3877
3878		/* Always keep the time extend and data together */
3879		size = rb_event_ts_length(event);
3880
3881		if (len < size)
3882			goto out_unlock;
3883
3884		/* save the current timestamp, since the user will need it */
3885		save_timestamp = cpu_buffer->read_stamp;
3886
3887		/* Need to copy one event at a time */
3888		do {
3889			/* We need the size of one event, because
3890			 * rb_advance_reader only advances by one event,
3891			 * whereas rb_event_ts_length may include the size of
3892			 * one or two events.
3893			 * We have already ensured there's enough space if this
3894			 * is a time extend. */
3895			size = rb_event_length(event);
3896			memcpy(bpage->data + pos, rpage->data + rpos, size);
3897
3898			len -= size;
3899
3900			rb_advance_reader(cpu_buffer);
3901			rpos = reader->read;
3902			pos += size;
3903
3904			if (rpos >= commit)
3905				break;
3906
3907			event = rb_reader_event(cpu_buffer);
3908			/* Always keep the time extend and data together */
3909			size = rb_event_ts_length(event);
3910		} while (len >= size);
3911
3912		/* update bpage */
3913		local_set(&bpage->commit, pos);
3914		bpage->time_stamp = save_timestamp;
3915
3916		/* we copied everything to the beginning */
3917		read = 0;
3918	} else {
3919		/* update the entry counter */
3920		cpu_buffer->read += rb_page_entries(reader);
3921
3922		/* swap the pages */
3923		rb_init_page(bpage);
3924		bpage = reader->page;
3925		reader->page = *data_page;
3926		local_set(&reader->write, 0);
3927		local_set(&reader->entries, 0);
3928		reader->read = 0;
3929		*data_page = bpage;
3930
3931		/*
3932		 * Use the real_end for the data size,
3933		 * This gives us a chance to store the lost events
3934		 * on the page.
3935		 */
3936		if (reader->real_end)
3937			local_set(&bpage->commit, reader->real_end);
3938	}
3939	ret = read;
3940
3941	cpu_buffer->lost_events = 0;
3942
3943	commit = local_read(&bpage->commit);
3944	/*
3945	 * Set a flag in the commit field if we lost events
3946	 */
3947	if (missed_events) {
3948		/* If there is room at the end of the page to save the
3949		 * missed events, then record it there.
3950		 */
3951		if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
3952			memcpy(&bpage->data[commit], &missed_events,
3953			       sizeof(missed_events));
3954			local_add(RB_MISSED_STORED, &bpage->commit);
3955			commit += sizeof(missed_events);
3956		}
3957		local_add(RB_MISSED_EVENTS, &bpage->commit);
3958	}
3959
3960	/*
3961	 * This page may be off to user land. Zero it out here.
3962	 */
3963	if (commit < BUF_PAGE_SIZE)
3964		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
3965
3966 out_unlock:
3967	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3968
3969 out:
3970	return ret;
3971}
3972EXPORT_SYMBOL_GPL(ring_buffer_read_page);
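/*
 * Illustrative sketch (not part of the original file): pulling one page
 * out of a cpu buffer with the alloc/read/free page helpers above.
 * "process_page" is a hypothetical consumer of the raw page data.
 */
static int sample_read_full_page(struct ring_buffer *buffer, int cpu,
				 void (*process_page)(void *page, int len))
{
	void *page;
	int ret;

	page = ring_buffer_alloc_read_page(buffer, cpu);
	if (!page)
		return -ENOMEM;

	/* Pass the address of the page pointer: it may be swapped in place */
	ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0);
	if (ret >= 0)
		process_page(page, ret);

	ring_buffer_free_read_page(buffer, page);
	return ret;
}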
3973
3974#ifdef CONFIG_TRACING
3975static ssize_t
3976rb_simple_read(struct file *filp, char __user *ubuf,
3977	       size_t cnt, loff_t *ppos)
3978{
3979	unsigned long *p = filp->private_data;
3980	char buf[64];
3981	int r;
3982
3983	if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
3984		r = sprintf(buf, "permanently disabled\n");
3985	else
3986		r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
3987
3988	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3989}
3990
3991static ssize_t
3992rb_simple_write(struct file *filp, const char __user *ubuf,
3993		size_t cnt, loff_t *ppos)
3994{
3995	unsigned long *p = filp->private_data;
3996	unsigned long val;
3997	int ret;
3998
3999	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4000	if (ret)
4001		return ret;
4002
4003	if (val)
4004		set_bit(RB_BUFFERS_ON_BIT, p);
4005	else
4006		clear_bit(RB_BUFFERS_ON_BIT, p);
4007
4008	(*ppos)++;
4009
4010	return cnt;
4011}
4012
4013static const struct file_operations rb_simple_fops = {
4014	.open		= tracing_open_generic,
4015	.read		= rb_simple_read,
4016	.write		= rb_simple_write,
4017	.llseek		= default_llseek,
4018};
4019
4020
4021static __init int rb_init_debugfs(void)
4022{
4023	struct dentry *d_tracer;
4024
4025	d_tracer = tracing_init_dentry();
4026
4027	trace_create_file("tracing_on", 0644, d_tracer,
4028			    &ring_buffer_flags, &rb_simple_fops);
4029
4030	return 0;
4031}
4032
4033fs_initcall(rb_init_debugfs);
4034#endif
4035
4036#ifdef CONFIG_HOTPLUG_CPU
4037static int rb_cpu_notify(struct notifier_block *self,
4038			 unsigned long action, void *hcpu)
4039{
4040	struct ring_buffer *buffer =
4041		container_of(self, struct ring_buffer, cpu_notify);
4042	long cpu = (long)hcpu;
4043
4044	switch (action) {
4045	case CPU_UP_PREPARE:
4046	case CPU_UP_PREPARE_FROZEN:
4047		if (cpumask_test_cpu(cpu, buffer->cpumask))
4048			return NOTIFY_OK;
4049
4050		buffer->buffers[cpu] =
4051			rb_allocate_cpu_buffer(buffer, cpu);
4052		if (!buffer->buffers[cpu]) {
4053			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
4054			     cpu);
4055			return NOTIFY_OK;
4056		}
4057		smp_wmb();
4058		cpumask_set_cpu(cpu, buffer->cpumask);
4059		break;
4060	case CPU_DOWN_PREPARE:
4061	case CPU_DOWN_PREPARE_FROZEN:
4062		/*
4063		 * Do nothing.
4064		 *  If we were to free the buffer, then the user would
4065		 *  lose any trace that was in the buffer.
4066		 */
4067		break;
4068	default:
4069		break;
4070	}
4071	return NOTIFY_OK;
4072}
4073#endif