/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/trace_events.h>
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/trace_seq.h>
#include <linux/spinlock.h>
#include <linux/irq_work.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>	/* for self test */
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>

#include <asm/local.h>

static void update_pages_handler(struct work_struct *work);

/*
 * The ring buffer header is special. We must manually keep it up to date.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	trace_seq_puts(s, "# compressed entry header\n");
	trace_seq_puts(s, "\ttype_len    :    5 bits\n");
	trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
	trace_seq_puts(s, "\tarray       :   32 bits\n");
	trace_seq_putc(s, '\n');
	trace_seq_printf(s, "\tpadding     : type == %d\n",
			 RINGBUF_TYPE_PADDING);
	trace_seq_printf(s, "\ttime_extend : type == %d\n",
			 RINGBUF_TYPE_TIME_EXTEND);
	trace_seq_printf(s, "\tdata max type_len  == %d\n",
			 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return !trace_seq_has_overflowed(s);
}
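
/*
 * For reference, the layout printed above matches the event header
 * declared in include/linux/ring_buffer.h. A sketch, not a
 * redefinition (the real declaration also carries kmemcheck
 * annotations):
 *
 *	struct ring_buffer_event {
 *		u32	type_len:5, time_delta:27;
 *		u32	array[];
 *	};
 *
 * A type_len of 1..RINGBUF_TYPE_DATA_TYPE_LEN_MAX encodes the data
 * length in 4 byte units; zero means the length lives in array[0].
 */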

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on.  A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do
 * whatever it wants with that page. The writer will never write to that page
 * again (as long as it is out of the ring buffer).
 *
 * Here's some silly ASCII art.
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *                   |   |-->|   |-->|   |
 *                   +---+   +---+   +---+
 *                     ^               |
 *                     |               |
 *                     +---------------+
 *
 *
 *   +------+
 *   |reader|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |-->|   |-->|   |
 *      |            +---+   +---+   +---+
 *      |                              |
 *      |                              |
 *      +------------------------------+
 *
 *
 *   +------+
 *   |buffer|          RING BUFFER
 *   |page  |------------------v
 *   +------+        +---+   +---+   +---+
 *      ^            |   |   |   |-->|   |
 *      |   New      +---+   +---+   +---+
 *      |  Reader------^               |
 *      |   page                       |
 *      +------------------------------+
 *
 *
 * After we make this swap, the reader can hand this page off to the splice
 * code and be done with it. It can even allocate a new page if it needs to
 * and swap that into the ring buffer.
 *
 * We will be using cmpxchg soon to make all this lockless.
 *
 */
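
/*
 * A minimal sketch of the swap described above (hypothetical pointer
 * surgery, ignoring the flag bits that rb_head_page_replace() below
 * handles):
 *
 *	reader->list.next = head->list.next;	// reader points past head
 *	reader->list.prev = head->list.prev;	// and back at head's prev
 *	head->list.prev->next = &reader->list;	// splice the reader in;
 *	head->list.next->prev = &reader->list;	// head is now loose
 *
 * The real code must do the first relink with cmpxchg() so that a
 * writer moving the head page cannot slip in between the steps.
 */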

/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF		(1 << 20)

#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */

#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
# define RB_FORCE_8BYTE_ALIGNMENT	0
# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
#else
# define RB_FORCE_8BYTE_ALIGNMENT	1
# define RB_ARCH_ALIGNMENT		8U
#endif

#define RB_ALIGN_DATA		__aligned(RB_ARCH_ALIGNMENT)

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};

#define skip_time_extend(event) \
	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
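
/*
 * A time extend carries the upper bits of a delta that does not fit in
 * the 27 bit time_delta field. A hedged sketch of the decode:
 *
 *	delta  = event->time_delta;			// low 27 bits
 *	delta += (u64)event->array[0] << TS_SHIFT;	// upper bits
 *	event  = skip_time_extend(event);		// data event follows
 */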
 148
 
 
 
static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	/* padding has a NULL time_delta */
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}

/*
 * Return the length of the given event. Will return
 * the length of the time extend if the event is a
 * time extend.
 */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return  event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		BUG();
	}
	/* not hit */
	return 0;
}

/*
 * Return total length of time extend and data,
 *   or just the event length for all other events.
 */
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
	unsigned len = 0;

	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
		/* time extends include the data event after it */
		len = RB_LEN_TIME_EXTEND;
		event = skip_time_extend(event);
	}
	return len + rb_event_length(event);
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 *
 * Returns the size of the data load of a data event.
 * If the event is something other than a data event, it
 * returns the size of the event itself. With the exception
 * of a TIME EXTEND, where it still returns the size of the
 * data load of the data event after it.
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
		event = skip_time_extend(event);

	length = rb_event_length(event);
	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		return length;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
		length -= sizeof(event->array[0]);
	return length;
}
EXPORT_SYMBOL_GPL(ring_buffer_event_length);

/* inline for ring buffer fast paths */
static void *
rb_event_data(struct ring_buffer_event *event)
{
	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
		event = skip_time_extend(event);
	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
	/* If length is in len field, then array[0] has the data */
	if (event->type_len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}
EXPORT_SYMBOL_GPL(ring_buffer_event_data);
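
/*
 * Example (a hedged sketch, not part of the original file): consuming
 * one event and reading its payload with the accessors above.
 */
#if 0
static void example_read_one(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	/* ring_buffer_consume() is declared in linux/ring_buffer.h */
	event = ring_buffer_consume(buffer, cpu, &ts, NULL);
	if (!event)
		return;

	pr_info("event: len=%u data=%p\n",
		ring_buffer_event_length(event),
		ring_buffer_event_data(event));
}
#endif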

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

/* Flag when events were overwritten */
#define RB_MISSED_EVENTS	(1 << 31)
/* Missed count stored at end */
#define RB_MISSED_STORED	(1 << 30)

struct buffer_data_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 commit;	/* write committed index */
	unsigned char	 data[] RB_ALIGN_DATA;	/* data of buffer page */
};

/*
 * Note, the buffer_page list must be first. The buffer pages
 * are allocated in cache lines, which means that each buffer
 * page will be at the beginning of a cache line, and thus
 * the least significant bits will be zero. We use this to
 * add flags in the list struct pointers, to make the ring buffer
 * lockless.
 */
struct buffer_page {
	struct list_head list;		/* list of buffer pages */
	local_t		 write;		/* index for next write */
	unsigned	 read;		/* index for next read */
	local_t		 entries;	/* entries on this page */
	unsigned long	 real_end;	/* real end of data */
	struct buffer_data_page *page;	/* Actual data page */
};

/*
 * The buffer page counters, write and entries, must be reset
 * atomically when crossing page boundaries. To synchronize this
 * update, two counters are inserted into the number. One is
 * the actual counter for the write position or count on the page.
 *
 * The other is a counter of updaters. Before an update happens
 * the update partition of the counter is incremented. This will
 * allow the updater to update the counter atomically.
 *
 * The counter is 20 bits, and the state data is 12.
 */
#define RB_WRITE_MASK		0xfffff
#define RB_WRITE_INTCNT		(1 << 20)
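
/*
 * Example: if write == 0x00300014, the page has seen three nested
 * updater increments (0x00300014 >> 20 == 3) and the next write lands
 * at offset 0x14 (0x00300014 & RB_WRITE_MASK).
 */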

static void rb_init_page(struct buffer_data_page *bpage)
{
	local_set(&bpage->commit, 0);
}

/**
 * ring_buffer_page_len - the size of data on the page.
 * @page: The page to read
 *
 * Returns the amount of data on the page, including buffer page header.
 */
size_t ring_buffer_page_len(void *page)
{
	return local_read(&((struct buffer_data_page *)page)->commit)
		+ BUF_PAGE_HDR_SIZE;
}

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static void free_buffer_page(struct buffer_page *bpage)
{
	free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}
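
/*
 * Example: with TS_SHIFT == 27, any delta of 2^27 ns (~134 ms at 1 ns
 * clock resolution) or more fails test_time_stamp() and forces the
 * writer to emit a time extend event ahead of the data event.
 */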

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;

	trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			 "offset:0;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)sizeof(field.time_stamp),
			 (unsigned int)is_signed_type(u64));

	trace_seq_printf(s, "\tfield: local_t commit;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 (unsigned int)sizeof(field.commit),
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: int overwrite;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), commit),
			 1,
			 (unsigned int)is_signed_type(long));

	trace_seq_printf(s, "\tfield: char data;\t"
			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
			 (unsigned int)offsetof(typeof(field), data),
			 (unsigned int)BUF_PAGE_SIZE,
			 (unsigned int)is_signed_type(char));

	return !trace_seq_has_overflowed(s);
}

struct rb_irq_work {
	struct irq_work			work;
	wait_queue_head_t		waiters;
	wait_queue_head_t		full_waiters;
	bool				waiters_pending;
	bool				full_waiters_pending;
	bool				wakeup_full;
};

/*
 * Structure to hold event state and handle nested events.
 */
struct rb_event_info {
	u64			ts;
	u64			delta;
	unsigned long		length;
	struct buffer_page	*tail_page;
	int			add_timestamp;
};

/*
 * Used for which event context the event is in.
 *  NMI     = 0
 *  IRQ     = 1
 *  SOFTIRQ = 2
 *  NORMAL  = 3
 *
 * See trace_recursive_lock() comment below for more details.
 */
enum {
	RB_CTX_NMI,
	RB_CTX_IRQ,
	RB_CTX_SOFTIRQ,
	RB_CTX_NORMAL,
	RB_CTX_MAX
};

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	atomic_t			record_disabled;
	struct ring_buffer		*buffer;
	raw_spinlock_t			reader_lock;	/* serialize readers */
	arch_spinlock_t			lock;
	struct lock_class_key		lock_key;
	unsigned int			nr_pages;
	unsigned int			current_context;
	struct list_head		*pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			lost_events;
	unsigned long			last_overrun;
	local_t				entries_bytes;
	local_t				entries;
	local_t				overrun;
	local_t				commit_overrun;
	local_t				dropped_events;
	local_t				committing;
	local_t				commits;
	unsigned long			read;
	unsigned long			read_bytes;
	u64				write_stamp;
	u64				read_stamp;
	/* ring buffer pages to update, > 0 to add, < 0 to remove */
	int				nr_pages_to_update;
	struct list_head		new_pages; /* new pages to add */
	struct work_struct		update_pages_work;
	struct completion		update_done;

	struct rb_irq_work		irq_work;
};

struct ring_buffer {
	unsigned			flags;
	int				cpus;
	atomic_t			record_disabled;
	atomic_t			resize_disabled;
	cpumask_var_t			cpumask;

	struct lock_class_key		*reader_lock_key;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;

#ifdef CONFIG_HOTPLUG_CPU
	struct notifier_block		cpu_notify;
#endif
	u64				(*clock)(void);

	struct rb_irq_work		irq_work;
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	struct buffer_page		*head_page;
	struct buffer_page		*cache_reader_page;
	unsigned long			cache_read;
	u64				read_stamp;
};

/*
 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 *
 * Schedules a delayed work to wake up any task that is blocked on the
 * ring buffer waiters queue.
 */
static void rb_wake_up_waiters(struct irq_work *work)
{
	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);

	wake_up_all(&rbwork->waiters);
	if (rbwork->wakeup_full) {
		rbwork->wakeup_full = false;
		wake_up_all(&rbwork->full_waiters);
	}
}

/**
 * ring_buffer_wait - wait for input to the ring buffer
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @full: wait until a full page is available, if @cpu != RING_BUFFER_ALL_CPUS
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 */
int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
{
	struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
	DEFINE_WAIT(wait);
	struct rb_irq_work *work;
	int ret = 0;

	/*
	 * Depending on what the caller is waiting for, either any
	 * data in any cpu buffer, or a specific buffer, put the
	 * caller on the appropriate wait queue.
	 */
	if (cpu == RING_BUFFER_ALL_CPUS) {
		work = &buffer->irq_work;
		/* Full only makes sense on per cpu reads */
		full = false;
	} else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -ENODEV;
		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}

	while (true) {
		if (full)
			prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
		else
			prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);

		/*
		 * The events can happen in critical sections where
		 * checking a work queue can cause deadlocks.
		 * After adding a task to the queue, this flag is set
		 * only to notify events to try to wake up the queue
		 * using irq_work.
		 *
		 * We don't clear it even if the buffer is no longer
		 * empty. The flag only causes the next event to run
		 * irq_work to do the work queue wake up. The worst
		 * that can happen if we race with !trace_empty() is that
		 * an event will cause an irq_work to try to wake up
		 * an empty queue.
		 *
		 * There's no reason to protect this flag either, as
		 * the work queue and irq_work logic will do the necessary
		 * synchronization for the wake ups. The only thing
		 * that is necessary is that the wake up happens after
		 * a task has been queued. It's OK for spurious wake ups.
		 */
		if (full)
			work->full_waiters_pending = true;
		else
			work->waiters_pending = true;

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
			break;

		if (cpu != RING_BUFFER_ALL_CPUS &&
		    !ring_buffer_empty_cpu(buffer, cpu)) {
			unsigned long flags;
			bool pagebusy;

			if (!full)
				break;

			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

			if (!pagebusy)
				break;
		}

		schedule();
	}

	if (full)
		finish_wait(&work->full_waiters, &wait);
	else
		finish_wait(&work->waiters, &wait);

	return ret;
}
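
/*
 * Example (a hedged sketch): a reader thread blocking until per-cpu
 * data arrives, then draining the buffer. process() is hypothetical.
 */
#if 0
static int example_reader(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	int ret;

	ret = ring_buffer_wait(buffer, cpu, false);
	if (ret)
		return ret;	/* -EINTR or -ENODEV */

	while ((event = ring_buffer_consume(buffer, cpu, NULL, NULL)))
		process(ring_buffer_event_data(event));

	return 0;
}
#endif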

/**
 * ring_buffer_poll_wait - poll on buffer input
 * @buffer: buffer to wait on
 * @cpu: the cpu buffer to wait on
 * @filp: the file descriptor
 * @poll_table: The poll descriptor
 *
 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 *
 * Returns POLLIN | POLLRDNORM if data exists in the buffers,
 * zero otherwise.
 */
int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
			  struct file *filp, poll_table *poll_table)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct rb_irq_work *work;

	if (cpu == RING_BUFFER_ALL_CPUS)
		work = &buffer->irq_work;
	else {
		if (!cpumask_test_cpu(cpu, buffer->cpumask))
			return -EINVAL;

		cpu_buffer = buffer->buffers[cpu];
		work = &cpu_buffer->irq_work;
	}

	poll_wait(filp, &work->waiters, poll_table);
	work->waiters_pending = true;
	/*
	 * There's a tight race between setting the waiters_pending and
	 * checking if the ring buffer is empty.  Once the waiters_pending bit
	 * is set, the next event will wake the task up, but we can get stuck
	 * if there's only a single event already in the buffer.
	 *
	 * FIXME: Ideally, we need a memory barrier on the writer side as well,
	 * but adding a memory barrier to all events will cause too much of a
	 * performance hit in the fast path.  We only need a memory barrier when
	 * the buffer goes from empty to having content.  But as this race is
	 * extremely small, and it's not a problem if another event comes in, we
	 * will fix it later.
	 */
	smp_mb();

	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
		return POLLIN | POLLRDNORM;
	return 0;
}

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(b, cond)						\
	({								\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
				struct ring_buffer_per_cpu *__b =	\
					(void *)b;			\
				atomic_inc(&__b->buffer->record_disabled); \
			} else						\
				atomic_inc(&b->record_disabled);	\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

static inline u64 rb_time_stamp(struct ring_buffer *buffer)
{
	/* shift to debug/test normalization and TIME_EXTENTS */
	return buffer->clock() << DEBUG_SHIFT;
}

u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
{
	u64 time;

	preempt_disable_notrace();
	time = rb_time_stamp(buffer);
	preempt_enable_no_resched_notrace();

	return time;
}
EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);

void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
				      int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}
EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);

/*
 * Making the ring buffer lockless makes things tricky.
 * Writes only happen on the CPU that they are on, and they
 * only need to worry about interrupts. Reads can happen
 * on any CPU.
 *
 * The reader page is always off the ring buffer, but when the
 * reader finishes with a page, it needs to swap its page with
 * a new one from the buffer. The reader needs to take from
 * the head (writes go to the tail). But if a writer is in overwrite
 * mode and wraps, it must push the head page forward.
 *
 * Here lies the problem.
 *
 * The reader must be careful to replace only the head page, and
 * not another one. As described at the top of the file in the
 * ASCII art, the reader sets its old page to point to the next
 * page after head. It then sets the page after head to point to
 * the old reader page. But if the writer moves the head page
 * during this operation, the reader could end up with the tail.
 *
 * We use cmpxchg to help prevent this race. We also do something
 * special with the page before head. We set the LSB to 1.
 *
 * When the writer must push the page forward, it will clear the
 * bit that points to the head page, move the head, and then set
 * the bit that points to the new head page.
 *
 * We also don't want an interrupt coming in and moving the head
 * page on another writer. Thus we use the second LSB to catch
 * that too. Thus:
 *
 * head->list->prev->next        bit 1          bit 0
 *                              -------        -------
 * Normal page                     0              0
 * Points to head page             0              1
 * New head page                   1              0
 *
 * Note we can not trust the prev pointer of the head page, because:
 *
 * +----+       +-----+        +-----+
 * |    |------>|  T  |---X--->|  N  |
 * |    |<------|     |        |     |
 * +----+       +-----+        +-----+
 *   ^                           ^ |
 *   |          +-----+          | |
 *   +----------|  R  |----------+ |
 *              |     |<-----------+
 *              +-----+
 *
 * Key:  ---X-->  HEAD flag set in pointer
 *         T      Tail page
 *         R      Reader page
 *         N      Next page
 *
 * (see __rb_reserve_next() to see where this happens)
 *
 *  What the above shows is that the reader just swapped out
 *  the reader page with a page in the buffer, but before it
 *  could make the new header point back to the new page added
 *  it was preempted by a writer. The writer moved forward onto
 *  the new page added by the reader and is about to move forward
 *  again.
 *
 *  You can see, it is legitimate for the previous pointer of
 *  the head (or any page) not to point back to itself. But only
 *  temporarily.
 */

#define RB_PAGE_NORMAL		0UL
#define RB_PAGE_HEAD		1UL
#define RB_PAGE_UPDATE		2UL

#define RB_FLAG_MASK		3UL

/* PAGE_MOVED is not part of the mask */
#define RB_PAGE_MOVED		4UL
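
/*
 * Example: because struct buffer_page is cache line aligned, the two
 * low bits of a list pointer are free for flags. A sketch of the
 * encoding used below:
 *
 *	ptr | RB_PAGE_HEAD	the page after ptr is the head page
 *	ptr | RB_PAGE_UPDATE	a writer is currently moving the head
 *	ptr & ~RB_FLAG_MASK	recovers the real pointer (rb_list_head())
 */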

/*
 * rb_list_head - remove any bit
 */
static struct list_head *rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	return (struct list_head *)(val & ~RB_FLAG_MASK);
}

/*
 * rb_is_head_page - test if the given page is the head page
 *
 * Because the reader may move the head_page pointer, we can
 * not trust what the head page is (it may be pointing to
 * the reader page). But if the next page is a header page,
 * its flags will be non zero.
 */
static inline int
rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		struct buffer_page *page, struct list_head *list)
{
	unsigned long val;

	val = (unsigned long)list->next;

	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
		return RB_PAGE_MOVED;

	return val & RB_FLAG_MASK;
}

/*
 * rb_is_reader_page
 *
 * The unique thing about the reader page is that, if the
 * writer is ever on it, the previous pointer never points
 * back to the reader page.
 */
static bool rb_is_reader_page(struct buffer_page *page)
{
	struct list_head *list = page->list.prev;

	return rb_list_head(list->next) != &page->list;
}

/*
 * rb_set_list_to_head - set a list_head to be pointing to head.
 */
static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
				struct list_head *list)
{
	unsigned long *ptr;

	ptr = (unsigned long *)&list->next;
	*ptr |= RB_PAGE_HEAD;
	*ptr &= ~RB_PAGE_UPDATE;
}

/*
 * rb_head_page_activate - sets up head page
 */
static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;

	head = cpu_buffer->head_page;
	if (!head)
		return;

	/*
	 * Set the previous list pointer to have the HEAD flag.
	 */
	rb_set_list_to_head(cpu_buffer, head->list.prev);
}

static void rb_list_head_clear(struct list_head *list)
{
	unsigned long *ptr = (unsigned long *)&list->next;

	*ptr &= ~RB_FLAG_MASK;
}

/*
 * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *hd;

	/* Go through the whole list and clear any pointers found. */
	rb_list_head_clear(cpu_buffer->pages);

	list_for_each(hd, cpu_buffer->pages)
		rb_list_head_clear(hd);
}

static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
			    struct buffer_page *head,
			    struct buffer_page *prev,
			    int old_flag, int new_flag)
{
	struct list_head *list;
	unsigned long val = (unsigned long)&head->list;
	unsigned long ret;

	list = &prev->list;

	val &= ~RB_FLAG_MASK;

	ret = cmpxchg((unsigned long *)&list->next,
		      val | old_flag, val | new_flag);

	/* check if the reader took the page */
	if ((ret & ~RB_FLAG_MASK) != val)
		return RB_PAGE_MOVED;

	return ret & RB_FLAG_MASK;
}

static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_UPDATE);
}

static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
				 struct buffer_page *head,
				 struct buffer_page *prev,
				 int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_HEAD);
}

static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
				   struct buffer_page *head,
				   struct buffer_page *prev,
				   int old_flag)
{
	return rb_head_page_set(cpu_buffer, head, prev,
				old_flag, RB_PAGE_NORMAL);
}

static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **bpage)
{
	struct list_head *p = rb_list_head((*bpage)->list.next);

	*bpage = list_entry(p, struct buffer_page, list);
}

static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *head;
	struct buffer_page *page;
	struct list_head *list;
	int i;

	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
		return NULL;

	/* sanity check */
	list = cpu_buffer->pages;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
		return NULL;

	page = head = cpu_buffer->head_page;
	/*
	 * It is possible that the writer moves the header behind
	 * where we started, and we miss in one loop.
	 * A second loop should grab the header, but we'll do
	 * three loops just because I'm paranoid.
	 */
	for (i = 0; i < 3; i++) {
		do {
			if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
				cpu_buffer->head_page = page;
				return page;
			}
			rb_inc_page(cpu_buffer, &page);
		} while (page != head);
	}

	RB_WARN_ON(cpu_buffer, 1);

	return NULL;
}

static int rb_head_page_replace(struct buffer_page *old,
				struct buffer_page *new)
{
	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
	unsigned long val;
	unsigned long ret;

	val = *ptr & ~RB_FLAG_MASK;
	val |= RB_PAGE_HEAD;

	ret = cmpxchg(ptr, val, (unsigned long)&new->list);

	return ret == val;
}

/*
 * rb_tail_page_update - move the tail page forward
 */
static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page *tail_page,
			       struct buffer_page *next_page)
{
	unsigned long old_entries;
	unsigned long old_write;

	/*
	 * The tail page now needs to be moved forward.
	 *
	 * We need to reset the tail page, but without messing
	 * with possible erasing of data brought in by interrupts
	 * that have moved the tail page and are currently on it.
	 *
	 * We add a counter to the write field to denote this.
	 */
	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);

	/*
	 * Just make sure we have seen our old_write and synchronize
	 * with any interrupts that come in.
	 */
	barrier();

	/*
	 * If the tail page is still the same as what we think
	 * it is, then it is up to us to update the tail
	 * pointer.
	 */
	if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
		/* Zero the write counter */
		unsigned long val = old_write & ~RB_WRITE_MASK;
		unsigned long eval = old_entries & ~RB_WRITE_MASK;

		/*
		 * This will only succeed if an interrupt did
		 * not come in and change it. In which case, we
		 * do not want to modify it.
		 *
		 * We add (void) to let the compiler know that we do not care
		 * about the return value of these functions. We use the
		 * cmpxchg to only update if an interrupt did not already
		 * do it for us. If the cmpxchg fails, we don't care.
		 */
		(void)local_cmpxchg(&next_page->write, old_write, val);
		(void)local_cmpxchg(&next_page->entries, old_entries, eval);

		/*
		 * No need to worry about races with clearing out the commit.
		 * It can only increment when a commit takes place. But that
		 * only happens in the outermost nested commit.
		 */
		local_set(&next_page->page->commit, 0);

		/* Again, either we update tail_page or an interrupt does */
		(void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
	}
}

static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
			  struct buffer_page *bpage)
{
	unsigned long val = (unsigned long)bpage;

	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
		return 1;

	return 0;
}

/**
 * rb_check_list - make sure a pointer to a list has the last bits zero
 */
static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
			 struct list_head *list)
{
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
		return 1;
	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
		return 1;
	return 0;
}

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	/* Reset the head page if it exists */
	if (cpu_buffer->head_page)
		rb_set_head_page(cpu_buffer);

	rb_head_page_deactivate(cpu_buffer);

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	if (rb_check_list(cpu_buffer, head))
		return -1;

	list_for_each_entry_safe(bpage, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.next->prev != &bpage->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.prev->next != &bpage->list))
			return -1;
		if (rb_check_list(cpu_buffer, &bpage->list))
			return -1;
	}

	rb_head_page_activate(cpu_buffer);

	return 0;
}

static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
{
	int i;
	struct buffer_page *bpage, *tmp;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;
		/*
		 * __GFP_NORETRY flag makes sure that the allocation fails
		 * gracefully without invoking oom-killer and the system is
		 * not destabilized.
		 */
		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				    GFP_KERNEL | __GFP_NORETRY,
				    cpu_to_node(cpu));
		if (!bpage)
			goto free_pages;

		list_add(&bpage->list, pages);

		page = alloc_pages_node(cpu_to_node(cpu),
					GFP_KERNEL | __GFP_NORETRY, 0);
		if (!page)
			goto free_pages;
		bpage->page = page_address(page);
		rb_init_page(bpage->page);
	}

	return 0;

free_pages:
	list_for_each_entry_safe(bpage, tmp, pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}

	return -ENOMEM;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
{
	LIST_HEAD(pages);

	WARN_ON(!nr_pages);

	if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
		return -ENOMEM;

	/*
	 * The ring buffer page list is a circular list that does not
	 * start and end with a list head. All page list items point to
	 * other pages.
	 */
	cpu_buffer->pages = pages.next;
	list_del(&pages);

	cpu_buffer->nr_pages = nr_pages;

	rb_check_pages(cpu_buffer);

	return 0;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	struct page *page;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	raw_spin_lock_init(&cpu_buffer->reader_lock);
	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
	init_completion(&cpu_buffer->update_done);
	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&cpu_buffer->irq_work.waiters);
	init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	rb_check_bpage(cpu_buffer, bpage);

	cpu_buffer->reader_page = bpage;
	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
	if (!page)
		goto fail_free_reader;
	bpage->page = page_address(page);
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	INIT_LIST_HEAD(&cpu_buffer->new_pages);

	ret = rb_allocate_pages(cpu_buffer, nr_pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	rb_head_page_activate(cpu_buffer);

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	free_buffer_page(cpu_buffer->reader_page);

	rb_head_page_deactivate(cpu_buffer);

	if (head) {
		list_for_each_entry_safe(bpage, tmp, head, list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
		bpage = list_entry(head, struct buffer_page, list);
		free_buffer_page(bpage);
	}

	kfree(cpu_buffer);
}

#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu);
#endif

/**
 * __ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
					struct lock_class_key *key)
{
	struct ring_buffer *buffer;
	int bsize;
	int cpu, nr_pages;

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;
	buffer->clock = trace_clock_local;
	buffer->reader_lock_key = key;

	init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&buffer->irq_work.waiters);

	/* need at least two pages */
	if (nr_pages < 2)
		nr_pages = 2;

	/*
	 * In case of a non-hotplug cpu, if the ring-buffer is allocated
	 * in an early initcall, it will not be notified of secondary cpus.
	 * In that case, we need to allocate for all possible cpus.
	 */
#ifdef CONFIG_HOTPLUG_CPU
	cpu_notifier_register_begin();
	cpumask_copy(buffer->cpumask, cpu_online_mask);
#else
	cpumask_copy(buffer->cpumask, cpu_possible_mask);
#endif
	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	for_each_buffer_cpu(buffer, cpu) {
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
		if (!buffer->buffers[cpu])
			goto fail_free_buffers;
	}

#ifdef CONFIG_HOTPLUG_CPU
	buffer->cpu_notify.notifier_call = rb_cpu_notify;
	buffer->cpu_notify.priority = 0;
	__register_cpu_notifier(&buffer->cpu_notify);
	cpu_notifier_register_done();
#endif

	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);
#ifdef CONFIG_HOTPLUG_CPU
	cpu_notifier_register_done();
#endif

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
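
/*
 * Example (a hedged sketch): callers normally go through the
 * ring_buffer_alloc() wrapper in linux/ring_buffer.h, which supplies
 * the lock class key for lockdep.
 */
#if 0
static struct ring_buffer *example_setup(void)
{
	/* two pages per cpu; overwrite the oldest data when full */
	return ring_buffer_alloc(2 * BUF_PAGE_SIZE, RB_FL_OVERWRITE);
}
#endif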

/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
	int cpu;

#ifdef CONFIG_HOTPLUG_CPU
	cpu_notifier_register_begin();
	__unregister_cpu_notifier(&buffer->cpu_notify);
#endif

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

#ifdef CONFIG_HOTPLUG_CPU
	cpu_notifier_register_done();
#endif

	kfree(buffer->buffers);
	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);

void ring_buffer_set_clock(struct ring_buffer *buffer,
			   u64 (*clock)(void))
{
	buffer->clock = clock;
}
static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static inline unsigned long rb_page_entries(struct buffer_page *bpage)
{
	return local_read(&bpage->entries) & RB_WRITE_MASK;
}

static inline unsigned long rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write) & RB_WRITE_MASK;
}

static int
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
{
	struct list_head *tail_page, *to_remove, *next_page;
	struct buffer_page *to_remove_page, *tmp_iter_page;
	struct buffer_page *last_page, *first_page;
	unsigned int nr_removed;
	unsigned long head_bit;
	int page_entries;

	head_bit = 0;

	raw_spin_lock_irq(&cpu_buffer->reader_lock);
	atomic_inc(&cpu_buffer->record_disabled);
	/*
	 * We don't race with the readers since we have acquired the reader
	 * lock. We also don't race with writers after disabling recording.
	 * This makes it easy to figure out the first and the last page to be
	 * removed from the list. We unlink all the pages in between including
	 * the first and last pages. This is done in a busy loop so that we
	 * lose the least number of traces.
	 * The pages are freed after we restart recording and unlock readers.
	 */
	tail_page = &cpu_buffer->tail_page->list;

	/*
	 * The tail page might be on the reader page; in that case we
	 * remove the next page from the ring buffer instead.
	 */
	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
		tail_page = rb_list_head(tail_page->next);
	to_remove = tail_page;

	/* start of pages to remove */
	first_page = list_entry(rb_list_head(to_remove->next),
				struct buffer_page, list);

	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
		to_remove = rb_list_head(to_remove)->next;
		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
	}

	next_page = rb_list_head(to_remove)->next;

	/*
	 * Now we remove all pages between tail_page and next_page.
	 * Make sure that we have head_bit value preserved for the
	 * next page.
	 */
	tail_page->next = (struct list_head *)((unsigned long)next_page |
						head_bit);
	next_page = rb_list_head(next_page);
	next_page->prev = tail_page;

	/* make sure pages points to a valid page in the ring buffer */
	cpu_buffer->pages = next_page;

	/* update head page */
	if (head_bit)
		cpu_buffer->head_page = list_entry(next_page,
						struct buffer_page, list);

	/*
	 * change read pointer to make sure any read iterators reset
	 * themselves
	 */
	cpu_buffer->read = 0;

	/* pages are removed, resume tracing and then free the pages */
	atomic_dec(&cpu_buffer->record_disabled);
	raw_spin_unlock_irq(&cpu_buffer->reader_lock);

	RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));

	/* last buffer page to remove */
	last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
				list);
	tmp_iter_page = first_page;

	do {
		to_remove_page = tmp_iter_page;
		rb_inc_page(cpu_buffer, &tmp_iter_page);

		/* update the counters */
		page_entries = rb_page_entries(to_remove_page);
		if (page_entries) {
			/*
			 * If something was added to this page, it was full
			 * since it is not the tail page. So we deduct the
			 * bytes consumed in ring buffer from here.
			 * Increment overrun to account for the lost events.
			 */
			local_add(page_entries, &cpu_buffer->overrun);
			local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
		}

		/*
		 * We have already removed references to this list item, just
		 * free up the buffer_page and its page
		 */
		free_buffer_page(to_remove_page);
		nr_removed--;

	} while (to_remove_page != last_page);

	RB_WARN_ON(cpu_buffer, nr_removed);

	return nr_removed == 0;
}

static int
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *pages = &cpu_buffer->new_pages;
	int retries, success;

	raw_spin_lock_irq(&cpu_buffer->reader_lock);
	/*
	 * We are holding the reader lock, so the reader page won't be swapped
	 * in the ring buffer. Now we are racing with the writer trying to
	 * move head page and the tail page.
	 * We are going to adapt the reader page update process where:
	 * 1. We first splice the start and end of list of new pages between
	 *    the head page and its previous page.
	 * 2. We cmpxchg the prev_page->next to point from head page to the
	 *    start of new pages list.
	 * 3. Finally, we update the head->prev to the end of new list.
	 *
	 * We will try this process 10 times, to make sure that we don't keep
	 * spinning.
	 */
	retries = 10;
	success = 0;
	while (retries--) {
		struct list_head *head_page, *prev_page, *r;
		struct list_head *last_page, *first_page;
		struct list_head *head_page_with_bit;

		head_page = &rb_set_head_page(cpu_buffer)->list;
		if (!head_page)
			break;
		prev_page = head_page->prev;

		first_page = pages->next;
		last_page  = pages->prev;

		head_page_with_bit = (struct list_head *)
				     ((unsigned long)head_page | RB_PAGE_HEAD);

		last_page->next = head_page_with_bit;
		first_page->prev = prev_page;

		r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);

		if (r == head_page_with_bit) {
			/*
			 * yay, we replaced the page pointer to our new list,
			 * now, we just have to update to head page's prev
			 * pointer to point to end of list
			 */
			head_page->prev = last_page;
			success = 1;
			break;
		}
	}

	if (success)
		INIT_LIST_HEAD(pages);
	/*
	 * If we weren't successful in adding in new pages, warn and stop
	 * tracing
	 */
	RB_WARN_ON(cpu_buffer, !success);
	raw_spin_unlock_irq(&cpu_buffer->reader_lock);

	/* free pages if they weren't inserted */
	if (!success) {
		struct buffer_page *bpage, *tmp;
		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
					 list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
	}
	return success;
}

static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	int success;

	if (cpu_buffer->nr_pages_to_update > 0)
		success = rb_insert_pages(cpu_buffer);
	else
		success = rb_remove_pages(cpu_buffer,
					-cpu_buffer->nr_pages_to_update);

	if (success)
		cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
}

static void update_pages_handler(struct work_struct *work)
{
	struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
			struct ring_buffer_per_cpu, update_pages_work);
	rb_update_pages(cpu_buffer);
	complete(&cpu_buffer->update_done);
}
1631/**
1632 * ring_buffer_resize - resize the ring buffer
1633 * @buffer: the buffer to resize.
1634 * @size: the new size.
1635 * @cpu_id: the cpu buffer to resize
1636 *
1637 * Minimum size is 2 * BUF_PAGE_SIZE.
1638 *
1639 * Returns 0 on success and < 0 on failure.
1640 */
1641int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1642			int cpu_id)
1643{
1644	struct ring_buffer_per_cpu *cpu_buffer;
1645	unsigned nr_pages;
1646	int cpu, err = 0;
1647
1648	/*
1649	 * Always succeed at resizing a non-existent buffer:
1650	 */
1651	if (!buffer)
1652		return size;
1653
1654	/* Make sure the requested buffer exists */
1655	if (cpu_id != RING_BUFFER_ALL_CPUS &&
1656	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
1657		return size;
1658
1659	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1660	size *= BUF_PAGE_SIZE;
1661
1662	/* we need a minimum of two pages */
1663	if (size < BUF_PAGE_SIZE * 2)
1664		size = BUF_PAGE_SIZE * 2;
1665
1666	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1667
1668	/*
1669	 * Don't succeed if resizing is disabled, as a reader might be
1670	 * manipulating the ring buffer and is expecting a sane state while
1671	 * this is true.
1672	 */
1673	if (atomic_read(&buffer->resize_disabled))
1674		return -EBUSY;
1675
1676	/* prevent another thread from changing buffer sizes */
1677	mutex_lock(&buffer->mutex);
1678
1679	if (cpu_id == RING_BUFFER_ALL_CPUS) {
1680		/* calculate the pages to update */
1681		for_each_buffer_cpu(buffer, cpu) {
1682			cpu_buffer = buffer->buffers[cpu];
1683
1684			cpu_buffer->nr_pages_to_update = nr_pages -
1685							cpu_buffer->nr_pages;
1686			/*
1687			 * nothing more to do for removing pages or no update
1688			 */
1689			if (cpu_buffer->nr_pages_to_update <= 0)
1690				continue;
1691			/*
1692			 * to add pages, make sure all new pages can be
1693			 * allocated without receiving ENOMEM
1694			 */
1695			INIT_LIST_HEAD(&cpu_buffer->new_pages);
1696			if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1697						&cpu_buffer->new_pages, cpu)) {
1698				/* not enough memory for new pages */
1699				err = -ENOMEM;
1700				goto out_err;
1701			}
1702		}
1703
1704		get_online_cpus();
1705		/*
1706		 * Fire off all the required work handlers
1707		 * We can't schedule on offline CPUs, but it's not necessary
1708		 * since we can change their buffer sizes without any race.
1709		 */
1710		for_each_buffer_cpu(buffer, cpu) {
1711			cpu_buffer = buffer->buffers[cpu];
1712			if (!cpu_buffer->nr_pages_to_update)
1713				continue;
1714
1715			/* Can't run something on an offline CPU. */
1716			if (!cpu_online(cpu)) {
1717				rb_update_pages(cpu_buffer);
1718				cpu_buffer->nr_pages_to_update = 0;
1719			} else {
1720				schedule_work_on(cpu,
1721						&cpu_buffer->update_pages_work);
1722			}
1723		}
1724
1725		/* wait for all the updates to complete */
1726		for_each_buffer_cpu(buffer, cpu) {
1727			cpu_buffer = buffer->buffers[cpu];
1728			if (!cpu_buffer->nr_pages_to_update)
1729				continue;
1730
1731			if (cpu_online(cpu))
1732				wait_for_completion(&cpu_buffer->update_done);
1733			cpu_buffer->nr_pages_to_update = 0;
1734		}
1735
1736		put_online_cpus();
1737	} else {
1738		/* Make sure this CPU has been intitialized */
1739		if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
1740			goto out;
1741
1742		cpu_buffer = buffer->buffers[cpu_id];
1743
1744		if (nr_pages == cpu_buffer->nr_pages)
1745			goto out;
1746
1747		cpu_buffer->nr_pages_to_update = nr_pages -
1748						cpu_buffer->nr_pages;
1749
1750		INIT_LIST_HEAD(&cpu_buffer->new_pages);
1751		if (cpu_buffer->nr_pages_to_update > 0 &&
1752			__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1753					    &cpu_buffer->new_pages, cpu_id)) {
1754			err = -ENOMEM;
1755			goto out_err;
1756		}
1757
1758		get_online_cpus();
1759
1760		/* Can't run something on an offline CPU. */
1761		if (!cpu_online(cpu_id))
1762			rb_update_pages(cpu_buffer);
1763		else {
1764			schedule_work_on(cpu_id,
1765					 &cpu_buffer->update_pages_work);
1766			wait_for_completion(&cpu_buffer->update_done);
1767		}
1768
1769		cpu_buffer->nr_pages_to_update = 0;
1770		put_online_cpus();
1771	}
1772
1773 out:
1774	/*
1775	 * The ring buffer resize can happen with the ring buffer
1776	 * enabled, so that the update disturbs the tracing as little
1777	 * as possible. But if the buffer is disabled, we do not need
1778	 * to worry about that, and we can take the time to verify
1779	 * that the buffer is not corrupt.
1780	 */
1781	if (atomic_read(&buffer->record_disabled)) {
1782		atomic_inc(&buffer->record_disabled);
1783		/*
1784		 * Even though the buffer was disabled, we must make sure
1785		 * that it is truly disabled before calling rb_check_pages.
1786		 * There could have been a race between checking
1787		 * record_disable and incrementing it.
1788		 */
1789		synchronize_sched();
1790		for_each_buffer_cpu(buffer, cpu) {
1791			cpu_buffer = buffer->buffers[cpu];
1792			rb_check_pages(cpu_buffer);
1793		}
1794		atomic_dec(&buffer->record_disabled);
1795	}
1796
1797	mutex_unlock(&buffer->mutex);
1798	return size;
1799
1800 out_err:
1801	for_each_buffer_cpu(buffer, cpu) {
1802		struct buffer_page *bpage, *tmp;
1803
1804		cpu_buffer = buffer->buffers[cpu];
1805		cpu_buffer->nr_pages_to_update = 0;
1806
1807		if (list_empty(&cpu_buffer->new_pages))
1808			continue;
1809
1810		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1811					list) {
1812			list_del_init(&bpage->list);
1813			free_buffer_page(bpage);
1814		}
1815	}
1816	mutex_unlock(&buffer->mutex);
1817	return err;
1818}
1819EXPORT_SYMBOL_GPL(ring_buffer_resize);
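
/*
 * Example (a hedged sketch): doubling every cpu buffer. On success the
 * call returns the resulting per-cpu size in bytes, negative errno on
 * failure.
 */
#if 0
static int example_grow(struct ring_buffer *buffer, unsigned long size)
{
	int ret;

	ret = ring_buffer_resize(buffer, size * 2, RING_BUFFER_ALL_CPUS);
	return ret < 0 ? ret : 0;
}
#endif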

void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
{
	mutex_lock(&buffer->mutex);
	if (val)
		buffer->flags |= RB_FL_OVERWRITE;
	else
		buffer->flags &= ~RB_FL_OVERWRITE;
	mutex_unlock(&buffer->mutex);
}
EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);

static inline void *
__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
{
	return bpage->data + index;
}

static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
{
	return bpage->page->data + index;
}

static inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->reader_page,
			       cpu_buffer->reader_page->read);
}

static inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
	return __rb_page_index(iter->head_page, iter->head);
}

static inline unsigned rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->page->commit);
}

/* Size is determined by what has been committed */
static inline unsigned rb_page_size(struct buffer_page *bpage)
{
	return rb_page_commit(bpage);
}

static inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->commit_page);
}

static inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;

	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
}

static void rb_inc_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/*
	 * The iterator could be on the reader page (it starts there).
	 * But the head could have moved, since the reader was
	 * found. Check for this case and assign the iterator
	 * to the head page instead of next.
	 */
	if (iter->head_page == cpu_buffer->reader_page)
		iter->head_page = rb_set_head_page(cpu_buffer);
	else
		rb_inc_page(cpu_buffer, &iter->head_page);

	iter->read_stamp = iter->head_page->page->time_stamp;
	iter->head = 0;
}

/*
 * rb_handle_head_page - writer hit the head page
 *
 * Returns: +1 to retry page
 *           0 to continue
 *          -1 on error
 */
static int
rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		    struct buffer_page *tail_page,
		    struct buffer_page *next_page)
{
	struct buffer_page *new_head;
	int entries;
	int type;
	int ret;

	entries = rb_page_entries(next_page);

	/*
	 * The hard part is here. We need to move the head
	 * forward, and protect against both readers on
	 * other CPUs and writers coming in via interrupts.
	 */
	type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
				       RB_PAGE_HEAD);

	/*
	 * type can be one of four:
	 *  NORMAL - an interrupt already moved it for us
	 *  HEAD   - we are the first to get here.
	 *  UPDATE - we are the interrupt interrupting
	 *           a current move.
	 *  MOVED  - a reader on another CPU moved the next
	 *           pointer to its reader page. Give up
	 *           and try again.
	 */

	switch (type) {
	case RB_PAGE_HEAD:
		/*
		 * We changed the head to UPDATE, thus
		 * it is our responsibility to update
		 * the counters.
		 */
1945		local_add(entries, &cpu_buffer->overrun);
1946		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1947
1948		/*
1949		 * The entries will be zeroed out when we move the
1950		 * tail page.
1951		 */
1952
1953		/* still more to do */
1954		break;
1955
1956	case RB_PAGE_UPDATE:
1957		/*
1958		 * This is an interrupt that interrupted the
1959		 * previous update. Still more to do.
1960		 */
1961		break;
1962	case RB_PAGE_NORMAL:
1963		/*
1964		 * An interrupt came in before the update
1965		 * and processed this for us.
1966		 * Nothing left to do.
1967		 */
1968		return 1;
1969	case RB_PAGE_MOVED:
1970		/*
1971		 * The reader is on another CPU and just did
1972		 * a swap with our next_page.
1973		 * Try again.
1974		 */
1975		return 1;
1976	default:
1977		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
1978		return -1;
1979	}
1980
1981	/*
1982	 * Now that we are here, the old head pointer is
1983	 * set to UPDATE. This will keep the reader from
1984	 * swapping the head page with the reader page.
1985	 * The reader (on another CPU) will spin till
1986	 * we are finished.
1987	 *
1988	 * We just need to protect against interrupts
1989	 * doing the job. We will set the next pointer
1990	 * to HEAD. After that, we set the old pointer
1991	 * to NORMAL, but only if it was HEAD before.
1992	 * Otherwise we are an interrupt, and only
1993	 * want the outermost commit to reset it.
1994	 */
1995	new_head = next_page;
1996	rb_inc_page(cpu_buffer, &new_head);
1997
1998	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
1999				    RB_PAGE_NORMAL);
2000
2001	/*
2002	 * Valid returns are:
2003	 *  HEAD   - an interrupt came in and already set it.
2004	 *  NORMAL - One of two things:
2005	 *            1) We really set it.
2006	 *            2) A bunch of interrupts came in and moved
2007	 *               the page forward again.
2008	 */
2009	switch (ret) {
2010	case RB_PAGE_HEAD:
2011	case RB_PAGE_NORMAL:
2012		/* OK */
2013		break;
2014	default:
2015		RB_WARN_ON(cpu_buffer, 1);
2016		return -1;
2017	}
2018
2019	/*
2020	 * It is possible that an interrupt came in,
2021	 * set the head up, then more interrupts came in
2022	 * and moved it again. When we get back here,
2023	 * the page would have been set to NORMAL but we
2024	 * just set it back to HEAD.
2025	 *
2026	 * How do you detect this? Well, if that happened
2027	 * the tail page would have moved.
2028	 */
2029	if (ret == RB_PAGE_NORMAL) {
2030		struct buffer_page *buffer_tail_page;
2031
2032		buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
2033		/*
2034		 * If the tail had moved past next, then we need
2035		 * to reset the pointer.
2036		 */
2037		if (buffer_tail_page != tail_page &&
2038		    buffer_tail_page != next_page)
2039			rb_head_page_set_normal(cpu_buffer, new_head,
2040						next_page,
2041						RB_PAGE_HEAD);
2042	}
2043
2044	/*
2045	 * If this was the outermost commit (the one that
2046	 * changed the original pointer from HEAD to UPDATE),
2047	 * then it is up to us to reset it to NORMAL.
2048	 */
2049	if (type == RB_PAGE_HEAD) {
2050		ret = rb_head_page_set_normal(cpu_buffer, next_page,
2051					      tail_page,
2052					      RB_PAGE_UPDATE);
2053		if (RB_WARN_ON(cpu_buffer,
2054			       ret != RB_PAGE_UPDATE))
2055			return -1;
2056	}
2057
2058	return 0;
2059}
2060
2061static inline void
2062rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2063	      unsigned long tail, struct rb_event_info *info)
2064{
2065	struct buffer_page *tail_page = info->tail_page;
2066	struct ring_buffer_event *event;
2067	unsigned long length = info->length;
2068
2069	/*
2070	 * Only the event that crossed the page boundary
2071	 * must fill the old tail_page with padding.
2072	 */
2073	if (tail >= BUF_PAGE_SIZE) {
2074		/*
2075		 * If the page was filled, then we still need
2076		 * to update the real_end. Reset it to zero
2077		 * and the reader will ignore it.
2078		 */
2079		if (tail == BUF_PAGE_SIZE)
2080			tail_page->real_end = 0;
2081
2082		local_sub(length, &tail_page->write);
2083		return;
2084	}
2085
2086	event = __rb_page_index(tail_page, tail);
2087	kmemcheck_annotate_bitfield(event, bitfield);
2088
2089	/* account for padding bytes */
2090	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2091
2092	/*
2093	 * Save the original length to the meta data.
2094	 * This will be used by the reader to add lost event
2095	 * counter.
2096	 */
2097	tail_page->real_end = tail;
2098
2099	/*
2100	 * If this event is bigger than the minimum size, then
2101	 * we need to be careful that we don't subtract the
2102	 * write counter enough to allow another writer to slip
2103	 * in on this page.
2104	 * We put in a discarded commit instead, to make sure
2105	 * that this space is not used again.
2106	 *
2107	 * If we are less than the minimum size, we don't need to
2108	 * worry about it.
2109	 */
2110	if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
2111		/* No room for any events */
2112
2113		/* Mark the rest of the page with padding */
2114		rb_event_set_padding(event);
2115
2116		/* Set the write back to the previous setting */
2117		local_sub(length, &tail_page->write);
2118		return;
2119	}
2120
2121	/* Put in a discarded event */
2122	event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
2123	event->type_len = RINGBUF_TYPE_PADDING;
2124	/* time delta must be non zero */
2125	event->time_delta = 1;
2126
2127	/* Set write to end of buffer */
2128	length = (tail + length) - BUF_PAGE_SIZE;
2129	local_sub(length, &tail_page->write);
2130}
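
/*
 * A worked example of the last two cases above, with illustrative
 * numbers (say a 4000-byte BUF_PAGE_SIZE and an RB_EVNT_MIN_SIZE of 8):
 *
 *  - tail = 3996: only 4 bytes are left on the page, too small for even
 *    a minimal event, so the rest of the page is marked as padding and
 *    'write' is pulled back by the full reserved length.
 *
 *  - tail = 3900: 100 bytes are left, so a discarded PADDING event with
 *    array[0] = 100 - RB_EVNT_HDR_SIZE is written at offset 3900 to
 *    keep later writers out of that space, and 'write' is trimmed back
 *    to the page boundary.
 */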
2131
2132static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
2133
2134/*
2135 * This is the slow path, force gcc not to inline it.
2136 */
2137static noinline struct ring_buffer_event *
2138rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2139	     unsigned long tail, struct rb_event_info *info)
2140{
2141	struct buffer_page *tail_page = info->tail_page;
2142	struct buffer_page *commit_page = cpu_buffer->commit_page;
2143	struct ring_buffer *buffer = cpu_buffer->buffer;
2144	struct buffer_page *next_page;
2145	int ret;
2146
2147	next_page = tail_page;
2148
2149	rb_inc_page(cpu_buffer, &next_page);
2150
2151	/*
2152	 * If for some reason, we had an interrupt storm that made
2153	 * it all the way around the buffer, bail, and warn
2154	 * about it.
2155	 */
2156	if (unlikely(next_page == commit_page)) {
2157		local_inc(&cpu_buffer->commit_overrun);
2158		goto out_reset;
2159	}
2160
2161	/*
2162	 * This is where the fun begins!
2163	 *
2164	 * We are fighting against races between a reader that
2165	 * could be on another CPU trying to swap its reader
2166	 * page with the buffer head.
2167	 *
2168	 * We are also fighting against interrupts coming in and
2169	 * moving the head or tail on us as well.
2170	 *
2171	 * If the next page is the head page then we have filled
2172	 * the buffer, unless the commit page is still on the
2173	 * reader page.
2174	 */
2175	if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
2176
2177		/*
2178		 * If the commit is not on the reader page, then
2179		 * move the header page.
2180		 */
2181		if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2182			/*
2183			 * If we are not in overwrite mode,
2184			 * this is easy, just stop here.
2185			 */
2186			if (!(buffer->flags & RB_FL_OVERWRITE)) {
2187				local_inc(&cpu_buffer->dropped_events);
2188				goto out_reset;
2189			}
2190
2191			ret = rb_handle_head_page(cpu_buffer,
2192						  tail_page,
2193						  next_page);
2194			if (ret < 0)
2195				goto out_reset;
2196			if (ret)
2197				goto out_again;
2198		} else {
2199			/*
2200			 * We need to be careful here too. The
2201			 * commit page could still be on the reader
2202			 * page. We could have a small buffer, and
2203			 * have filled up the buffer with events
2204			 * from interrupts and such, and wrapped.
2205			 *
2206			 * Note, if the tail page is also on the
2207			 * reader_page, we let it move out.
2208			 */
2209			if (unlikely((cpu_buffer->commit_page !=
2210				      cpu_buffer->tail_page) &&
2211				     (cpu_buffer->commit_page ==
2212				      cpu_buffer->reader_page))) {
2213				local_inc(&cpu_buffer->commit_overrun);
2214				goto out_reset;
2215			}
2216		}
2217	}
2218
2219	rb_tail_page_update(cpu_buffer, tail_page, next_page);
2220
2221 out_again:
2222
2223	rb_reset_tail(cpu_buffer, tail, info);
2224
2225	/* Commit what we have for now. */
2226	rb_end_commit(cpu_buffer);
2227	/* rb_end_commit() decs committing */
2228	local_inc(&cpu_buffer->committing);
2229
2230	/* fail and let the caller try again */
2231	return ERR_PTR(-EAGAIN);
2232
2233 out_reset:
2234	/* reset write */
2235	rb_reset_tail(cpu_buffer, tail, info);
2236
2237	return NULL;
2238}
2239
2240/* Slow path, do not inline */
2241static noinline struct ring_buffer_event *
2242rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
2243{
2244	event->type_len = RINGBUF_TYPE_TIME_EXTEND;
2245
2246	/* Not the first event on the page? */
2247	if (rb_event_index(event)) {
2248		event->time_delta = delta & TS_MASK;
2249		event->array[0] = delta >> TS_SHIFT;
2250	} else {
2251		/* nope, just zero it */
2252		event->time_delta = 0;
2253		event->array[0] = 0;
2254	}
2255
2256	return skip_time_extend(event);
2257}
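
/*
 * Note that rb_add_time_stamp() and the readers of the extend event use
 * the same split encoding: the low TS_SHIFT bits of the delta live in
 * time_delta and the high bits in array[0], and rb_update_write_stamp()
 * below reassembles them as (array[0] << TS_SHIFT) + time_delta.  For
 * instance, a delta of (5 << TS_SHIFT) + 3 is stored as array[0] = 5,
 * time_delta = 3.
 */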
2258
2259static inline bool rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
2260				     struct ring_buffer_event *event);
2261
2262/**
2263 * rb_update_event - update event type and data
2264	 * @cpu_buffer: the per CPU buffer the event is on
2265	 * @event: the event to update
2266	 * @info: the reservation info holding the length and time delta
2267 *
2268 * Update the type and data fields of the event. The length
2269 * is the actual size that is written to the ring buffer,
2270 * and with this, we can determine what to place into the
2271 * data field.
2272 */
2273static void
2274rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
2275		struct ring_buffer_event *event,
2276		struct rb_event_info *info)
2277{
2278	unsigned length = info->length;
2279	u64 delta = info->delta;
2280
2281	/* Only a commit updates the timestamp */
2282	if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
2283		delta = 0;
2284
2285	/*
2286	 * If we need to add a timestamp, then we
2287	 * add it to the start of the reserved space.
2288	 */
2289	if (unlikely(info->add_timestamp)) {
2290		event = rb_add_time_stamp(event, delta);
2291		length -= RB_LEN_TIME_EXTEND;
2292		delta = 0;
2293	}
2294
2295	event->time_delta = delta;
2296	length -= RB_EVNT_HDR_SIZE;
2297	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
2298		event->type_len = 0;
2299		event->array[0] = length;
2300	} else
2301		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
2302}
2303
2304static unsigned rb_calculate_event_length(unsigned length)
2305{
2306	struct ring_buffer_event event; /* Used only for sizeof array */
2307
2308	/* zero length can cause confusion */
2309	if (!length)
2310		length++;
2311
2312	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
2313		length += sizeof(event.array[0]);
2314
2315	length += RB_EVNT_HDR_SIZE;
2316	length = ALIGN(length, RB_ARCH_ALIGNMENT);
2317
2318	/*
2319	 * In case the time delta is larger than the 27 bits for it
2320	 * in the header, we need to add a timestamp. If another
2321	 * event comes in when trying to discard this one to increase
2322	 * the length, then the timestamp will be added in the allocated
2323	 * space of this event. If length is bigger than the size needed
2324	 * for the TIME_EXTEND, then padding has to be used. The event's
2325	 * length must be either RB_LEN_TIME_EXTEND, or greater than or equal
2326	 * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding.
2327	 * As length is a multiple of 4, we only need to worry if it
2328	 * is 12 (RB_LEN_TIME_EXTEND + 4).
2329	 */
2330	if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
2331		length += RB_ALIGNMENT;
2332
2333	return length;
2334}
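
/*
 * A worked version of the special case above, with RB_LEN_TIME_EXTEND
 * being 8: a total length of 8 is fine (a discarded slot of that size
 * is exactly a TIME_EXTEND), and 16 or more is fine (8 for the
 * TIME_EXTEND plus at least the 8-byte minimum padding).  A total of 12
 * would leave 12 - 8 = 4 bytes after the TIME_EXTEND, too small for any
 * padding event, which is why 12 is rounded up to 16.
 */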
2335
2336#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2337static inline bool sched_clock_stable(void)
2338{
2339	return true;
2340}
2341#endif
2342
2343static inline int
2344rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2345		  struct ring_buffer_event *event)
2346{
2347	unsigned long new_index, old_index;
2348	struct buffer_page *bpage;
2349	unsigned long index;
2350	unsigned long addr;
2351
2352	new_index = rb_event_index(event);
2353	old_index = new_index + rb_event_ts_length(event);
2354	addr = (unsigned long)event;
2355	addr &= PAGE_MASK;
2356
2357	bpage = READ_ONCE(cpu_buffer->tail_page);
2358
2359	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2360		unsigned long write_mask =
2361			local_read(&bpage->write) & ~RB_WRITE_MASK;
2362		unsigned long event_length = rb_event_length(event);
2363		/*
2364		 * This is on the tail page. It is possible that
2365		 * a write could come in and move the tail page
2366		 * and write to the next page. That is fine
2367		 * because we just shorten what is on this page.
2368		 */
2369		old_index += write_mask;
2370		new_index += write_mask;
2371		index = local_cmpxchg(&bpage->write, old_index, new_index);
2372		if (index == old_index) {
2373			/* update counters */
2374			local_sub(event_length, &cpu_buffer->entries_bytes);
2375			return 1;
2376		}
2377	}
2378
2379	/* could not discard */
2380	return 0;
2381}
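
/*
 * In other words, the discard only reclaims space when the event is
 * still the last thing on the tail page.  For example, an event that
 * starts at offset 160 with 'write' now at 200 is discarded by rolling
 * write back from 200 to 160 with the cmpxchg above; if an interrupt
 * reserved another event first, 'write' is no longer 200, the cmpxchg
 * fails, and the caller leaves the event in place, already marked as
 * padding by rb_event_discard() (see ring_buffer_discard_commit()
 * below).
 */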
2382
2383static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2384{
2385	local_inc(&cpu_buffer->committing);
2386	local_inc(&cpu_buffer->commits);
2387}
2388
2389static void
2390rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
2391{
2392	unsigned long max_count;
2393
2394	/*
2395	 * We only race with interrupts and NMIs on this CPU.
2396	 * If we own the commit event, then we can commit
2397	 * all others that interrupted us, since the interruptions
2398	 * are in stack format (they finish before they come
2399	 * back to us). This allows us to do a simple loop to
2400	 * assign the commit to the tail.
2401	 */
2402 again:
2403	max_count = cpu_buffer->nr_pages * 100;
2404
2405	while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
2406		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
2407			return;
2408		if (RB_WARN_ON(cpu_buffer,
2409			       rb_is_reader_page(cpu_buffer->tail_page)))
2410			return;
2411		local_set(&cpu_buffer->commit_page->page->commit,
2412			  rb_page_write(cpu_buffer->commit_page));
2413		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
2414		/* Only update the write stamp if the page has an event */
2415		if (rb_page_write(cpu_buffer->commit_page))
2416			cpu_buffer->write_stamp =
2417				cpu_buffer->commit_page->page->time_stamp;
2418		/* add barrier to keep gcc from optimizing too much */
2419		barrier();
2420	}
2421	while (rb_commit_index(cpu_buffer) !=
2422	       rb_page_write(cpu_buffer->commit_page)) {
2423
2424		local_set(&cpu_buffer->commit_page->page->commit,
2425			  rb_page_write(cpu_buffer->commit_page));
2426		RB_WARN_ON(cpu_buffer,
2427			   local_read(&cpu_buffer->commit_page->page->commit) &
2428			   ~RB_WRITE_MASK);
2429		barrier();
2430	}
2431
2432	/* again, keep gcc from optimizing */
2433	barrier();
2434
2435	/*
2436	 * If an interrupt came in just after the first while loop
2437	 * and pushed the tail page forward, we will be left with
2438	 * a dangling commit that will never go forward.
2439	 */
2440	if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
2441		goto again;
2442}
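
/*
 * To make the loops above concrete: suppose interrupts pushed the tail
 * two pages ahead while we owned the commit.  The first while loop
 * walks commit_page forward page by page, finalizing each page's
 * 'commit' as it goes, until it reaches the page the tail is on; the
 * second loop then publishes the commit index within that final page.
 * The re-check at the end catches an interrupt that moved the tail to
 * yet another page in between.
 */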
2443
2444static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2445{
2446	unsigned long commits;
2447
2448	if (RB_WARN_ON(cpu_buffer,
2449		       !local_read(&cpu_buffer->committing)))
2450		return;
2451
2452 again:
2453	commits = local_read(&cpu_buffer->commits);
2454	/* synchronize with interrupts */
2455	barrier();
2456	if (local_read(&cpu_buffer->committing) == 1)
2457		rb_set_commit_to_write(cpu_buffer);
2458
2459	local_dec(&cpu_buffer->committing);
2460
2461	/* synchronize with interrupts */
2462	barrier();
2463
2464	/*
2465	 * Need to account for interrupts coming in between the
2466	 * updating of the commit page and the clearing of the
2467	 * committing counter.
2468	 */
2469	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2470	    !local_read(&cpu_buffer->committing)) {
2471		local_inc(&cpu_buffer->committing);
2472		goto again;
2473	}
2474}
2475
2476static inline void rb_event_discard(struct ring_buffer_event *event)
2477{
2478	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
2479		event = skip_time_extend(event);
2480
2481	/* array[0] holds the actual length for the discarded event */
2482	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2483	event->type_len = RINGBUF_TYPE_PADDING;
2484	/* time delta must be non zero */
2485	if (!event->time_delta)
2486		event->time_delta = 1;
2487}
2488
2489static inline bool
2490rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
2491		   struct ring_buffer_event *event)
2492{
2493	unsigned long addr = (unsigned long)event;
2494	unsigned long index;
2495
2496	index = rb_event_index(event);
2497	addr &= PAGE_MASK;
2498
2499	return cpu_buffer->commit_page->page == (void *)addr &&
2500		rb_commit_index(cpu_buffer) == index;
2501}
2502
2503static void
2504rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2505		      struct ring_buffer_event *event)
2506{
2507	u64 delta;
2508
2509	/*
2510	 * The event first in the commit queue updates the
2511	 * time stamp.
2512	 */
2513	if (rb_event_is_commit(cpu_buffer, event)) {
2514		/*
2515		 * A commit event that is first on a page
2516		 * updates the write timestamp with the page stamp
2517		 */
2518		if (!rb_event_index(event))
2519			cpu_buffer->write_stamp =
2520				cpu_buffer->commit_page->page->time_stamp;
2521		else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
2522			delta = event->array[0];
2523			delta <<= TS_SHIFT;
2524			delta += event->time_delta;
2525			cpu_buffer->write_stamp += delta;
2526		} else
2527			cpu_buffer->write_stamp += event->time_delta;
2528	}
2529}
2530
2531static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2532		      struct ring_buffer_event *event)
2533{
2534	local_inc(&cpu_buffer->entries);
2535	rb_update_write_stamp(cpu_buffer, event);
2536	rb_end_commit(cpu_buffer);
2537}
2538
2539static __always_inline void
2540rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
2541{
2542	bool pagebusy;
2543
2544	if (buffer->irq_work.waiters_pending) {
2545		buffer->irq_work.waiters_pending = false;
2546		/* irq_work_queue() supplies its own memory barriers */
2547		irq_work_queue(&buffer->irq_work.work);
2548	}
2549
2550	if (cpu_buffer->irq_work.waiters_pending) {
2551		cpu_buffer->irq_work.waiters_pending = false;
2552		/* irq_work_queue() supplies its own memory barriers */
2553		irq_work_queue(&cpu_buffer->irq_work.work);
2554	}
2555
2556	pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
2557
2558	if (!pagebusy && cpu_buffer->irq_work.full_waiters_pending) {
2559		cpu_buffer->irq_work.wakeup_full = true;
2560		cpu_buffer->irq_work.full_waiters_pending = false;
2561		/* irq_work_queue() supplies its own memory barriers */
2562		irq_work_queue(&cpu_buffer->irq_work.work);
2563	}
2564}
2565
2566/*
2567 * The lock and unlock are done within a preempt disable section.
2568 * The current_context per_cpu variable can only be modified
2569 * by the current task between lock and unlock. But it can
2570 * be modified more than once via an interrupt. To pass this
2571 * information from the lock to the unlock without having to
2572 * access the 'in_interrupt()' functions again (which do show
2573 * a bit of overhead in something as critical as function tracing),
2574 * we use a bitmask trick.
2575 *
2576 *  bit 0 =  NMI context
2577 *  bit 1 =  IRQ context
2578 *  bit 2 =  SoftIRQ context
2579 *  bit 3 =  normal context.
2580 *
2581 * This works because this is the order of contexts that can
2582 * preempt other contexts. A SoftIRQ never preempts an IRQ
2583 * context.
2584 *
2585 * When the context is determined, the corresponding bit is
2586 * checked and set (if it was set, then a recursion of that context
2587 * happened).
2588 *
2589 * On unlock, we need to clear this bit. To do so, just subtract
2590 * 1 from the current_context and AND it to itself.
2591 *
2592 * (binary)
2593 *  101 - 1 = 100
2594 *  101 & 100 = 100 (clearing bit zero)
2595 *
2596 *  1010 - 1 = 1001
2597 *  1010 & 1001 = 1000 (clearing bit 1)
2598 *
2599 * The least significant bit can be cleared this way, and it
2600 * just so happens that it is the same bit corresponding to
2601 * the current context.
2602 */
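
/*
 * A short walk-through: a tracepoint in normal task context sets bit 3
 * (current_context becomes 1000b).  An interrupt arrives and traces
 * too, setting bit 1 (1010b).  When the interrupt's event is committed,
 * 1010b & 1001b = 1000b clears only the IRQ bit, and the task's commit
 * later clears bit 3 the same way.  If the interrupt handler recursed
 * into the tracer a second time, bit 1 would already be set and
 * trace_recursive_lock() would reject the nested event.
 */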
2603
2604static __always_inline int
2605trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
2606{
2607	unsigned int val = cpu_buffer->current_context;
2608	int bit;
2609
2610	if (in_interrupt()) {
2611		if (in_nmi())
2612			bit = RB_CTX_NMI;
2613		else if (in_irq())
2614			bit = RB_CTX_IRQ;
2615		else
2616			bit = RB_CTX_SOFTIRQ;
2617	} else
2618		bit = RB_CTX_NORMAL;
2619
2620	if (unlikely(val & (1 << bit)))
2621		return 1;
2622
2623	val |= (1 << bit);
2624	cpu_buffer->current_context = val;
2625
2626	return 0;
2627}
2628
2629static __always_inline void
2630trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
2631{
2632	cpu_buffer->current_context &= cpu_buffer->current_context - 1;
2633}
2634
2635/**
2636 * ring_buffer_unlock_commit - commit a reserved event
2637 * @buffer: The buffer to commit to
2638 * @event: The event pointer to commit.
2639 *
2640 * This commits the data to the ring buffer, and releases any locks held.
2641 *
2642 * Must be paired with ring_buffer_lock_reserve.
2643 */
2644int ring_buffer_unlock_commit(struct ring_buffer *buffer,
2645			      struct ring_buffer_event *event)
2646{
2647	struct ring_buffer_per_cpu *cpu_buffer;
2648	int cpu = raw_smp_processor_id();
2649
2650	cpu_buffer = buffer->buffers[cpu];
2651
2652	rb_commit(cpu_buffer, event);
2653
2654	rb_wakeups(buffer, cpu_buffer);
2655
2656	trace_recursive_unlock(cpu_buffer);
2657
2658	preempt_enable_notrace();
2659
2660	return 0;
2661}
2662EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
2663
2664static noinline void
2665rb_handle_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2666		    struct rb_event_info *info)
2667{
2668	WARN_ONCE(info->delta > (1ULL << 59),
2669		  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
2670		  (unsigned long long)info->delta,
2671		  (unsigned long long)info->ts,
2672		  (unsigned long long)cpu_buffer->write_stamp,
2673		  sched_clock_stable() ? "" :
2674		  "If you just came from a suspend/resume,\n"
2675		  "please switch to the trace global clock:\n"
2676		  "  echo global > /sys/kernel/debug/tracing/trace_clock\n");
2677	info->add_timestamp = 1;
2678}
2679
2680static struct ring_buffer_event *
2681__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
2682		  struct rb_event_info *info)
2683{
2684	struct ring_buffer_event *event;
2685	struct buffer_page *tail_page;
2686	unsigned long tail, write;
2687
2688	/*
2689	 * If the time delta since the last event is too big to
2690	 * hold in the time field of the event, then we append a
2691	 * TIME EXTEND event ahead of the data event.
2692	 */
2693	if (unlikely(info->add_timestamp))
2694		info->length += RB_LEN_TIME_EXTEND;
2695
2696	/* Don't let the compiler play games with cpu_buffer->tail_page */
2697	tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
2698	write = local_add_return(info->length, &tail_page->write);
2699
2700	/* set write to only the index of the write */
2701	write &= RB_WRITE_MASK;
2702	tail = write - info->length;
2703
2704	/*
2705	 * If this is the first commit on the page, then it has the same
2706	 * timestamp as the page itself.
2707	 */
2708	if (!tail)
2709		info->delta = 0;
2710
2711	/* See if we shot past the end of this buffer page */
2712	if (unlikely(write > BUF_PAGE_SIZE))
2713		return rb_move_tail(cpu_buffer, tail, info);
2714
2715	/* We reserved something on the buffer */
2716
2717	event = __rb_page_index(tail_page, tail);
2718	kmemcheck_annotate_bitfield(event, bitfield);
2719	rb_update_event(cpu_buffer, event, info);
2720
2721	local_inc(&tail_page->entries);
2722
2723	/*
2724	 * If this is the first commit on the page, then update
2725	 * its timestamp.
2726	 */
2727	if (!tail)
2728		tail_page->page->time_stamp = info->ts;
2729
2730	/* account for these added bytes */
2731	local_add(info->length, &cpu_buffer->entries_bytes);
2732
2733	return event;
2734}
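
/*
 * For example (illustrative numbers): if the page's 'write' counter was
 * 100 and a 40-byte event is reserved, local_add_return() above returns
 * 140, so tail = 140 - 40 = 100 and the event is built at offset 100.
 * If the returned value had exceeded BUF_PAGE_SIZE, the reservation ran
 * off the end of the page and rb_move_tail() handles the slow path
 * instead.
 */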
2735
2736static struct ring_buffer_event *
2737rb_reserve_next_event(struct ring_buffer *buffer,
2738		      struct ring_buffer_per_cpu *cpu_buffer,
2739		      unsigned long length)
2740{
2741	struct ring_buffer_event *event;
2742	struct rb_event_info info;
2743	int nr_loops = 0;
2744	u64 diff;
2745
2746	rb_start_commit(cpu_buffer);
2747
2748#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2749	/*
2750	 * Due to the ability to swap a cpu buffer from a buffer
2751	 * it is possible it was swapped before we committed.
2752	 * (committing stops a swap). We check for it here and
2753	 * if it happened, we have to fail the write.
2754	 */
2755	barrier();
2756	if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
2757		local_dec(&cpu_buffer->committing);
2758		local_dec(&cpu_buffer->commits);
2759		return NULL;
2760	}
2761#endif
2762
2763	info.length = rb_calculate_event_length(length);
2764 again:
2765	info.add_timestamp = 0;
2766	info.delta = 0;
2767
2768	/*
2769	 * We allow for interrupts to reenter here and do a trace.
2770	 * If one does, it will cause this original code to loop
2771	 * back here. Even with heavy interrupts happening, this
2772	 * should only happen a few times in a row. If this happens
2773	 * 1000 times in a row, there must be either an interrupt
2774	 * storm or we have something buggy.
2775	 * Bail!
2776	 */
2777	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2778		goto out_fail;
2779
2780	info.ts = rb_time_stamp(cpu_buffer->buffer);
2781	diff = info.ts - cpu_buffer->write_stamp;
2782
2783	/* make sure this diff is calculated here */
2784	barrier();
2785
2786	/* Did the write stamp get updated already? */
2787	if (likely(info.ts >= cpu_buffer->write_stamp)) {
2788		info.delta = diff;
2789		if (unlikely(test_time_stamp(info.delta)))
2790			rb_handle_timestamp(cpu_buffer, &info);
2791	}
2792
2793	event = __rb_reserve_next(cpu_buffer, &info);
2794
2795	if (unlikely(PTR_ERR(event) == -EAGAIN)) {
2796		if (info.add_timestamp)
2797			info.length -= RB_LEN_TIME_EXTEND;
2798		goto again;
2799	}
2800
2801	if (!event)
2802		goto out_fail;
2803
2804	return event;
2805
2806 out_fail:
2807	rb_end_commit(cpu_buffer);
2808	return NULL;
2809}
2810
2811/**
2812 * ring_buffer_lock_reserve - reserve a part of the buffer
2813 * @buffer: the ring buffer to reserve from
2814 * @length: the length of the data to reserve (excluding event header)
2815 *
2816 * Returns a reserved event on the ring buffer to copy directly to.
2817 * The user of this interface will need to get the body to write into
2818 * and can use the ring_buffer_event_data() interface.
2819 *
2820 * The length is the length of the data needed, not the event length
2821 * which also includes the event header.
2822 *
2823 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
2824 * If NULL is returned, then nothing has been allocated or locked.
2825 */
2826struct ring_buffer_event *
2827ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
2828{
2829	struct ring_buffer_per_cpu *cpu_buffer;
2830	struct ring_buffer_event *event;
2831	int cpu;
2832
2833	/* If we are tracing schedule, we don't want to recurse */
2834	preempt_disable_notrace();
2835
2836	if (unlikely(atomic_read(&buffer->record_disabled)))
2837		goto out;
2838
2839	cpu = raw_smp_processor_id();
2840
2841	if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
2842		goto out;
2843
2844	cpu_buffer = buffer->buffers[cpu];
2845
2846	if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
2847		goto out;
2848
2849	if (unlikely(length > BUF_MAX_DATA_SIZE))
2850		goto out;
2851
2852	if (unlikely(trace_recursive_lock(cpu_buffer)))
2853		goto out;
2854
2855	event = rb_reserve_next_event(buffer, cpu_buffer, length);
2856	if (!event)
2857		goto out_unlock;
2858
2859	return event;
2860
2861 out_unlock:
2862	trace_recursive_unlock(cpu_buffer);
2863 out:
2864	preempt_enable_notrace();
2865	return NULL;
2866}
2867EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
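
/*
 * A rough sketch of how the reserve/commit pair is meant to be used
 * ('my_buffer' and the payload struct are only placeholders):
 *
 *	struct ring_buffer_event *event;
 *	struct my_payload *p;
 *
 *	event = ring_buffer_lock_reserve(my_buffer, sizeof(*p));
 *	if (!event)
 *		return;
 *	p = ring_buffer_event_data(event);
 *	p->value = 42;
 *	ring_buffer_unlock_commit(my_buffer, event);
 *
 * Preemption is disabled from the reserve until the commit, so the
 * commit must be done soon and on the same CPU.
 */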
2868
2869/*
2870 * Decrement the entries to the page that an event is on.
2871 * The event does not even need to exist, only the pointer
2872 * to the page it is on. This may only be called before the commit
2873 * takes place.
2874 */
2875static inline void
2876rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2877		   struct ring_buffer_event *event)
2878{
2879	unsigned long addr = (unsigned long)event;
2880	struct buffer_page *bpage = cpu_buffer->commit_page;
2881	struct buffer_page *start;
2882
2883	addr &= PAGE_MASK;
2884
2885	/* Do the likely case first */
2886	if (likely(bpage->page == (void *)addr)) {
2887		local_dec(&bpage->entries);
2888		return;
2889	}
2890
2891	/*
2892	 * Because the commit page may be on the reader page we
2893	 * start with the next page and check the end loop there.
2894	 */
2895	rb_inc_page(cpu_buffer, &bpage);
2896	start = bpage;
2897	do {
2898		if (bpage->page == (void *)addr) {
2899			local_dec(&bpage->entries);
2900			return;
2901		}
2902		rb_inc_page(cpu_buffer, &bpage);
2903	} while (bpage != start);
2904
2905	/* commit not part of this buffer?? */
2906	RB_WARN_ON(cpu_buffer, 1);
2907}
2908
2909/**
2910 * ring_buffer_commit_discard - discard an event that has not been committed
2911 * @buffer: the ring buffer
2912 * @event: non committed event to discard
2913 *
2914 * Sometimes an event that is in the ring buffer needs to be ignored.
2915 * This function lets the user discard an event in the ring buffer
2916 * and then that event will not be read later.
2917 *
2918 * This function only works if it is called before the item has been
2919 * committed. It will try to free the event from the ring buffer
2920 * if another event has not been added behind it.
2921 *
2922 * If another event has been added behind it, it will set the event
2923 * up as discarded, and perform the commit.
2924 *
2925 * If this function is called, do not call ring_buffer_unlock_commit on
2926 * the event.
2927 */
2928void ring_buffer_discard_commit(struct ring_buffer *buffer,
2929				struct ring_buffer_event *event)
2930{
2931	struct ring_buffer_per_cpu *cpu_buffer;
2932	int cpu;
2933
2934	/* The event is discarded regardless */
2935	rb_event_discard(event);
2936
2937	cpu = smp_processor_id();
2938	cpu_buffer = buffer->buffers[cpu];
2939
2940	/*
2941	 * This must only be called if the event has not been
2942	 * committed yet. Thus we can assume that preemption
2943	 * is still disabled.
2944	 */
2945	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
2946
2947	rb_decrement_entry(cpu_buffer, event);
2948	if (rb_try_to_discard(cpu_buffer, event))
2949		goto out;
2950
2951	/*
2952	 * The commit is still visible by the reader, so we
2953	 * must still update the timestamp.
2954	 */
2955	rb_update_write_stamp(cpu_buffer, event);
2956 out:
2957	rb_end_commit(cpu_buffer);
2958
2959	trace_recursive_unlock(cpu_buffer);
2960
2961	preempt_enable_notrace();
2962
2963}
2964EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
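
/*
 * Sketched usage ('my_buffer' is a placeholder): a caller that changes
 * its mind after reserving calls exactly one of the two commit paths:
 *
 *	event = ring_buffer_lock_reserve(my_buffer, size);
 *	if (event) {
 *		... fill in ring_buffer_event_data(event) ...
 *		if (drop_it)
 *			ring_buffer_discard_commit(my_buffer, event);
 *		else
 *			ring_buffer_unlock_commit(my_buffer, event);
 *	}
 */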
2965
2966/**
2967 * ring_buffer_write - write data to the buffer without reserving
2968 * @buffer: The ring buffer to write to.
2969 * @length: The length of the data being written (excluding the event header)
2970 * @data: The data to write to the buffer.
2971 *
2972 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
2973 * one function. If you already have the data to write to the buffer, it
2974 * may be easier to simply call this function.
2975 *
2976 * Note, like ring_buffer_lock_reserve, the length is the length of the data
2977 * and not the length of the event which would hold the header.
2978 */
2979int ring_buffer_write(struct ring_buffer *buffer,
2980		      unsigned long length,
2981		      void *data)
2982{
2983	struct ring_buffer_per_cpu *cpu_buffer;
2984	struct ring_buffer_event *event;
2985	void *body;
2986	int ret = -EBUSY;
2987	int cpu;
2988
2989	preempt_disable_notrace();
2990
2991	if (atomic_read(&buffer->record_disabled))
2992		goto out;
2993
2994	cpu = raw_smp_processor_id();
2995
2996	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2997		goto out;
2998
2999	cpu_buffer = buffer->buffers[cpu];
3000
3001	if (atomic_read(&cpu_buffer->record_disabled))
3002		goto out;
3003
3004	if (length > BUF_MAX_DATA_SIZE)
3005		goto out;
3006
3007	if (unlikely(trace_recursive_lock(cpu_buffer)))
3008		goto out;
3009
3010	event = rb_reserve_next_event(buffer, cpu_buffer, length);
3011	if (!event)
3012		goto out_unlock;
3013
3014	body = rb_event_data(event);
3015
3016	memcpy(body, data, length);
3017
3018	rb_commit(cpu_buffer, event);
3019
3020	rb_wakeups(buffer, cpu_buffer);
3021
3022	ret = 0;
3023
3024 out_unlock:
3025	trace_recursive_unlock(cpu_buffer);
3026
3027 out:
3028	preempt_enable_notrace();
3029
3030	return ret;
3031}
3032EXPORT_SYMBOL_GPL(ring_buffer_write);
3033
3034static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
3035{
3036	struct buffer_page *reader = cpu_buffer->reader_page;
3037	struct buffer_page *head = rb_set_head_page(cpu_buffer);
3038	struct buffer_page *commit = cpu_buffer->commit_page;
3039
3040	/* In case of error, head will be NULL */
3041	if (unlikely(!head))
3042		return true;
3043
3044	return reader->read == rb_page_commit(reader) &&
3045		(commit == reader ||
3046		 (commit == head &&
3047		  head->read == rb_page_commit(commit)));
3048}
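
/*
 * Put differently: the per-cpu buffer is empty when the reader page has
 * been fully consumed and either the commit is still on the reader
 * page, or the commit is on the head page and that page has been read
 * up to its commit as well.
 */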
3049
3050/**
3051 * ring_buffer_record_disable - stop all writes into the buffer
3052 * @buffer: The ring buffer to stop writes to.
3053 *
3054 * This prevents all writes to the buffer. Any attempt to write
3055 * to the buffer after this will fail and return NULL.
3056 *
3057 * The caller should call synchronize_sched() after this.
3058 */
3059void ring_buffer_record_disable(struct ring_buffer *buffer)
3060{
3061	atomic_inc(&buffer->record_disabled);
3062}
3063EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
3064
3065/**
3066 * ring_buffer_record_enable - enable writes to the buffer
3067 * @buffer: The ring buffer to enable writes
3068 *
3069 * Note, multiple disables will need the same number of enables
3070 * to truly enable the writing (much like preempt_disable).
3071 */
3072void ring_buffer_record_enable(struct ring_buffer *buffer)
3073{
3074	atomic_dec(&buffer->record_disabled);
3075}
3076EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
3077
3078/**
3079 * ring_buffer_record_off - stop all writes into the buffer
3080 * @buffer: The ring buffer to stop writes to.
3081 *
3082 * This prevents all writes to the buffer. Any attempt to write
3083 * to the buffer after this will fail and return NULL.
3084 *
3085 * This is different than ring_buffer_record_disable() as
3086 * it works like an on/off switch, whereas the disable() version
3087 * must be paired with an enable().
3088 */
3089void ring_buffer_record_off(struct ring_buffer *buffer)
3090{
3091	unsigned int rd;
3092	unsigned int new_rd;
3093
3094	do {
3095		rd = atomic_read(&buffer->record_disabled);
3096		new_rd = rd | RB_BUFFER_OFF;
3097	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3098}
3099EXPORT_SYMBOL_GPL(ring_buffer_record_off);
3100
3101/**
3102 * ring_buffer_record_on - restart writes into the buffer
3103 * @buffer: The ring buffer to start writes to.
3104 *
3105 * This enables all writes to the buffer that was disabled by
3106 * ring_buffer_record_off().
3107 *
3108 * This is different than ring_buffer_record_enable() as
3109 * it works like an on/off switch, whereas the enable() version
3110 * must be paired with a disable().
3111 */
3112void ring_buffer_record_on(struct ring_buffer *buffer)
3113{
3114	unsigned int rd;
3115	unsigned int new_rd;
3116
3117	do {
3118		rd = atomic_read(&buffer->record_disabled);
3119		new_rd = rd & ~RB_BUFFER_OFF;
3120	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3121}
3122EXPORT_SYMBOL_GPL(ring_buffer_record_on);
3123
3124/**
3125 * ring_buffer_record_is_on - return true if the ring buffer can write
3126 * @buffer: The ring buffer to see if write is enabled
3127 *
3128 * Returns true if the ring buffer is in a state that it accepts writes.
3129 */
3130int ring_buffer_record_is_on(struct ring_buffer *buffer)
3131{
3132	return !atomic_read(&buffer->record_disabled);
3133}
3134
3135/**
3136 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
3137 * @buffer: The ring buffer to stop writes to.
3138 * @cpu: The CPU buffer to stop
3139 *
3140 * This prevents all writes to the buffer. Any attempt to write
3141 * to the buffer after this will fail and return NULL.
3142 *
3143 * The caller should call synchronize_sched() after this.
3144 */
3145void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
3146{
3147	struct ring_buffer_per_cpu *cpu_buffer;
3148
3149	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3150		return;
3151
3152	cpu_buffer = buffer->buffers[cpu];
3153	atomic_inc(&cpu_buffer->record_disabled);
3154}
3155EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
3156
3157/**
3158 * ring_buffer_record_enable_cpu - enable writes to the buffer
3159 * @buffer: The ring buffer to enable writes
3160 * @cpu: The CPU to enable.
3161 *
3162 * Note, multiple disables will need the same number of enables
3163 * to truly enable the writing (much like preempt_disable).
3164 */
3165void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
3166{
3167	struct ring_buffer_per_cpu *cpu_buffer;
3168
3169	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3170		return;
3171
3172	cpu_buffer = buffer->buffers[cpu];
3173	atomic_dec(&cpu_buffer->record_disabled);
3174}
3175EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
3176
3177/*
3178 * The total entries in the ring buffer is the running counter
3179 * of entries entered into the ring buffer, minus the sum of
3180 * the entries read from the ring buffer and the number of
3181 * entries that were overwritten.
3182 */
3183static inline unsigned long
3184rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
3185{
3186	return local_read(&cpu_buffer->entries) -
3187		(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
3188}
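
/*
 * For example: 100 events written, 15 overwritten by the writer
 * wrapping around, and 25 already consumed by readers leaves
 * 100 - (15 + 25) = 60 entries still sitting in the buffer.
 */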
3189
3190/**
3191 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
3192 * @buffer: The ring buffer
3193 * @cpu: The per CPU buffer to read from.
3194 */
3195u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
3196{
3197	unsigned long flags;
3198	struct ring_buffer_per_cpu *cpu_buffer;
3199	struct buffer_page *bpage;
3200	u64 ret = 0;
3201
3202	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3203		return 0;
3204
3205	cpu_buffer = buffer->buffers[cpu];
3206	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3207	/*
3208	 * if the tail is on reader_page, oldest time stamp is on the reader
3209	 * page
3210	 */
3211	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
3212		bpage = cpu_buffer->reader_page;
3213	else
3214		bpage = rb_set_head_page(cpu_buffer);
3215	if (bpage)
3216		ret = bpage->page->time_stamp;
3217	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3218
3219	return ret;
3220}
3221EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
3222
3223/**
3224 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
3225 * @buffer: The ring buffer
3226 * @cpu: The per CPU buffer to read from.
3227 */
3228unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
3229{
3230	struct ring_buffer_per_cpu *cpu_buffer;
3231	unsigned long ret;
3232
3233	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3234		return 0;
3235
3236	cpu_buffer = buffer->buffers[cpu];
3237	ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
3238
3239	return ret;
3240}
3241EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
3242
3243/**
3244 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
3245 * @buffer: The ring buffer
3246 * @cpu: The per CPU buffer to get the entries from.
3247 */
3248unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
3249{
3250	struct ring_buffer_per_cpu *cpu_buffer;
3251
3252	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3253		return 0;
3254
3255	cpu_buffer = buffer->buffers[cpu];
3256
3257	return rb_num_of_entries(cpu_buffer);
3258}
3259EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
3260
3261/**
3262 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
3263 * buffer wrapping around (only if RB_FL_OVERWRITE is on).
3264 * @buffer: The ring buffer
3265 * @cpu: The per CPU buffer to get the number of overruns from
3266 */
3267unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
3268{
3269	struct ring_buffer_per_cpu *cpu_buffer;
3270	unsigned long ret;
3271
3272	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3273		return 0;
3274
3275	cpu_buffer = buffer->buffers[cpu];
3276	ret = local_read(&cpu_buffer->overrun);
3277
3278	return ret;
3279}
3280EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
3281
3282/**
3283 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
3284 * commits failing due to the buffer wrapping around while there are uncommitted
3285 * events, such as during an interrupt storm.
3286 * @buffer: The ring buffer
3287 * @cpu: The per CPU buffer to get the number of overruns from
3288 */
3289unsigned long
3290ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
3291{
3292	struct ring_buffer_per_cpu *cpu_buffer;
3293	unsigned long ret;
3294
3295	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3296		return 0;
3297
3298	cpu_buffer = buffer->buffers[cpu];
3299	ret = local_read(&cpu_buffer->commit_overrun);
3300
3301	return ret;
3302}
3303EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
3304
3305/**
3306 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
3307 * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
3308 * @buffer: The ring buffer
3309 * @cpu: The per CPU buffer to get the number of overruns from
3310 */
3311unsigned long
3312ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
3313{
3314	struct ring_buffer_per_cpu *cpu_buffer;
3315	unsigned long ret;
3316
3317	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3318		return 0;
3319
3320	cpu_buffer = buffer->buffers[cpu];
3321	ret = local_read(&cpu_buffer->dropped_events);
3322
3323	return ret;
3324}
3325EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
3326
3327/**
3328 * ring_buffer_read_events_cpu - get the number of events successfully read
3329 * @buffer: The ring buffer
3330 * @cpu: The per CPU buffer to get the number of events read
3331 */
3332unsigned long
3333ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
3334{
3335	struct ring_buffer_per_cpu *cpu_buffer;
3336
3337	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3338		return 0;
3339
3340	cpu_buffer = buffer->buffers[cpu];
3341	return cpu_buffer->read;
3342}
3343EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
3344
3345/**
3346 * ring_buffer_entries - get the number of entries in a buffer
3347 * @buffer: The ring buffer
3348 *
3349 * Returns the total number of entries in the ring buffer
3350 * (all CPU entries)
3351 */
3352unsigned long ring_buffer_entries(struct ring_buffer *buffer)
3353{
3354	struct ring_buffer_per_cpu *cpu_buffer;
3355	unsigned long entries = 0;
3356	int cpu;
3357
3358	/* if you care about this being correct, lock the buffer */
3359	for_each_buffer_cpu(buffer, cpu) {
3360		cpu_buffer = buffer->buffers[cpu];
3361		entries += rb_num_of_entries(cpu_buffer);
3362	}
3363
3364	return entries;
3365}
3366EXPORT_SYMBOL_GPL(ring_buffer_entries);
3367
3368/**
3369 * ring_buffer_overruns - get the number of overruns in buffer
3370 * @buffer: The ring buffer
3371 *
3372 * Returns the total number of overruns in the ring buffer
3373 * (all CPU entries)
3374 */
3375unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
3376{
3377	struct ring_buffer_per_cpu *cpu_buffer;
3378	unsigned long overruns = 0;
3379	int cpu;
3380
3381	/* if you care about this being correct, lock the buffer */
3382	for_each_buffer_cpu(buffer, cpu) {
3383		cpu_buffer = buffer->buffers[cpu];
3384		overruns += local_read(&cpu_buffer->overrun);
3385	}
3386
3387	return overruns;
3388}
3389EXPORT_SYMBOL_GPL(ring_buffer_overruns);
3390
3391static void rb_iter_reset(struct ring_buffer_iter *iter)
3392{
3393	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3394
3395	/* Iterator usage is expected to have record disabled */
3396	iter->head_page = cpu_buffer->reader_page;
3397	iter->head = cpu_buffer->reader_page->read;
3398
3399	iter->cache_reader_page = iter->head_page;
3400	iter->cache_read = cpu_buffer->read;
3401
3402	if (iter->head)
3403		iter->read_stamp = cpu_buffer->read_stamp;
3404	else
3405		iter->read_stamp = iter->head_page->page->time_stamp;
3406}
3407
3408/**
3409 * ring_buffer_iter_reset - reset an iterator
3410 * @iter: The iterator to reset
3411 *
3412 * Resets the iterator, so that it will start from the beginning
3413 * again.
3414 */
3415void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
3416{
3417	struct ring_buffer_per_cpu *cpu_buffer;
3418	unsigned long flags;
3419
3420	if (!iter)
3421		return;
3422
3423	cpu_buffer = iter->cpu_buffer;
3424
3425	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3426	rb_iter_reset(iter);
3427	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3428}
3429EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
3430
3431/**
3432 * ring_buffer_iter_empty - check if an iterator has no more to read
3433 * @iter: The iterator to check
3434 */
3435int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
3436{
3437	struct ring_buffer_per_cpu *cpu_buffer;
3438
3439	cpu_buffer = iter->cpu_buffer;
3440
3441	return iter->head_page == cpu_buffer->commit_page &&
3442		iter->head == rb_commit_index(cpu_buffer);
3443}
3444EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
3445
3446static void
3447rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
3448		     struct ring_buffer_event *event)
3449{
3450	u64 delta;
3451
3452	switch (event->type_len) {
3453	case RINGBUF_TYPE_PADDING:
3454		return;
3455
3456	case RINGBUF_TYPE_TIME_EXTEND:
3457		delta = event->array[0];
3458		delta <<= TS_SHIFT;
3459		delta += event->time_delta;
3460		cpu_buffer->read_stamp += delta;
3461		return;
3462
3463	case RINGBUF_TYPE_TIME_STAMP:
3464		/* FIXME: not implemented */
3465		return;
3466
3467	case RINGBUF_TYPE_DATA:
3468		cpu_buffer->read_stamp += event->time_delta;
3469		return;
3470
3471	default:
3472		BUG();
3473	}
3474	return;
3475}
3476
3477static void
3478rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
3479			  struct ring_buffer_event *event)
3480{
3481	u64 delta;
3482
3483	switch (event->type_len) {
3484	case RINGBUF_TYPE_PADDING:
3485		return;
3486
3487	case RINGBUF_TYPE_TIME_EXTEND:
3488		delta = event->array[0];
3489		delta <<= TS_SHIFT;
3490		delta += event->time_delta;
3491		iter->read_stamp += delta;
3492		return;
3493
3494	case RINGBUF_TYPE_TIME_STAMP:
3495		/* FIXME: not implemented */
3496		return;
3497
3498	case RINGBUF_TYPE_DATA:
3499		iter->read_stamp += event->time_delta;
3500		return;
3501
3502	default:
3503		BUG();
3504	}
3505	return;
3506}
3507
3508static struct buffer_page *
3509rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3510{
3511	struct buffer_page *reader = NULL;
3512	unsigned long overwrite;
3513	unsigned long flags;
3514	int nr_loops = 0;
3515	int ret;
3516
3517	local_irq_save(flags);
3518	arch_spin_lock(&cpu_buffer->lock);
3519
3520 again:
3521	/*
3522	 * This should normally only loop twice. But because the
3523	 * start of the reader inserts an empty page, it causes
3524	 * a case where we will loop three times. There should be no
3525	 * reason to loop four times (that I know of).
3526	 */
3527	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
3528		reader = NULL;
3529		goto out;
3530	}
3531
3532	reader = cpu_buffer->reader_page;
3533
3534	/* If there's more to read, return this page */
3535	if (cpu_buffer->reader_page->read < rb_page_size(reader))
3536		goto out;
3537
3538	/* Never should we have an index greater than the size */
3539	if (RB_WARN_ON(cpu_buffer,
3540		       cpu_buffer->reader_page->read > rb_page_size(reader)))
3541		goto out;
3542
3543	/* check if we caught up to the tail */
3544	reader = NULL;
3545	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
3546		goto out;
3547
3548	/* Don't bother swapping if the ring buffer is empty */
3549	if (rb_num_of_entries(cpu_buffer) == 0)
3550		goto out;
3551
3552	/*
3553	 * Reset the reader page to size zero.
3554	 */
3555	local_set(&cpu_buffer->reader_page->write, 0);
3556	local_set(&cpu_buffer->reader_page->entries, 0);
3557	local_set(&cpu_buffer->reader_page->page->commit, 0);
3558	cpu_buffer->reader_page->real_end = 0;
3559
3560 spin:
3561	/*
3562	 * Splice the empty reader page into the list around the head.
3563	 */
3564	reader = rb_set_head_page(cpu_buffer);
3565	if (!reader)
3566		goto out;
3567	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
3568	cpu_buffer->reader_page->list.prev = reader->list.prev;
3569
3570	/*
3571	 * cpu_buffer->pages just needs to point to the buffer, it
3572	 *  has no specific buffer page to point to. Let's move it out
3573	 *  of our way so we don't accidentally swap it.
3574	 */
3575	cpu_buffer->pages = reader->list.prev;
3576
3577	/* The reader page will be pointing to the new head */
3578	rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
3579
3580	/*
3581	 * We want to make sure we read the overruns after we set up our
3582	 * pointers to the next object. The writer side does a
3583	 * cmpxchg to cross pages which acts as the mb on the writer
3584	 * side. Note, the reader will constantly fail the swap
3585	 * while the writer is updating the pointers, so this
3586	 * guarantees that the overwrite recorded here is the one we
3587	 * want to compare with the last_overrun.
3588	 */
3589	smp_mb();
3590	overwrite = local_read(&(cpu_buffer->overrun));
3591
3592	/*
3593	 * Here's the tricky part.
3594	 *
3595	 * We need to move the pointer past the header page.
3596	 * But we can only do that if a writer is not currently
3597	 * moving it. The page before the header page has the
3598	 * flag bit '1' set if it is pointing to the page we want.
3599	 * But if the writer is in the process of moving it,
3600	 * then it will be '2' or already moved '0'.
3601	 */
3602
3603	ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
3604
3605	/*
3606	 * If we did not convert it, then we must try again.
3607	 */
3608	if (!ret)
3609		goto spin;
3610
3611	/*
3612	 * Yeah! We succeeded in replacing the page.
3613	 *
3614	 * Now make the new head point back to the reader page.
3615	 */
3616	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
3617	rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
3618
3619	/* Finally update the reader page to the new head */
3620	cpu_buffer->reader_page = reader;
3621	cpu_buffer->reader_page->read = 0;
3622
3623	if (overwrite != cpu_buffer->last_overrun) {
3624		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
3625		cpu_buffer->last_overrun = overwrite;
3626	}
3627
3628	goto again;
3629
3630 out:
3631	/* Update the read_stamp on the first event */
3632	if (reader && reader->read == 0)
3633		cpu_buffer->read_stamp = reader->page->time_stamp;
3634
3635	arch_spin_unlock(&cpu_buffer->lock);
3636	local_irq_restore(flags);
3637
3638	return reader;
3639}
3640
3641static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
3642{
3643	struct ring_buffer_event *event;
3644	struct buffer_page *reader;
3645	unsigned length;
3646
3647	reader = rb_get_reader_page(cpu_buffer);
3648
3649	/* This function should not be called when buffer is empty */
3650	if (RB_WARN_ON(cpu_buffer, !reader))
3651		return;
3652
3653	event = rb_reader_event(cpu_buffer);
3654
3655	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
3656		cpu_buffer->read++;
3657
3658	rb_update_read_stamp(cpu_buffer, event);
3659
3660	length = rb_event_length(event);
3661	cpu_buffer->reader_page->read += length;
3662}
3663
3664static void rb_advance_iter(struct ring_buffer_iter *iter)
3665{
3666	struct ring_buffer_per_cpu *cpu_buffer;
3667	struct ring_buffer_event *event;
3668	unsigned length;
3669
3670	cpu_buffer = iter->cpu_buffer;
3671
3672	/*
3673	 * Check if we are at the end of the buffer.
3674	 */
3675	if (iter->head >= rb_page_size(iter->head_page)) {
3676		/* discarded commits can make the page empty */
3677		if (iter->head_page == cpu_buffer->commit_page)
3678			return;
3679		rb_inc_iter(iter);
3680		return;
3681	}
3682
3683	event = rb_iter_head_event(iter);
3684
3685	length = rb_event_length(event);
3686
3687	/*
3688	 * This should not be called to advance the header if we are
3689	 * at the tail of the buffer.
3690	 */
3691	if (RB_WARN_ON(cpu_buffer,
3692		       (iter->head_page == cpu_buffer->commit_page) &&
3693		       (iter->head + length > rb_commit_index(cpu_buffer))))
3694		return;
3695
3696	rb_update_iter_read_stamp(iter, event);
3697
3698	iter->head += length;
3699
3700	/* check for end of page padding */
3701	if ((iter->head >= rb_page_size(iter->head_page)) &&
3702	    (iter->head_page != cpu_buffer->commit_page))
3703		rb_inc_iter(iter);
3704}
3705
3706static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
3707{
3708	return cpu_buffer->lost_events;
3709}
3710
3711static struct ring_buffer_event *
3712rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
3713	       unsigned long *lost_events)
3714{
3715	struct ring_buffer_event *event;
3716	struct buffer_page *reader;
3717	int nr_loops = 0;
3718
3719 again:
3720	/*
3721	 * We repeat when a time extend is encountered.
3722	 * Since the time extend is always attached to a data event,
3723	 * we should never loop more than once.
3724	 * (We never hit the following condition more than twice).
3725	 */
3726	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3727		return NULL;
3728
3729	reader = rb_get_reader_page(cpu_buffer);
3730	if (!reader)
3731		return NULL;
3732
3733	event = rb_reader_event(cpu_buffer);
3734
3735	switch (event->type_len) {
3736	case RINGBUF_TYPE_PADDING:
3737		if (rb_null_event(event))
3738			RB_WARN_ON(cpu_buffer, 1);
3739		/*
3740		 * Because the writer could be discarding every
3741		 * event it creates (which would probably be bad),
3742		 * if we were to go back to "again" then we may never
3743		 * catch up, and will trigger the warn on, or lock
3744		 * the box. Return the padding, and we will release
3745		 * the current locks, and try again.
3746		 */
3747		return event;
3748
3749	case RINGBUF_TYPE_TIME_EXTEND:
3750		/* Internal data, OK to advance */
3751		rb_advance_reader(cpu_buffer);
3752		goto again;
3753
3754	case RINGBUF_TYPE_TIME_STAMP:
3755		/* FIXME: not implemented */
3756		rb_advance_reader(cpu_buffer);
3757		goto again;
3758
3759	case RINGBUF_TYPE_DATA:
3760		if (ts) {
3761			*ts = cpu_buffer->read_stamp + event->time_delta;
3762			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3763							 cpu_buffer->cpu, ts);
3764		}
3765		if (lost_events)
3766			*lost_events = rb_lost_events(cpu_buffer);
3767		return event;
3768
3769	default:
3770		BUG();
3771	}
3772
3773	return NULL;
3774}
3775EXPORT_SYMBOL_GPL(ring_buffer_peek);
3776
3777static struct ring_buffer_event *
3778rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3779{
3780	struct ring_buffer *buffer;
3781	struct ring_buffer_per_cpu *cpu_buffer;
3782	struct ring_buffer_event *event;
3783	int nr_loops = 0;
3784
3785	cpu_buffer = iter->cpu_buffer;
3786	buffer = cpu_buffer->buffer;
3787
3788	/*
3789	 * Check if someone performed a consuming read to
3790	 * the buffer. A consuming read invalidates the iterator
3791	 * and we need to reset the iterator in this case.
3792	 */
3793	if (unlikely(iter->cache_read != cpu_buffer->read ||
3794		     iter->cache_reader_page != cpu_buffer->reader_page))
3795		rb_iter_reset(iter);
3796
3797 again:
3798	if (ring_buffer_iter_empty(iter))
3799		return NULL;
3800
3801	/*
3802	 * We repeat when a time extend is encountered or we hit
3803	 * the end of the page. Since the time extend is always attached
3804	 * to a data event, we should never loop more than three times.
3805	 * Once for going to next page, once on time extend, and
3806	 * finally once to get the event.
3807	 * (We never hit the following condition more than thrice).
3808	 */
3809	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3))
3810		return NULL;
3811
3812	if (rb_per_cpu_empty(cpu_buffer))
3813		return NULL;
3814
3815	if (iter->head >= rb_page_size(iter->head_page)) {
3816		rb_inc_iter(iter);
3817		goto again;
3818	}
3819
3820	event = rb_iter_head_event(iter);
3821
3822	switch (event->type_len) {
3823	case RINGBUF_TYPE_PADDING:
3824		if (rb_null_event(event)) {
3825			rb_inc_iter(iter);
3826			goto again;
3827		}
3828		rb_advance_iter(iter);
3829		return event;
3830
3831	case RINGBUF_TYPE_TIME_EXTEND:
3832		/* Internal data, OK to advance */
3833		rb_advance_iter(iter);
3834		goto again;
3835
3836	case RINGBUF_TYPE_TIME_STAMP:
3837		/* FIXME: not implemented */
3838		rb_advance_iter(iter);
3839		goto again;
3840
3841	case RINGBUF_TYPE_DATA:
3842		if (ts) {
3843			*ts = iter->read_stamp + event->time_delta;
3844			ring_buffer_normalize_time_stamp(buffer,
3845							 cpu_buffer->cpu, ts);
3846		}
3847		return event;
3848
3849	default:
3850		BUG();
3851	}
3852
3853	return NULL;
3854}
3855EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
3856
3857static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
3858{
3859	if (likely(!in_nmi())) {
3860		raw_spin_lock(&cpu_buffer->reader_lock);
3861		return true;
3862	}
3863
3864	/*
3865	 * If an NMI die dumps out the content of the ring buffer,
3866	 * trylock must be used to prevent a deadlock if the NMI
3867	 * preempted a task that holds the ring buffer locks. If
3868	 * we get the lock then all is fine; if not, then continue
3869	 * to do the read, but this can corrupt the ring buffer,
3870	 * so it must be permanently disabled from future writes.
3871	 * Reading from NMI is a one-shot deal.
3872	 */
3873	if (raw_spin_trylock(&cpu_buffer->reader_lock))
3874		return true;
3875
3876	/* Continue without locking, but disable the ring buffer */
3877	atomic_inc(&cpu_buffer->record_disabled);
3878	return false;
3879}
3880
3881static inline void
3882rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
3883{
3884	if (likely(locked))
3885		raw_spin_unlock(&cpu_buffer->reader_lock);
3886	return;
3887}
3888
3889/**
3890 * ring_buffer_peek - peek at the next event to be read
3891 * @buffer: The ring buffer to read
3892 * @cpu: The cpu to peek at
3893 * @ts: The timestamp counter of this event.
3894 * @lost_events: a variable to store if events were lost (may be NULL)
3895 *
3896 * This will return the event that will be read next, but does
3897 * not consume the data.
3898 */
3899struct ring_buffer_event *
3900ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
3901		 unsigned long *lost_events)
3902{
3903	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3904	struct ring_buffer_event *event;
3905	unsigned long flags;
3906	bool dolock;
3907
3908	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3909		return NULL;
3910
3911 again:
3912	local_irq_save(flags);
3913	dolock = rb_reader_lock(cpu_buffer);
3914	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3915	if (event && event->type_len == RINGBUF_TYPE_PADDING)
3916		rb_advance_reader(cpu_buffer);
3917	rb_reader_unlock(cpu_buffer, dolock);
3918	local_irq_restore(flags);
3919
3920	if (event && event->type_len == RINGBUF_TYPE_PADDING)
3921		goto again;
3922
3923	return event;
3924}
3925
3926/**
3927 * ring_buffer_iter_peek - peek at the next event to be read
3928 * @iter: The ring buffer iterator
3929 * @ts: The timestamp counter of this event.
3930 *
3931 * This will return the event that will be read next, but does
3932 * not increment the iterator.
3933 */
3934struct ring_buffer_event *
3935ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3936{
3937	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3938	struct ring_buffer_event *event;
3939	unsigned long flags;
3940
3941 again:
3942	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3943	event = rb_iter_peek(iter, ts);
3944	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3945
3946	if (event && event->type_len == RINGBUF_TYPE_PADDING)
3947		goto again;
3948
3949	return event;
3950}
3951
3952/**
3953 * ring_buffer_consume - return an event and consume it
3954 * @buffer: The ring buffer to get the next event from
3955 * @cpu: the cpu to read the buffer from
3956 * @ts: a variable to store the timestamp (may be NULL)
3957 * @lost_events: a variable to store if events were lost (may be NULL)
3958 *
3959 * Returns the next event in the ring buffer, and that event is consumed.
3960 * Meaning that sequential reads will keep returning a different event,
3961 * and eventually empty the ring buffer if the producer is slower.
3962 */
3963struct ring_buffer_event *
3964ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
3965		    unsigned long *lost_events)
3966{
3967	struct ring_buffer_per_cpu *cpu_buffer;
3968	struct ring_buffer_event *event = NULL;
3969	unsigned long flags;
3970	bool dolock;
3971
3972 again:
3973	/* might be called in atomic */
3974	preempt_disable();
3975
3976	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3977		goto out;
3978
3979	cpu_buffer = buffer->buffers[cpu];
3980	local_irq_save(flags);
3981	dolock = rb_reader_lock(cpu_buffer);
3982
3983	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3984	if (event) {
3985		cpu_buffer->lost_events = 0;
3986		rb_advance_reader(cpu_buffer);
3987	}
3988
3989	rb_reader_unlock(cpu_buffer, dolock);
3990	local_irq_restore(flags);
3991
3992 out:
3993	preempt_enable();
3994
3995	if (event && event->type_len == RINGBUF_TYPE_PADDING)
3996		goto again;
3997
3998	return event;
3999}
4000EXPORT_SYMBOL_GPL(ring_buffer_consume);
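
/*
 * Usage sketch, not part of this file: a minimal consuming-read loop,
 * assuming the caller already holds a valid struct ring_buffer.  Each
 * successful ring_buffer_consume() call returns and consumes one event;
 * ring_buffer_event_data() then yields the payload the writer reserved.
 * The ex_ function name is hypothetical.
 */
#if 0	/* example only, never compiled */
static void ex_drain_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	unsigned long lost;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
		void *data = ring_buffer_event_data(event);

		if (lost)
			pr_info("cpu%d: %lu events lost\n", cpu, lost);
		/* process 'data', ring_buffer_event_length(event) bytes */
		(void)data;
	}
}
#endif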
4001
4002/**
4003 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
4004 * @buffer: The ring buffer to read from
4005 * @cpu: The cpu buffer to iterate over
4006 *
4007 * This performs the initial preparations necessary to iterate
4008 * through the buffer.  Memory is allocated, buffer recording
4009 * is disabled, and the iterator pointer is returned to the caller.
4010 *
4011 * Disabling buffer recording prevents the reading from being
4012 * corrupted. This is not a consuming read, so a producer is not
4013 * expected.
4014 *
4015 * After a sequence of ring_buffer_read_prepare calls, the user is
4016 * expected to make at least one call to ring_buffer_read_prepare_sync.
4017 * Afterwards, ring_buffer_read_start is invoked to get things going
4018 * for real.
4019 *
4020 * This overall must be paired with ring_buffer_read_finish.
4021 */
4022struct ring_buffer_iter *
4023ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
4024{
4025	struct ring_buffer_per_cpu *cpu_buffer;
4026	struct ring_buffer_iter *iter;
4027
4028	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4029		return NULL;
4030
4031	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
4032	if (!iter)
4033		return NULL;
4034
4035	cpu_buffer = buffer->buffers[cpu];
4036
4037	iter->cpu_buffer = cpu_buffer;
4038
4039	atomic_inc(&buffer->resize_disabled);
4040	atomic_inc(&cpu_buffer->record_disabled);
4041
4042	return iter;
4043}
4044EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
4045
4046/**
4047 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
4048 *
4049 * All previously invoked ring_buffer_read_prepare calls to prepare
4050 * iterators will be synchronized.  Afterwards, ring_buffer_read_start
4051 * calls on those iterators are allowed.
4052 */
4053void
4054ring_buffer_read_prepare_sync(void)
4055{
4056	synchronize_sched();
4057}
4058EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
4059
4060/**
4061 * ring_buffer_read_start - start a non consuming read of the buffer
4062 * @iter: The iterator returned by ring_buffer_read_prepare
4063 *
4064 * This finalizes the startup of an iteration through the buffer.
4065 * The iterator comes from a call to ring_buffer_read_prepare and
4066 * an intervening ring_buffer_read_prepare_sync must have been
4067 * performed.
4068 *
4069 * Must be paired with ring_buffer_read_finish.
4070 */
4071void
4072ring_buffer_read_start(struct ring_buffer_iter *iter)
4073{
4074	struct ring_buffer_per_cpu *cpu_buffer;
4075	unsigned long flags;
4076
4077	if (!iter)
4078		return;
4079
4080	cpu_buffer = iter->cpu_buffer;
4081
4082	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4083	arch_spin_lock(&cpu_buffer->lock);
4084	rb_iter_reset(iter);
4085	arch_spin_unlock(&cpu_buffer->lock);
4086	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4087}
4088EXPORT_SYMBOL_GPL(ring_buffer_read_start);
4089
4090/**
4091 * ring_buffer_read_finish - finish reading the iterator of the buffer
4092 * @iter: The iterator retrieved by ring_buffer_start
4093 *
4094 * This re-enables the recording to the buffer, and frees the
4095 * iterator.
4096 */
4097void
4098ring_buffer_read_finish(struct ring_buffer_iter *iter)
4099{
4100	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4101	unsigned long flags;
4102
4103	/*
4104	 * Ring buffer is disabled from recording, here's a good place
4105	 * to check the integrity of the ring buffer.
4106	 * Must prevent readers from trying to read, as the check
4107	 * clears the HEAD page and readers require it.
4108	 */
4109	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4110	rb_check_pages(cpu_buffer);
4111	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4112
4113	atomic_dec(&cpu_buffer->record_disabled);
4114	atomic_dec(&cpu_buffer->buffer->resize_disabled);
4115	kfree(iter);
4116}
4117EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
4118
4119/**
4120 * ring_buffer_read - read the next item in the ring buffer by the iterator
4121 * @iter: The ring buffer iterator
4122 * @ts: The time stamp of the event read.
4123 *
4124 * This reads the next event in the ring buffer and increments the iterator.
4125 */
4126struct ring_buffer_event *
4127ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
4128{
4129	struct ring_buffer_event *event;
4130	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4131	unsigned long flags;
4132
4133	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4134 again:
4135	event = rb_iter_peek(iter, ts);
4136	if (!event)
4137		goto out;
4138
4139	if (event->type_len == RINGBUF_TYPE_PADDING)
4140		goto again;
4141
4142	rb_advance_iter(iter);
4143 out:
4144	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4145
4146	return event;
4147}
4148EXPORT_SYMBOL_GPL(ring_buffer_read);
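
/*
 * Usage sketch, not part of this file: a non-consuming read of one CPU
 * buffer with the iterator API documented above.  The prepare ->
 * prepare_sync -> read_start order is required, and read_finish
 * re-enables recording and frees the iterator.  The ex_ function name
 * is hypothetical.
 */
#if 0	/* example only, never compiled */
static void ex_dump_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_prepare(buffer, cpu);
	if (!iter)
		return;
	ring_buffer_read_prepare_sync();
	ring_buffer_read_start(iter);

	while ((event = ring_buffer_read(iter, &ts)))
		pr_info("event at %llu, %u bytes\n",
			(unsigned long long)ts,
			ring_buffer_event_length(event));

	ring_buffer_read_finish(iter);
}
#endif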
4149
4150/**
4151 * ring_buffer_size - return the size of the ring buffer (in bytes)
4152 * @buffer: The ring buffer.
4153 * @buffer: The ring buffer.
 * @cpu: The per CPU buffer to get the size of.
4154unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
4155{
4156	/*
4157	 * Earlier, this method returned
4158	 *	BUF_PAGE_SIZE * buffer->nr_pages
4159	 * Since the nr_pages field is now removed, we have converted this to
4160	 * return the per cpu buffer value.
4161	 */
4162	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4163		return 0;
4164
4165	return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
4166}
4167EXPORT_SYMBOL_GPL(ring_buffer_size);
4168
4169static void
4170rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
4171{
4172	rb_head_page_deactivate(cpu_buffer);
4173
4174	cpu_buffer->head_page
4175		= list_entry(cpu_buffer->pages, struct buffer_page, list);
4176	local_set(&cpu_buffer->head_page->write, 0);
4177	local_set(&cpu_buffer->head_page->entries, 0);
4178	local_set(&cpu_buffer->head_page->page->commit, 0);
4179
4180	cpu_buffer->head_page->read = 0;
4181
4182	cpu_buffer->tail_page = cpu_buffer->head_page;
4183	cpu_buffer->commit_page = cpu_buffer->head_page;
4184
4185	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
4186	INIT_LIST_HEAD(&cpu_buffer->new_pages);
4187	local_set(&cpu_buffer->reader_page->write, 0);
4188	local_set(&cpu_buffer->reader_page->entries, 0);
4189	local_set(&cpu_buffer->reader_page->page->commit, 0);
4190	cpu_buffer->reader_page->read = 0;
4191
4192	local_set(&cpu_buffer->entries_bytes, 0);
4193	local_set(&cpu_buffer->overrun, 0);
4194	local_set(&cpu_buffer->commit_overrun, 0);
4195	local_set(&cpu_buffer->dropped_events, 0);
4196	local_set(&cpu_buffer->entries, 0);
4197	local_set(&cpu_buffer->committing, 0);
4198	local_set(&cpu_buffer->commits, 0);
4199	cpu_buffer->read = 0;
4200	cpu_buffer->read_bytes = 0;
4201
4202	cpu_buffer->write_stamp = 0;
4203	cpu_buffer->read_stamp = 0;
4204
4205	cpu_buffer->lost_events = 0;
4206	cpu_buffer->last_overrun = 0;
4207
4208	rb_head_page_activate(cpu_buffer);
4209}
4210
4211/**
4212 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
4213 * @buffer: The ring buffer to reset a per cpu buffer of
4214 * @cpu: The CPU buffer to be reset
4215 */
4216void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
4217{
4218	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4219	unsigned long flags;
4220
4221	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4222		return;
4223
4224	atomic_inc(&buffer->resize_disabled);
4225	atomic_inc(&cpu_buffer->record_disabled);
4226
4227	/* Make sure all commits have finished */
4228	synchronize_sched();
4229
4230	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4231
4232	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
4233		goto out;
4234
4235	arch_spin_lock(&cpu_buffer->lock);
4236
4237	rb_reset_cpu(cpu_buffer);
4238
4239	arch_spin_unlock(&cpu_buffer->lock);
4240
4241 out:
4242	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4243
4244	atomic_dec(&cpu_buffer->record_disabled);
4245	atomic_dec(&buffer->resize_disabled);
4246}
4247EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
4248
4249/**
4250 * ring_buffer_reset - reset a ring buffer
4251 * @buffer: The ring buffer to reset all cpu buffers
4252 */
4253void ring_buffer_reset(struct ring_buffer *buffer)
4254{
4255	int cpu;
4256
4257	for_each_buffer_cpu(buffer, cpu)
4258		ring_buffer_reset_cpu(buffer, cpu);
4259}
4260EXPORT_SYMBOL_GPL(ring_buffer_reset);
4261
4262/**
4263 * ring_buffer_empty - is the ring buffer empty?
4264 * @buffer: The ring buffer to test
4265 */
4266bool ring_buffer_empty(struct ring_buffer *buffer)
4267{
4268	struct ring_buffer_per_cpu *cpu_buffer;
4269	unsigned long flags;
4270	bool dolock;
4271	int cpu;
4272	int ret;
4273
4274	/* yes this is racy, but if you don't like the race, lock the buffer */
4275	for_each_buffer_cpu(buffer, cpu) {
4276		cpu_buffer = buffer->buffers[cpu];
4277		local_irq_save(flags);
4278		dolock = rb_reader_lock(cpu_buffer);
4279		ret = rb_per_cpu_empty(cpu_buffer);
4280		rb_reader_unlock(cpu_buffer, dolock);
4281		local_irq_restore(flags);
4282
4283		if (!ret)
4284			return false;
4285	}
4286
4287	return true;
4288}
4289EXPORT_SYMBOL_GPL(ring_buffer_empty);
4290
4291/**
4292 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
4293 * @buffer: The ring buffer
4294 * @cpu: The CPU buffer to test
4295 */
4296bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
4297{
4298	struct ring_buffer_per_cpu *cpu_buffer;
4299	unsigned long flags;
4300	bool dolock;
4301	int ret;
4302
4303	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4304		return true;
4305
4306	cpu_buffer = buffer->buffers[cpu];
4307	local_irq_save(flags);
4308	dolock = rb_reader_lock(cpu_buffer);
4309	ret = rb_per_cpu_empty(cpu_buffer);
4310	rb_reader_unlock(cpu_buffer, dolock);
4311	local_irq_restore(flags);
4312
4313	return ret;
4314}
4315EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
4316
4317#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4318/**
4319 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
4320 * @buffer_a: One buffer to swap with
4321 * @buffer_b: The other buffer to swap with
4322 *
4323 * This function is useful for tracers that want to take a "snapshot"
4324 * of a CPU buffer and have another backup buffer lying around.
4325 * It is expected that the tracer handles the cpu buffer not being
4326 * used at the moment.
4327 */
4328int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
4329			 struct ring_buffer *buffer_b, int cpu)
4330{
4331	struct ring_buffer_per_cpu *cpu_buffer_a;
4332	struct ring_buffer_per_cpu *cpu_buffer_b;
4333	int ret = -EINVAL;
4334
4335	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
4336	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
4337		goto out;
4338
4339	cpu_buffer_a = buffer_a->buffers[cpu];
4340	cpu_buffer_b = buffer_b->buffers[cpu];
4341
4342	/* At least make sure the two buffers are somewhat the same */
4343	if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
4344		goto out;
4345
4346	ret = -EAGAIN;
4347
4348	if (atomic_read(&buffer_a->record_disabled))
4349		goto out;
4350
4351	if (atomic_read(&buffer_b->record_disabled))
4352		goto out;
4353
4354	if (atomic_read(&cpu_buffer_a->record_disabled))
4355		goto out;
4356
4357	if (atomic_read(&cpu_buffer_b->record_disabled))
4358		goto out;
4359
4360	/*
4361	 * We can't do a synchronize_sched here because this
4362	 * function can be called in atomic context.
4363	 * Normally this will be called from the same CPU as cpu.
4364	 * If not it's up to the caller to protect this.
4365	 */
4366	atomic_inc(&cpu_buffer_a->record_disabled);
4367	atomic_inc(&cpu_buffer_b->record_disabled);
4368
4369	ret = -EBUSY;
4370	if (local_read(&cpu_buffer_a->committing))
4371		goto out_dec;
4372	if (local_read(&cpu_buffer_b->committing))
4373		goto out_dec;
4374
4375	buffer_a->buffers[cpu] = cpu_buffer_b;
4376	buffer_b->buffers[cpu] = cpu_buffer_a;
4377
4378	cpu_buffer_b->buffer = buffer_a;
4379	cpu_buffer_a->buffer = buffer_b;
4380
4381	ret = 0;
4382
4383out_dec:
4384	atomic_dec(&cpu_buffer_a->record_disabled);
4385	atomic_dec(&cpu_buffer_b->record_disabled);
4386out:
4387	return ret;
4388}
4389EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
4390#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
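
/*
 * Usage sketch, not part of this file: how a tracer might take a
 * per-CPU "snapshot" with ring_buffer_swap_cpu(), assuming it keeps a
 * spare buffer of the same size around.  On success the live data now
 * sits in 'spare' and can be read at leisure with the iterator API.
 * The ex_ function name is hypothetical.
 */
#if 0	/* example only, needs CONFIG_RING_BUFFER_ALLOW_SWAP */
static int ex_snapshot_cpu(struct ring_buffer *live,
			   struct ring_buffer *spare, int cpu)
{
	int ret;

	ret = ring_buffer_swap_cpu(live, spare, cpu);
	if (ret)	/* -EINVAL, -EAGAIN or -EBUSY */
		return ret;

	/* 'spare' now holds the snapshot for this cpu */
	return 0;
}
#endif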
4391
4392/**
4393 * ring_buffer_alloc_read_page - allocate a page to read from buffer
4394 * @buffer: the buffer to allocate for.
4395 * @cpu: the cpu buffer to allocate.
4396 *
4397 * This function is used in conjunction with ring_buffer_read_page.
4398 * When reading a full page from the ring buffer, these functions
4399 * can be used to speed up the process. The calling function should
4400 * allocate a few pages first with this function. Then when it
4401 * needs to get pages from the ring buffer, it passes the result
4402 * of this function into ring_buffer_read_page, which will swap
4403 * the page that was allocated, with the read page of the buffer.
4404 *
4405 * Returns:
4406 *  The page allocated, or NULL on error.
4407 */
4408void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
4409{
4410	struct buffer_data_page *bpage;
4411	struct page *page;
4412
4413	page = alloc_pages_node(cpu_to_node(cpu),
4414				GFP_KERNEL | __GFP_NORETRY, 0);
4415	if (!page)
4416		return NULL;
4417
4418	bpage = page_address(page);
4419
4420	rb_init_page(bpage);
4421
4422	return bpage;
4423}
4424EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
4425
4426/**
4427 * ring_buffer_free_read_page - free an allocated read page
4428 * @buffer: the buffer the page was allocated for
4429 * @data: the page to free
4430 *
4431 * Free a page allocated from ring_buffer_alloc_read_page.
4432 */
4433void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
4434{
4435	free_page((unsigned long)data);
4436}
4437EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
4438
4439/**
4440 * ring_buffer_read_page - extract a page from the ring buffer
4441 * @buffer: buffer to extract from
4442 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
4443 * @len: amount to extract
4444 * @cpu: the cpu of the buffer to extract
4445 * @full: should the extraction only happen when the page is full.
4446 *
4447 * This function will pull out a page from the ring buffer and consume it.
4448 * @data_page must be the address of the variable that was returned
4449 * from ring_buffer_alloc_read_page. This is because the page might be used
4450 * to swap with a page in the ring buffer.
4451 *
4452 * for example:
4453 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
4454 *	if (!rpage)
4455 *		return error;
4456 *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
4457 *	if (ret >= 0)
4458 *		process_page(rpage, ret);
4459 *
4460 * When @full is set, the function will not return the data unless
4461 * the writer is off the reader page.
4462 *
4463 * Note: it is up to the calling functions to handle sleeps and wakeups.
4464 *  The ring buffer can be used anywhere in the kernel and can not
4465 *  blindly call wake_up. The layer that uses the ring buffer must be
4466 *  responsible for that.
4467 *
4468 * Returns:
4469 *  >=0 if data has been transferred, returns the offset of consumed data.
4470 *  <0 if no data has been transferred.
4471 */
4472int ring_buffer_read_page(struct ring_buffer *buffer,
4473			  void **data_page, size_t len, int cpu, int full)
4474{
4475	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4476	struct ring_buffer_event *event;
4477	struct buffer_data_page *bpage;
4478	struct buffer_page *reader;
4479	unsigned long missed_events;
4480	unsigned long flags;
4481	unsigned int commit;
4482	unsigned int read;
4483	u64 save_timestamp;
4484	int ret = -1;
4485
4486	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4487		goto out;
4488
4489	/*
4490	 * If len is not big enough to hold the page header, then
4491	 * we can not copy anything.
4492	 */
4493	if (len <= BUF_PAGE_HDR_SIZE)
4494		goto out;
4495
4496	len -= BUF_PAGE_HDR_SIZE;
4497
4498	if (!data_page)
4499		goto out;
4500
4501	bpage = *data_page;
4502	if (!bpage)
4503		goto out;
4504
4505	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4506
4507	reader = rb_get_reader_page(cpu_buffer);
4508	if (!reader)
4509		goto out_unlock;
4510
4511	event = rb_reader_event(cpu_buffer);
4512
4513	read = reader->read;
4514	commit = rb_page_commit(reader);
4515
4516	/* Check if any events were dropped */
4517	missed_events = cpu_buffer->lost_events;
4518
4519	/*
4520	 * If this page has been partially read or
4521	 * if len is not big enough to read the rest of the page or
4522	 * a writer is still on the page, then
4523	 * we must copy the data from the page to the buffer.
4524	 * Otherwise, we can simply swap the page with the one passed in.
4525	 */
4526	if (read || (len < (commit - read)) ||
4527	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
4528		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
4529		unsigned int rpos = read;
4530		unsigned int pos = 0;
4531		unsigned int size;
4532
4533		if (full)
4534			goto out_unlock;
4535
4536		if (len > (commit - read))
4537			len = (commit - read);
4538
4539		/* Always keep the time extend and data together */
4540		size = rb_event_ts_length(event);
4541
4542		if (len < size)
4543			goto out_unlock;
4544
4545		/* save the current timestamp, since the user will need it */
4546		save_timestamp = cpu_buffer->read_stamp;
4547
4548		/* Need to copy one event at a time */
4549		do {
4550			/* We need the size of one event, because
4551			 * rb_advance_reader only advances by one event,
4552			 * whereas rb_event_ts_length may include the size of
4553			 * one or two events.
4554			 * We have already ensured there's enough space if this
4555			 * is a time extend. */
4556			size = rb_event_length(event);
4557			memcpy(bpage->data + pos, rpage->data + rpos, size);
4558
4559			len -= size;
4560
4561			rb_advance_reader(cpu_buffer);
4562			rpos = reader->read;
4563			pos += size;
4564
4565			if (rpos >= commit)
4566				break;
4567
4568			event = rb_reader_event(cpu_buffer);
4569			/* Always keep the time extend and data together */
4570			size = rb_event_ts_length(event);
4571		} while (len >= size);
4572
4573		/* update bpage */
4574		local_set(&bpage->commit, pos);
4575		bpage->time_stamp = save_timestamp;
4576
4577		/* we copied everything to the beginning */
4578		read = 0;
4579	} else {
4580		/* update the entry counter */
4581		cpu_buffer->read += rb_page_entries(reader);
4582		cpu_buffer->read_bytes += BUF_PAGE_SIZE;
4583
4584		/* swap the pages */
4585		rb_init_page(bpage);
4586		bpage = reader->page;
4587		reader->page = *data_page;
4588		local_set(&reader->write, 0);
4589		local_set(&reader->entries, 0);
4590		reader->read = 0;
4591		*data_page = bpage;
4592
4593		/*
4594		 * Use the real_end for the data size,
4595		 * This gives us a chance to store the lost events
4596		 * on the page.
4597		 */
4598		if (reader->real_end)
4599			local_set(&bpage->commit, reader->real_end);
4600	}
4601	ret = read;
4602
4603	cpu_buffer->lost_events = 0;
4604
4605	commit = local_read(&bpage->commit);
4606	/*
4607	 * Set a flag in the commit field if we lost events
4608	 */
4609	if (missed_events) {
4610		/* If there is room at the end of the page to save the
4611		 * missed events, then record it there.
4612		 */
4613		if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
4614			memcpy(&bpage->data[commit], &missed_events,
4615			       sizeof(missed_events));
4616			local_add(RB_MISSED_STORED, &bpage->commit);
4617			commit += sizeof(missed_events);
4618		}
4619		local_add(RB_MISSED_EVENTS, &bpage->commit);
4620	}
4621
4622	/*
4623	 * This page may be off to user land. Zero it out here.
4624	 */
4625	if (commit < BUF_PAGE_SIZE)
4626		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
4627
4628 out_unlock:
4629	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4630
4631 out:
4632	return ret;
4633}
4634EXPORT_SYMBOL_GPL(ring_buffer_read_page);
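
/*
 * Usage sketch, not part of this file: reading whole pages at a time,
 * roughly what the tracing splice path does.  The page handed to
 * ring_buffer_read_page() may be swapped with the reader page, so the
 * caller must pass the address of its pointer and later free whatever
 * pointer it ends up holding.  The ex_ function name is hypothetical.
 */
#if 0	/* example only, never compiled */
static void ex_read_full_pages(struct ring_buffer *buffer, int cpu)
{
	void *page;
	int ret;

	page = ring_buffer_alloc_read_page(buffer, cpu);
	if (!page)
		return;

	/* keep pulling pages until the cpu buffer is (momentarily) empty */
	do {
		ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0);
		if (ret >= 0) {
			/* 'page' is a struct buffer_data_page to hand off */
		}
	} while (ret >= 0);

	ring_buffer_free_read_page(buffer, page);
}
#endif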
4635
4636#ifdef CONFIG_HOTPLUG_CPU
4637static int rb_cpu_notify(struct notifier_block *self,
4638			 unsigned long action, void *hcpu)
4639{
4640	struct ring_buffer *buffer =
4641		container_of(self, struct ring_buffer, cpu_notify);
4642	long cpu = (long)hcpu;
4643	int cpu_i, nr_pages_same;
4644	unsigned int nr_pages;
4645
4646	switch (action) {
4647	case CPU_UP_PREPARE:
4648	case CPU_UP_PREPARE_FROZEN:
4649		if (cpumask_test_cpu(cpu, buffer->cpumask))
4650			return NOTIFY_OK;
4651
4652		nr_pages = 0;
4653		nr_pages_same = 1;
4654		/* check if all cpu sizes are same */
4655		for_each_buffer_cpu(buffer, cpu_i) {
4656			/* fill in the size from first enabled cpu */
4657			if (nr_pages == 0)
4658				nr_pages = buffer->buffers[cpu_i]->nr_pages;
4659			if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
4660				nr_pages_same = 0;
4661				break;
4662			}
4663		}
4664		/* allocate minimum pages, user can later expand it */
4665		if (!nr_pages_same)
4666			nr_pages = 2;
4667		buffer->buffers[cpu] =
4668			rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
4669		if (!buffer->buffers[cpu]) {
4670			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
4671			     cpu);
4672			return NOTIFY_OK;
4673		}
4674		smp_wmb();
4675		cpumask_set_cpu(cpu, buffer->cpumask);
4676		break;
4677	case CPU_DOWN_PREPARE:
4678	case CPU_DOWN_PREPARE_FROZEN:
4679		/*
4680		 * Do nothing.
4681		 *  If we were to free the buffer, then the user would
4682		 *  lose any trace that was in the buffer.
4683		 */
4684		break;
4685	default:
4686		break;
4687	}
4688	return NOTIFY_OK;
4689}
4690#endif
4691
4692#ifdef CONFIG_RING_BUFFER_STARTUP_TEST
4693/*
4694 * This is a basic integrity check of the ring buffer.
4695 * Late in the boot cycle this test will run when configured in.
4696 * It will kick off a thread per CPU that will go into a loop
4697 * writing to the per cpu ring buffer various sizes of data.
4698 * Some of the data will be large items, some small.
4699 *
4700 * Another thread is created that goes into a spin, sending out
4701 * IPIs to the other CPUs to also write into the ring buffer.
4702 * This is to test the nesting ability of the buffer.
4703 *
4704 * Basic stats are recorded and reported. If something in the
4705 * ring buffer should happen that's not expected, a big warning
4706 * is displayed and all ring buffers are disabled.
4707 */
4708static struct task_struct *rb_threads[NR_CPUS] __initdata;
4709
4710struct rb_test_data {
4711	struct ring_buffer	*buffer;
4712	unsigned long		events;
4713	unsigned long		bytes_written;
4714	unsigned long		bytes_alloc;
4715	unsigned long		bytes_dropped;
4716	unsigned long		events_nested;
4717	unsigned long		bytes_written_nested;
4718	unsigned long		bytes_alloc_nested;
4719	unsigned long		bytes_dropped_nested;
4720	int			min_size_nested;
4721	int			max_size_nested;
4722	int			max_size;
4723	int			min_size;
4724	int			cpu;
4725	int			cnt;
4726};
4727
4728static struct rb_test_data rb_data[NR_CPUS] __initdata;
4729
4730/* 1 meg per cpu */
4731#define RB_TEST_BUFFER_SIZE	1048576
4732
4733static char rb_string[] __initdata =
4734	"abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
4735	"?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
4736	"!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
4737
4738static bool rb_test_started __initdata;
4739
4740struct rb_item {
4741	int size;
4742	char str[];
4743};
4744
4745static __init int rb_write_something(struct rb_test_data *data, bool nested)
4746{
4747	struct ring_buffer_event *event;
4748	struct rb_item *item;
4749	bool started;
4750	int event_len;
4751	int size;
4752	int len;
4753	int cnt;
4754
4755	/* Have nested writes different than what is written */
4756	cnt = data->cnt + (nested ? 27 : 0);
4757
4758	/* Multiply cnt by ~e, to make some unique increment */
4759	size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
4760
4761	len = size + sizeof(struct rb_item);
4762
4763	started = rb_test_started;
4764	/* read rb_test_started before checking buffer enabled */
4765	smp_rmb();
4766
4767	event = ring_buffer_lock_reserve(data->buffer, len);
4768	if (!event) {
4769		/* Ignore dropped events before test starts. */
4770		if (started) {
4771			if (nested)
4772				data->bytes_dropped_nested += len;
4773			else
4774				data->bytes_dropped += len;
4775		}
4776		return len;
4777	}
4778
4779	event_len = ring_buffer_event_length(event);
4780
4781	if (RB_WARN_ON(data->buffer, event_len < len))
4782		goto out;
4783
4784	item = ring_buffer_event_data(event);
4785	item->size = size;
4786	memcpy(item->str, rb_string, size);
4787
4788	if (nested) {
4789		data->bytes_alloc_nested += event_len;
4790		data->bytes_written_nested += len;
4791		data->events_nested++;
4792		if (!data->min_size_nested || len < data->min_size_nested)
4793			data->min_size_nested = len;
4794		if (len > data->max_size_nested)
4795			data->max_size_nested = len;
4796	} else {
4797		data->bytes_alloc += event_len;
4798		data->bytes_written += len;
4799		data->events++;
4800		if (!data->min_size || len < data->min_size)
4801			data->min_size = len;
4802		if (len > data->max_size)
4803			data->max_size = len;
4804	}
4805
4806 out:
4807	ring_buffer_unlock_commit(data->buffer, event);
4808
4809	return 0;
4810}
4811
4812static __init int rb_test(void *arg)
4813{
4814	struct rb_test_data *data = arg;
4815
4816	while (!kthread_should_stop()) {
4817		rb_write_something(data, false);
4818		data->cnt++;
4819
4820		set_current_state(TASK_INTERRUPTIBLE);
4821		/* Now sleep between a min of 100-300us and a max of 1ms */
4822		usleep_range(((data->cnt % 3) + 1) * 100, 1000);
4823	}
4824
4825	return 0;
4826}
4827
4828static __init void rb_ipi(void *ignore)
4829{
4830	struct rb_test_data *data;
4831	int cpu = smp_processor_id();
4832
4833	data = &rb_data[cpu];
4834	rb_write_something(data, true);
4835}
4836
4837static __init int rb_hammer_test(void *arg)
4838{
4839	while (!kthread_should_stop()) {
4840
4841		/* Send an IPI to all cpus to write data! */
4842		smp_call_function(rb_ipi, NULL, 1);
4843		/* No sleep, but for non preempt, let others run */
4844		schedule();
4845	}
4846
4847	return 0;
4848}
4849
4850static __init int test_ringbuffer(void)
4851{
4852	struct task_struct *rb_hammer;
4853	struct ring_buffer *buffer;
4854	int cpu;
4855	int ret = 0;
4856
4857	pr_info("Running ring buffer tests...\n");
4858
4859	buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
4860	if (WARN_ON(!buffer))
4861		return 0;
4862
4863	/* Disable buffer so that threads can't write to it yet */
4864	ring_buffer_record_off(buffer);
4865
4866	for_each_online_cpu(cpu) {
4867		rb_data[cpu].buffer = buffer;
4868		rb_data[cpu].cpu = cpu;
4869		rb_data[cpu].cnt = cpu;
4870		rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
4871						 "rbtester/%d", cpu);
4872		if (WARN_ON(!rb_threads[cpu])) {
4873			pr_cont("FAILED\n");
4874			ret = -1;
4875			goto out_free;
4876		}
4877
4878		kthread_bind(rb_threads[cpu], cpu);
4879 		wake_up_process(rb_threads[cpu]);
4880	}
4881
4882	/* Now create the rb hammer! */
4883	rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
4884	if (WARN_ON(!rb_hammer)) {
4885		pr_cont("FAILED\n");
4886		ret = -1;
4887		goto out_free;
4888	}
4889
4890	ring_buffer_record_on(buffer);
4891	/*
4892	 * Show buffer is enabled before setting rb_test_started.
4893	 * Yes there's a small race window where events could be
4894	 * dropped and the thread won't catch it. But when a ring
4895	 * buffer gets enabled, there will always be some kind of
4896	 * delay before other CPUs see it. Thus, we don't care about
4897	 * those dropped events. We care about events dropped after
4898	 * the threads see that the buffer is active.
4899	 */
4900	smp_wmb();
4901	rb_test_started = true;
4902
4903	set_current_state(TASK_INTERRUPTIBLE);
4904	/* Just run for 10 seconds */;
4905	schedule_timeout(10 * HZ);
4906
4907	kthread_stop(rb_hammer);
4908
4909 out_free:
4910	for_each_online_cpu(cpu) {
4911		if (!rb_threads[cpu])
4912			break;
4913		kthread_stop(rb_threads[cpu]);
4914	}
4915	if (ret) {
4916		ring_buffer_free(buffer);
4917		return ret;
4918	}
4919
4920	/* Report! */
4921	pr_info("finished\n");
4922	for_each_online_cpu(cpu) {
4923		struct ring_buffer_event *event;
4924		struct rb_test_data *data = &rb_data[cpu];
4925		struct rb_item *item;
4926		unsigned long total_events;
4927		unsigned long total_dropped;
4928		unsigned long total_written;
4929		unsigned long total_alloc;
4930		unsigned long total_read = 0;
4931		unsigned long total_size = 0;
4932		unsigned long total_len = 0;
4933		unsigned long total_lost = 0;
4934		unsigned long lost;
4935		int big_event_size;
4936		int small_event_size;
4937
4938		ret = -1;
4939
4940		total_events = data->events + data->events_nested;
4941		total_written = data->bytes_written + data->bytes_written_nested;
4942		total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
4943		total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
4944
4945		big_event_size = data->max_size + data->max_size_nested;
4946		small_event_size = data->min_size + data->min_size_nested;
4947
4948		pr_info("CPU %d:\n", cpu);
4949		pr_info("              events:    %ld\n", total_events);
4950		pr_info("       dropped bytes:    %ld\n", total_dropped);
4951		pr_info("       alloced bytes:    %ld\n", total_alloc);
4952		pr_info("       written bytes:    %ld\n", total_written);
4953		pr_info("       biggest event:    %d\n", big_event_size);
4954		pr_info("      smallest event:    %d\n", small_event_size);
4955
4956		if (RB_WARN_ON(buffer, total_dropped))
4957			break;
4958
4959		ret = 0;
4960
4961		while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
4962			total_lost += lost;
4963			item = ring_buffer_event_data(event);
4964			total_len += ring_buffer_event_length(event);
4965			total_size += item->size + sizeof(struct rb_item);
4966			if (memcmp(&item->str[0], rb_string, item->size) != 0) {
4967				pr_info("FAILED!\n");
4968				pr_info("buffer had: %.*s\n", item->size, item->str);
4969				pr_info("expected:   %.*s\n", item->size, rb_string);
4970				RB_WARN_ON(buffer, 1);
4971				ret = -1;
4972				break;
4973			}
4974			total_read++;
4975		}
4976		if (ret)
4977			break;
4978
4979		ret = -1;
4980
4981		pr_info("         read events:   %ld\n", total_read);
4982		pr_info("         lost events:   %ld\n", total_lost);
4983		pr_info("        total events:   %ld\n", total_lost + total_read);
4984		pr_info("  recorded len bytes:   %ld\n", total_len);
4985		pr_info(" recorded size bytes:   %ld\n", total_size);
4986		if (total_lost)
4987			pr_info(" With dropped events, record len and size may not match\n"
4988				" alloced and written from above\n");
4989		if (!total_lost) {
4990			if (RB_WARN_ON(buffer, total_len != total_alloc ||
4991				       total_size != total_written))
4992				break;
4993		}
4994		if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
4995			break;
4996
4997		ret = 0;
4998	}
4999	if (!ret)
5000		pr_info("Ring buffer PASSED!\n");
5001
5002	ring_buffer_free(buffer);
5003	return 0;
5004}
5005
5006late_initcall(test_ringbuffer);
5007#endif /* CONFIG_RING_BUFFER_STARTUP_TEST */
v4.17
   1/*
   2 * Generic ring buffer
   3 *
   4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
   5 */
   6#include <linux/trace_events.h>
   7#include <linux/ring_buffer.h>
   8#include <linux/trace_clock.h>
   9#include <linux/sched/clock.h>
  10#include <linux/trace_seq.h>
  11#include <linux/spinlock.h>
  12#include <linux/irq_work.h>
  13#include <linux/uaccess.h>
  14#include <linux/hardirq.h>
  15#include <linux/kthread.h>	/* for self test */
 
  16#include <linux/module.h>
  17#include <linux/percpu.h>
  18#include <linux/mutex.h>
  19#include <linux/delay.h>
  20#include <linux/slab.h>
  21#include <linux/init.h>
  22#include <linux/hash.h>
  23#include <linux/list.h>
  24#include <linux/cpu.h>
  25#include <linux/oom.h>
  26
  27#include <asm/local.h>
  28
  29static void update_pages_handler(struct work_struct *work);
  30
  31/*
  32 * The ring buffer header is special. We must manually up keep it.
  33 */
  34int ring_buffer_print_entry_header(struct trace_seq *s)
  35{
  36	trace_seq_puts(s, "# compressed entry header\n");
  37	trace_seq_puts(s, "\ttype_len    :    5 bits\n");
  38	trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
  39	trace_seq_puts(s, "\tarray       :   32 bits\n");
  40	trace_seq_putc(s, '\n');
  41	trace_seq_printf(s, "\tpadding     : type == %d\n",
  42			 RINGBUF_TYPE_PADDING);
  43	trace_seq_printf(s, "\ttime_extend : type == %d\n",
  44			 RINGBUF_TYPE_TIME_EXTEND);
  45	trace_seq_printf(s, "\ttime_stamp : type == %d\n",
  46			 RINGBUF_TYPE_TIME_STAMP);
  47	trace_seq_printf(s, "\tdata max type_len  == %d\n",
  48			 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
  49
  50	return !trace_seq_has_overflowed(s);
  51}
  52
  53/*
  54 * The ring buffer is made up of a list of pages. A separate list of pages is
  55 * allocated for each CPU. A writer may only write to a buffer that is
  56 * associated with the CPU it is currently executing on.  A reader may read
  57 * from any per cpu buffer.
  58 *
  59 * The reader is special. For each per cpu buffer, the reader has its own
  60 * reader page. When a reader has read the entire reader page, this reader
  61 * page is swapped with another page in the ring buffer.
  62 *
  63 * Now, as long as the writer is off the reader page, the reader can do what
  64 * ever it wants with that page. The writer will never write to that page
  65 * again (as long as it is out of the ring buffer).
  66 *
  67 * Here's some silly ASCII art.
  68 *
  69 *   +------+
  70 *   |reader|          RING BUFFER
  71 *   |page  |
  72 *   +------+        +---+   +---+   +---+
  73 *                   |   |-->|   |-->|   |
  74 *                   +---+   +---+   +---+
  75 *                     ^               |
  76 *                     |               |
  77 *                     +---------------+
  78 *
  79 *
  80 *   +------+
  81 *   |reader|          RING BUFFER
  82 *   |page  |------------------v
  83 *   +------+        +---+   +---+   +---+
  84 *                   |   |-->|   |-->|   |
  85 *                   +---+   +---+   +---+
  86 *                     ^               |
  87 *                     |               |
  88 *                     +---------------+
  89 *
  90 *
  91 *   +------+
  92 *   |reader|          RING BUFFER
  93 *   |page  |------------------v
  94 *   +------+        +---+   +---+   +---+
  95 *      ^            |   |-->|   |-->|   |
  96 *      |            +---+   +---+   +---+
  97 *      |                              |
  98 *      |                              |
  99 *      +------------------------------+
 100 *
 101 *
 102 *   +------+
 103 *   |buffer|          RING BUFFER
 104 *   |page  |------------------v
 105 *   +------+        +---+   +---+   +---+
 106 *      ^            |   |   |   |-->|   |
 107 *      |   New      +---+   +---+   +---+
 108 *      |  Reader------^               |
 109 *      |   page                       |
 110 *      +------------------------------+
 111 *
 112 *
 113 * After we make this swap, the reader can hand this page off to the splice
 114 * code and be done with it. It can even allocate a new page if it needs to
 115 * and swap that into the ring buffer.
 116 *
 117 * We will be using cmpxchg soon to make all this lockless.
 118 *
 119 */
 120
 121/* Used for individual buffers (after the counter) */
 122#define RB_BUFFER_OFF		(1 << 20)
 123
 124#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
 125
 126#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
 127#define RB_ALIGNMENT		4U
 128#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 129#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */
 130
 131#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
 132# define RB_FORCE_8BYTE_ALIGNMENT	0
 133# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
 134#else
 135# define RB_FORCE_8BYTE_ALIGNMENT	1
 136# define RB_ARCH_ALIGNMENT		8U
 137#endif
 138
 139#define RB_ALIGN_DATA		__aligned(RB_ARCH_ALIGNMENT)
 140
 141/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
 142#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
 143
 144enum {
 145	RB_LEN_TIME_EXTEND = 8,
 146	RB_LEN_TIME_STAMP =  8,
 147};
 148
 149#define skip_time_extend(event) \
 150	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
 151
 152#define extended_time(event) \
 153	(event->type_len >= RINGBUF_TYPE_TIME_EXTEND)
 154
 155static inline int rb_null_event(struct ring_buffer_event *event)
 156{
 157	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
 158}
 159
 160static void rb_event_set_padding(struct ring_buffer_event *event)
 161{
 162	/* padding has a NULL time_delta */
 163	event->type_len = RINGBUF_TYPE_PADDING;
 164	event->time_delta = 0;
 165}
 166
 167static unsigned
 168rb_event_data_length(struct ring_buffer_event *event)
 169{
 170	unsigned length;
 171
 172	if (event->type_len)
 173		length = event->type_len * RB_ALIGNMENT;
 174	else
 175		length = event->array[0];
 176	return length + RB_EVNT_HDR_SIZE;
 177}
 178
 179/*
 180 * Return the length of the given event. Will return
 181 * the length of the time extend if the event is a
 182 * time extend.
 183 */
 184static inline unsigned
 185rb_event_length(struct ring_buffer_event *event)
 186{
 187	switch (event->type_len) {
 188	case RINGBUF_TYPE_PADDING:
 189		if (rb_null_event(event))
 190			/* undefined */
 191			return -1;
 192		return  event->array[0] + RB_EVNT_HDR_SIZE;
 193
 194	case RINGBUF_TYPE_TIME_EXTEND:
 195		return RB_LEN_TIME_EXTEND;
 196
 197	case RINGBUF_TYPE_TIME_STAMP:
 198		return RB_LEN_TIME_STAMP;
 199
 200	case RINGBUF_TYPE_DATA:
 201		return rb_event_data_length(event);
 202	default:
 203		BUG();
 204	}
 205	/* not hit */
 206	return 0;
 207}
 208
 209/*
 210 * Return total length of time extend and data,
 211 *   or just the event length for all other events.
 212 */
 213static inline unsigned
 214rb_event_ts_length(struct ring_buffer_event *event)
 215{
 216	unsigned len = 0;
 217
 218	if (extended_time(event)) {
 219		/* time extends include the data event after it */
 220		len = RB_LEN_TIME_EXTEND;
 221		event = skip_time_extend(event);
 222	}
 223	return len + rb_event_length(event);
 224}
 225
 226/**
 227 * ring_buffer_event_length - return the length of the event
 228 * @event: the event to get the length of
 229 *
 230 * Returns the size of the data load of a data event.
 231 * If the event is something other than a data event, it
 232 * returns the size of the event itself. With the exception
 233 * of a TIME EXTEND, where it still returns the size of the
 234 * data load of the data event after it.
 235 */
 236unsigned ring_buffer_event_length(struct ring_buffer_event *event)
 237{
 238	unsigned length;
 239
 240	if (extended_time(event))
 241		event = skip_time_extend(event);
 242
 243	length = rb_event_length(event);
 244	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 245		return length;
 246	length -= RB_EVNT_HDR_SIZE;
 247	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
 248                length -= sizeof(event->array[0]);
 249	return length;
 250}
 251EXPORT_SYMBOL_GPL(ring_buffer_event_length);
 252
 253/* inline for ring buffer fast paths */
 254static __always_inline void *
 255rb_event_data(struct ring_buffer_event *event)
 256{
 257	if (extended_time(event))
 258		event = skip_time_extend(event);
 259	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
 260	/* If length is in len field, then array[0] has the data */
 261	if (event->type_len)
 262		return (void *)&event->array[0];
 263	/* Otherwise length is in array[0] and array[1] has the data */
 264	return (void *)&event->array[1];
 265}
 266
 267/**
 268 * ring_buffer_event_data - return the data of the event
 269 * @event: the event to get the data from
 270 */
 271void *ring_buffer_event_data(struct ring_buffer_event *event)
 272{
 273	return rb_event_data(event);
 274}
 275EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 276
 277#define for_each_buffer_cpu(buffer, cpu)		\
 278	for_each_cpu(cpu, buffer->cpumask)
 279
 280#define TS_SHIFT	27
 281#define TS_MASK		((1ULL << TS_SHIFT) - 1)
 282#define TS_DELTA_TEST	(~TS_MASK)
 283
 284/**
 285 * ring_buffer_event_time_stamp - return the event's extended timestamp
 286 * @event: the event to get the timestamp of
 287 *
 288 * Returns the extended timestamp associated with a data event.
 289 * An extended time_stamp is a 64-bit timestamp represented
 290 * internally in a special way that makes the best use of space
 291 * contained within a ring buffer event.  This function decodes
 292 * it and maps it to a straight u64 value.
 293 */
 294u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event)
 295{
 296	u64 ts;
 297
 298	ts = event->array[0];
 299	ts <<= TS_SHIFT;
 300	ts += event->time_delta;
 301
 302	return ts;
 303}
 304
 305/* Flag when events were overwritten */
 306#define RB_MISSED_EVENTS	(1 << 31)
 307/* Missed count stored at end */
 308#define RB_MISSED_STORED	(1 << 30)
 309
 310#define RB_MISSED_FLAGS		(RB_MISSED_EVENTS|RB_MISSED_STORED)
 311
 312struct buffer_data_page {
 313	u64		 time_stamp;	/* page time stamp */
 314	local_t		 commit;	/* write committed index */
 315	unsigned char	 data[] RB_ALIGN_DATA;	/* data of buffer page */
 316};
 317
 318/*
 319 * Note, the buffer_page list must be first. The buffer pages
 320 * are allocated in cache lines, which means that each buffer
 321 * page will be at the beginning of a cache line, and thus
 322 * the least significant bits will be zero. We use this to
 323 * add flags in the list struct pointers, to make the ring buffer
 324 * lockless.
 325 */
 326struct buffer_page {
 327	struct list_head list;		/* list of buffer pages */
 328	local_t		 write;		/* index for next write */
 329	unsigned	 read;		/* index for next read */
 330	local_t		 entries;	/* entries on this page */
 331	unsigned long	 real_end;	/* real end of data */
 332	struct buffer_data_page *page;	/* Actual data page */
 333};
 334
 335/*
 336 * The buffer page counters, write and entries, must be reset
 337 * atomically when crossing page boundaries. To synchronize this
 338 * update, two counters are inserted into the number. One is
 339 * the actual counter for the write position or count on the page.
 340 *
 341 * The other is a counter of updaters. Before an update happens
 342 * the update partition of the counter is incremented. This will
 343 * allow the updater to update the counter atomically.
 344 *
 345 * The counter is 20 bits, and the state data is 12.
 346 */
 347#define RB_WRITE_MASK		0xfffff
 348#define RB_WRITE_INTCNT		(1 << 20)
 349
 350static void rb_init_page(struct buffer_data_page *bpage)
 351{
 352	local_set(&bpage->commit, 0);
 353}
 354
 355/**
 356 * ring_buffer_page_len - the size of data on the page.
 357 * @page: The page to read
 358 *
 359 * Returns the amount of data on the page, including buffer page header.
 360 */
 361size_t ring_buffer_page_len(void *page)
 362{
 363	struct buffer_data_page *bpage = page;
 364
 365	return (local_read(&bpage->commit) & ~RB_MISSED_FLAGS)
 366		+ BUF_PAGE_HDR_SIZE;
 367}
 368
 369/*
 370 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 371 * this issue out.
 372 */
 373static void free_buffer_page(struct buffer_page *bpage)
 374{
 375	free_page((unsigned long)bpage->page);
 376	kfree(bpage);
 377}
 378
 379/*
 380 * We need to fit the time_stamp delta into 27 bits.
 381 */
 382static inline int test_time_stamp(u64 delta)
 383{
 384	if (delta & TS_DELTA_TEST)
 385		return 1;
 386	return 0;
 387}
 388
 389#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
 390
 391/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
 392#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
 393
 394int ring_buffer_print_page_header(struct trace_seq *s)
 395{
 396	struct buffer_data_page field;
 397
 398	trace_seq_printf(s, "\tfield: u64 timestamp;\t"
 399			 "offset:0;\tsize:%u;\tsigned:%u;\n",
 400			 (unsigned int)sizeof(field.time_stamp),
 401			 (unsigned int)is_signed_type(u64));
 402
 403	trace_seq_printf(s, "\tfield: local_t commit;\t"
 404			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
 405			 (unsigned int)offsetof(typeof(field), commit),
 406			 (unsigned int)sizeof(field.commit),
 407			 (unsigned int)is_signed_type(long));
 408
 409	trace_seq_printf(s, "\tfield: int overwrite;\t"
 410			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
 411			 (unsigned int)offsetof(typeof(field), commit),
 412			 1,
 413			 (unsigned int)is_signed_type(long));
 414
 415	trace_seq_printf(s, "\tfield: char data;\t"
 416			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
 417			 (unsigned int)offsetof(typeof(field), data),
 418			 (unsigned int)BUF_PAGE_SIZE,
 419			 (unsigned int)is_signed_type(char));
 420
 421	return !trace_seq_has_overflowed(s);
 422}
 423
 424struct rb_irq_work {
 425	struct irq_work			work;
 426	wait_queue_head_t		waiters;
 427	wait_queue_head_t		full_waiters;
 428	bool				waiters_pending;
 429	bool				full_waiters_pending;
 430	bool				wakeup_full;
 431};
 432
 433/*
 434 * Structure to hold event state and handle nested events.
 435 */
 436struct rb_event_info {
 437	u64			ts;
 438	u64			delta;
 439	unsigned long		length;
 440	struct buffer_page	*tail_page;
 441	int			add_timestamp;
 442};
 443
 444/*
 445 * Used for which event context the event is in.
 446 *  NMI     = 0
 447 *  IRQ     = 1
 448 *  SOFTIRQ = 2
 449 *  NORMAL  = 3
 450 *
 451 * See trace_recursive_lock() comment below for more details.
 452 */
 453enum {
 454	RB_CTX_NMI,
 455	RB_CTX_IRQ,
 456	RB_CTX_SOFTIRQ,
 457	RB_CTX_NORMAL,
 458	RB_CTX_MAX
 459};
 460
 461/*
 462 * head_page == tail_page && head == tail then buffer is empty.
 463 */
 464struct ring_buffer_per_cpu {
 465	int				cpu;
 466	atomic_t			record_disabled;
 467	struct ring_buffer		*buffer;
 468	raw_spinlock_t			reader_lock;	/* serialize readers */
 469	arch_spinlock_t			lock;
 470	struct lock_class_key		lock_key;
 471	struct buffer_data_page		*free_page;
 472	unsigned long			nr_pages;
 473	unsigned int			current_context;
 474	struct list_head		*pages;
 475	struct buffer_page		*head_page;	/* read from head */
 476	struct buffer_page		*tail_page;	/* write to tail */
 477	struct buffer_page		*commit_page;	/* committed pages */
 478	struct buffer_page		*reader_page;
 479	unsigned long			lost_events;
 480	unsigned long			last_overrun;
 481	unsigned long			nest;
 482	local_t				entries_bytes;
 483	local_t				entries;
 484	local_t				overrun;
 485	local_t				commit_overrun;
 486	local_t				dropped_events;
 487	local_t				committing;
 488	local_t				commits;
 489	unsigned long			read;
 490	unsigned long			read_bytes;
 491	u64				write_stamp;
 492	u64				read_stamp;
 493	/* ring buffer pages to update, > 0 to add, < 0 to remove */
 494	long				nr_pages_to_update;
 495	struct list_head		new_pages; /* new pages to add */
 496	struct work_struct		update_pages_work;
 497	struct completion		update_done;
 498
 499	struct rb_irq_work		irq_work;
 500};
 501
 502struct ring_buffer {
 503	unsigned			flags;
 504	int				cpus;
 505	atomic_t			record_disabled;
 506	atomic_t			resize_disabled;
 507	cpumask_var_t			cpumask;
 508
 509	struct lock_class_key		*reader_lock_key;
 510
 511	struct mutex			mutex;
 512
 513	struct ring_buffer_per_cpu	**buffers;
 514
 515	struct hlist_node		node;
 
 
 516	u64				(*clock)(void);
 517
 518	struct rb_irq_work		irq_work;
 519	bool				time_stamp_abs;
 520};
 521
 522struct ring_buffer_iter {
 523	struct ring_buffer_per_cpu	*cpu_buffer;
 524	unsigned long			head;
 525	struct buffer_page		*head_page;
 526	struct buffer_page		*cache_reader_page;
 527	unsigned long			cache_read;
 528	u64				read_stamp;
 529};
 530
 531/*
 532 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 533 *
 534 * This irq_work callback wakes up any tasks that are blocked on the
 535 * ring buffer waiters queue.
 536 */
 537static void rb_wake_up_waiters(struct irq_work *work)
 538{
 539	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
 540
 541	wake_up_all(&rbwork->waiters);
 542	if (rbwork->wakeup_full) {
 543		rbwork->wakeup_full = false;
 544		wake_up_all(&rbwork->full_waiters);
 545	}
 546}
 547
 548/**
 549 * ring_buffer_wait - wait for input to the ring buffer
 550 * @buffer: buffer to wait on
 551 * @cpu: the cpu buffer to wait on
 552 * @full: wait until a full page is available, if @cpu != RING_BUFFER_ALL_CPUS
 553 *
 554 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 555 * as data is added to any of the @buffer's cpu buffers. Otherwise
 556 * it will wait for data to be added to a specific cpu buffer.
 557 */
 558int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
 559{
 560	struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
 561	DEFINE_WAIT(wait);
 562	struct rb_irq_work *work;
 563	int ret = 0;
 564
 565	/*
 566	 * Depending on what the caller is waiting for, either any
 567	 * data in any cpu buffer, or a specific buffer, put the
 568	 * caller on the appropriate wait queue.
 569	 */
 570	if (cpu == RING_BUFFER_ALL_CPUS) {
 571		work = &buffer->irq_work;
 572		/* Full only makes sense on per cpu reads */
 573		full = false;
 574	} else {
 575		if (!cpumask_test_cpu(cpu, buffer->cpumask))
 576			return -ENODEV;
 577		cpu_buffer = buffer->buffers[cpu];
 578		work = &cpu_buffer->irq_work;
 579	}
 580
 581
 582	while (true) {
 583		if (full)
 584			prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
 585		else
 586			prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
 587
 588		/*
 589		 * The events can happen in critical sections where
 590		 * checking a work queue can cause deadlocks.
 591		 * After adding a task to the queue, this flag is set
 592		 * only to notify events to try to wake up the queue
 593		 * using irq_work.
 594		 *
 595		 * We don't clear it even if the buffer is no longer
 596		 * empty. The flag only causes the next event to run
 597		 * irq_work to do the work queue wake up. The worst
 598		 * that can happen if we race with !trace_empty() is that
 599		 * an event will cause an irq_work to try to wake up
 600		 * an empty queue.
 601		 *
 602		 * There's no reason to protect this flag either, as
 603		 * the work queue and irq_work logic will do the necessary
 604		 * synchronization for the wake ups. The only thing
 605		 * that is necessary is that the wake up happens after
 606		 * a task has been queued. It's OK for spurious wake ups.
 607		 */
 608		if (full)
 609			work->full_waiters_pending = true;
 610		else
 611			work->waiters_pending = true;
 612
 613		if (signal_pending(current)) {
 614			ret = -EINTR;
 615			break;
 616		}
 617
 618		if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
 619			break;
 620
 621		if (cpu != RING_BUFFER_ALL_CPUS &&
 622		    !ring_buffer_empty_cpu(buffer, cpu)) {
 623			unsigned long flags;
 624			bool pagebusy;
 625
 626			if (!full)
 627				break;
 628
 629			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 630			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
 631			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 632
 633			if (!pagebusy)
 634				break;
 635		}
 636
 637		schedule();
 638	}
 639
 640	if (full)
 641		finish_wait(&work->full_waiters, &wait);
 642	else
 643		finish_wait(&work->waiters, &wait);
 644
 645	return ret;
 646}
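
/*
 * Illustrative usage sketch (not part of this file): a consumer kthread
 * that blocks until any per-cpu buffer has data. "my_buffer" and
 * "drain()" are hypothetical names.
 *
 *	while (!kthread_should_stop()) {
 *		if (ring_buffer_wait(my_buffer, RING_BUFFER_ALL_CPUS, false))
 *			break;
 *		drain(my_buffer);
 *	}
 *
 * A non-zero return here means the wait ended with -EINTR (signal).
 */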
 647
 648/**
 649 * ring_buffer_poll_wait - poll on buffer input
 650 * @buffer: buffer to wait on
 651 * @cpu: the cpu buffer to wait on
 652 * @filp: the file descriptor
 653 * @poll_table: The poll descriptor
 654 *
 655 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 656 * as data is added to any of the @buffer's cpu buffers. Otherwise
 657 * it will wait for data to be added to a specific cpu buffer.
 658 *
 659 * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
 660 * zero otherwise.
 661 */
 662__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
 663			  struct file *filp, poll_table *poll_table)
 664{
 665	struct ring_buffer_per_cpu *cpu_buffer;
 666	struct rb_irq_work *work;
 667
 668	if (cpu == RING_BUFFER_ALL_CPUS)
 669		work = &buffer->irq_work;
 670	else {
 671		if (!cpumask_test_cpu(cpu, buffer->cpumask))
 672			return -EINVAL;
 673
 674		cpu_buffer = buffer->buffers[cpu];
 675		work = &cpu_buffer->irq_work;
 676	}
 677
 678	poll_wait(filp, &work->waiters, poll_table);
 679	work->waiters_pending = true;
 680	/*
 681	 * There's a tight race between setting the waiters_pending and
 682	 * checking if the ring buffer is empty.  Once the waiters_pending bit
 683	 * is set, the next event will wake the task up, but we can get stuck
 684	 * if there's only a single event in the buffer.
 685	 *
 686	 * FIXME: Ideally, we need a memory barrier on the writer side as well,
 687	 * but adding a memory barrier to all events will cause too much of a
 688	 * performance hit in the fast path.  We only need a memory barrier when
 689	 * the buffer goes from empty to having content.  But as this race is
 690	 * extremely small, and it's not a problem if another event comes in, we
 691	 * will fix it later.
 692	 */
 693	smp_mb();
 694
 695	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
 696	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
 697		return EPOLLIN | EPOLLRDNORM;
 698	return 0;
 699}
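
/*
 * Illustrative usage sketch: a file's ->poll() operation can be built
 * directly on this helper ("my_buffer" is a hypothetical buffer).
 *
 *	static __poll_t my_poll(struct file *filp, poll_table *pt)
 *	{
 *		return ring_buffer_poll_wait(my_buffer, RING_BUFFER_ALL_CPUS,
 *					     filp, pt);
 *	}
 */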
 700
 701/* buffer may be either ring_buffer or ring_buffer_per_cpu */
 702#define RB_WARN_ON(b, cond)						\
 703	({								\
 704		int _____ret = unlikely(cond);				\
 705		if (_____ret) {						\
 706			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
 707				struct ring_buffer_per_cpu *__b =	\
 708					(void *)b;			\
 709				atomic_inc(&__b->buffer->record_disabled); \
 710			} else						\
 711				atomic_inc(&b->record_disabled);	\
 712			WARN_ON(1);					\
 713		}							\
 714		_____ret;						\
 715	})
 716
 717/* Up this if you want to test the TIME_EXTENTS and normalization */
 718#define DEBUG_SHIFT 0
 719
 720static inline u64 rb_time_stamp(struct ring_buffer *buffer)
 721{
 722	/* shift to debug/test normalization and TIME_EXTENTS */
 723	return buffer->clock() << DEBUG_SHIFT;
 724}
 725
 726u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
 727{
 728	u64 time;
 729
 730	preempt_disable_notrace();
 731	time = rb_time_stamp(buffer);
 732	preempt_enable_no_resched_notrace();
 733
 734	return time;
 735}
 736EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
 737
 738void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
 739				      int cpu, u64 *ts)
 740{
 741	/* Just stupid testing the normalize function and deltas */
 742	*ts >>= DEBUG_SHIFT;
 743}
 744EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
 745
 746/*
 747 * Making the ring buffer lockless makes things tricky.
 748 * Writes only happen on the CPU that they are on, so they
 749 * only need to worry about interrupts. Reads, however, can
 750 * happen on any CPU.
 751 *
 752 * The reader page is always off the ring buffer, but when the
 753 * reader finishes with a page, it needs to swap its page with
 754 * a new one from the buffer. The reader needs to take from
 755 * the head (writes go to the tail). But if a writer is in overwrite
 756 * mode and wraps, it must push the head page forward.
 757 *
 758 * Here lies the problem.
 759 *
 760 * The reader must be careful to replace only the head page, and
 761 * not another one. As described at the top of the file in the
 762 * ASCII art, the reader sets its old page to point to the next
 763 * page after head. It then sets the page after head to point to
 764 * the old reader page. But if the writer moves the head page
 765 * during this operation, the reader could end up with the tail.
 766 *
 767 * We use cmpxchg to help prevent this race. We also do something
 768 * special with the page before head. We set the LSB to 1.
 769 *
 770 * When the writer must push the page forward, it will clear the
 771 * bit that points to the head page, move the head, and then set
 772 * the bit that points to the new head page.
 773 *
 774 * We also don't want an interrupt coming in and moving the head
 775 * page on another writer. Thus we use the second LSB to catch
 776 * that too:
 777 *
 778 * head->list->prev->next        bit 1          bit 0
 779 *                              -------        -------
 780 * Normal page                     0              0
 781 * Points to head page             0              1
 782 * New head page                   1              0
 783 *
 784 * Note we can not trust the prev pointer of the head page, because:
 785 *
 786 * +----+       +-----+        +-----+
 787 * |    |------>|  T  |---X--->|  N  |
 788 * |    |<------|     |        |     |
 789 * +----+       +-----+        +-----+
 790 *   ^                           ^ |
 791 *   |          +-----+          | |
 792 *   +----------|  R  |----------+ |
 793 *              |     |<-----------+
 794 *              +-----+
 795 *
 796 * Key:  ---X-->  HEAD flag set in pointer
 797 *         T      Tail page
 798 *         R      Reader page
 799 *         N      Next page
 800 *
 801 * (see __rb_reserve_next() to see where this happens)
 802 *
 803 *  What the above shows is that the reader just swapped out
 804 *  the reader page with a page in the buffer, but before it
 805 *  could make the new header point back to the new page added
 806 *  it was preempted by a writer. The writer moved forward onto
 807 *  the new page added by the reader and is about to move forward
 808 *  again.
 809 *
 810 *  As you can see, it is legitimate for the previous pointer of
 811 *  the head (or any page) not to point back to itself, but only
 812 *  temporarily.
 813 */
 814
 815#define RB_PAGE_NORMAL		0UL
 816#define RB_PAGE_HEAD		1UL
 817#define RB_PAGE_UPDATE		2UL
 818
 819
 820#define RB_FLAG_MASK		3UL
 821
 822/* PAGE_MOVED is not part of the mask */
 823#define RB_PAGE_MOVED		4UL
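
/*
 * Illustrative sketch (not part of the original file): buffer pages are
 * allocated cache-line aligned, so the two low bits of a ->next pointer
 * are free to carry the state above. Reading a tagged pointer splits it
 * into flag and real pointer:
 *
 *	unsigned long val = (unsigned long)list->next;
 *	unsigned long flag = val & RB_FLAG_MASK;
 *	struct list_head *real = (struct list_head *)(val & ~RB_FLAG_MASK);
 *
 * rb_list_head() below is exactly the last line.
 */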
 824
 825/*
 826 * rb_list_head - strip the flag bits from a list pointer
 827 */
 828static struct list_head *rb_list_head(struct list_head *list)
 829{
 830	unsigned long val = (unsigned long)list;
 831
 832	return (struct list_head *)(val & ~RB_FLAG_MASK);
 833}
 834
 835/*
 836 * rb_is_head_page - test if the given page is the head page
 837 *
 838 * Because the reader may move the head_page pointer, we can
 839 * not trust what the head page is (it may be pointing to
 840 * the reader page). But if the next page is the head page,
 841 * its flags will be non-zero.
 842 */
 843static inline int
 844rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
 845		struct buffer_page *page, struct list_head *list)
 846{
 847	unsigned long val;
 848
 849	val = (unsigned long)list->next;
 850
 851	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
 852		return RB_PAGE_MOVED;
 853
 854	return val & RB_FLAG_MASK;
 855}
 856
 857/*
 858 * rb_is_reader_page
 859 *
 860 * The unique thing about the reader page is that, if the
 861 * writer is ever on it, the previous pointer never points
 862 * back to the reader page.
 863 */
 864static bool rb_is_reader_page(struct buffer_page *page)
 865{
 866	struct list_head *list = page->list.prev;
 867
 868	return rb_list_head(list->next) != &page->list;
 869}
 870
 871/*
 872 * rb_set_list_to_head - set a list_head to be pointing to head.
 873 */
 874static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
 875				struct list_head *list)
 876{
 877	unsigned long *ptr;
 878
 879	ptr = (unsigned long *)&list->next;
 880	*ptr |= RB_PAGE_HEAD;
 881	*ptr &= ~RB_PAGE_UPDATE;
 882}
 883
 884/*
 885 * rb_head_page_activate - sets up head page
 886 */
 887static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
 888{
 889	struct buffer_page *head;
 890
 891	head = cpu_buffer->head_page;
 892	if (!head)
 893		return;
 894
 895	/*
 896	 * Set the previous list pointer to have the HEAD flag.
 897	 */
 898	rb_set_list_to_head(cpu_buffer, head->list.prev);
 899}
 900
 901static void rb_list_head_clear(struct list_head *list)
 902{
 903	unsigned long *ptr = (unsigned long *)&list->next;
 904
 905	*ptr &= ~RB_FLAG_MASK;
 906}
 907
 908/*
 909 * rb_head_page_deactivate - clears head page ptr (for free list)
 910 */
 911static void
 912rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
 913{
 914	struct list_head *hd;
 915
 916	/* Go through the whole list and clear any pointers found. */
 917	rb_list_head_clear(cpu_buffer->pages);
 918
 919	list_for_each(hd, cpu_buffer->pages)
 920		rb_list_head_clear(hd);
 921}
 922
 923static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
 924			    struct buffer_page *head,
 925			    struct buffer_page *prev,
 926			    int old_flag, int new_flag)
 927{
 928	struct list_head *list;
 929	unsigned long val = (unsigned long)&head->list;
 930	unsigned long ret;
 931
 932	list = &prev->list;
 933
 934	val &= ~RB_FLAG_MASK;
 935
 936	ret = cmpxchg((unsigned long *)&list->next,
 937		      val | old_flag, val | new_flag);
 938
 939	/* check if the reader took the page */
 940	if ((ret & ~RB_FLAG_MASK) != val)
 941		return RB_PAGE_MOVED;
 942
 943	return ret & RB_FLAG_MASK;
 944}
 945
 946static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
 947				   struct buffer_page *head,
 948				   struct buffer_page *prev,
 949				   int old_flag)
 950{
 951	return rb_head_page_set(cpu_buffer, head, prev,
 952				old_flag, RB_PAGE_UPDATE);
 953}
 954
 955static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
 956				 struct buffer_page *head,
 957				 struct buffer_page *prev,
 958				 int old_flag)
 959{
 960	return rb_head_page_set(cpu_buffer, head, prev,
 961				old_flag, RB_PAGE_HEAD);
 962}
 963
 964static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
 965				   struct buffer_page *head,
 966				   struct buffer_page *prev,
 967				   int old_flag)
 968{
 969	return rb_head_page_set(cpu_buffer, head, prev,
 970				old_flag, RB_PAGE_NORMAL);
 971}
 972
 973static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
 974			       struct buffer_page **bpage)
 975{
 976	struct list_head *p = rb_list_head((*bpage)->list.next);
 977
 978	*bpage = list_entry(p, struct buffer_page, list);
 979}
 980
 981static struct buffer_page *
 982rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
 983{
 984	struct buffer_page *head;
 985	struct buffer_page *page;
 986	struct list_head *list;
 987	int i;
 988
 989	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
 990		return NULL;
 991
 992	/* sanity check */
 993	list = cpu_buffer->pages;
 994	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
 995		return NULL;
 996
 997	page = head = cpu_buffer->head_page;
 998	/*
 999	 * It is possible that the writer moves the head page behind
1000	 * where we started, and we miss it in one loop.
1001	 * A second loop should grab the head page, but we'll do
1002	 * three loops just because I'm paranoid.
1003	 */
1004	for (i = 0; i < 3; i++) {
1005		do {
1006			if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
1007				cpu_buffer->head_page = page;
1008				return page;
1009			}
1010			rb_inc_page(cpu_buffer, &page);
1011		} while (page != head);
1012	}
1013
1014	RB_WARN_ON(cpu_buffer, 1);
1015
1016	return NULL;
1017}
1018
1019static int rb_head_page_replace(struct buffer_page *old,
1020				struct buffer_page *new)
1021{
1022	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
1023	unsigned long val;
1024	unsigned long ret;
1025
1026	val = *ptr & ~RB_FLAG_MASK;
1027	val |= RB_PAGE_HEAD;
1028
1029	ret = cmpxchg(ptr, val, (unsigned long)&new->list);
1030
1031	return ret == val;
1032}
1033
1034/*
1035 * rb_tail_page_update - move the tail page forward
1036 */
1037static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
1038			       struct buffer_page *tail_page,
1039			       struct buffer_page *next_page)
1040{
1041	unsigned long old_entries;
1042	unsigned long old_write;
1043
1044	/*
1045	 * The tail page now needs to be moved forward.
1046	 *
1047	 * We need to reset the tail page, but without messing
1048	 * with possible erasing of data brought in by interrupts
1049	 * that have moved the tail page and are currently on it.
1050	 *
1051	 * We add a counter to the write field to denote this.
1052	 */
1053	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
1054	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
1055
1056	/*
1057	 * Just make sure we have seen our old_write and synchronize
1058	 * with any interrupts that come in.
1059	 */
1060	barrier();
1061
1062	/*
1063	 * If the tail page is still the same as what we think
1064	 * it is, then it is up to us to update the tail
1065	 * pointer.
1066	 */
1067	if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
1068		/* Zero the write counter */
1069		unsigned long val = old_write & ~RB_WRITE_MASK;
1070		unsigned long eval = old_entries & ~RB_WRITE_MASK;
1071
1072		/*
1073		 * This will only succeed if an interrupt did
1074		 * not come in and change it. If one did, we
1075		 * do not want to modify it.
1076		 *
1077		 * We add (void) to let the compiler know that we do not care
1078		 * about the return value of these functions. We use the
1079		 * cmpxchg to only update if an interrupt did not already
1080		 * do it for us. If the cmpxchg fails, we don't care.
1081		 */
1082		(void)local_cmpxchg(&next_page->write, old_write, val);
1083		(void)local_cmpxchg(&next_page->entries, old_entries, eval);
1084
1085		/*
1086		 * No need to worry about races with clearing out the commit:
1087		 * it can only increment when a commit takes place. But that
1088		 * only happens in the outermost nested commit.
1089		 */
1090		local_set(&next_page->page->commit, 0);
1091
1092		/* Again, either we update tail_page or an interrupt does */
1093		(void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
1094	}
1095}
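
/*
 * Illustrative note on the counter trick above (RB_WRITE_MASK and
 * RB_WRITE_INTCNT are defined earlier in this file): the write field
 * packs two values,
 *
 *	index  = local_read(&bpage->write) & RB_WRITE_MASK;
 *	intcnt = local_read(&bpage->write) & ~RB_WRITE_MASK;
 *
 * so an interrupt that also moves the tail page bumps the counter half
 * and makes the local_cmpxchg() calls above fail, leaving the
 * interrupt's update in place.
 */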
1096
1097static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
1098			  struct buffer_page *bpage)
1099{
1100	unsigned long val = (unsigned long)bpage;
1101
1102	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
1103		return 1;
1104
1105	return 0;
1106}
1107
1108/**
1109 * rb_check_list - make sure a pointer to a list has the last bits zero
1110 */
1111static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
1112			 struct list_head *list)
1113{
1114	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
1115		return 1;
1116	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
1117		return 1;
1118	return 0;
1119}
1120
1121/**
1122 * rb_check_pages - integrity check of buffer pages
1123 * @cpu_buffer: CPU buffer with pages to test
1124 *
1125 * As a safety measure we check to make sure the data pages have not
1126 * been corrupted.
1127 */
1128static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
1129{
1130	struct list_head *head = cpu_buffer->pages;
1131	struct buffer_page *bpage, *tmp;
1132
1133	/* Reset the head page if it exists */
1134	if (cpu_buffer->head_page)
1135		rb_set_head_page(cpu_buffer);
1136
1137	rb_head_page_deactivate(cpu_buffer);
1138
1139	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
1140		return -1;
1141	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
1142		return -1;
1143
1144	if (rb_check_list(cpu_buffer, head))
1145		return -1;
1146
1147	list_for_each_entry_safe(bpage, tmp, head, list) {
1148		if (RB_WARN_ON(cpu_buffer,
1149			       bpage->list.next->prev != &bpage->list))
1150			return -1;
1151		if (RB_WARN_ON(cpu_buffer,
1152			       bpage->list.prev->next != &bpage->list))
1153			return -1;
1154		if (rb_check_list(cpu_buffer, &bpage->list))
1155			return -1;
1156	}
1157
1158	rb_head_page_activate(cpu_buffer);
1159
1160	return 0;
1161}
1162
1163static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
1164{
1165	struct buffer_page *bpage, *tmp;
1166	bool user_thread = current->mm != NULL;
1167	gfp_t mflags;
1168	long i;
1169
1170	/*
1171	 * Check if the available memory is there first.
1172	 * Note, si_mem_available() only gives us a rough estimate of available
1173	 * memory. It may not be accurate. But we don't care, we just want
1174	 * to prevent doing any allocation when it is obvious that it is
1175	 * not going to succeed.
1176	 */
1177	i = si_mem_available();
1178	if (i < nr_pages)
1179		return -ENOMEM;
1180
1181	/*
1182	 * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
1183	 * gracefully without invoking oom-killer and the system is not
1184	 * destabilized.
1185	 */
1186	mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL;
1187
1188	/*
1189	 * If a user thread allocates too much, si_mem_available() may
1190	 * report there's enough memory even though there is not.
1191	 * Make sure the OOM killer kills this thread. This can happen
1192	 * even with RETRY_MAYFAIL because another task may be doing
1193	 * an allocation after this task has taken all memory.
1194	 * This is the task the OOM killer needs to take out during this
1195	 * loop, even if it was triggered by an allocation somewhere else.
1196	 */
1197	if (user_thread)
1198		set_current_oom_origin();
1199	for (i = 0; i < nr_pages; i++) {
1200		struct page *page;
1201
1202		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1203				    mflags, cpu_to_node(cpu));
1204		if (!bpage)
1205			goto free_pages;
1206
1207		list_add(&bpage->list, pages);
1208
1209		page = alloc_pages_node(cpu_to_node(cpu), mflags, 0);
1210		if (!page)
1211			goto free_pages;
1212		bpage->page = page_address(page);
1213		rb_init_page(bpage->page);
1214
1215		if (user_thread && fatal_signal_pending(current))
1216			goto free_pages;
1217	}
1218	if (user_thread)
1219		clear_current_oom_origin();
1220
1221	return 0;
1222
1223free_pages:
1224	list_for_each_entry_safe(bpage, tmp, pages, list) {
1225		list_del_init(&bpage->list);
1226		free_buffer_page(bpage);
1227	}
1228	if (user_thread)
1229		clear_current_oom_origin();
1230
1231	return -ENOMEM;
1232}
1233
1234static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1235			     unsigned long nr_pages)
1236{
1237	LIST_HEAD(pages);
1238
1239	WARN_ON(!nr_pages);
1240
1241	if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
1242		return -ENOMEM;
1243
1244	/*
1245	 * The ring buffer page list is a circular list that does not
1246	 * start and end with a list head. All page list items point to
1247	 * other pages.
1248	 */
1249	cpu_buffer->pages = pages.next;
1250	list_del(&pages);
1251
1252	cpu_buffer->nr_pages = nr_pages;
1253
1254	rb_check_pages(cpu_buffer);
1255
1256	return 0;
1257}
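
/*
 * Sketch of the resulting layout (illustrative): cpu_buffer->pages
 * points into a fully circular list with no separate head node, so a
 * walk must terminate by returning to its starting page:
 *
 *	struct list_head *p = cpu_buffer->pages;
 *
 *	do {
 *		p = rb_list_head(p->next);
 *	} while (p != cpu_buffer->pages);
 */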
1258
1259static struct ring_buffer_per_cpu *
1260rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
1261{
1262	struct ring_buffer_per_cpu *cpu_buffer;
1263	struct buffer_page *bpage;
1264	struct page *page;
1265	int ret;
1266
1267	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1268				  GFP_KERNEL, cpu_to_node(cpu));
1269	if (!cpu_buffer)
1270		return NULL;
1271
1272	cpu_buffer->cpu = cpu;
1273	cpu_buffer->buffer = buffer;
1274	raw_spin_lock_init(&cpu_buffer->reader_lock);
1275	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1276	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1277	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
1278	init_completion(&cpu_buffer->update_done);
1279	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
1280	init_waitqueue_head(&cpu_buffer->irq_work.waiters);
1281	init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);
1282
1283	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1284			    GFP_KERNEL, cpu_to_node(cpu));
1285	if (!bpage)
1286		goto fail_free_buffer;
1287
1288	rb_check_bpage(cpu_buffer, bpage);
1289
1290	cpu_buffer->reader_page = bpage;
1291	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
1292	if (!page)
1293		goto fail_free_reader;
1294	bpage->page = page_address(page);
1295	rb_init_page(bpage->page);
1296
1297	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1298	INIT_LIST_HEAD(&cpu_buffer->new_pages);
1299
1300	ret = rb_allocate_pages(cpu_buffer, nr_pages);
1301	if (ret < 0)
1302		goto fail_free_reader;
1303
1304	cpu_buffer->head_page
1305		= list_entry(cpu_buffer->pages, struct buffer_page, list);
1306	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1307
1308	rb_head_page_activate(cpu_buffer);
1309
1310	return cpu_buffer;
1311
1312 fail_free_reader:
1313	free_buffer_page(cpu_buffer->reader_page);
1314
1315 fail_free_buffer:
1316	kfree(cpu_buffer);
1317	return NULL;
1318}
1319
1320static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1321{
1322	struct list_head *head = cpu_buffer->pages;
1323	struct buffer_page *bpage, *tmp;
1324
1325	free_buffer_page(cpu_buffer->reader_page);
1326
1327	rb_head_page_deactivate(cpu_buffer);
1328
1329	if (head) {
1330		list_for_each_entry_safe(bpage, tmp, head, list) {
1331			list_del_init(&bpage->list);
1332			free_buffer_page(bpage);
1333		}
1334		bpage = list_entry(head, struct buffer_page, list);
1335		free_buffer_page(bpage);
1336	}
1337
1338	kfree(cpu_buffer);
1339}
1340
1341/**
1342 * __ring_buffer_alloc - allocate a new ring_buffer
1343 * @size: the size in bytes per cpu that is needed.
1344 * @flags: attributes to set for the ring buffer.
1345 *
1346 * Currently the only flag that is available is the RB_FL_OVERWRITE
1347 * flag. This flag means that the buffer will overwrite old data
1348 * when the buffer wraps. If this flag is not set, the buffer will
1349 * drop data when the tail hits the head.
1350 */
1351struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1352					struct lock_class_key *key)
1353{
1354	struct ring_buffer *buffer;
1355	long nr_pages;
1356	int bsize;
1357	int cpu;
1358	int ret;
1359
1360	/* keep it in its own cache line */
1361	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1362			 GFP_KERNEL);
1363	if (!buffer)
1364		return NULL;
1365
1366	if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1367		goto fail_free_buffer;
1368
1369	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1370	buffer->flags = flags;
1371	buffer->clock = trace_clock_local;
1372	buffer->reader_lock_key = key;
1373
1374	init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
1375	init_waitqueue_head(&buffer->irq_work.waiters);
1376
1377	/* need at least two pages */
1378	if (nr_pages < 2)
1379		nr_pages = 2;
1380
1381	buffer->cpus = nr_cpu_ids;
1382
1383	bsize = sizeof(void *) * nr_cpu_ids;
1384	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1385				  GFP_KERNEL);
1386	if (!buffer->buffers)
1387		goto fail_free_cpumask;
1388
1389	cpu = raw_smp_processor_id();
1390	cpumask_set_cpu(cpu, buffer->cpumask);
1391	buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
1392	if (!buffer->buffers[cpu])
1393		goto fail_free_buffers;
1394
1395	ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
1396	if (ret < 0)
1397		goto fail_free_buffers;
1398
1399	mutex_init(&buffer->mutex);
1400
1401	return buffer;
1402
1403 fail_free_buffers:
1404	for_each_buffer_cpu(buffer, cpu) {
1405		if (buffer->buffers[cpu])
1406			rb_free_cpu_buffer(buffer->buffers[cpu]);
1407	}
1408	kfree(buffer->buffers);
1409
1410 fail_free_cpumask:
1411	free_cpumask_var(buffer->cpumask);
1412
1413 fail_free_buffer:
1414	kfree(buffer);
1415	return NULL;
1416}
1417EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
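
/*
 * Illustrative usage: callers normally go through the ring_buffer_alloc()
 * wrapper in <linux/ring_buffer.h>, which supplies the lock_class_key:
 *
 *	struct ring_buffer *rb;
 *
 *	rb = ring_buffer_alloc(PAGE_SIZE * 8, RB_FL_OVERWRITE);
 *	if (!rb)
 *		return -ENOMEM;
 *
 * The size is bytes per cpu, rounded up to at least two buffer pages.
 */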
1418
1419/**
1420 * ring_buffer_free - free a ring buffer.
1421 * @buffer: the buffer to free.
1422 */
1423void
1424ring_buffer_free(struct ring_buffer *buffer)
1425{
1426	int cpu;
1427
1428	cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
1429
1430	for_each_buffer_cpu(buffer, cpu)
1431		rb_free_cpu_buffer(buffer->buffers[cpu]);
1432
1433	kfree(buffer->buffers);
1434	free_cpumask_var(buffer->cpumask);
1435
1436	kfree(buffer);
1437}
1438EXPORT_SYMBOL_GPL(ring_buffer_free);
1439
1440void ring_buffer_set_clock(struct ring_buffer *buffer,
1441			   u64 (*clock)(void))
1442{
1443	buffer->clock = clock;
1444}
1445
1446void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs)
1447{
1448	buffer->time_stamp_abs = abs;
1449}
1450
1451bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer)
1452{
1453	return buffer->time_stamp_abs;
1454}
1455
1456static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1457
1458static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1459{
1460	return local_read(&bpage->entries) & RB_WRITE_MASK;
1461}
1462
1463static inline unsigned long rb_page_write(struct buffer_page *bpage)
1464{
1465	return local_read(&bpage->write) & RB_WRITE_MASK;
1466}
1467
1468static int
1469rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
1470{
1471	struct list_head *tail_page, *to_remove, *next_page;
1472	struct buffer_page *to_remove_page, *tmp_iter_page;
1473	struct buffer_page *last_page, *first_page;
1474	unsigned long nr_removed;
1475	unsigned long head_bit;
1476	int page_entries;
1477
1478	head_bit = 0;
1479
1480	raw_spin_lock_irq(&cpu_buffer->reader_lock);
1481	atomic_inc(&cpu_buffer->record_disabled);
1482	/*
1483	 * We don't race with the readers since we have acquired the reader
1484	 * lock. We also don't race with writers after disabling recording.
1485	 * This makes it easy to figure out the first and the last page to be
1486	 * removed from the list. We unlink all the pages in between including
1487	 * the first and last pages. This is done in a busy loop so that we
1488	 * lose the least number of traces.
1489	 * The pages are freed after we restart recording and unlock readers.
1490	 */
1491	tail_page = &cpu_buffer->tail_page->list;
1492
1493	/*
1494	 * The tail page might be on the reader page; if so, remove
1495	 * the next page from the ring buffer instead.
1496	 */
1497	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1498		tail_page = rb_list_head(tail_page->next);
1499	to_remove = tail_page;
1500
1501	/* start of pages to remove */
1502	first_page = list_entry(rb_list_head(to_remove->next),
1503				struct buffer_page, list);
1504
1505	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
1506		to_remove = rb_list_head(to_remove)->next;
1507		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
1508	}
1509
1510	next_page = rb_list_head(to_remove)->next;
1511
1512	/*
1513	 * Now we remove all pages between tail_page and next_page.
1514	 * Make sure that we have head_bit value preserved for the
1515	 * next page
1516	 */
1517	tail_page->next = (struct list_head *)((unsigned long)next_page |
1518						head_bit);
1519	next_page = rb_list_head(next_page);
1520	next_page->prev = tail_page;
1521
1522	/* make sure pages points to a valid page in the ring buffer */
1523	cpu_buffer->pages = next_page;
1524
1525	/* update head page */
1526	if (head_bit)
1527		cpu_buffer->head_page = list_entry(next_page,
1528						struct buffer_page, list);
1529
1530	/*
1531	 * change read pointer to make sure any read iterators reset
1532	 * themselves
1533	 */
1534	cpu_buffer->read = 0;
1535
1536	/* pages are removed, resume tracing and then free the pages */
1537	atomic_dec(&cpu_buffer->record_disabled);
1538	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1539
1540	RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
1541
1542	/* last buffer page to remove */
1543	last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
1544				list);
1545	tmp_iter_page = first_page;
1546
1547	do {
1548		to_remove_page = tmp_iter_page;
1549		rb_inc_page(cpu_buffer, &tmp_iter_page);
1550
1551		/* update the counters */
1552		page_entries = rb_page_entries(to_remove_page);
1553		if (page_entries) {
1554			/*
1555			 * If something was added to this page, it was full
1556			 * since it is not the tail page. So we deduct the
1557			 * bytes consumed in ring buffer from here.
1558			 * Increment overrun to account for the lost events.
1559			 */
1560			local_add(page_entries, &cpu_buffer->overrun);
1561			local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1562		}
1563
1564		/*
1565		 * We have already removed references to this list item, just
1566		 * free up the buffer_page and its page
1567		 */
1568		free_buffer_page(to_remove_page);
1569		nr_removed--;
1570
1571	} while (to_remove_page != last_page);
1572
1573	RB_WARN_ON(cpu_buffer, nr_removed);
1574
1575	return nr_removed == 0;
1576}
1577
1578static int
1579rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
1580{
1581	struct list_head *pages = &cpu_buffer->new_pages;
1582	int retries, success;
1583
1584	raw_spin_lock_irq(&cpu_buffer->reader_lock);
1585	/*
1586	 * We are holding the reader lock, so the reader page won't be swapped
1587	 * in the ring buffer. Now we are racing with the writer trying to
1588	 * move head page and the tail page.
1589	 * We are going to adapt the reader page update process where:
1590	 * 1. We first splice the start and end of list of new pages between
1591	 *    the head page and its previous page.
1592	 * 2. We cmpxchg the prev_page->next to point from head page to the
1593	 *    start of new pages list.
1594	 * 3. Finally, we update the head->prev to the end of new list.
1595	 *
1596	 * We will try this process 10 times, to make sure that we don't keep
1597	 * spinning.
1598	 */
1599	retries = 10;
1600	success = 0;
1601	while (retries--) {
1602		struct list_head *head_page, *prev_page, *r;
1603		struct list_head *last_page, *first_page;
1604		struct list_head *head_page_with_bit;
		struct buffer_page *hpage = rb_set_head_page(cpu_buffer);
1605
1606		if (!hpage)
1607			break;
1608		head_page = &hpage->list;
1609		prev_page = head_page->prev;
1610
1611		first_page = pages->next;
1612		last_page  = pages->prev;
1613
1614		head_page_with_bit = (struct list_head *)
1615				     ((unsigned long)head_page | RB_PAGE_HEAD);
1616
1617		last_page->next = head_page_with_bit;
1618		first_page->prev = prev_page;
1619
1620		r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
1621
1622		if (r == head_page_with_bit) {
1623			/*
1624			 * yay, we replaced the page pointer to our new list,
1625			 * now we just have to update the head page's prev
1626			 * pointer to point to the end of the list
1627			 */
1628			head_page->prev = last_page;
1629			success = 1;
1630			break;
1631		}
1632	}
1633
1634	if (success)
1635		INIT_LIST_HEAD(pages);
1636	/*
1637	 * If we weren't successful in adding the new pages, warn and stop
1638	 * tracing
1639	 */
1640	RB_WARN_ON(cpu_buffer, !success);
1641	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1642
1643	/* free pages if they weren't inserted */
1644	if (!success) {
1645		struct buffer_page *bpage, *tmp;
1646		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1647					 list) {
1648			list_del_init(&bpage->list);
1649			free_buffer_page(bpage);
1650		}
1651	}
1652	return success;
1653}
1654
1655static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
1656{
1657	int success;
1658
1659	if (cpu_buffer->nr_pages_to_update > 0)
1660		success = rb_insert_pages(cpu_buffer);
1661	else
1662		success = rb_remove_pages(cpu_buffer,
1663					-cpu_buffer->nr_pages_to_update);
1664
1665	if (success)
1666		cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
1667}
1668
1669static void update_pages_handler(struct work_struct *work)
1670{
1671	struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
1672			struct ring_buffer_per_cpu, update_pages_work);
1673	rb_update_pages(cpu_buffer);
1674	complete(&cpu_buffer->update_done);
1675}
1676
1677/**
1678 * ring_buffer_resize - resize the ring buffer
1679 * @buffer: the buffer to resize.
1680 * @size: the new size.
1681 * @cpu_id: the cpu buffer to resize
1682 *
1683 * Minimum size is 2 * BUF_PAGE_SIZE.
1684 *
1685 * Returns 0 on success and < 0 on failure.
1686 */
1687int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1688			int cpu_id)
1689{
1690	struct ring_buffer_per_cpu *cpu_buffer;
1691	unsigned long nr_pages;
1692	int cpu, err = 0;
1693
1694	/*
1695	 * Always succeed at resizing a non-existent buffer:
1696	 */
1697	if (!buffer)
1698		return size;
1699
1700	/* Make sure the requested buffer exists */
1701	if (cpu_id != RING_BUFFER_ALL_CPUS &&
1702	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
1703		return size;
1704
1705	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1706
1707	/* we need a minimum of two pages */
1708	if (nr_pages < 2)
1709		nr_pages = 2;
1710
1711	size = nr_pages * BUF_PAGE_SIZE;
1712
1713	/*
1714	 * Don't succeed if resizing is disabled, as a reader might be
1715	 * manipulating the ring buffer and is expecting a sane state while
1716	 * this is true.
1717	 */
1718	if (atomic_read(&buffer->resize_disabled))
1719		return -EBUSY;
1720
1721	/* prevent another thread from changing buffer sizes */
1722	mutex_lock(&buffer->mutex);
1723
1724	if (cpu_id == RING_BUFFER_ALL_CPUS) {
1725		/* calculate the pages to update */
1726		for_each_buffer_cpu(buffer, cpu) {
1727			cpu_buffer = buffer->buffers[cpu];
1728
1729			cpu_buffer->nr_pages_to_update = nr_pages -
1730							cpu_buffer->nr_pages;
1731			/*
1732			 * nothing more to do for removing pages or no update
1733			 */
1734			if (cpu_buffer->nr_pages_to_update <= 0)
1735				continue;
1736			/*
1737			 * to add pages, make sure all new pages can be
1738			 * allocated without receiving ENOMEM
1739			 */
1740			INIT_LIST_HEAD(&cpu_buffer->new_pages);
1741			if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1742						&cpu_buffer->new_pages, cpu)) {
1743				/* not enough memory for new pages */
1744				err = -ENOMEM;
1745				goto out_err;
1746			}
1747		}
1748
1749		get_online_cpus();
1750		/*
1751		 * Fire off all the required work handlers
1752		 * We can't schedule on offline CPUs, but it's not necessary
1753		 * since we can change their buffer sizes without any race.
1754		 */
1755		for_each_buffer_cpu(buffer, cpu) {
1756			cpu_buffer = buffer->buffers[cpu];
1757			if (!cpu_buffer->nr_pages_to_update)
1758				continue;
1759
1760			/* Can't run something on an offline CPU. */
1761			if (!cpu_online(cpu)) {
1762				rb_update_pages(cpu_buffer);
1763				cpu_buffer->nr_pages_to_update = 0;
1764			} else {
1765				schedule_work_on(cpu,
1766						&cpu_buffer->update_pages_work);
1767			}
1768		}
1769
1770		/* wait for all the updates to complete */
1771		for_each_buffer_cpu(buffer, cpu) {
1772			cpu_buffer = buffer->buffers[cpu];
1773			if (!cpu_buffer->nr_pages_to_update)
1774				continue;
1775
1776			if (cpu_online(cpu))
1777				wait_for_completion(&cpu_buffer->update_done);
1778			cpu_buffer->nr_pages_to_update = 0;
1779		}
1780
1781		put_online_cpus();
1782	} else {
1783		/* Make sure this CPU has been initialized */
1784		if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
1785			goto out;
1786
1787		cpu_buffer = buffer->buffers[cpu_id];
1788
1789		if (nr_pages == cpu_buffer->nr_pages)
1790			goto out;
1791
1792		cpu_buffer->nr_pages_to_update = nr_pages -
1793						cpu_buffer->nr_pages;
1794
1795		INIT_LIST_HEAD(&cpu_buffer->new_pages);
1796		if (cpu_buffer->nr_pages_to_update > 0 &&
1797			__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1798					    &cpu_buffer->new_pages, cpu_id)) {
1799			err = -ENOMEM;
1800			goto out_err;
1801		}
1802
1803		get_online_cpus();
1804
1805		/* Can't run something on an offline CPU. */
1806		if (!cpu_online(cpu_id))
1807			rb_update_pages(cpu_buffer);
1808		else {
1809			schedule_work_on(cpu_id,
1810					 &cpu_buffer->update_pages_work);
1811			wait_for_completion(&cpu_buffer->update_done);
1812		}
1813
1814		cpu_buffer->nr_pages_to_update = 0;
1815		put_online_cpus();
1816	}
1817
1818 out:
1819	/*
1820	 * The ring buffer resize can happen with the ring buffer
1821	 * enabled, so that the update disturbs the tracing as little
1822	 * as possible. But if the buffer is disabled, we do not need
1823	 * to worry about that, and we can take the time to verify
1824	 * that the buffer is not corrupt.
1825	 */
1826	if (atomic_read(&buffer->record_disabled)) {
1827		atomic_inc(&buffer->record_disabled);
1828		/*
1829		 * Even though the buffer was disabled, we must make sure
1830		 * that it is truly disabled before calling rb_check_pages.
1831		 * There could have been a race between checking
1832		 * record_disabled and incrementing it.
1833		 */
1834		synchronize_sched();
1835		for_each_buffer_cpu(buffer, cpu) {
1836			cpu_buffer = buffer->buffers[cpu];
1837			rb_check_pages(cpu_buffer);
1838		}
1839		atomic_dec(&buffer->record_disabled);
1840	}
1841
1842	mutex_unlock(&buffer->mutex);
1843	return size;
1844
1845 out_err:
1846	for_each_buffer_cpu(buffer, cpu) {
1847		struct buffer_page *bpage, *tmp;
1848
1849		cpu_buffer = buffer->buffers[cpu];
1850		cpu_buffer->nr_pages_to_update = 0;
1851
1852		if (list_empty(&cpu_buffer->new_pages))
1853			continue;
1854
1855		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1856					list) {
1857			list_del_init(&bpage->list);
1858			free_buffer_page(bpage);
1859		}
1860	}
1861	mutex_unlock(&buffer->mutex);
1862	return err;
1863}
1864EXPORT_SYMBOL_GPL(ring_buffer_resize);
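
/*
 * Illustrative usage: growing every per-cpu buffer. On success the
 * rounded-up size in bytes is returned; on failure a negative errno
 * such as -ENOMEM or -EBUSY.
 *
 *	int ret = ring_buffer_resize(rb, new_bytes, RING_BUFFER_ALL_CPUS);
 *	if (ret < 0)
 *		return ret;
 */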
1865
1866void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
1867{
1868	mutex_lock(&buffer->mutex);
1869	if (val)
1870		buffer->flags |= RB_FL_OVERWRITE;
1871	else
1872		buffer->flags &= ~RB_FL_OVERWRITE;
1873	mutex_unlock(&buffer->mutex);
1874}
1875EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
1876
1877static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
1878{
1879	return bpage->page->data + index;
1880}
1881
1882static __always_inline struct ring_buffer_event *
1883rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
1884{
1885	return __rb_page_index(cpu_buffer->reader_page,
1886			       cpu_buffer->reader_page->read);
1887}
1888
1889static __always_inline struct ring_buffer_event *
1890rb_iter_head_event(struct ring_buffer_iter *iter)
1891{
1892	return __rb_page_index(iter->head_page, iter->head);
1893}
1894
1895static __always_inline unsigned rb_page_commit(struct buffer_page *bpage)
1896{
1897	return local_read(&bpage->page->commit);
1898}
1899
1900/* Size is determined by what has been committed */
1901static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
1902{
1903	return rb_page_commit(bpage);
1904}
1905
1906static __always_inline unsigned
1907rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
1908{
1909	return rb_page_commit(cpu_buffer->commit_page);
1910}
1911
1912static __always_inline unsigned
1913rb_event_index(struct ring_buffer_event *event)
1914{
1915	unsigned long addr = (unsigned long)event;
1916
1917	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
1918}
1919
1920static void rb_inc_iter(struct ring_buffer_iter *iter)
1921{
1922	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1923
1924	/*
1925	 * The iterator could be on the reader page (it starts there).
1926	 * But the head could have moved, since the reader was
1927	 * found. Check for this case and assign the iterator
1928	 * to the head page instead of next.
1929	 */
1930	if (iter->head_page == cpu_buffer->reader_page)
1931		iter->head_page = rb_set_head_page(cpu_buffer);
1932	else
1933		rb_inc_page(cpu_buffer, &iter->head_page);
1934
1935	iter->read_stamp = iter->head_page->page->time_stamp;
1936	iter->head = 0;
1937}
1938
1939/*
1940 * rb_handle_head_page - writer hit the head page
1941 *
1942 * Returns: +1 to retry page
1943 *           0 to continue
1944 *          -1 on error
1945 */
1946static int
1947rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
1948		    struct buffer_page *tail_page,
1949		    struct buffer_page *next_page)
1950{
1951	struct buffer_page *new_head;
1952	int entries;
1953	int type;
1954	int ret;
1955
1956	entries = rb_page_entries(next_page);
1957
1958	/*
1959	 * The hard part is here. We need to move the head
1960	 * forward, and protect against both readers on
1961	 * other CPUs and writers coming in via interrupts.
1962	 */
1963	type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
1964				       RB_PAGE_HEAD);
1965
1966	/*
1967	 * type can be one of four:
1968	 *  NORMAL - an interrupt already moved it for us
1969	 *  HEAD   - we are the first to get here.
1970	 *  UPDATE - we are the interrupt interrupting
1971	 *           a current move.
1972	 *  MOVED  - a reader on another CPU moved the next
1973	 *           pointer to its reader page. Give up
1974	 *           and try again.
1975	 */
1976
1977	switch (type) {
1978	case RB_PAGE_HEAD:
1979		/*
1980		 * We changed the head to UPDATE, thus
1981		 * it is our responsibility to update
1982		 * the counters.
1983		 */
1984		local_add(entries, &cpu_buffer->overrun);
1985		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1986
1987		/*
1988		 * The entries will be zeroed out when we move the
1989		 * tail page.
1990		 */
1991
1992		/* still more to do */
1993		break;
1994
1995	case RB_PAGE_UPDATE:
1996		/*
1997		 * This is an interrupt that interrupted the
1998		 * previous update. Still more to do.
1999		 */
2000		break;
2001	case RB_PAGE_NORMAL:
2002		/*
2003		 * An interrupt came in before the update
2004		 * and processed this for us.
2005		 * Nothing left to do.
2006		 */
2007		return 1;
2008	case RB_PAGE_MOVED:
2009		/*
2010		 * The reader is on another CPU and just did
2011		 * a swap with our next_page.
2012		 * Try again.
2013		 */
2014		return 1;
2015	default:
2016		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
2017		return -1;
2018	}
2019
2020	/*
2021	 * Now that we are here, the old head pointer is
2022	 * set to UPDATE. This will keep the reader from
2023	 * swapping the head page with the reader page.
2024	 * The reader (on another CPU) will spin till
2025	 * we are finished.
2026	 *
2027	 * We just need to protect against interrupts
2028	 * doing the job. We will set the next pointer
2029	 * to HEAD. After that, we set the old pointer
2030	 * to NORMAL, but only if it was HEAD before.
2031	 * otherwise we are an interrupt, and only
2032	 * want the outer most commit to reset it.
2033	 */
2034	new_head = next_page;
2035	rb_inc_page(cpu_buffer, &new_head);
2036
2037	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
2038				    RB_PAGE_NORMAL);
2039
2040	/*
2041	 * Valid returns are:
2042	 *  HEAD   - an interrupt came in and already set it.
2043	 *  NORMAL - One of two things:
2044	 *            1) We really set it.
2045	 *            2) A bunch of interrupts came in and moved
2046	 *               the page forward again.
2047	 */
2048	switch (ret) {
2049	case RB_PAGE_HEAD:
2050	case RB_PAGE_NORMAL:
2051		/* OK */
2052		break;
2053	default:
2054		RB_WARN_ON(cpu_buffer, 1);
2055		return -1;
2056	}
2057
2058	/*
2059	 * It is possible that an interrupt came in,
2060	 * set the head up, then more interrupts came in
2061	 * and moved it again. When we get back here,
2062	 * the page would have been set to NORMAL but we
2063	 * just set it back to HEAD.
2064	 *
2065	 * How do you detect this? Well, if that happened
2066	 * the tail page would have moved.
2067	 */
2068	if (ret == RB_PAGE_NORMAL) {
2069		struct buffer_page *buffer_tail_page;
2070
2071		buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
2072		/*
2073		 * If the tail had moved past next, then we need
2074		 * to reset the pointer.
2075		 */
2076		if (buffer_tail_page != tail_page &&
2077		    buffer_tail_page != next_page)
2078			rb_head_page_set_normal(cpu_buffer, new_head,
2079						next_page,
2080						RB_PAGE_HEAD);
2081	}
2082
2083	/*
2084	 * If this was the outer most commit (the one that
2085	 * changed the original pointer from HEAD to UPDATE),
2086	 * then it is up to us to reset it to NORMAL.
2087	 */
2088	if (type == RB_PAGE_HEAD) {
2089		ret = rb_head_page_set_normal(cpu_buffer, next_page,
2090					      tail_page,
2091					      RB_PAGE_UPDATE);
2092		if (RB_WARN_ON(cpu_buffer,
2093			       ret != RB_PAGE_UPDATE))
2094			return -1;
2095	}
2096
2097	return 0;
2098}
2099
2100static inline void
2101rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2102	      unsigned long tail, struct rb_event_info *info)
2103{
2104	struct buffer_page *tail_page = info->tail_page;
2105	struct ring_buffer_event *event;
2106	unsigned long length = info->length;
2107
2108	/*
2109	 * Only the event that crossed the page boundary
2110	 * must fill the old tail_page with padding.
2111	 */
2112	if (tail >= BUF_PAGE_SIZE) {
2113		/*
2114		 * If the page was filled, then we still need
2115		 * to update the real_end. Reset it to zero
2116		 * and the reader will ignore it.
2117		 */
2118		if (tail == BUF_PAGE_SIZE)
2119			tail_page->real_end = 0;
2120
2121		local_sub(length, &tail_page->write);
2122		return;
2123	}
2124
2125	event = __rb_page_index(tail_page, tail);
2126
2127	/* account for padding bytes */
2128	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2129
2130	/*
2131	 * Save the original length to the meta data.
2132	 * This will be used by the reader to add lost event
2133	 * counter.
2134	 */
2135	tail_page->real_end = tail;
2136
2137	/*
2138	 * If this event is bigger than the minimum size, then
2139	 * we need to be careful that we don't subtract the
2140	 * write counter enough to allow another writer to slip
2141	 * in on this page.
2142	 * We put in a discarded commit instead, to make sure
2143	 * that this space is not used again.
2144	 *
2145	 * If we are less than the minimum size, we don't need to
2146	 * worry about it.
2147	 */
2148	if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
2149		/* No room for any events */
2150
2151		/* Mark the rest of the page with padding */
2152		rb_event_set_padding(event);
2153
2154		/* Set the write back to the previous setting */
2155		local_sub(length, &tail_page->write);
2156		return;
2157	}
2158
2159	/* Put in a discarded event */
2160	event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
2161	event->type_len = RINGBUF_TYPE_PADDING;
2162	/* time delta must be non zero */
2163	event->time_delta = 1;
2164
2165	/* Set write to end of buffer */
2166	length = (tail + length) - BUF_PAGE_SIZE;
2167	local_sub(length, &tail_page->write);
2168}
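
/*
 * Worked example (illustrative, assuming a 4-byte RB_EVNT_HDR_SIZE):
 * with tail 32 bytes short of BUF_PAGE_SIZE, the discarded event
 * written above ends up as
 *
 *	event->array[0]   = 32 - 4;	(28 bytes of padded payload)
 *	event->type_len   = RINGBUF_TYPE_PADDING;
 *	event->time_delta = 1;
 *
 * The non-zero time_delta marks this as a length-carrying discarded
 * event; a PADDING event with time_delta == 0 instead means there is
 * no more data on the page.
 */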
2169
2170static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
2171
2172/*
2173 * This is the slow path, force gcc not to inline it.
2174 */
2175static noinline struct ring_buffer_event *
2176rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2177	     unsigned long tail, struct rb_event_info *info)
2178{
2179	struct buffer_page *tail_page = info->tail_page;
2180	struct buffer_page *commit_page = cpu_buffer->commit_page;
2181	struct ring_buffer *buffer = cpu_buffer->buffer;
2182	struct buffer_page *next_page;
2183	int ret;
2184
2185	next_page = tail_page;
2186
2187	rb_inc_page(cpu_buffer, &next_page);
2188
2189	/*
2190	 * If for some reason, we had an interrupt storm that made
2191	 * it all the way around the buffer, bail, and warn
2192	 * about it.
2193	 */
2194	if (unlikely(next_page == commit_page)) {
2195		local_inc(&cpu_buffer->commit_overrun);
2196		goto out_reset;
2197	}
2198
2199	/*
2200	 * This is where the fun begins!
2201	 *
2202	 * We are fighting against races between a reader that
2203	 * could be on another CPU trying to swap its reader
2204	 * page with the buffer head.
2205	 *
2206	 * We are also fighting against interrupts coming in and
2207	 * moving the head or tail on us as well.
2208	 *
2209	 * If the next page is the head page then we have filled
2210	 * the buffer, unless the commit page is still on the
2211	 * reader page.
2212	 */
2213	if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
2214
2215		/*
2216		 * If the commit is not on the reader page, then
2217		 * move the header page.
2218		 */
2219		if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2220			/*
2221			 * If we are not in overwrite mode,
2222			 * this is easy, just stop here.
2223			 */
2224			if (!(buffer->flags & RB_FL_OVERWRITE)) {
2225				local_inc(&cpu_buffer->dropped_events);
2226				goto out_reset;
2227			}
2228
2229			ret = rb_handle_head_page(cpu_buffer,
2230						  tail_page,
2231						  next_page);
2232			if (ret < 0)
2233				goto out_reset;
2234			if (ret)
2235				goto out_again;
2236		} else {
2237			/*
2238			 * We need to be careful here too. The
2239			 * commit page could still be on the reader
2240			 * page. We could have a small buffer, and
2241			 * have filled up the buffer with events
2242			 * from interrupts and such, and wrapped.
2243			 *
2244			 * Note, if the tail page is also on the
2245			 * reader_page, we let it move out.
2246			 */
2247			if (unlikely((cpu_buffer->commit_page !=
2248				      cpu_buffer->tail_page) &&
2249				     (cpu_buffer->commit_page ==
2250				      cpu_buffer->reader_page))) {
2251				local_inc(&cpu_buffer->commit_overrun);
2252				goto out_reset;
2253			}
2254		}
2255	}
2256
2257	rb_tail_page_update(cpu_buffer, tail_page, next_page);
2258
2259 out_again:
2260
2261	rb_reset_tail(cpu_buffer, tail, info);
2262
2263	/* Commit what we have for now. */
2264	rb_end_commit(cpu_buffer);
2265	/* rb_end_commit() decs committing */
2266	local_inc(&cpu_buffer->committing);
2267
2268	/* fail and let the caller try again */
2269	return ERR_PTR(-EAGAIN);
2270
2271 out_reset:
2272	/* reset write */
2273	rb_reset_tail(cpu_buffer, tail, info);
2274
2275	return NULL;
2276}
2277
2278/* Slow path, do not inline */
2279static noinline struct ring_buffer_event *
2280rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs)
2281{
2282	if (abs)
2283		event->type_len = RINGBUF_TYPE_TIME_STAMP;
2284	else
2285		event->type_len = RINGBUF_TYPE_TIME_EXTEND;
2286
2287	/* Not the first event on the page, or not delta? */
2288	if (abs || rb_event_index(event)) {
2289		event->time_delta = delta & TS_MASK;
2290		event->array[0] = delta >> TS_SHIFT;
2291	} else {
2292		/* nope, just zero it */
2293		event->time_delta = 0;
2294		event->array[0] = 0;
2295	}
2296
2297	return skip_time_extend(event);
2298}
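
/*
 * Worked example (illustrative; TS_SHIFT/TS_MASK are defined earlier in
 * this file, matching the 27-bit time_delta field in the event header):
 * for delta = (3ULL << 27) | 5, the extend event stores
 *
 *	event->time_delta = 5;		(delta & TS_MASK, the low 27 bits)
 *	event->array[0]   = 3;		(delta >> TS_SHIFT, the upper bits)
 *
 * and a reader reconstructs the delta as
 * ((u64)event->array[0] << TS_SHIFT) | event->time_delta.
 */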
2299
2300static inline bool rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
2301				     struct ring_buffer_event *event);
2302
2303/**
2304 * rb_update_event - update event type and data
2305 * @event: the event to update
2306 * @type: the type of event
2307 * @length: the size of the event field in the ring buffer
2308 *
2309 * Update the type and data fields of the event. The length
2310 * is the actual size that is written to the ring buffer,
2311 * and with this, we can determine what to place into the
2312 * data field.
2313 */
2314static void
2315rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
2316		struct ring_buffer_event *event,
2317		struct rb_event_info *info)
2318{
2319	unsigned length = info->length;
2320	u64 delta = info->delta;
2321
2322	/* Only a commit updates the timestamp */
2323	if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
2324		delta = 0;
2325
2326	/*
2327	 * If we need to add a timestamp, then we
2328	 * add it to the start of the reserved space.
2329	 */
2330	if (unlikely(info->add_timestamp)) {
2331		bool abs = ring_buffer_time_stamp_abs(cpu_buffer->buffer);
2332
2333		event = rb_add_time_stamp(event, info->delta, abs);
2334		length -= RB_LEN_TIME_EXTEND;
2335		delta = 0;
2336	}
2337
2338	event->time_delta = delta;
2339	length -= RB_EVNT_HDR_SIZE;
2340	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
2341		event->type_len = 0;
2342		event->array[0] = length;
2343	} else
2344		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
2345}
2346
2347static unsigned rb_calculate_event_length(unsigned length)
2348{
2349	struct ring_buffer_event event; /* Used only for sizeof array */
2350
2351	/* zero length can cause confusions */
2352	if (!length)
2353		length++;
2354
2355	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
2356		length += sizeof(event.array[0]);
2357
2358	length += RB_EVNT_HDR_SIZE;
2359	length = ALIGN(length, RB_ARCH_ALIGNMENT);
2360
2361	/*
2362	 * In case the time delta is larger than the 27 bits for it
2363	 * in the header, we need to add a timestamp. If another
2364	 * event comes in when trying to discard this one to increase
2365	 * the length, then the timestamp will be added in the allocated
2366	 * space of this event. If length is bigger than the size needed
2367	 * for the TIME_EXTEND, then padding has to be used. The events
2368	 * length must be either RB_LEN_TIME_EXTEND, or greater than or equal
2369	 * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding.
2370	 * As length is a multiple of 4, we only need to worry if it
2371	 * is 12 (RB_LEN_TIME_EXTEND + 4).
2372	 */
2373	if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
2374		length += RB_ALIGNMENT;
2375
2376	return length;
2377}
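
/*
 * Illustrative sketch, not part of the kernel source: a userspace model of
 * the rounding done by rb_calculate_event_length() for small events,
 * assuming the usual 4 byte event header, RB_ALIGNMENT == 4 and
 * RB_LEN_TIME_EXTEND == 8 (the RB_MAX_SMALL_DATA / 8-byte-alignment cases
 * are left out).  It shows why a 12 byte result is bumped to 16.
 */
#include <stdio.h>

static unsigned int demo_event_length(unsigned int length)
{
	if (!length)			/* zero length can cause confusion */
		length = 1;
	length += 4;			/* event header */
	length = (length + 3) & ~3u;	/* ALIGN(length, 4) */
	if (length == 8 + 4)		/* RB_LEN_TIME_EXTEND + RB_ALIGNMENT */
		length += 4;		/* leave room for real padding */
	return length;
}

int main(void)
{
	printf("%u %u %u\n", demo_event_length(4),	/* 8  */
			     demo_event_length(7),	/* 16 */
			     demo_event_length(13));	/* 20 */
	return 0;
}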
2378
2379#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2380static inline bool sched_clock_stable(void)
2381{
2382	return true;
2383}
2384#endif
2385
2386static inline int
2387rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2388		  struct ring_buffer_event *event)
2389{
2390	unsigned long new_index, old_index;
2391	struct buffer_page *bpage;
2392	unsigned long index;
2393	unsigned long addr;
2394
2395	new_index = rb_event_index(event);
2396	old_index = new_index + rb_event_ts_length(event);
2397	addr = (unsigned long)event;
2398	addr &= PAGE_MASK;
2399
2400	bpage = READ_ONCE(cpu_buffer->tail_page);
2401
2402	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2403		unsigned long write_mask =
2404			local_read(&bpage->write) & ~RB_WRITE_MASK;
2405		unsigned long event_length = rb_event_length(event);
2406		/*
2407		 * This is on the tail page. It is possible that
2408		 * a write could come in and move the tail page
2409		 * and write to the next page. That is fine
2410		 * because we just shorten what is on this page.
2411		 */
2412		old_index += write_mask;
2413		new_index += write_mask;
2414		index = local_cmpxchg(&bpage->write, old_index, new_index);
2415		if (index == old_index) {
2416			/* update counters */
2417			local_sub(event_length, &cpu_buffer->entries_bytes);
2418			return 1;
2419		}
2420	}
2421
2422	/* could not discard */
2423	return 0;
2424}
2425
2426static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2427{
2428	local_inc(&cpu_buffer->committing);
2429	local_inc(&cpu_buffer->commits);
2430}
2431
2432static __always_inline void
2433rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
2434{
2435	unsigned long max_count;
2436
2437	/*
2438	 * We only race with interrupts and NMIs on this CPU.
2439	 * If we own the commit event, then we can commit
2440	 * all others that interrupted us, since the interruptions
2441	 * are in stack format (they finish before they come
2442	 * back to us). This allows us to do a simple loop to
2443	 * assign the commit to the tail.
2444	 */
2445 again:
2446	max_count = cpu_buffer->nr_pages * 100;
2447
2448	while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
2449		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
2450			return;
2451		if (RB_WARN_ON(cpu_buffer,
2452			       rb_is_reader_page(cpu_buffer->tail_page)))
2453			return;
2454		local_set(&cpu_buffer->commit_page->page->commit,
2455			  rb_page_write(cpu_buffer->commit_page));
2456		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
2457		/* Only update the write stamp if the page has an event */
2458		if (rb_page_write(cpu_buffer->commit_page))
2459			cpu_buffer->write_stamp =
2460				cpu_buffer->commit_page->page->time_stamp;
2461		/* add barrier to keep gcc from optimizing too much */
2462		barrier();
2463	}
2464	while (rb_commit_index(cpu_buffer) !=
2465	       rb_page_write(cpu_buffer->commit_page)) {
2466
2467		local_set(&cpu_buffer->commit_page->page->commit,
2468			  rb_page_write(cpu_buffer->commit_page));
2469		RB_WARN_ON(cpu_buffer,
2470			   local_read(&cpu_buffer->commit_page->page->commit) &
2471			   ~RB_WRITE_MASK);
2472		barrier();
2473	}
2474
2475	/* again, keep gcc from optimizing */
2476	barrier();
2477
2478	/*
2479	 * If an interrupt came in just after the first while loop
2480	 * and pushed the tail page forward, we will be left with
2481	 * a dangling commit that will never go forward.
2482	 */
2483	if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
2484		goto again;
2485}
2486
2487static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2488{
2489	unsigned long commits;
2490
2491	if (RB_WARN_ON(cpu_buffer,
2492		       !local_read(&cpu_buffer->committing)))
2493		return;
2494
2495 again:
2496	commits = local_read(&cpu_buffer->commits);
2497	/* synchronize with interrupts */
2498	barrier();
2499	if (local_read(&cpu_buffer->committing) == 1)
2500		rb_set_commit_to_write(cpu_buffer);
2501
2502	local_dec(&cpu_buffer->committing);
2503
2504	/* synchronize with interrupts */
2505	barrier();
2506
2507	/*
2508	 * Need to account for interrupts coming in between the
2509	 * updating of the commit page and the clearing of the
2510	 * committing counter.
2511	 */
2512	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2513	    !local_read(&cpu_buffer->committing)) {
2514		local_inc(&cpu_buffer->committing);
2515		goto again;
2516	}
2517}
2518
2519static inline void rb_event_discard(struct ring_buffer_event *event)
2520{
2521	if (extended_time(event))
2522		event = skip_time_extend(event);
2523
2524	/* array[0] holds the actual length for the discarded event */
2525	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2526	event->type_len = RINGBUF_TYPE_PADDING;
2527	/* time delta must be non zero */
2528	if (!event->time_delta)
2529		event->time_delta = 1;
2530}
2531
2532static __always_inline bool
2533rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
2534		   struct ring_buffer_event *event)
2535{
2536	unsigned long addr = (unsigned long)event;
2537	unsigned long index;
2538
2539	index = rb_event_index(event);
2540	addr &= PAGE_MASK;
2541
2542	return cpu_buffer->commit_page->page == (void *)addr &&
2543		rb_commit_index(cpu_buffer) == index;
2544}
2545
2546static __always_inline void
2547rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2548		      struct ring_buffer_event *event)
2549{
2550	u64 delta;
2551
2552	/*
2553	 * The event first in the commit queue updates the
2554	 * time stamp.
2555	 */
2556	if (rb_event_is_commit(cpu_buffer, event)) {
2557		/*
2558		 * A commit event that is first on a page
2559		 * updates the write timestamp with the page stamp
2560		 */
2561		if (!rb_event_index(event))
2562			cpu_buffer->write_stamp =
2563				cpu_buffer->commit_page->page->time_stamp;
2564		else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
2565			delta = ring_buffer_event_time_stamp(event);
2566			cpu_buffer->write_stamp += delta;
2567		} else if (event->type_len == RINGBUF_TYPE_TIME_STAMP) {
2568			delta = ring_buffer_event_time_stamp(event);
2569			cpu_buffer->write_stamp = delta;
2570		} else
2571			cpu_buffer->write_stamp += event->time_delta;
2572	}
2573}
2574
2575static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2576		      struct ring_buffer_event *event)
2577{
2578	local_inc(&cpu_buffer->entries);
2579	rb_update_write_stamp(cpu_buffer, event);
2580	rb_end_commit(cpu_buffer);
2581}
2582
2583static __always_inline void
2584rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
2585{
2586	bool pagebusy;
2587
2588	if (buffer->irq_work.waiters_pending) {
2589		buffer->irq_work.waiters_pending = false;
2590		/* irq_work_queue() supplies its own memory barriers */
2591		irq_work_queue(&buffer->irq_work.work);
2592	}
2593
2594	if (cpu_buffer->irq_work.waiters_pending) {
2595		cpu_buffer->irq_work.waiters_pending = false;
2596		/* irq_work_queue() supplies its own memory barriers */
2597		irq_work_queue(&cpu_buffer->irq_work.work);
2598	}
2599
2600	pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
2601
2602	if (!pagebusy && cpu_buffer->irq_work.full_waiters_pending) {
2603		cpu_buffer->irq_work.wakeup_full = true;
2604		cpu_buffer->irq_work.full_waiters_pending = false;
2605		/* irq_work_queue() supplies its own memory barriers */
2606		irq_work_queue(&cpu_buffer->irq_work.work);
2607	}
2608}
2609
2610/*
2611 * The lock and unlock are done within a preempt disable section.
2612 * The current_context per_cpu variable can only be modified
2613 * by the current task between lock and unlock. But it can
2614 * be modified more than once via an interrupt. To pass this
2615 * information from the lock to the unlock without having to
2616 * access the 'in_interrupt()' functions again (which do show
2617 * a bit of overhead in something as critical as function tracing),
2618 * we use a bitmask trick.
2619 *
2620 *  bit 0 =  NMI context
2621 *  bit 1 =  IRQ context
2622 *  bit 2 =  SoftIRQ context
2623 *  bit 3 =  normal context.
2624 *
2625 * This works because this is the order of contexts that can
2626 * preempt other contexts. A SoftIRQ never preempts an IRQ
2627 * context.
2628 *
2629 * When the context is determined, the corresponding bit is
2630 * checked and set (if it was set, then a recursion of that context
2631 * happened).
2632 *
2633 * On unlock, we need to clear this bit. To do so, just subtract
2634 * 1 from the current_context and AND it to itself.
2635 *
2636 * (binary)
2637 *  101 - 1 = 100
2638 *  101 & 100 = 100 (clearing bit zero)
2639 *
2640 *  1010 - 1 = 1001
2641 *  1010 & 1001 = 1000 (clearing bit 1)
2642 *
2643 * The least significant bit can be cleared this way, and it
2644 * just so happens that it is the same bit corresponding to
2645 * the current context.
2646 */
2647
2648static __always_inline int
2649trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
2650{
2651	unsigned int val = cpu_buffer->current_context;
2652	unsigned long pc = preempt_count();
2653	int bit;
2654
2655	if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
2656		bit = RB_CTX_NORMAL;
2657	else
2658		bit = pc & NMI_MASK ? RB_CTX_NMI :
2659			pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
2660
2661	if (unlikely(val & (1 << (bit + cpu_buffer->nest))))
2662		return 1;
2663
2664	val |= (1 << (bit + cpu_buffer->nest));
2665	cpu_buffer->current_context = val;
2666
2667	return 0;
2668}
2669
2670static __always_inline void
2671trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
2672{
2673	cpu_buffer->current_context &=
2674		cpu_buffer->current_context - (1 << cpu_buffer->nest);
2675}
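
/*
 * Illustrative sketch, not part of the kernel source: a userspace model of
 * the "subtract 1 and AND" step used by trace_recursive_unlock() above.
 * It clears only the lowest set bit, which is exactly the bit the matching
 * trace_recursive_lock() set for the current context.
 */
#include <stdio.h>

int main(void)
{
	unsigned int val = 0x5;		/* 101: NMI and SoftIRQ bits set */

	val &= val - 1;			/* 101 & 100 = 100 */
	printf("0x%x\n", val);		/* prints 0x4 */

	val = 0xa;			/* 1010 */
	val &= val - 1;			/* 1010 & 1001 = 1000 */
	printf("0x%x\n", val);		/* prints 0x8 */

	return 0;
}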
2676
2677/* The recursive locking above uses 4 bits */
2678#define NESTED_BITS 4
2679
2680/**
2681 * ring_buffer_nest_start - Allow to trace while nested
2682 * @buffer: The ring buffer to modify
2683 *
2684 * The ring buffer has a safety mechanism to prevent recursion.
2685 * But there may be a case where a trace needs to be done while
2686 * tracing something else. In this case, calling this function
2687 * will allow the new trace to nest within a currently active
2688 * ring_buffer_lock_reserve().
2689 *
2690 * Call this function before calling another ring_buffer_lock_reserve() and
2691 * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
2692 */
2693void ring_buffer_nest_start(struct ring_buffer *buffer)
2694{
2695	struct ring_buffer_per_cpu *cpu_buffer;
2696	int cpu;
2697
2698	/* Enabled by ring_buffer_nest_end() */
2699	preempt_disable_notrace();
2700	cpu = raw_smp_processor_id();
2701	cpu_buffer = buffer->buffers[cpu];
2702	/* This is the shift value for the above recursive locking */
2703	cpu_buffer->nest += NESTED_BITS;
2704}
2705
2706/**
2707 * ring_buffer_nest_end - Allow to trace while nested
2708 * @buffer: The ring buffer to modify
2709 *
2710 * Must be called after ring_buffer_nest_start() and after the
2711 * ring_buffer_unlock_commit().
2712 */
2713void ring_buffer_nest_end(struct ring_buffer *buffer)
2714{
2715	struct ring_buffer_per_cpu *cpu_buffer;
2716	int cpu;
2717
2718	/* disabled by ring_buffer_nest_start() */
2719	cpu = raw_smp_processor_id();
2720	cpu_buffer = buffer->buffers[cpu];
2721	/* This is the shift value for the above recursive locking */
2722	cpu_buffer->nest -= NESTED_BITS;
2723	preempt_enable_notrace();
2724}
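
/*
 * Illustrative sketch, not part of the kernel source: how a writer that is
 * already inside a ring_buffer_lock_reserve()/ring_buffer_unlock_commit()
 * pair can emit a second, nested event.  The int payloads and the function
 * name are made up for the example.
 */
static void example_nested_write(struct ring_buffer *buffer)
{
	struct ring_buffer_event *outer, *inner;

	outer = ring_buffer_lock_reserve(buffer, sizeof(int));
	if (!outer)
		return;
	*(int *)ring_buffer_event_data(outer) = 1;

	/* Tell the recursion check that a nested reserve is intentional */
	ring_buffer_nest_start(buffer);
	inner = ring_buffer_lock_reserve(buffer, sizeof(int));
	if (inner) {
		*(int *)ring_buffer_event_data(inner) = 2;
		ring_buffer_unlock_commit(buffer, inner);
	}
	ring_buffer_nest_end(buffer);

	ring_buffer_unlock_commit(buffer, outer);
}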
2725
2726/**
2727 * ring_buffer_unlock_commit - commit a reserved event
2728 * @buffer: The buffer to commit to
2729 * @event: The event pointer to commit.
2730 *
2731 * This commits the data to the ring buffer, and releases any locks held.
2732 *
2733 * Must be paired with ring_buffer_lock_reserve.
2734 */
2735int ring_buffer_unlock_commit(struct ring_buffer *buffer,
2736			      struct ring_buffer_event *event)
2737{
2738	struct ring_buffer_per_cpu *cpu_buffer;
2739	int cpu = raw_smp_processor_id();
2740
2741	cpu_buffer = buffer->buffers[cpu];
2742
2743	rb_commit(cpu_buffer, event);
2744
2745	rb_wakeups(buffer, cpu_buffer);
2746
2747	trace_recursive_unlock(cpu_buffer);
2748
2749	preempt_enable_notrace();
2750
2751	return 0;
2752}
2753EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
2754
2755static noinline void
2756rb_handle_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2757		    struct rb_event_info *info)
2758{
2759	WARN_ONCE(info->delta > (1ULL << 59),
2760		  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
2761		  (unsigned long long)info->delta,
2762		  (unsigned long long)info->ts,
2763		  (unsigned long long)cpu_buffer->write_stamp,
2764		  sched_clock_stable() ? "" :
2765		  "If you just came from a suspend/resume,\n"
2766		  "please switch to the trace global clock:\n"
2767		  "  echo global > /sys/kernel/debug/tracing/trace_clock\n"
2768		  "or add trace_clock=global to the kernel command line\n");
2769	info->add_timestamp = 1;
2770}
2771
2772static struct ring_buffer_event *
2773__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
2774		  struct rb_event_info *info)
2775{
2776	struct ring_buffer_event *event;
2777	struct buffer_page *tail_page;
2778	unsigned long tail, write;
2779
2780	/*
2781	 * If the time delta since the last event is too big to
2782	 * hold in the time field of the event, then we append a
2783	 * TIME EXTEND event ahead of the data event.
2784	 */
2785	if (unlikely(info->add_timestamp))
2786		info->length += RB_LEN_TIME_EXTEND;
2787
2788	/* Don't let the compiler play games with cpu_buffer->tail_page */
2789	tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
2790	write = local_add_return(info->length, &tail_page->write);
2791
2792	/* set write to only the index of the write */
2793	write &= RB_WRITE_MASK;
2794	tail = write - info->length;
2795
2796	/*
2797	 * If this is the first commit on the page, then it has the same
2798	 * timestamp as the page itself.
2799	 */
2800	if (!tail && !ring_buffer_time_stamp_abs(cpu_buffer->buffer))
2801		info->delta = 0;
2802
2803	/* See if we shot past the end of this buffer page */
2804	if (unlikely(write > BUF_PAGE_SIZE))
2805		return rb_move_tail(cpu_buffer, tail, info);
2806
2807	/* We reserved something on the buffer */
2808
2809	event = __rb_page_index(tail_page, tail);
2810	rb_update_event(cpu_buffer, event, info);
2811
2812	local_inc(&tail_page->entries);
2813
2814	/*
2815	 * If this is the first commit on the page, then update
2816	 * its timestamp.
2817	 */
2818	if (!tail)
2819		tail_page->page->time_stamp = info->ts;
2820
2821	/* account for these added bytes */
2822	local_add(info->length, &cpu_buffer->entries_bytes);
2823
2824	return event;
2825}
2826
2827static __always_inline struct ring_buffer_event *
2828rb_reserve_next_event(struct ring_buffer *buffer,
2829		      struct ring_buffer_per_cpu *cpu_buffer,
2830		      unsigned long length)
2831{
2832	struct ring_buffer_event *event;
2833	struct rb_event_info info;
2834	int nr_loops = 0;
2835	u64 diff;
2836
2837	rb_start_commit(cpu_buffer);
2838
2839#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2840	/*
2841	 * Due to the ability to swap a cpu buffer between ring buffers,
2842	 * it is possible it was swapped before we committed.
2843	 * (committing stops a swap). We check for it here and
2844	 * if it happened, we have to fail the write.
2845	 */
2846	barrier();
2847	if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) {
2848		local_dec(&cpu_buffer->committing);
2849		local_dec(&cpu_buffer->commits);
2850		return NULL;
2851	}
2852#endif
2853
2854	info.length = rb_calculate_event_length(length);
2855 again:
2856	info.add_timestamp = 0;
2857	info.delta = 0;
2858
2859	/*
2860	 * We allow for interrupts to reenter here and do a trace.
2861	 * If one does, it will cause this original code to loop
2862	 * back here. Even with heavy interrupts happening, this
2863	 * should only happen a few times in a row. If this happens
2864	 * 1000 times in a row, there must be either an interrupt
2865	 * storm or we have something buggy.
2866	 * Bail!
2867	 */
2868	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2869		goto out_fail;
2870
2871	info.ts = rb_time_stamp(cpu_buffer->buffer);
2872	diff = info.ts - cpu_buffer->write_stamp;
2873
2874	/* make sure this diff is calculated here */
2875	barrier();
2876
2877	if (ring_buffer_time_stamp_abs(buffer)) {
2878		info.delta = info.ts;
2879		rb_handle_timestamp(cpu_buffer, &info);
2880	} else /* Did the write stamp get updated already? */
2881		if (likely(info.ts >= cpu_buffer->write_stamp)) {
2882		info.delta = diff;
2883		if (unlikely(test_time_stamp(info.delta)))
2884			rb_handle_timestamp(cpu_buffer, &info);
2885	}
2886
2887	event = __rb_reserve_next(cpu_buffer, &info);
2888
2889	if (unlikely(PTR_ERR(event) == -EAGAIN)) {
2890		if (info.add_timestamp)
2891			info.length -= RB_LEN_TIME_EXTEND;
2892		goto again;
2893	}
2894
2895	if (!event)
2896		goto out_fail;
2897
2898	return event;
2899
2900 out_fail:
2901	rb_end_commit(cpu_buffer);
2902	return NULL;
2903}
2904
2905/**
2906 * ring_buffer_lock_reserve - reserve a part of the buffer
2907 * @buffer: the ring buffer to reserve from
2908 * @length: the length of the data to reserve (excluding event header)
2909 *
2910 * Returns a reserved event on the ring buffer to copy directly to.
2911 * The user of this interface will need to get the body to write into
2912 * and can use the ring_buffer_event_data() interface.
2913 *
2914 * The length is the length of the data needed, not the event length
2915 * which also includes the event header.
2916 *
2917 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
2918 * If NULL is returned, then nothing has been allocated or locked.
2919 */
2920struct ring_buffer_event *
2921ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
2922{
2923	struct ring_buffer_per_cpu *cpu_buffer;
2924	struct ring_buffer_event *event;
2925	int cpu;
2926
2927	/* If we are tracing schedule, we don't want to recurse */
2928	preempt_disable_notrace();
2929
2930	if (unlikely(atomic_read(&buffer->record_disabled)))
2931		goto out;
2932
2933	cpu = raw_smp_processor_id();
2934
2935	if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
2936		goto out;
2937
2938	cpu_buffer = buffer->buffers[cpu];
2939
2940	if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
2941		goto out;
2942
2943	if (unlikely(length > BUF_MAX_DATA_SIZE))
2944		goto out;
2945
2946	if (unlikely(trace_recursive_lock(cpu_buffer)))
2947		goto out;
2948
2949	event = rb_reserve_next_event(buffer, cpu_buffer, length);
2950	if (!event)
2951		goto out_unlock;
2952
2953	return event;
2954
2955 out_unlock:
2956	trace_recursive_unlock(cpu_buffer);
2957 out:
2958	preempt_enable_notrace();
2959	return NULL;
2960}
2961EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
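
/*
 * Illustrative sketch, not part of the kernel source: the reserve/commit
 * pairing described above.  "struct demo_entry" and example_record() are
 * made-up names for the example.
 */
struct demo_entry {
	u64	ts;
	int	value;
};

static int example_record(struct ring_buffer *buffer, u64 ts, int value)
{
	struct ring_buffer_event *event;
	struct demo_entry *entry;

	/* length is the payload size only; the event header is added inside */
	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (!event)
		return -EBUSY;	/* recording disabled, recursion, or no room */

	entry = ring_buffer_event_data(event);
	entry->ts = ts;
	entry->value = value;

	return ring_buffer_unlock_commit(buffer, event);
}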
2962
2963/*
2964 * Decrement the entries to the page that an event is on.
2965 * The event does not even need to exist, only the pointer
2966 * to the page it is on. This may only be called before the commit
2967 * takes place.
2968 */
2969static inline void
2970rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2971		   struct ring_buffer_event *event)
2972{
2973	unsigned long addr = (unsigned long)event;
2974	struct buffer_page *bpage = cpu_buffer->commit_page;
2975	struct buffer_page *start;
2976
2977	addr &= PAGE_MASK;
2978
2979	/* Do the likely case first */
2980	if (likely(bpage->page == (void *)addr)) {
2981		local_dec(&bpage->entries);
2982		return;
2983	}
2984
2985	/*
2986	 * Because the commit page may be on the reader page we
2987	 * start with the next page and check the end loop there.
2988	 */
2989	rb_inc_page(cpu_buffer, &bpage);
2990	start = bpage;
2991	do {
2992		if (bpage->page == (void *)addr) {
2993			local_dec(&bpage->entries);
2994			return;
2995		}
2996		rb_inc_page(cpu_buffer, &bpage);
2997	} while (bpage != start);
2998
2999	/* commit not part of this buffer?? */
3000	RB_WARN_ON(cpu_buffer, 1);
3001}
3002
3003/**
3004 * ring_buffer_commit_discard - discard an event that has not been committed
3005 * @buffer: the ring buffer
3006 * @event: non committed event to discard
3007 *
3008 * Sometimes an event that is in the ring buffer needs to be ignored.
3009 * This function lets the user discard an event in the ring buffer
3010 * and then that event will not be read later.
3011 *
3012 * This function only works if it is called before the item has been
3013 * committed. It will try to free the event from the ring buffer
3014 * if another event has not been added behind it.
3015 *
3016 * If another event has been added behind it, it will set the event
3017 * up as discarded, and perform the commit.
3018 *
3019 * If this function is called, do not call ring_buffer_unlock_commit on
3020 * the event.
3021 */
3022void ring_buffer_discard_commit(struct ring_buffer *buffer,
3023				struct ring_buffer_event *event)
3024{
3025	struct ring_buffer_per_cpu *cpu_buffer;
3026	int cpu;
3027
3028	/* The event is discarded regardless */
3029	rb_event_discard(event);
3030
3031	cpu = smp_processor_id();
3032	cpu_buffer = buffer->buffers[cpu];
3033
3034	/*
3035	 * This must only be called if the event has not been
3036	 * committed yet. Thus we can assume that preemption
3037	 * is still disabled.
3038	 */
3039	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
3040
3041	rb_decrement_entry(cpu_buffer, event);
3042	if (rb_try_to_discard(cpu_buffer, event))
3043		goto out;
3044
3045	/*
3046	 * The commit is still visible by the reader, so we
3047	 * must still update the timestamp.
3048	 */
3049	rb_update_write_stamp(cpu_buffer, event);
3050 out:
3051	rb_end_commit(cpu_buffer);
3052
3053	trace_recursive_unlock(cpu_buffer);
3054
3055	preempt_enable_notrace();
3056
3057}
3058EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
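
/*
 * Illustrative sketch, not part of the kernel source: reserving an event and
 * then dropping it with ring_buffer_discard_commit() instead of committing,
 * for instance when a filter decides after filling it in that the event is
 * not wanted.  example_filtered_record() and "keep" are made up.
 */
static void example_filtered_record(struct ring_buffer *buffer, int value,
				    bool keep)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, sizeof(int));
	if (!event)
		return;

	*(int *)ring_buffer_event_data(event) = value;

	if (keep)
		ring_buffer_unlock_commit(buffer, event);
	else
		ring_buffer_discard_commit(buffer, event); /* never call both */
}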
3059
3060/**
3061 * ring_buffer_write - write data to the buffer without reserving
3062 * @buffer: The ring buffer to write to.
3063 * @length: The length of the data being written (excluding the event header)
3064 * @data: The data to write to the buffer.
3065 *
3066 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
3067 * one function. If you already have the data to write to the buffer, it
3068 * may be easier to simply call this function.
3069 *
3070 * Note, like ring_buffer_lock_reserve, the length is the length of the data
3071 * and not the length of the event which would hold the header.
3072 */
3073int ring_buffer_write(struct ring_buffer *buffer,
3074		      unsigned long length,
3075		      void *data)
3076{
3077	struct ring_buffer_per_cpu *cpu_buffer;
3078	struct ring_buffer_event *event;
3079	void *body;
3080	int ret = -EBUSY;
3081	int cpu;
3082
3083	preempt_disable_notrace();
3084
3085	if (atomic_read(&buffer->record_disabled))
3086		goto out;
3087
3088	cpu = raw_smp_processor_id();
3089
3090	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3091		goto out;
3092
3093	cpu_buffer = buffer->buffers[cpu];
3094
3095	if (atomic_read(&cpu_buffer->record_disabled))
3096		goto out;
3097
3098	if (length > BUF_MAX_DATA_SIZE)
3099		goto out;
3100
3101	if (unlikely(trace_recursive_lock(cpu_buffer)))
3102		goto out;
3103
3104	event = rb_reserve_next_event(buffer, cpu_buffer, length);
3105	if (!event)
3106		goto out_unlock;
3107
3108	body = rb_event_data(event);
3109
3110	memcpy(body, data, length);
3111
3112	rb_commit(cpu_buffer, event);
3113
3114	rb_wakeups(buffer, cpu_buffer);
3115
3116	ret = 0;
3117
3118 out_unlock:
3119	trace_recursive_unlock(cpu_buffer);
3120
3121 out:
3122	preempt_enable_notrace();
3123
3124	return ret;
3125}
3126EXPORT_SYMBOL_GPL(ring_buffer_write);
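
/*
 * Illustrative sketch, not part of the kernel source: the single-call variant
 * when the payload already exists in a local buffer, equivalent to a
 * reserve + memcpy + commit.  The function name and message are made up.
 */
static int example_write_string(struct ring_buffer *buffer)
{
	char msg[] = "hello";

	/* As above, the length is the data length, not the event length */
	return ring_buffer_write(buffer, sizeof(msg), msg);
}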
3127
3128static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
3129{
3130	struct buffer_page *reader = cpu_buffer->reader_page;
3131	struct buffer_page *head = rb_set_head_page(cpu_buffer);
3132	struct buffer_page *commit = cpu_buffer->commit_page;
3133
3134	/* In case of error, head will be NULL */
3135	if (unlikely(!head))
3136		return true;
3137
3138	return reader->read == rb_page_commit(reader) &&
3139		(commit == reader ||
3140		 (commit == head &&
3141		  head->read == rb_page_commit(commit)));
3142}
3143
3144/**
3145 * ring_buffer_record_disable - stop all writes into the buffer
3146 * @buffer: The ring buffer to stop writes to.
3147 *
3148 * This prevents all writes to the buffer. Any attempt to write
3149 * to the buffer after this will fail and return NULL.
3150 *
3151 * The caller should call synchronize_sched() after this.
3152 */
3153void ring_buffer_record_disable(struct ring_buffer *buffer)
3154{
3155	atomic_inc(&buffer->record_disabled);
3156}
3157EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
3158
3159/**
3160 * ring_buffer_record_enable - enable writes to the buffer
3161 * @buffer: The ring buffer to enable writes
3162 *
3163 * Note, multiple disables will need the same number of enables
3164 * to truly enable the writing (much like preempt_disable).
3165 */
3166void ring_buffer_record_enable(struct ring_buffer *buffer)
3167{
3168	atomic_dec(&buffer->record_disabled);
3169}
3170EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
3171
3172/**
3173 * ring_buffer_record_off - stop all writes into the buffer
3174 * @buffer: The ring buffer to stop writes to.
3175 *
3176 * This prevents all writes to the buffer. Any attempt to write
3177 * to the buffer after this will fail and return NULL.
3178 *
3179 * This is different than ring_buffer_record_disable() as
3180 * it works like an on/off switch, whereas the disable() version
3181 * must be paired with an enable().
3182 */
3183void ring_buffer_record_off(struct ring_buffer *buffer)
3184{
3185	unsigned int rd;
3186	unsigned int new_rd;
3187
3188	do {
3189		rd = atomic_read(&buffer->record_disabled);
3190		new_rd = rd | RB_BUFFER_OFF;
3191	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3192}
3193EXPORT_SYMBOL_GPL(ring_buffer_record_off);
3194
3195/**
3196 * ring_buffer_record_on - restart writes into the buffer
3197 * @buffer: The ring buffer to start writes to.
3198 *
3199 * This enables all writes to the buffer that was disabled by
3200 * ring_buffer_record_off().
3201 *
3202 * This is different than ring_buffer_record_enable() as
3203 * it works like an on/off switch, whereas the enable() version
3204 * must be paired with a disable().
3205 */
3206void ring_buffer_record_on(struct ring_buffer *buffer)
3207{
3208	unsigned int rd;
3209	unsigned int new_rd;
3210
3211	do {
3212		rd = atomic_read(&buffer->record_disabled);
3213		new_rd = rd & ~RB_BUFFER_OFF;
3214	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3215}
3216EXPORT_SYMBOL_GPL(ring_buffer_record_on);
3217
3218/**
3219 * ring_buffer_record_is_on - return true if the ring buffer can write
3220 * @buffer: The ring buffer to see if write is enabled
3221 *
3222 * Returns true if the ring buffer is in a state that it accepts writes.
3223 */
3224int ring_buffer_record_is_on(struct ring_buffer *buffer)
3225{
3226	return !atomic_read(&buffer->record_disabled);
3227}
3228
3229/**
3230 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
3231 * @buffer: The ring buffer to stop writes to.
3232 * @cpu: The CPU buffer to stop
3233 *
3234 * This prevents all writes to the buffer. Any attempt to write
3235 * to the buffer after this will fail and return NULL.
3236 *
3237 * The caller should call synchronize_sched() after this.
3238 */
3239void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
3240{
3241	struct ring_buffer_per_cpu *cpu_buffer;
3242
3243	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3244		return;
3245
3246	cpu_buffer = buffer->buffers[cpu];
3247	atomic_inc(&cpu_buffer->record_disabled);
3248}
3249EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
3250
3251/**
3252 * ring_buffer_record_enable_cpu - enable writes to the buffer
3253 * @buffer: The ring buffer to enable writes
3254 * @cpu: The CPU to enable.
3255 *
3256 * Note, multiple disables will need the same number of enables
3257 * to truly enable the writing (much like preempt_disable).
3258 */
3259void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
3260{
3261	struct ring_buffer_per_cpu *cpu_buffer;
3262
3263	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3264		return;
3265
3266	cpu_buffer = buffer->buffers[cpu];
3267	atomic_dec(&cpu_buffer->record_disabled);
3268}
3269EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
3270
3271/*
3272 * The total entries in the ring buffer is the running counter
3273 * of entries entered into the ring buffer, minus the sum of
3274 * the entries read from the ring buffer and the number of
3275 * entries that were overwritten.
3276 */
3277static inline unsigned long
3278rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
3279{
3280	return local_read(&cpu_buffer->entries) -
3281		(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
3282}
3283
3284/**
3285 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
3286 * @buffer: The ring buffer
3287 * @cpu: The per CPU buffer to read from.
3288 */
3289u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
3290{
3291	unsigned long flags;
3292	struct ring_buffer_per_cpu *cpu_buffer;
3293	struct buffer_page *bpage;
3294	u64 ret = 0;
3295
3296	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3297		return 0;
3298
3299	cpu_buffer = buffer->buffers[cpu];
3300	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3301	/*
3302	 * if the tail is on reader_page, oldest time stamp is on the reader
3303	 * page
3304	 */
3305	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
3306		bpage = cpu_buffer->reader_page;
3307	else
3308		bpage = rb_set_head_page(cpu_buffer);
3309	if (bpage)
3310		ret = bpage->page->time_stamp;
3311	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3312
3313	return ret;
3314}
3315EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
3316
3317/**
3318 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
3319 * @buffer: The ring buffer
3320 * @cpu: The per CPU buffer to read from.
3321 */
3322unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
3323{
3324	struct ring_buffer_per_cpu *cpu_buffer;
3325	unsigned long ret;
3326
3327	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3328		return 0;
3329
3330	cpu_buffer = buffer->buffers[cpu];
3331	ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
3332
3333	return ret;
3334}
3335EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
3336
3337/**
3338 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
3339 * @buffer: The ring buffer
3340 * @cpu: The per CPU buffer to get the entries from.
3341 */
3342unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
3343{
3344	struct ring_buffer_per_cpu *cpu_buffer;
3345
3346	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3347		return 0;
3348
3349	cpu_buffer = buffer->buffers[cpu];
3350
3351	return rb_num_of_entries(cpu_buffer);
3352}
3353EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
3354
3355/**
3356 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
3357 * buffer wrapping around (only if RB_FL_OVERWRITE is on).
3358 * @buffer: The ring buffer
3359 * @cpu: The per CPU buffer to get the number of overruns from
3360 */
3361unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
3362{
3363	struct ring_buffer_per_cpu *cpu_buffer;
3364	unsigned long ret;
3365
3366	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3367		return 0;
3368
3369	cpu_buffer = buffer->buffers[cpu];
3370	ret = local_read(&cpu_buffer->overrun);
3371
3372	return ret;
3373}
3374EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
3375
3376/**
3377 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
3378 * commits failing due to the buffer wrapping around while there are uncommitted
3379 * events, such as during an interrupt storm.
3380 * @buffer: The ring buffer
3381 * @cpu: The per CPU buffer to get the number of overruns from
3382 */
3383unsigned long
3384ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
3385{
3386	struct ring_buffer_per_cpu *cpu_buffer;
3387	unsigned long ret;
3388
3389	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3390		return 0;
3391
3392	cpu_buffer = buffer->buffers[cpu];
3393	ret = local_read(&cpu_buffer->commit_overrun);
3394
3395	return ret;
3396}
3397EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
3398
3399/**
3400 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
3401 * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
3402 * @buffer: The ring buffer
3403 * @cpu: The per CPU buffer to get the number of overruns from
3404 */
3405unsigned long
3406ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
3407{
3408	struct ring_buffer_per_cpu *cpu_buffer;
3409	unsigned long ret;
3410
3411	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3412		return 0;
3413
3414	cpu_buffer = buffer->buffers[cpu];
3415	ret = local_read(&cpu_buffer->dropped_events);
3416
3417	return ret;
3418}
3419EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
3420
3421/**
3422 * ring_buffer_read_events_cpu - get the number of events successfully read
3423 * @buffer: The ring buffer
3424 * @cpu: The per CPU buffer to get the number of events read
3425 */
3426unsigned long
3427ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
3428{
3429	struct ring_buffer_per_cpu *cpu_buffer;
3430
3431	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3432		return 0;
3433
3434	cpu_buffer = buffer->buffers[cpu];
3435	return cpu_buffer->read;
3436}
3437EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
3438
3439/**
3440 * ring_buffer_entries - get the number of entries in a buffer
3441 * @buffer: The ring buffer
3442 *
3443 * Returns the total number of entries in the ring buffer
3444 * (all CPU entries)
3445 */
3446unsigned long ring_buffer_entries(struct ring_buffer *buffer)
3447{
3448	struct ring_buffer_per_cpu *cpu_buffer;
3449	unsigned long entries = 0;
3450	int cpu;
3451
3452	/* if you care about this being correct, lock the buffer */
3453	for_each_buffer_cpu(buffer, cpu) {
3454		cpu_buffer = buffer->buffers[cpu];
3455		entries += rb_num_of_entries(cpu_buffer);
3456	}
3457
3458	return entries;
3459}
3460EXPORT_SYMBOL_GPL(ring_buffer_entries);
3461
3462/**
3463 * ring_buffer_overruns - get the number of overruns in buffer
3464 * @buffer: The ring buffer
3465 *
3466 * Returns the total number of overruns in the ring buffer
3467 * (all CPU entries)
3468 */
3469unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
3470{
3471	struct ring_buffer_per_cpu *cpu_buffer;
3472	unsigned long overruns = 0;
3473	int cpu;
3474
3475	/* if you care about this being correct, lock the buffer */
3476	for_each_buffer_cpu(buffer, cpu) {
3477		cpu_buffer = buffer->buffers[cpu];
3478		overruns += local_read(&cpu_buffer->overrun);
3479	}
3480
3481	return overruns;
3482}
3483EXPORT_SYMBOL_GPL(ring_buffer_overruns);
3484
3485static void rb_iter_reset(struct ring_buffer_iter *iter)
3486{
3487	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3488
3489	/* Iterator usage is expected to have record disabled */
3490	iter->head_page = cpu_buffer->reader_page;
3491	iter->head = cpu_buffer->reader_page->read;
3492
3493	iter->cache_reader_page = iter->head_page;
3494	iter->cache_read = cpu_buffer->read;
3495
3496	if (iter->head)
3497		iter->read_stamp = cpu_buffer->read_stamp;
3498	else
3499		iter->read_stamp = iter->head_page->page->time_stamp;
3500}
3501
3502/**
3503 * ring_buffer_iter_reset - reset an iterator
3504 * @iter: The iterator to reset
3505 *
3506 * Resets the iterator, so that it will start from the beginning
3507 * again.
3508 */
3509void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
3510{
3511	struct ring_buffer_per_cpu *cpu_buffer;
3512	unsigned long flags;
3513
3514	if (!iter)
3515		return;
3516
3517	cpu_buffer = iter->cpu_buffer;
3518
3519	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3520	rb_iter_reset(iter);
3521	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3522}
3523EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
3524
3525/**
3526 * ring_buffer_iter_empty - check if an iterator has no more to read
3527 * @iter: The iterator to check
3528 */
3529int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
3530{
3531	struct ring_buffer_per_cpu *cpu_buffer;
3532	struct buffer_page *reader;
3533	struct buffer_page *head_page;
3534	struct buffer_page *commit_page;
3535	unsigned commit;
3536
3537	cpu_buffer = iter->cpu_buffer;
3538
3539	/* Remember, trace recording is off when iterator is in use */
3540	reader = cpu_buffer->reader_page;
3541	head_page = cpu_buffer->head_page;
3542	commit_page = cpu_buffer->commit_page;
3543	commit = rb_page_commit(commit_page);
3544
3545	return ((iter->head_page == commit_page && iter->head == commit) ||
3546		(iter->head_page == reader && commit_page == head_page &&
3547		 head_page->read == commit &&
3548		 iter->head == rb_page_commit(cpu_buffer->reader_page)));
3549}
3550EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
3551
3552static void
3553rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
3554		     struct ring_buffer_event *event)
3555{
3556	u64 delta;
3557
3558	switch (event->type_len) {
3559	case RINGBUF_TYPE_PADDING:
3560		return;
3561
3562	case RINGBUF_TYPE_TIME_EXTEND:
3563		delta = ring_buffer_event_time_stamp(event);
3564		cpu_buffer->read_stamp += delta;
3565		return;
3566
3567	case RINGBUF_TYPE_TIME_STAMP:
3568		delta = ring_buffer_event_time_stamp(event);
3569		cpu_buffer->read_stamp = delta;
3570		return;
3571
3572	case RINGBUF_TYPE_DATA:
3573		cpu_buffer->read_stamp += event->time_delta;
3574		return;
3575
3576	default:
3577		BUG();
3578	}
3579	return;
3580}
3581
3582static void
3583rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
3584			  struct ring_buffer_event *event)
3585{
3586	u64 delta;
3587
3588	switch (event->type_len) {
3589	case RINGBUF_TYPE_PADDING:
3590		return;
3591
3592	case RINGBUF_TYPE_TIME_EXTEND:
3593		delta = ring_buffer_event_time_stamp(event);
3594		iter->read_stamp += delta;
3595		return;
3596
3597	case RINGBUF_TYPE_TIME_STAMP:
3598		delta = ring_buffer_event_time_stamp(event);
3599		iter->read_stamp = delta;
3600		return;
3601
3602	case RINGBUF_TYPE_DATA:
3603		iter->read_stamp += event->time_delta;
3604		return;
3605
3606	default:
3607		BUG();
3608	}
3609	return;
3610}
3611
3612static struct buffer_page *
3613rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3614{
3615	struct buffer_page *reader = NULL;
3616	unsigned long overwrite;
3617	unsigned long flags;
3618	int nr_loops = 0;
3619	int ret;
3620
3621	local_irq_save(flags);
3622	arch_spin_lock(&cpu_buffer->lock);
3623
3624 again:
3625	/*
3626	 * This should normally only loop twice. But because the
3627	 * start of the reader inserts an empty page, it causes
3628	 * a case where we will loop three times. There should be no
3629	 * reason to loop four times (that I know of).
3630	 */
3631	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
3632		reader = NULL;
3633		goto out;
3634	}
3635
3636	reader = cpu_buffer->reader_page;
3637
3638	/* If there's more to read, return this page */
3639	if (cpu_buffer->reader_page->read < rb_page_size(reader))
3640		goto out;
3641
3642	/* Never should we have an index greater than the size */
3643	if (RB_WARN_ON(cpu_buffer,
3644		       cpu_buffer->reader_page->read > rb_page_size(reader)))
3645		goto out;
3646
3647	/* check if we caught up to the tail */
3648	reader = NULL;
3649	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
3650		goto out;
3651
3652	/* Don't bother swapping if the ring buffer is empty */
3653	if (rb_num_of_entries(cpu_buffer) == 0)
3654		goto out;
3655
3656	/*
3657	 * Reset the reader page to size zero.
3658	 */
3659	local_set(&cpu_buffer->reader_page->write, 0);
3660	local_set(&cpu_buffer->reader_page->entries, 0);
3661	local_set(&cpu_buffer->reader_page->page->commit, 0);
3662	cpu_buffer->reader_page->real_end = 0;
3663
3664 spin:
3665	/*
3666	 * Splice the empty reader page into the list around the head.
3667	 */
3668	reader = rb_set_head_page(cpu_buffer);
3669	if (!reader)
3670		goto out;
3671	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
3672	cpu_buffer->reader_page->list.prev = reader->list.prev;
3673
3674	/*
3675	 * cpu_buffer->pages just needs to point to the buffer, it
3676	 *  has no specific buffer page to point to. Let's move it out
3677	 *  of our way so we don't accidentally swap it.
3678	 */
3679	cpu_buffer->pages = reader->list.prev;
3680
3681	/* The reader page will be pointing to the new head */
3682	rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
3683
3684	/*
3685	 * We want to make sure we read the overruns after we set up our
3686	 * pointers to the next object. The writer side does a
3687	 * cmpxchg to cross pages which acts as the mb on the writer
3688	 * side. Note, the reader will constantly fail the swap
3689	 * while the writer is updating the pointers, so this
3690	 * guarantees that the overwrite recorded here is the one we
3691	 * want to compare with the last_overrun.
3692	 */
3693	smp_mb();
3694	overwrite = local_read(&(cpu_buffer->overrun));
3695
3696	/*
3697	 * Here's the tricky part.
3698	 *
3699	 * We need to move the pointer past the header page.
3700	 * But we can only do that if a writer is not currently
3701	 * moving it. The page before the header page has the
3702	 * flag bit '1' set if it is pointing to the page we want.
3703	 * But if the writer is in the process of moving it,
3704	 * then it will be '2' or already moved '0'.
3705	 */
3706
3707	ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
3708
3709	/*
3710	 * If we did not convert it, then we must try again.
3711	 */
3712	if (!ret)
3713		goto spin;
3714
3715	/*
3716	 * Yeah! We succeeded in replacing the page.
3717	 *
3718	 * Now make the new head point back to the reader page.
3719	 */
3720	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
3721	rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
3722
3723	/* Finally update the reader page to the new head */
3724	cpu_buffer->reader_page = reader;
3725	cpu_buffer->reader_page->read = 0;
3726
3727	if (overwrite != cpu_buffer->last_overrun) {
3728		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
3729		cpu_buffer->last_overrun = overwrite;
3730	}
3731
3732	goto again;
3733
3734 out:
3735	/* Update the read_stamp on the first event */
3736	if (reader && reader->read == 0)
3737		cpu_buffer->read_stamp = reader->page->time_stamp;
3738
3739	arch_spin_unlock(&cpu_buffer->lock);
3740	local_irq_restore(flags);
3741
3742	return reader;
3743}
3744
3745static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
3746{
3747	struct ring_buffer_event *event;
3748	struct buffer_page *reader;
3749	unsigned length;
3750
3751	reader = rb_get_reader_page(cpu_buffer);
3752
3753	/* This function should not be called when buffer is empty */
3754	if (RB_WARN_ON(cpu_buffer, !reader))
3755		return;
3756
3757	event = rb_reader_event(cpu_buffer);
3758
3759	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
3760		cpu_buffer->read++;
3761
3762	rb_update_read_stamp(cpu_buffer, event);
3763
3764	length = rb_event_length(event);
3765	cpu_buffer->reader_page->read += length;
3766}
3767
3768static void rb_advance_iter(struct ring_buffer_iter *iter)
3769{
3770	struct ring_buffer_per_cpu *cpu_buffer;
3771	struct ring_buffer_event *event;
3772	unsigned length;
3773
3774	cpu_buffer = iter->cpu_buffer;
3775
3776	/*
3777	 * Check if we are at the end of the buffer.
3778	 */
3779	if (iter->head >= rb_page_size(iter->head_page)) {
3780		/* discarded commits can make the page empty */
3781		if (iter->head_page == cpu_buffer->commit_page)
3782			return;
3783		rb_inc_iter(iter);
3784		return;
3785	}
3786
3787	event = rb_iter_head_event(iter);
3788
3789	length = rb_event_length(event);
3790
3791	/*
3792	 * This should not be called to advance the header if we are
3793	 * at the tail of the buffer.
3794	 */
3795	if (RB_WARN_ON(cpu_buffer,
3796		       (iter->head_page == cpu_buffer->commit_page) &&
3797		       (iter->head + length > rb_commit_index(cpu_buffer))))
3798		return;
3799
3800	rb_update_iter_read_stamp(iter, event);
3801
3802	iter->head += length;
3803
3804	/* check for end of page padding */
3805	if ((iter->head >= rb_page_size(iter->head_page)) &&
3806	    (iter->head_page != cpu_buffer->commit_page))
3807		rb_inc_iter(iter);
3808}
3809
3810static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
3811{
3812	return cpu_buffer->lost_events;
3813}
3814
3815static struct ring_buffer_event *
3816rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
3817	       unsigned long *lost_events)
3818{
3819	struct ring_buffer_event *event;
3820	struct buffer_page *reader;
3821	int nr_loops = 0;
3822
3823	if (ts)
3824		*ts = 0;
3825 again:
3826	/*
3827	 * We repeat when a time extend is encountered.
3828	 * Since the time extend is always attached to a data event,
3829	 * we should never loop more than once.
3830	 * (We never hit the following condition more than twice).
3831	 */
3832	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3833		return NULL;
3834
3835	reader = rb_get_reader_page(cpu_buffer);
3836	if (!reader)
3837		return NULL;
3838
3839	event = rb_reader_event(cpu_buffer);
3840
3841	switch (event->type_len) {
3842	case RINGBUF_TYPE_PADDING:
3843		if (rb_null_event(event))
3844			RB_WARN_ON(cpu_buffer, 1);
3845		/*
3846		 * Because the writer could be discarding every
3847		 * event it creates (which would probably be bad)
3848		 * if we were to go back to "again" then we may never
3849		 * catch up, and will trigger the warn on, or lock
3850		 * the box. Return the padding, and we will release
3851		 * the current locks, and try again.
3852		 */
3853		return event;
3854
3855	case RINGBUF_TYPE_TIME_EXTEND:
3856		/* Internal data, OK to advance */
3857		rb_advance_reader(cpu_buffer);
3858		goto again;
3859
3860	case RINGBUF_TYPE_TIME_STAMP:
3861		if (ts) {
3862			*ts = ring_buffer_event_time_stamp(event);
3863			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3864							 cpu_buffer->cpu, ts);
3865		}
3866		/* Internal data, OK to advance */
3867		rb_advance_reader(cpu_buffer);
3868		goto again;
3869
3870	case RINGBUF_TYPE_DATA:
3871		if (ts && !(*ts)) {
3872			*ts = cpu_buffer->read_stamp + event->time_delta;
3873			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3874							 cpu_buffer->cpu, ts);
3875		}
3876		if (lost_events)
3877			*lost_events = rb_lost_events(cpu_buffer);
3878		return event;
3879
3880	default:
3881		BUG();
3882	}
3883
3884	return NULL;
3885}
3886EXPORT_SYMBOL_GPL(ring_buffer_peek);
3887
3888static struct ring_buffer_event *
3889rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3890{
3891	struct ring_buffer *buffer;
3892	struct ring_buffer_per_cpu *cpu_buffer;
3893	struct ring_buffer_event *event;
3894	int nr_loops = 0;
3895
3896	if (ts)
3897		*ts = 0;
3898
3899	cpu_buffer = iter->cpu_buffer;
3900	buffer = cpu_buffer->buffer;
3901
3902	/*
3903	 * Check if someone performed a consuming read to
3904	 * the buffer. A consuming read invalidates the iterator
3905	 * and we need to reset the iterator in this case.
3906	 */
3907	if (unlikely(iter->cache_read != cpu_buffer->read ||
3908		     iter->cache_reader_page != cpu_buffer->reader_page))
3909		rb_iter_reset(iter);
3910
3911 again:
3912	if (ring_buffer_iter_empty(iter))
3913		return NULL;
3914
3915	/*
3916	 * We repeat when a time extend is encountered or we hit
3917	 * the end of the page. Since the time extend is always attached
3918	 * to a data event, we should never loop more than three times.
3919	 * Once for going to next page, once on time extend, and
3920	 * finally once to get the event.
3921	 * (We never hit the following condition more than thrice).
3922	 */
3923	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3))
3924		return NULL;
3925
3926	if (rb_per_cpu_empty(cpu_buffer))
3927		return NULL;
3928
3929	if (iter->head >= rb_page_size(iter->head_page)) {
3930		rb_inc_iter(iter);
3931		goto again;
3932	}
3933
3934	event = rb_iter_head_event(iter);
3935
3936	switch (event->type_len) {
3937	case RINGBUF_TYPE_PADDING:
3938		if (rb_null_event(event)) {
3939			rb_inc_iter(iter);
3940			goto again;
3941		}
3942		rb_advance_iter(iter);
3943		return event;
3944
3945	case RINGBUF_TYPE_TIME_EXTEND:
3946		/* Internal data, OK to advance */
3947		rb_advance_iter(iter);
3948		goto again;
3949
3950	case RINGBUF_TYPE_TIME_STAMP:
3951		if (ts) {
3952			*ts = ring_buffer_event_time_stamp(event);
3953			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3954							 cpu_buffer->cpu, ts);
3955		}
3956		/* Internal data, OK to advance */
3957		rb_advance_iter(iter);
3958		goto again;
3959
3960	case RINGBUF_TYPE_DATA:
3961		if (ts && !(*ts)) {
3962			*ts = iter->read_stamp + event->time_delta;
3963			ring_buffer_normalize_time_stamp(buffer,
3964							 cpu_buffer->cpu, ts);
3965		}
3966		return event;
3967
3968	default:
3969		BUG();
3970	}
3971
3972	return NULL;
3973}
3974EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
3975
3976static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
3977{
3978	if (likely(!in_nmi())) {
3979		raw_spin_lock(&cpu_buffer->reader_lock);
3980		return true;
3981	}
3982
3983	/*
3984	 * If an NMI die dump writes out the content of the ring buffer,
3985	 * trylock must be used to prevent a deadlock if the NMI
3986	 * preempted a task that holds the ring buffer locks. If
3987	 * we get the lock then all is fine, if not, then continue
3988	 * to do the read, but this can corrupt the ring buffer,
3989	 * so it must be permanently disabled from future writes.
3990	 * Reading from NMI is a one-shot deal.
3991	 */
3992	if (raw_spin_trylock(&cpu_buffer->reader_lock))
3993		return true;
3994
3995	/* Continue without locking, but disable the ring buffer */
3996	atomic_inc(&cpu_buffer->record_disabled);
3997	return false;
3998}
3999
4000static inline void
4001rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
4002{
4003	if (likely(locked))
4004		raw_spin_unlock(&cpu_buffer->reader_lock);
4005	return;
4006}
4007
4008/**
4009 * ring_buffer_peek - peek at the next event to be read
4010 * @buffer: The ring buffer to read
4011 * @cpu: The cpu to peek at
4012 * @ts: The timestamp counter of this event.
4013 * @lost_events: a variable to store if events were lost (may be NULL)
4014 *
4015 * This will return the event that will be read next, but does
4016 * not consume the data.
4017 */
4018struct ring_buffer_event *
4019ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
4020		 unsigned long *lost_events)
4021{
4022	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4023	struct ring_buffer_event *event;
4024	unsigned long flags;
4025	bool dolock;
4026
4027	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4028		return NULL;
4029
4030 again:
4031	local_irq_save(flags);
4032	dolock = rb_reader_lock(cpu_buffer);
4033	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
4034	if (event && event->type_len == RINGBUF_TYPE_PADDING)
4035		rb_advance_reader(cpu_buffer);
4036	rb_reader_unlock(cpu_buffer, dolock);
4037	local_irq_restore(flags);
4038
4039	if (event && event->type_len == RINGBUF_TYPE_PADDING)
4040		goto again;
4041
4042	return event;
4043}
4044
4045/**
4046 * ring_buffer_iter_peek - peek at the next event to be read
4047 * @iter: The ring buffer iterator
4048 * @ts: The timestamp counter of this event.
4049 *
4050 * This will return the event that will be read next, but does
4051 * not increment the iterator.
4052 */
4053struct ring_buffer_event *
4054ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
4055{
4056	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4057	struct ring_buffer_event *event;
4058	unsigned long flags;
4059
4060 again:
4061	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4062	event = rb_iter_peek(iter, ts);
4063	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4064
4065	if (event && event->type_len == RINGBUF_TYPE_PADDING)
4066		goto again;
4067
4068	return event;
4069}
4070
4071/**
4072 * ring_buffer_consume - return an event and consume it
4073 * @buffer: The ring buffer to get the next event from
4074 * @cpu: the cpu to read the buffer from
4075 * @ts: a variable to store the timestamp (may be NULL)
4076 * @lost_events: a variable to store if events were lost (may be NULL)
4077 *
4078 * Returns the next event in the ring buffer, and that event is consumed.
4079 * Meaning, that sequential reads will keep returning a different event,
4080 * and eventually empty the ring buffer if the producer is slower.
4081 */
4082struct ring_buffer_event *
4083ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
4084		    unsigned long *lost_events)
4085{
4086	struct ring_buffer_per_cpu *cpu_buffer;
4087	struct ring_buffer_event *event = NULL;
4088	unsigned long flags;
4089	bool dolock;
4090
4091 again:
4092	/* might be called in atomic */
4093	preempt_disable();
4094
4095	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4096		goto out;
4097
4098	cpu_buffer = buffer->buffers[cpu];
4099	local_irq_save(flags);
4100	dolock = rb_reader_lock(cpu_buffer);
4101
4102	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
4103	if (event) {
4104		cpu_buffer->lost_events = 0;
4105		rb_advance_reader(cpu_buffer);
4106	}
4107
4108	rb_reader_unlock(cpu_buffer, dolock);
4109	local_irq_restore(flags);
4110
4111 out:
4112	preempt_enable();
4113
4114	if (event && event->type_len == RINGBUF_TYPE_PADDING)
4115		goto again;
4116
4117	return event;
4118}
4119EXPORT_SYMBOL_GPL(ring_buffer_consume);
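
/*
 * Illustrative sketch, not part of the kernel source: draining one CPU's
 * events with consuming reads.  The handle() callback is made up.
 */
static void example_consume_cpu(struct ring_buffer *buffer, int cpu,
				void (*handle)(void *data, u64 ts))
{
	struct ring_buffer_event *event;
	unsigned long lost;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)) != NULL)
		handle(ring_buffer_event_data(event), ts);
}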
4120
4121/**
4122 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
4123 * @buffer: The ring buffer to read from
4124 * @cpu: The cpu buffer to iterate over
4125 *
4126 * This performs the initial preparations necessary to iterate
4127 * through the buffer.  Memory is allocated, buffer recording
4128 * is disabled, and the iterator pointer is returned to the caller.
4129 *
4130 * Disabling buffer recording prevents the reading from being
4131 * corrupted. This is not a consuming read, so a producer is not
4132 * expected.
4133 *
4134 * After a sequence of ring_buffer_read_prepare calls, the user is
4135 * expected to make at least one call to ring_buffer_read_prepare_sync.
4136 * Afterwards, ring_buffer_read_start is invoked to get things going
4137 * for real.
4138 *
4139 * This overall must be paired with ring_buffer_read_finish.
4140 */
4141struct ring_buffer_iter *
4142ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
4143{
4144	struct ring_buffer_per_cpu *cpu_buffer;
4145	struct ring_buffer_iter *iter;
4146
4147	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4148		return NULL;
4149
4150	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
4151	if (!iter)
4152		return NULL;
4153
4154	cpu_buffer = buffer->buffers[cpu];
4155
4156	iter->cpu_buffer = cpu_buffer;
4157
4158	atomic_inc(&buffer->resize_disabled);
4159	atomic_inc(&cpu_buffer->record_disabled);
4160
4161	return iter;
4162}
4163EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
4164
4165/**
4166 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
4167 *
4168 * All previously invoked ring_buffer_read_prepare calls to prepare
4169 * iterators will be synchronized.  Afterwards, ring_buffer_read_start
4170 * calls on those iterators are allowed.
4171 */
4172void
4173ring_buffer_read_prepare_sync(void)
4174{
4175	synchronize_sched();
4176}
4177EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
4178
4179/**
4180 * ring_buffer_read_start - start a non consuming read of the buffer
4181 * @iter: The iterator returned by ring_buffer_read_prepare
4182 *
4183 * This finalizes the startup of an iteration through the buffer.
4184 * The iterator comes from a call to ring_buffer_read_prepare and
4185 * an intervening ring_buffer_read_prepare_sync must have been
4186 * performed.
4187 *
4188 * Must be paired with ring_buffer_read_finish.
4189 */
4190void
4191ring_buffer_read_start(struct ring_buffer_iter *iter)
4192{
4193	struct ring_buffer_per_cpu *cpu_buffer;
4194	unsigned long flags;
4195
4196	if (!iter)
4197		return;
4198
4199	cpu_buffer = iter->cpu_buffer;
4200
4201	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4202	arch_spin_lock(&cpu_buffer->lock);
4203	rb_iter_reset(iter);
4204	arch_spin_unlock(&cpu_buffer->lock);
4205	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4206}
4207EXPORT_SYMBOL_GPL(ring_buffer_read_start);
4208
4209/**
4210 * ring_buffer_read_finish - finish reading the iterator of the buffer
4211 * @iter: The iterator retrieved by ring_buffer_read_prepare
4212 *
4213 * This re-enables the recording to the buffer, and frees the
4214 * iterator.
4215 */
4216void
4217ring_buffer_read_finish(struct ring_buffer_iter *iter)
4218{
4219	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4220	unsigned long flags;
4221
4222	/*
4223	 * Ring buffer is disabled from recording, here's a good place
4224	 * to check the integrity of the ring buffer.
4225	 * Must prevent readers from trying to read, as the check
4226	 * clears the HEAD page and readers require it.
4227	 */
4228	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4229	rb_check_pages(cpu_buffer);
4230	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4231
4232	atomic_dec(&cpu_buffer->record_disabled);
4233	atomic_dec(&cpu_buffer->buffer->resize_disabled);
4234	kfree(iter);
4235}
4236EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
4237
4238/**
4239 * ring_buffer_read - read the next item in the ring buffer by the iterator
4240 * @iter: The ring buffer iterator
4241 * @ts: The time stamp of the event read.
4242 *
4243 * This reads the next event in the ring buffer and increments the iterator.
4244 */
4245struct ring_buffer_event *
4246ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
4247{
4248	struct ring_buffer_event *event;
4249	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4250	unsigned long flags;
4251
4252	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4253 again:
4254	event = rb_iter_peek(iter, ts);
4255	if (!event)
4256		goto out;
4257
4258	if (event->type_len == RINGBUF_TYPE_PADDING)
4259		goto again;
4260
4261	rb_advance_iter(iter);
4262 out:
4263	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4264
4265	return event;
4266}
4267EXPORT_SYMBOL_GPL(ring_buffer_read);
4268
4269/**
4270 * ring_buffer_size - return the size of the ring buffer (in bytes)
4271 * @buffer: The ring buffer.
 * @cpu: The CPU to get ring buffer size from.
4272 */
4273unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
4274{
4275	/*
4276	 * Earlier, this method returned
4277	 *	BUF_PAGE_SIZE * buffer->nr_pages
4278	 * Since the nr_pages field is now removed, we have converted this to
4279	 * return the per cpu buffer value.
4280	 */
4281	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4282		return 0;
4283
4284	return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
4285}
4286EXPORT_SYMBOL_GPL(ring_buffer_size);
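/*
 * For example, the total size of a buffer across all of its CPUs can
 * be computed roughly as follows (a sketch; "total" is illustrative):
 *
 *	unsigned long total = 0;
 *	int cpu;
 *
 *	for_each_buffer_cpu(buffer, cpu)
 *		total += ring_buffer_size(buffer, cpu);
 */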
4287
4288static void
4289rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
4290{
4291	rb_head_page_deactivate(cpu_buffer);
4292
4293	cpu_buffer->head_page
4294		= list_entry(cpu_buffer->pages, struct buffer_page, list);
4295	local_set(&cpu_buffer->head_page->write, 0);
4296	local_set(&cpu_buffer->head_page->entries, 0);
4297	local_set(&cpu_buffer->head_page->page->commit, 0);
4298
4299	cpu_buffer->head_page->read = 0;
4300
4301	cpu_buffer->tail_page = cpu_buffer->head_page;
4302	cpu_buffer->commit_page = cpu_buffer->head_page;
4303
4304	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
4305	INIT_LIST_HEAD(&cpu_buffer->new_pages);
4306	local_set(&cpu_buffer->reader_page->write, 0);
4307	local_set(&cpu_buffer->reader_page->entries, 0);
4308	local_set(&cpu_buffer->reader_page->page->commit, 0);
4309	cpu_buffer->reader_page->read = 0;
4310
4311	local_set(&cpu_buffer->entries_bytes, 0);
4312	local_set(&cpu_buffer->overrun, 0);
4313	local_set(&cpu_buffer->commit_overrun, 0);
4314	local_set(&cpu_buffer->dropped_events, 0);
4315	local_set(&cpu_buffer->entries, 0);
4316	local_set(&cpu_buffer->committing, 0);
4317	local_set(&cpu_buffer->commits, 0);
4318	cpu_buffer->read = 0;
4319	cpu_buffer->read_bytes = 0;
4320
4321	cpu_buffer->write_stamp = 0;
4322	cpu_buffer->read_stamp = 0;
4323
4324	cpu_buffer->lost_events = 0;
4325	cpu_buffer->last_overrun = 0;
4326
4327	rb_head_page_activate(cpu_buffer);
4328}
4329
4330/**
4331 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
4332 * @buffer: The ring buffer to reset a per cpu buffer of
4333 * @cpu: The CPU buffer to be reset
4334 */
4335void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
4336{
4337	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4338	unsigned long flags;
4339
4340	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4341		return;
4342
4343	atomic_inc(&buffer->resize_disabled);
4344	atomic_inc(&cpu_buffer->record_disabled);
4345
4346	/* Make sure all commits have finished */
4347	synchronize_sched();
4348
4349	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4350
4351	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
4352		goto out;
4353
4354	arch_spin_lock(&cpu_buffer->lock);
4355
4356	rb_reset_cpu(cpu_buffer);
4357
4358	arch_spin_unlock(&cpu_buffer->lock);
4359
4360 out:
4361	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4362
4363	atomic_dec(&cpu_buffer->record_disabled);
4364	atomic_dec(&buffer->resize_disabled);
4365}
4366EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
4367
4368/**
4369 * ring_buffer_reset - reset a ring buffer
4370 * @buffer: The ring buffer to reset all cpu buffers
4371 */
4372void ring_buffer_reset(struct ring_buffer *buffer)
4373{
4374	int cpu;
4375
4376	for_each_buffer_cpu(buffer, cpu)
4377		ring_buffer_reset_cpu(buffer, cpu);
4378}
4379EXPORT_SYMBOL_GPL(ring_buffer_reset);
4380
4381/**
4382 * ring_buffer_empty - is the ring buffer empty?
4383 * @buffer: The ring buffer to test
4384 */
4385bool ring_buffer_empty(struct ring_buffer *buffer)
4386{
4387	struct ring_buffer_per_cpu *cpu_buffer;
4388	unsigned long flags;
4389	bool dolock;
4390	int cpu;
4391	int ret;
4392
4393	/* yes this is racy, but if you don't like the race, lock the buffer */
4394	for_each_buffer_cpu(buffer, cpu) {
4395		cpu_buffer = buffer->buffers[cpu];
4396		local_irq_save(flags);
4397		dolock = rb_reader_lock(cpu_buffer);
4398		ret = rb_per_cpu_empty(cpu_buffer);
4399		rb_reader_unlock(cpu_buffer, dolock);
4400		local_irq_restore(flags);
4401
4402		if (!ret)
4403			return false;
4404	}
4405
4406	return true;
4407}
4408EXPORT_SYMBOL_GPL(ring_buffer_empty);
4409
4410/**
4411 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
4412 * @buffer: The ring buffer
4413 * @cpu: The CPU buffer to test
4414 */
4415bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
4416{
4417	struct ring_buffer_per_cpu *cpu_buffer;
4418	unsigned long flags;
4419	bool dolock;
4420	int ret;
4421
4422	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4423		return true;
4424
4425	cpu_buffer = buffer->buffers[cpu];
4426	local_irq_save(flags);
4427	dolock = rb_reader_lock(cpu_buffer);
4428	ret = rb_per_cpu_empty(cpu_buffer);
4429	rb_reader_unlock(cpu_buffer, dolock);
4430	local_irq_restore(flags);
4431
4432	return ret;
4433}
4434EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
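/*
 * Callers typically use this to decide whether a read would find
 * data. A naive polling sketch, purely illustrative (in-tree users
 * sleep via ring_buffer_wait() instead of spinning):
 *
 *	while (ring_buffer_empty_cpu(buffer, cpu))
 *		schedule_timeout_interruptible(1);
 */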
4435
4436#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4437/**
4438 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
4439 * @buffer_a: One buffer to swap with
4440 * @buffer_b: The other buffer to swap with
 * @cpu: the CPU of the buffers to swap
4441 *
4442 * This function is useful for tracers that want to take a "snapshot"
4443 * of a CPU buffer and have another backup buffer lying around.
4444 * It is expected that the tracer handles the cpu buffer not being
4445 * used at the moment.
4446 */
4447int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
4448			 struct ring_buffer *buffer_b, int cpu)
4449{
4450	struct ring_buffer_per_cpu *cpu_buffer_a;
4451	struct ring_buffer_per_cpu *cpu_buffer_b;
4452	int ret = -EINVAL;
4453
4454	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
4455	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
4456		goto out;
4457
4458	cpu_buffer_a = buffer_a->buffers[cpu];
4459	cpu_buffer_b = buffer_b->buffers[cpu];
4460
4461	/* At least make sure the two buffers are somewhat the same */
4462	if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
4463		goto out;
4464
4465	ret = -EAGAIN;
4466
4467	if (atomic_read(&buffer_a->record_disabled))
4468		goto out;
4469
4470	if (atomic_read(&buffer_b->record_disabled))
4471		goto out;
4472
4473	if (atomic_read(&cpu_buffer_a->record_disabled))
4474		goto out;
4475
4476	if (atomic_read(&cpu_buffer_b->record_disabled))
4477		goto out;
4478
4479	/*
4480	 * We can't do a synchronize_sched here because this
4481	 * function can be called in atomic context.
4482	 * Normally this will be called from the same CPU as @cpu.
4483	 * If not it's up to the caller to protect this.
4484	 */
4485	atomic_inc(&cpu_buffer_a->record_disabled);
4486	atomic_inc(&cpu_buffer_b->record_disabled);
4487
4488	ret = -EBUSY;
4489	if (local_read(&cpu_buffer_a->committing))
4490		goto out_dec;
4491	if (local_read(&cpu_buffer_b->committing))
4492		goto out_dec;
4493
4494	buffer_a->buffers[cpu] = cpu_buffer_b;
4495	buffer_b->buffers[cpu] = cpu_buffer_a;
4496
4497	cpu_buffer_b->buffer = buffer_a;
4498	cpu_buffer_a->buffer = buffer_b;
4499
4500	ret = 0;
4501
4502out_dec:
4503	atomic_dec(&cpu_buffer_a->record_disabled);
4504	atomic_dec(&cpu_buffer_b->record_disabled);
4505out:
4506	return ret;
4507}
4508EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
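/*
 * A tracer taking a max-latency style snapshot might use this roughly
 * as follows (a sketch; main_buffer and snapshot_buffer are
 * hypothetical, and keeping the cpu buffer quiescent around the swap
 * is the caller's responsibility, as noted above):
 *
 *	ret = ring_buffer_swap_cpu(main_buffer, snapshot_buffer, cpu);
 *	if (ret)
 *		return ret;
 *
 * A nonzero return (-EINVAL, -EAGAIN or -EBUSY) means the swap did
 * not happen and may simply be retried later. On success the snapshot
 * buffer's cpu buffer holds the events that were in main_buffer and
 * can be read at leisure.
 */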
4509#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
4510
4511/**
4512 * ring_buffer_alloc_read_page - allocate a page to read from buffer
4513 * @buffer: the buffer to allocate for.
4514 * @cpu: the cpu buffer to allocate.
4515 *
4516 * This function is used in conjunction with ring_buffer_read_page.
4517 * When reading a full page from the ring buffer, these functions
4518 * can be used to speed up the process. The calling function should
4519 * allocate a few pages first with this function. Then when it
4520 * needs to get pages from the ring buffer, it passes the result
4521 * of this function into ring_buffer_read_page, which will swap
4522 * the page that was allocated, with the read page of the buffer.
4523 *
4524 * Returns:
4525 *  The page allocated, or an ERR_PTR() on failure
4526 */
4527void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
4528{
4529	struct ring_buffer_per_cpu *cpu_buffer;
4530	struct buffer_data_page *bpage = NULL;
4531	unsigned long flags;
4532	struct page *page;
4533
4534	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4535		return ERR_PTR(-ENODEV);
4536
4537	cpu_buffer = buffer->buffers[cpu];
4538	local_irq_save(flags);
4539	arch_spin_lock(&cpu_buffer->lock);
4540
4541	if (cpu_buffer->free_page) {
4542		bpage = cpu_buffer->free_page;
4543		cpu_buffer->free_page = NULL;
4544	}
4545
4546	arch_spin_unlock(&cpu_buffer->lock);
4547	local_irq_restore(flags);
4548
4549	if (bpage)
4550		goto out;
4551
4552	page = alloc_pages_node(cpu_to_node(cpu),
4553				GFP_KERNEL | __GFP_NORETRY, 0);
4554	if (!page)
4555		return ERR_PTR(-ENOMEM);
4556
4557	bpage = page_address(page);
4558
4559 out:
4560	rb_init_page(bpage);
4561
4562	return bpage;
4563}
4564EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
4565
4566/**
4567 * ring_buffer_free_read_page - free an allocated read page
4568 * @buffer: the buffer the page was allocated for
4569 * @cpu: the cpu buffer the page came from
4570 * @data: the page to free
4571 *
4572 * Free a page allocated from ring_buffer_alloc_read_page.
4573 */
4574void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data)
4575{
4576	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4577	struct buffer_data_page *bpage = data;
4578	struct page *page = virt_to_page(bpage);
4579	unsigned long flags;
4580
4581	/* If the page is still in use someplace else, we can't reuse it */
4582	if (page_ref_count(page) > 1)
4583		goto out;
4584
4585	local_irq_save(flags);
4586	arch_spin_lock(&cpu_buffer->lock);
4587
4588	if (!cpu_buffer->free_page) {
4589		cpu_buffer->free_page = bpage;
4590		bpage = NULL;
4591	}
4592
4593	arch_spin_unlock(&cpu_buffer->lock);
4594	local_irq_restore(flags);
4595
4596 out:
4597	free_page((unsigned long)bpage);
4598}
4599EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
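/*
 * Putting the pair together: a full allocate/read/free cycle, built
 * on the example in ring_buffer_read_page() below (process_page() is
 * hypothetical):
 *
 *	void *rpage = ring_buffer_alloc_read_page(buffer, cpu);
 *	int ret;
 *
 *	if (IS_ERR(rpage))
 *		return PTR_ERR(rpage);
 *	ret = ring_buffer_read_page(buffer, &rpage, PAGE_SIZE, cpu, 0);
 *	if (ret >= 0)
 *		process_page(rpage, ret);
 *	ring_buffer_free_read_page(buffer, cpu, rpage);
 */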
4600
4601/**
4602 * ring_buffer_read_page - extract a page from the ring buffer
4603 * @buffer: buffer to extract from
4604 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
4605 * @len: amount to extract
4606 * @cpu: the cpu of the buffer to extract
4607 * @full: should the extraction only happen when the page is full.
4608 *
4609 * This function will pull out a page from the ring buffer and consume it.
4610 * @data_page must be the address of the variable that was returned
4611 * from ring_buffer_alloc_read_page. This is because the page might be used
4612 * to swap with a page in the ring buffer.
4613 *
4614 * for example:
4615 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
4616 *	if (IS_ERR(rpage))
4617 *		return PTR_ERR(rpage);
4618 *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
4619 *	if (ret >= 0)
4620 *		process_page(rpage, ret);
4621 *
4622 * When @full is set, the read fails unless the reader page can be
4623 * swapped out whole, i.e. the writer is completely off the page.
4624 *
4625 * Note: it is up to the calling functions to handle sleeps and wakeups.
4626 *  The ring buffer can be used anywhere in the kernel and can not
4627 *  blindly call wake_up. The layer that uses the ring buffer must be
4628 *  responsible for that.
4629 *
4630 * Returns:
4631 *  >=0 if data has been transferred, returns the offset of consumed data.
4632 *  <0 if no data has been transferred.
4633 */
4634int ring_buffer_read_page(struct ring_buffer *buffer,
4635			  void **data_page, size_t len, int cpu, int full)
4636{
4637	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4638	struct ring_buffer_event *event;
4639	struct buffer_data_page *bpage;
4640	struct buffer_page *reader;
4641	unsigned long missed_events;
4642	unsigned long flags;
4643	unsigned int commit;
4644	unsigned int read;
4645	u64 save_timestamp;
4646	int ret = -1;
4647
4648	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4649		goto out;
4650
4651	/*
4652	 * If len is not big enough to hold the page header, then
4653	 * we can not copy anything.
4654	 */
4655	if (len <= BUF_PAGE_HDR_SIZE)
4656		goto out;
4657
4658	len -= BUF_PAGE_HDR_SIZE;
4659
4660	if (!data_page)
4661		goto out;
4662
4663	bpage = *data_page;
4664	if (!bpage)
4665		goto out;
4666
4667	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4668
4669	reader = rb_get_reader_page(cpu_buffer);
4670	if (!reader)
4671		goto out_unlock;
4672
4673	event = rb_reader_event(cpu_buffer);
4674
4675	read = reader->read;
4676	commit = rb_page_commit(reader);
4677
4678	/* Check if any events were dropped */
4679	missed_events = cpu_buffer->lost_events;
4680
4681	/*
4682	 * If this page has been partially read or
4683	 * if len is not big enough to read the rest of the page or
4684	 * a writer is still on the page, then
4685	 * we must copy the data from the page to the buffer.
4686	 * Otherwise, we can simply swap the page with the one passed in.
4687	 */
4688	if (read || (len < (commit - read)) ||
4689	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
4690		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
4691		unsigned int rpos = read;
4692		unsigned int pos = 0;
4693		unsigned int size;
4694
4695		if (full)
4696			goto out_unlock;
4697
4698		if (len > (commit - read))
4699			len = (commit - read);
4700
4701		/* Always keep the time extend and data together */
4702		size = rb_event_ts_length(event);
4703
4704		if (len < size)
4705			goto out_unlock;
4706
4707		/* save the current timestamp, since the user will need it */
4708		save_timestamp = cpu_buffer->read_stamp;
4709
4710		/* Need to copy one event at a time */
4711		do {
4712			/* We need the size of one event, because
4713			 * rb_advance_reader only advances by one event,
4714			 * whereas rb_event_ts_length may include the size of
4715			 * one or two events.
4716			 * We have already ensured there's enough space if this
4717			 * is a time extend. */
4718			size = rb_event_length(event);
4719			memcpy(bpage->data + pos, rpage->data + rpos, size);
4720
4721			len -= size;
4722
4723			rb_advance_reader(cpu_buffer);
4724			rpos = reader->read;
4725			pos += size;
4726
4727			if (rpos >= commit)
4728				break;
4729
4730			event = rb_reader_event(cpu_buffer);
4731			/* Always keep the time extend and data together */
4732			size = rb_event_ts_length(event);
4733		} while (len >= size);
4734
4735		/* update bpage */
4736		local_set(&bpage->commit, pos);
4737		bpage->time_stamp = save_timestamp;
4738
4739		/* we copied everything to the beginning */
4740		read = 0;
4741	} else {
4742		/* update the entry counter */
4743		cpu_buffer->read += rb_page_entries(reader);
4744		cpu_buffer->read_bytes += BUF_PAGE_SIZE;
4745
4746		/* swap the pages */
4747		rb_init_page(bpage);
4748		bpage = reader->page;
4749		reader->page = *data_page;
4750		local_set(&reader->write, 0);
4751		local_set(&reader->entries, 0);
4752		reader->read = 0;
4753		*data_page = bpage;
4754
4755		/*
4756		 * Use the real_end for the data size,
4757		 * This gives us a chance to store the lost events
4758		 * on the page.
4759		 */
4760		if (reader->real_end)
4761			local_set(&bpage->commit, reader->real_end);
4762	}
4763	ret = read;
4764
4765	cpu_buffer->lost_events = 0;
4766
4767	commit = local_read(&bpage->commit);
4768	/*
4769	 * Set a flag in the commit field if we lost events
4770	 */
4771	if (missed_events) {
4772		/* If there is room at the end of the page to save the
4773		 * missed events, then record it there.
4774		 */
4775		if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
4776			memcpy(&bpage->data[commit], &missed_events,
4777			       sizeof(missed_events));
4778			local_add(RB_MISSED_STORED, &bpage->commit);
4779			commit += sizeof(missed_events);
4780		}
4781		local_add(RB_MISSED_EVENTS, &bpage->commit);
4782	}
4783
4784	/*
4785	 * This page may be off to user land. Zero it out here.
4786	 */
4787	if (commit < BUF_PAGE_SIZE)
4788		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
4789
4790 out_unlock:
4791	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4792
4793 out:
4794	return ret;
4795}
4796EXPORT_SYMBOL_GPL(ring_buffer_read_page);
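/*
 * A consumer of the returned page can decode the lost-events encoding
 * produced above. A sketch, assuming the RB_MISSED_* flag values are
 * available to the reader (they are private to this file):
 *
 *	unsigned long raw = local_read(&bpage->commit);
 *	unsigned long size = raw & ~(RB_MISSED_EVENTS | RB_MISSED_STORED);
 *	unsigned long missed = 0;
 *
 *	if (raw & RB_MISSED_EVENTS) {
 *		if (raw & RB_MISSED_STORED)
 *			memcpy(&missed, &bpage->data[size], sizeof(missed));
 *		else
 *			missed = ULONG_MAX;
 *	}
 *
 * where ULONG_MAX stands in for "events were lost, but there was no
 * room on the page to store the count".
 */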
4797
4798/*
4799 * We only allocate new buffers, never free them if the CPU goes down.
4800 * If we were to free the buffer, then the user would lose any trace that was in
4801 * the buffer.
4802 */
4803int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
4804{
4805	struct ring_buffer *buffer;
4806	long nr_pages_same;
4807	int cpu_i;
4808	unsigned long nr_pages;
4809
4810	buffer = container_of(node, struct ring_buffer, node);
4811	if (cpumask_test_cpu(cpu, buffer->cpumask))
4812		return 0;
4813
4814	nr_pages = 0;
4815	nr_pages_same = 1;
4816	/* check if all cpu sizes are same */
4817	for_each_buffer_cpu(buffer, cpu_i) {
4818		/* fill in the size from first enabled cpu */
4819		if (nr_pages == 0)
4820			nr_pages = buffer->buffers[cpu_i]->nr_pages;
4821		if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
4822			nr_pages_same = 0;
4823			break;
4824		}
4825	}
4826	/* allocate minimum pages, user can later expand it */
4827	if (!nr_pages_same)
4828		nr_pages = 2;
4829	buffer->buffers[cpu] =
4830		rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
4831	if (!buffer->buffers[cpu]) {
4832		WARN(1, "failed to allocate ring buffer on CPU %u\n",
4833		     cpu);
4834		return -ENOMEM;
4835	}
4836	smp_wmb();
4837	cpumask_set_cpu(cpu, buffer->cpumask);
4838	return 0;
4839}
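/*
 * The tracing core is expected to register this callback through the
 * multi-instance CPU hotplug API, roughly as in the sketch below (the
 * exact state name string is an assumption):
 *
 *	cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE, "trace/RB:prepare",
 *				trace_rb_cpu_prepare, NULL);
 *
 * with each ring buffer then adding its @node as an instance via
 * cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node).
 */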
4840
4841#ifdef CONFIG_RING_BUFFER_STARTUP_TEST
4842/*
4843 * This is a basic integrity check of the ring buffer.
4844 * Late in the boot cycle this test will run when configured in.
4845 * It will kick off a thread per CPU that will go into a loop
4846 * writing to the per cpu ring buffer various sizes of data.
4847 * Some of the data will be large items, some small.
4848 *
4849 * Another thread is created that goes into a spin, sending out
4850 * IPIs to the other CPUs to also write into the ring buffer.
4851 * This is to test the nesting ability of the buffer.
4852 *
4853 * Basic stats are recorded and reported. If something in the
4854 * ring buffer should happen that's not expected, a big warning
4855 * is displayed and all ring buffers are disabled.
4856 */
4857static struct task_struct *rb_threads[NR_CPUS] __initdata;
4858
4859struct rb_test_data {
4860	struct ring_buffer	*buffer;
4861	unsigned long		events;
4862	unsigned long		bytes_written;
4863	unsigned long		bytes_alloc;
4864	unsigned long		bytes_dropped;
4865	unsigned long		events_nested;
4866	unsigned long		bytes_written_nested;
4867	unsigned long		bytes_alloc_nested;
4868	unsigned long		bytes_dropped_nested;
4869	int			min_size_nested;
4870	int			max_size_nested;
4871	int			max_size;
4872	int			min_size;
4873	int			cpu;
4874	int			cnt;
4875};
4876
4877static struct rb_test_data rb_data[NR_CPUS] __initdata;
4878
4879/* 1 meg per cpu */
4880#define RB_TEST_BUFFER_SIZE	1048576
4881
4882static char rb_string[] __initdata =
4883	"abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
4884	"?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
4885	"!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
4886
4887static bool rb_test_started __initdata;
4888
4889struct rb_item {
4890	int size;
4891	char str[];
4892};
4893
4894static __init int rb_write_something(struct rb_test_data *data, bool nested)
4895{
4896	struct ring_buffer_event *event;
4897	struct rb_item *item;
4898	bool started;
4899	int event_len;
4900	int size;
4901	int len;
4902	int cnt;
4903
4904	/* Have nested writes different than what is written */
4905	cnt = data->cnt + (nested ? 27 : 0);
4906
4907	/* Multiply cnt by ~e, to make some unique increment */
4908	size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
4909
4910	len = size + sizeof(struct rb_item);
4911
4912	started = rb_test_started;
4913	/* read rb_test_started before checking buffer enabled */
4914	smp_rmb();
4915
4916	event = ring_buffer_lock_reserve(data->buffer, len);
4917	if (!event) {
4918		/* Ignore dropped events before test starts. */
4919		if (started) {
4920			if (nested)
4921				data->bytes_dropped_nested += len;
4922			else
4923				data->bytes_dropped += len;
4924		}
4925		return len;
4926	}
4927
4928	event_len = ring_buffer_event_length(event);
4929
4930	if (RB_WARN_ON(data->buffer, event_len < len))
4931		goto out;
4932
4933	item = ring_buffer_event_data(event);
4934	item->size = size;
4935	memcpy(item->str, rb_string, size);
4936
4937	if (nested) {
4938		data->bytes_alloc_nested += event_len;
4939		data->bytes_written_nested += len;
4940		data->events_nested++;
4941		if (!data->min_size_nested || len < data->min_size_nested)
4942			data->min_size_nested = len;
4943		if (len > data->max_size_nested)
4944			data->max_size_nested = len;
4945	} else {
4946		data->bytes_alloc += event_len;
4947		data->bytes_written += len;
4948		data->events++;
4949		if (!data->min_size || len < data->min_size)
4950			data->min_size = len;
4951		if (len > data->max_size)
4952			data->max_size = len;
4953	}
4954
4955 out:
4956	ring_buffer_unlock_commit(data->buffer, event);
4957
4958	return 0;
4959}
4960
4961static __init int rb_test(void *arg)
4962{
4963	struct rb_test_data *data = arg;
4964
4965	while (!kthread_should_stop()) {
4966		rb_write_something(data, false);
4967		data->cnt++;
4968
4969		set_current_state(TASK_INTERRUPTIBLE);
4970		/* Now sleep between a min of 100-300us and a max of 1ms */
4971		usleep_range(((data->cnt % 3) + 1) * 100, 1000);
4972	}
4973
4974	return 0;
4975}
4976
4977static __init void rb_ipi(void *ignore)
4978{
4979	struct rb_test_data *data;
4980	int cpu = smp_processor_id();
4981
4982	data = &rb_data[cpu];
4983	rb_write_something(data, true);
4984}
4985
4986static __init int rb_hammer_test(void *arg)
4987{
4988	while (!kthread_should_stop()) {
4989
4990		/* Send an IPI to all cpus to write data! */
4991		smp_call_function(rb_ipi, NULL, 1);
4992		/* No sleep, but for non preempt, let others run */
4993		schedule();
4994	}
4995
4996	return 0;
4997}
4998
4999static __init int test_ringbuffer(void)
5000{
5001	struct task_struct *rb_hammer;
5002	struct ring_buffer *buffer;
5003	int cpu;
5004	int ret = 0;
5005
5006	pr_info("Running ring buffer tests...\n");
5007
5008	buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
5009	if (WARN_ON(!buffer))
5010		return 0;
5011
5012	/* Disable buffer so that threads can't write to it yet */
5013	ring_buffer_record_off(buffer);
5014
5015	for_each_online_cpu(cpu) {
5016		rb_data[cpu].buffer = buffer;
5017		rb_data[cpu].cpu = cpu;
5018		rb_data[cpu].cnt = cpu;
5019		rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
5020						 "rbtester/%d", cpu);
5021		if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
5022			pr_cont("FAILED\n");
5023			ret = PTR_ERR(rb_threads[cpu]);
5024			goto out_free;
5025		}
5026
5027		kthread_bind(rb_threads[cpu], cpu);
5028		wake_up_process(rb_threads[cpu]);
5029	}
5030
5031	/* Now create the rb hammer! */
5032	rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
5033	if (WARN_ON(IS_ERR(rb_hammer))) {
5034		pr_cont("FAILED\n");
5035		ret = PTR_ERR(rb_hammer);
5036		goto out_free;
5037	}
5038
5039	ring_buffer_record_on(buffer);
5040	/*
5041	 * Show buffer is enabled before setting rb_test_started.
5042	 * Yes there's a small race window where events could be
5043	 * dropped and the thread wont catch it. But when a ring
5044	 * buffer gets enabled, there will always be some kind of
5045	 * delay before other CPUs see it. Thus, we don't care about
5046	 * those dropped events. We care about events dropped after
5047	 * the threads see that the buffer is active.
5048	 */
5049	smp_wmb();
5050	rb_test_started = true;
5051
5052	set_current_state(TASK_INTERRUPTIBLE);
5053	/* Just run for 10 seconds */
5054	schedule_timeout(10 * HZ);
5055
5056	kthread_stop(rb_hammer);
5057
5058 out_free:
5059	for_each_online_cpu(cpu) {
5060		if (!rb_threads[cpu])
5061			break;
5062		kthread_stop(rb_threads[cpu]);
5063	}
5064	if (ret) {
5065		ring_buffer_free(buffer);
5066		return ret;
5067	}
5068
5069	/* Report! */
5070	pr_info("finished\n");
5071	for_each_online_cpu(cpu) {
5072		struct ring_buffer_event *event;
5073		struct rb_test_data *data = &rb_data[cpu];
5074		struct rb_item *item;
5075		unsigned long total_events;
5076		unsigned long total_dropped;
5077		unsigned long total_written;
5078		unsigned long total_alloc;
5079		unsigned long total_read = 0;
5080		unsigned long total_size = 0;
5081		unsigned long total_len = 0;
5082		unsigned long total_lost = 0;
5083		unsigned long lost;
5084		int big_event_size;
5085		int small_event_size;
5086
5087		ret = -1;
5088
5089		total_events = data->events + data->events_nested;
5090		total_written = data->bytes_written + data->bytes_written_nested;
5091		total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
5092		total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
5093
5094		big_event_size = data->max_size + data->max_size_nested;
5095		small_event_size = data->min_size + data->min_size_nested;
5096
5097		pr_info("CPU %d:\n", cpu);
5098		pr_info("              events:    %ld\n", total_events);
5099		pr_info("       dropped bytes:    %ld\n", total_dropped);
5100		pr_info("       alloced bytes:    %ld\n", total_alloc);
5101		pr_info("       written bytes:    %ld\n", total_written);
5102		pr_info("       biggest event:    %d\n", big_event_size);
5103		pr_info("      smallest event:    %d\n", small_event_size);
5104
5105		if (RB_WARN_ON(buffer, total_dropped))
5106			break;
5107
5108		ret = 0;
5109
5110		while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
5111			total_lost += lost;
5112			item = ring_buffer_event_data(event);
5113			total_len += ring_buffer_event_length(event);
5114			total_size += item->size + sizeof(struct rb_item);
5115			if (memcmp(&item->str[0], rb_string, item->size) != 0) {
5116				pr_info("FAILED!\n");
5117				pr_info("buffer had: %.*s\n", item->size, item->str);
5118				pr_info("expected:   %.*s\n", item->size, rb_string);
5119				RB_WARN_ON(buffer, 1);
5120				ret = -1;
5121				break;
5122			}
5123			total_read++;
5124		}
5125		if (ret)
5126			break;
5127
5128		ret = -1;
5129
5130		pr_info("         read events:   %ld\n", total_read);
5131		pr_info("         lost events:   %ld\n", total_lost);
5132		pr_info("        total events:   %ld\n", total_lost + total_read);
5133		pr_info("  recorded len bytes:   %ld\n", total_len);
5134		pr_info(" recorded size bytes:   %ld\n", total_size);
5135		if (total_lost)
5136			pr_info(" With dropped events, record len and size may not match\n"
5137				" alloced and written from above\n");
5138		if (!total_lost) {
5139			if (RB_WARN_ON(buffer, total_len != total_alloc ||
5140				       total_size != total_written))
5141				break;
5142		}
5143		if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
5144			break;
5145
5146		ret = 0;
5147	}
5148	if (!ret)
5149		pr_info("Ring buffer PASSED!\n");
5150
5151	ring_buffer_free(buffer);
5152	return 0;
5153}
5154
5155late_initcall(test_ringbuffer);
5156#endif /* CONFIG_RING_BUFFER_STARTUP_TEST */