   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Generic ring buffer
   4 *
   5 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
   6 */
 
   7#include <linux/trace_events.h>
   8#include <linux/ring_buffer.h>
   9#include <linux/trace_clock.h>
  10#include <linux/sched/clock.h>
  11#include <linux/trace_seq.h>
  12#include <linux/spinlock.h>
  13#include <linux/irq_work.h>
 
  14#include <linux/uaccess.h>
  15#include <linux/hardirq.h>
  16#include <linux/kthread.h>	/* for self test */
  17#include <linux/module.h>
  18#include <linux/percpu.h>
  19#include <linux/mutex.h>
  20#include <linux/delay.h>
  21#include <linux/slab.h>
  22#include <linux/init.h>
  23#include <linux/hash.h>
  24#include <linux/list.h>
  25#include <linux/cpu.h>
  26#include <linux/oom.h>
  27
  28#include <asm/local.h>
  29
  30static void update_pages_handler(struct work_struct *work);
  31
  32/*
   33 * The ring buffer header is special. We must manually keep it up to date.
  34 */
  35int ring_buffer_print_entry_header(struct trace_seq *s)
  36{
  37	trace_seq_puts(s, "# compressed entry header\n");
  38	trace_seq_puts(s, "\ttype_len    :    5 bits\n");
  39	trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
  40	trace_seq_puts(s, "\tarray       :   32 bits\n");
  41	trace_seq_putc(s, '\n');
  42	trace_seq_printf(s, "\tpadding     : type == %d\n",
  43			 RINGBUF_TYPE_PADDING);
  44	trace_seq_printf(s, "\ttime_extend : type == %d\n",
  45			 RINGBUF_TYPE_TIME_EXTEND);
  46	trace_seq_printf(s, "\ttime_stamp : type == %d\n",
  47			 RINGBUF_TYPE_TIME_STAMP);
  48	trace_seq_printf(s, "\tdata max type_len  == %d\n",
  49			 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
  50
  51	return !trace_seq_has_overflowed(s);
  52}
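
/*
 * Illustrative sketch, not part of the original source: the compressed
 * header printed above packs into a single 32-bit word, 5 bits of
 * type_len plus 27 bits of time_delta. All demo_* names below are
 * hypothetical.
 */
#include <stdint.h>
#include <assert.h>

struct demo_event_hdr {
	uint32_t type_len   : 5;	/* event type, or data length / 4 */
	uint32_t time_delta : 27;	/* delta from the page time stamp */
};

static uint32_t demo_pack_header(uint32_t type_len, uint32_t delta)
{
	assert(type_len < (1u << 5) && delta < (1u << 27));
	return type_len | (delta << 5);	/* one possible bit layout */
}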
  53
  54/*
  55 * The ring buffer is made up of a list of pages. A separate list of pages is
  56 * allocated for each CPU. A writer may only write to a buffer that is
  57 * associated with the CPU it is currently executing on.  A reader may read
  58 * from any per cpu buffer.
  59 *
  60 * The reader is special. For each per cpu buffer, the reader has its own
  61 * reader page. When a reader has read the entire reader page, this reader
  62 * page is swapped with another page in the ring buffer.
  63 *
   64 * Now, as long as the writer is off the reader page, the reader can do
   65 * whatever it wants with that page. The writer will never write to that page
  66 * again (as long as it is out of the ring buffer).
  67 *
  68 * Here's some silly ASCII art.
  69 *
  70 *   +------+
  71 *   |reader|          RING BUFFER
  72 *   |page  |
  73 *   +------+        +---+   +---+   +---+
  74 *                   |   |-->|   |-->|   |
  75 *                   +---+   +---+   +---+
  76 *                     ^               |
  77 *                     |               |
  78 *                     +---------------+
  79 *
  80 *
  81 *   +------+
  82 *   |reader|          RING BUFFER
  83 *   |page  |------------------v
  84 *   +------+        +---+   +---+   +---+
  85 *                   |   |-->|   |-->|   |
  86 *                   +---+   +---+   +---+
  87 *                     ^               |
  88 *                     |               |
  89 *                     +---------------+
  90 *
  91 *
  92 *   +------+
  93 *   |reader|          RING BUFFER
  94 *   |page  |------------------v
  95 *   +------+        +---+   +---+   +---+
  96 *      ^            |   |-->|   |-->|   |
  97 *      |            +---+   +---+   +---+
  98 *      |                              |
  99 *      |                              |
 100 *      +------------------------------+
 101 *
 102 *
 103 *   +------+
 104 *   |buffer|          RING BUFFER
 105 *   |page  |------------------v
 106 *   +------+        +---+   +---+   +---+
 107 *      ^            |   |   |   |-->|   |
 108 *      |   New      +---+   +---+   +---+
 109 *      |  Reader------^               |
 110 *      |   page                       |
 111 *      +------------------------------+
 112 *
 113 *
 114 * After we make this swap, the reader can hand this page off to the splice
 115 * code and be done with it. It can even allocate a new page if it needs to
 116 * and swap that into the ring buffer.
 117 *
 118 * We will be using cmpxchg soon to make all this lockless.
 119 *
 120 */
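
/*
 * Userspace sketch of the swap pictured above (an assumption-laden
 * model, not the kernel implementation): the reader trades its private
 * spare page for the current head page, after which the writer can
 * never touch the page the reader took.
 */
#include <stddef.h>

struct demo_page {
	struct demo_page *next;
	/* ... payload ... */
};

/* Swap *head out of the ring; the spare takes its place. */
static struct demo_page *
demo_reader_swap(struct demo_page **head, struct demo_page *spare)
{
	struct demo_page *old = *head;

	spare->next = old->next;	/* spare takes the old head's slot */
	*head = spare;			/* ring now begins at the spare   */
	old->next = NULL;		/* old head is out of the ring    */
	return old;			/* reader owns it outright now    */
}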
 121
 122/* Used for individual buffers (after the counter) */
 123#define RB_BUFFER_OFF		(1 << 20)
 124
 125#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
 126
 127#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
 128#define RB_ALIGNMENT		4U
 129#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 130#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */
 131#define RB_ALIGN_DATA		__aligned(RB_ALIGNMENT)
 132
 133/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
 134#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
 135
 136enum {
 137	RB_LEN_TIME_EXTEND = 8,
 138	RB_LEN_TIME_STAMP =  8,
 139};
 140
 141#define skip_time_extend(event) \
 142	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
 143
 144#define extended_time(event) \
 145	(event->type_len >= RINGBUF_TYPE_TIME_EXTEND)
 146
 147static inline int rb_null_event(struct ring_buffer_event *event)
 148{
 149	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
 150}
 151
 152static void rb_event_set_padding(struct ring_buffer_event *event)
 153{
 154	/* padding has a NULL time_delta */
 155	event->type_len = RINGBUF_TYPE_PADDING;
 156	event->time_delta = 0;
 157}
 158
 159static unsigned
 160rb_event_data_length(struct ring_buffer_event *event)
 161{
 162	unsigned length;
 163
 164	if (event->type_len)
 165		length = event->type_len * RB_ALIGNMENT;
 166	else
 167		length = event->array[0];
 168	return length + RB_EVNT_HDR_SIZE;
 169}
 170
 171/*
 172 * Return the length of the given event. Will return
 173 * the length of the time extend if the event is a
 174 * time extend.
 175 */
 176static inline unsigned
 177rb_event_length(struct ring_buffer_event *event)
 178{
 179	switch (event->type_len) {
 180	case RINGBUF_TYPE_PADDING:
 181		if (rb_null_event(event))
 182			/* undefined */
 183			return -1;
 184		return  event->array[0] + RB_EVNT_HDR_SIZE;
 185
 186	case RINGBUF_TYPE_TIME_EXTEND:
 187		return RB_LEN_TIME_EXTEND;
 188
 189	case RINGBUF_TYPE_TIME_STAMP:
 190		return RB_LEN_TIME_STAMP;
 191
 192	case RINGBUF_TYPE_DATA:
 193		return rb_event_data_length(event);
 194	default:
 195		BUG();
 196	}
 197	/* not hit */
 198	return 0;
 199}
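
/*
 * Stand-alone sketch of the length encoding handled above (demo_*
 * constants mirror RB_ALIGNMENT and RINGBUF_TYPE_DATA_TYPE_LEN_MAX but
 * are local assumptions): small payloads ride in the 5-bit type_len
 * field in units of 4 bytes; anything larger sets type_len to 0 and
 * spends array[0] on an explicit length.
 */
#include <stdint.h>

#define DEMO_ALIGNMENT	  4u
#define DEMO_TYPE_LEN_MAX 28u

/* Returns the type_len to store: 1..28 inline, 0 means use array[0]. */
static uint32_t demo_encode_type_len(uint32_t payload)
{
	uint32_t units = (payload + DEMO_ALIGNMENT - 1) / DEMO_ALIGNMENT;

	return (units && units <= DEMO_TYPE_LEN_MAX) ? units : 0;
}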
 200
 201/*
 202 * Return total length of time extend and data,
 203 *   or just the event length for all other events.
 204 */
 205static inline unsigned
 206rb_event_ts_length(struct ring_buffer_event *event)
 207{
 208	unsigned len = 0;
 209
 210	if (extended_time(event)) {
 211		/* time extends include the data event after it */
 212		len = RB_LEN_TIME_EXTEND;
 213		event = skip_time_extend(event);
 214	}
 215	return len + rb_event_length(event);
 216}
 217
 218/**
 219 * ring_buffer_event_length - return the length of the event
 220 * @event: the event to get the length of
 221 *
 222 * Returns the size of the data load of a data event.
 223 * If the event is something other than a data event, it
 224 * returns the size of the event itself. With the exception
 225 * of a TIME EXTEND, where it still returns the size of the
 226 * data load of the data event after it.
 227 */
 228unsigned ring_buffer_event_length(struct ring_buffer_event *event)
 229{
 230	unsigned length;
 231
 232	if (extended_time(event))
 233		event = skip_time_extend(event);
 234
 235	length = rb_event_length(event);
 236	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 237		return length;
 238	length -= RB_EVNT_HDR_SIZE;
 239	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
  240		length -= sizeof(event->array[0]);
 241	return length;
 242}
 243EXPORT_SYMBOL_GPL(ring_buffer_event_length);
 244
 245/* inline for ring buffer fast paths */
 246static __always_inline void *
 247rb_event_data(struct ring_buffer_event *event)
 248{
 249	if (extended_time(event))
 250		event = skip_time_extend(event);
 251	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
 252	/* If length is in len field, then array[0] has the data */
 253	if (event->type_len)
 254		return (void *)&event->array[0];
 255	/* Otherwise length is in array[0] and array[1] has the data */
 256	return (void *)&event->array[1];
 257}
 258
 259/**
 260 * ring_buffer_event_data - return the data of the event
 261 * @event: the event to get the data from
 262 */
 263void *ring_buffer_event_data(struct ring_buffer_event *event)
 264{
 265	return rb_event_data(event);
 266}
 267EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 268
 269#define for_each_buffer_cpu(buffer, cpu)		\
 270	for_each_cpu(cpu, buffer->cpumask)
 271
 272#define TS_SHIFT	27
 273#define TS_MASK		((1ULL << TS_SHIFT) - 1)
 274#define TS_DELTA_TEST	(~TS_MASK)
 275
 276/**
 277 * ring_buffer_event_time_stamp - return the event's extended timestamp
 278 * @event: the event to get the timestamp of
 279 *
 280 * Returns the extended timestamp associated with a data event.
 281 * An extended time_stamp is a 64-bit timestamp represented
 282 * internally in a special way that makes the best use of space
 283 * contained within a ring buffer event.  This function decodes
 284 * it and maps it to a straight u64 value.
 285 */
 286u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event)
 287{
 288	u64 ts;
 289
 290	ts = event->array[0];
 291	ts <<= TS_SHIFT;
 292	ts += event->time_delta;
 293
 294	return ts;
 295}
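
/*
 * Illustrative round trip for the encoding decoded above (a local
 * sketch; DEMO_TS_SHIFT mirrors TS_SHIFT): the low 27 bits of an
 * absolute timestamp travel in time_delta, the rest in array[0].
 */
#include <stdint.h>

#define DEMO_TS_SHIFT 27
#define DEMO_TS_MASK  ((1ULL << DEMO_TS_SHIFT) - 1)

static void demo_ts_split(uint64_t ts, uint32_t *array0, uint32_t *delta)
{
	*array0 = (uint32_t)(ts >> DEMO_TS_SHIFT);
	*delta  = (uint32_t)(ts & DEMO_TS_MASK);
}

static uint64_t demo_ts_join(uint32_t array0, uint32_t delta)
{
	return ((uint64_t)array0 << DEMO_TS_SHIFT) + delta;
}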
 296
 297/* Flag when events were overwritten */
 298#define RB_MISSED_EVENTS	(1 << 31)
 299/* Missed count stored at end */
 300#define RB_MISSED_STORED	(1 << 30)
 301
 302#define RB_MISSED_FLAGS		(RB_MISSED_EVENTS|RB_MISSED_STORED)
 303
 304struct buffer_data_page {
 305	u64		 time_stamp;	/* page time stamp */
 306	local_t		 commit;	/* write committed index */
 307	unsigned char	 data[] RB_ALIGN_DATA;	/* data of buffer page */
 308};
 309
 310/*
 311 * Note, the buffer_page list must be first. The buffer pages
 312 * are allocated in cache lines, which means that each buffer
 313 * page will be at the beginning of a cache line, and thus
 314 * the least significant bits will be zero. We use this to
 315 * add flags in the list struct pointers, to make the ring buffer
 316 * lockless.
 317 */
 318struct buffer_page {
 319	struct list_head list;		/* list of buffer pages */
 320	local_t		 write;		/* index for next write */
 321	unsigned	 read;		/* index for next read */
 322	local_t		 entries;	/* entries on this page */
 323	unsigned long	 real_end;	/* real end of data */
 324	struct buffer_data_page *page;	/* Actual data page */
 325};
 326
 327/*
 328 * The buffer page counters, write and entries, must be reset
 329 * atomically when crossing page boundaries. To synchronize this
  330 * update, two counters are packed into the one number. One is
 331 * the actual counter for the write position or count on the page.
 332 *
 333 * The other is a counter of updaters. Before an update happens
 334 * the update partition of the counter is incremented. This will
 335 * allow the updater to update the counter atomically.
 336 *
 337 * The counter is 20 bits, and the state data is 12.
 338 */
 339#define RB_WRITE_MASK		0xfffff
 340#define RB_WRITE_INTCNT		(1 << 20)
 341
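/*
 * Stand-alone model of the split counter described above (a sketch,
 * not kernel code): the low 20 bits are the write index, the bits
 * above count nested updaters. Adding RB_WRITE_INTCNT therefore bumps
 * only the updater part.
 */
#include <stdint.h>

#define DEMO_WRITE_MASK	  0xfffffUL
#define DEMO_WRITE_INTCNT (1UL << 20)

static unsigned long demo_write_index(unsigned long w)
{
	return w & DEMO_WRITE_MASK;	/* write position on the page */
}

static unsigned long demo_updaters(unsigned long w)
{
	return w >> 20;			/* concurrent updater count   */
}
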
 342static void rb_init_page(struct buffer_data_page *bpage)
 343{
 344	local_set(&bpage->commit, 0);
 345}
 346
 347/*
 348 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 349 * this issue out.
 350 */
 351static void free_buffer_page(struct buffer_page *bpage)
 352{
 353	free_page((unsigned long)bpage->page);
 354	kfree(bpage);
 355}
 356
 357/*
 358 * We need to fit the time_stamp delta into 27 bits.
 359 */
 360static inline int test_time_stamp(u64 delta)
 361{
 362	if (delta & TS_DELTA_TEST)
 363		return 1;
 364	return 0;
 365}
 366
 367#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
 368
  369/* Max payload is BUF_PAGE_SIZE - header (8 bytes) */
 370#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
 371
 372int ring_buffer_print_page_header(struct trace_seq *s)
 373{
 374	struct buffer_data_page field;
 375
 376	trace_seq_printf(s, "\tfield: u64 timestamp;\t"
 377			 "offset:0;\tsize:%u;\tsigned:%u;\n",
 378			 (unsigned int)sizeof(field.time_stamp),
 379			 (unsigned int)is_signed_type(u64));
 380
 381	trace_seq_printf(s, "\tfield: local_t commit;\t"
 382			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
 383			 (unsigned int)offsetof(typeof(field), commit),
 384			 (unsigned int)sizeof(field.commit),
 385			 (unsigned int)is_signed_type(long));
 386
 387	trace_seq_printf(s, "\tfield: int overwrite;\t"
 388			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
 389			 (unsigned int)offsetof(typeof(field), commit),
 390			 1,
 391			 (unsigned int)is_signed_type(long));
 392
 393	trace_seq_printf(s, "\tfield: char data;\t"
 394			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
 395			 (unsigned int)offsetof(typeof(field), data),
 396			 (unsigned int)BUF_PAGE_SIZE,
 397			 (unsigned int)is_signed_type(char));
 398
 399	return !trace_seq_has_overflowed(s);
 400}
 401
 402struct rb_irq_work {
 403	struct irq_work			work;
 404	wait_queue_head_t		waiters;
 405	wait_queue_head_t		full_waiters;
 406	bool				waiters_pending;
 407	bool				full_waiters_pending;
 408	bool				wakeup_full;
 409};
 410
 411/*
 412 * Structure to hold event state and handle nested events.
 413 */
 414struct rb_event_info {
 415	u64			ts;
 416	u64			delta;
 417	unsigned long		length;
 418	struct buffer_page	*tail_page;
 419	int			add_timestamp;
 420};
 421
 422/*
  423 * Used to track which context the event is in.
 424 *  NMI     = 0
 425 *  IRQ     = 1
 426 *  SOFTIRQ = 2
 427 *  NORMAL  = 3
 428 *
 429 * See trace_recursive_lock() comment below for more details.
 430 */
 431enum {
 432	RB_CTX_NMI,
 433	RB_CTX_IRQ,
 434	RB_CTX_SOFTIRQ,
 435	RB_CTX_NORMAL,
 436	RB_CTX_MAX
 437};
 438
 439/*
 440 * head_page == tail_page && head == tail then buffer is empty.
 441 */
 442struct ring_buffer_per_cpu {
 443	int				cpu;
 444	atomic_t			record_disabled;
 445	struct ring_buffer		*buffer;
 446	raw_spinlock_t			reader_lock;	/* serialize readers */
 447	arch_spinlock_t			lock;
 448	struct lock_class_key		lock_key;
 449	struct buffer_data_page		*free_page;
 450	unsigned long			nr_pages;
 451	unsigned int			current_context;
 452	struct list_head		*pages;
 453	struct buffer_page		*head_page;	/* read from head */
 454	struct buffer_page		*tail_page;	/* write to tail */
 455	struct buffer_page		*commit_page;	/* committed pages */
 456	struct buffer_page		*reader_page;
 457	unsigned long			lost_events;
 458	unsigned long			last_overrun;
 459	unsigned long			nest;
 460	local_t				entries_bytes;
 461	local_t				entries;
 462	local_t				overrun;
 463	local_t				commit_overrun;
 464	local_t				dropped_events;
 465	local_t				committing;
 466	local_t				commits;
 467	local_t				pages_touched;
 468	local_t				pages_read;
 469	long				last_pages_touch;
 470	size_t				shortest_full;
 471	unsigned long			read;
 472	unsigned long			read_bytes;
 473	u64				write_stamp;
 474	u64				read_stamp;
 475	/* ring buffer pages to update, > 0 to add, < 0 to remove */
 476	long				nr_pages_to_update;
 477	struct list_head		new_pages; /* new pages to add */
 478	struct work_struct		update_pages_work;
 479	struct completion		update_done;
 480
 481	struct rb_irq_work		irq_work;
 482};
 483
 484struct ring_buffer {
 485	unsigned			flags;
 486	int				cpus;
 487	atomic_t			record_disabled;
 488	atomic_t			resize_disabled;
 489	cpumask_var_t			cpumask;
 490
 491	struct lock_class_key		*reader_lock_key;
 492
 493	struct mutex			mutex;
 494
 495	struct ring_buffer_per_cpu	**buffers;
 496
 497	struct hlist_node		node;
 498	u64				(*clock)(void);
 499
 500	struct rb_irq_work		irq_work;
 501	bool				time_stamp_abs;
 502};
 503
 504struct ring_buffer_iter {
 505	struct ring_buffer_per_cpu	*cpu_buffer;
 506	unsigned long			head;
 507	struct buffer_page		*head_page;
 508	struct buffer_page		*cache_reader_page;
 509	unsigned long			cache_read;
 510	u64				read_stamp;
 511};
 512
 513/**
 514 * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
 515 * @buffer: The ring_buffer to get the number of pages from
 516 * @cpu: The cpu of the ring_buffer to get the number of pages from
 517 *
 518 * Returns the number of pages used by a per_cpu buffer of the ring buffer.
 519 */
 520size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu)
 521{
 522	return buffer->buffers[cpu]->nr_pages;
 523}
 524
 525/**
 526 * ring_buffer_nr_pages_dirty - get the number of used pages in the ring buffer
 527 * @buffer: The ring_buffer to get the number of pages from
 528 * @cpu: The cpu of the ring_buffer to get the number of pages from
 529 *
 530 * Returns the number of pages that have content in the ring buffer.
 531 */
 532size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu)
 533{
 534	size_t read;
 535	size_t cnt;
 536
 537	read = local_read(&buffer->buffers[cpu]->pages_read);
 538	cnt = local_read(&buffer->buffers[cpu]->pages_touched);
 539	/* The reader can read an empty page, but not more than that */
 540	if (cnt < read) {
 541		WARN_ON_ONCE(read > cnt + 1);
 542		return 0;
 543	}
 544
 545	return cnt - read;
 546}
 547
 548/*
 549 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 550 *
  551 * Called from irq_work context to wake up any task that is blocked
  552 * on the ring buffer waiters queue.
 553 */
 554static void rb_wake_up_waiters(struct irq_work *work)
 555{
 556	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
 557
 558	wake_up_all(&rbwork->waiters);
 559	if (rbwork->wakeup_full) {
 560		rbwork->wakeup_full = false;
 561		wake_up_all(&rbwork->full_waiters);
 562	}
 563}
 564
 565/**
 566 * ring_buffer_wait - wait for input to the ring buffer
 567 * @buffer: buffer to wait on
 568 * @cpu: the cpu buffer to wait on
 569 * @full: wait until a full page is available, if @cpu != RING_BUFFER_ALL_CPUS
 570 *
 571 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 572 * as data is added to any of the @buffer's cpu buffers. Otherwise
 573 * it will wait for data to be added to a specific cpu buffer.
 574 */
 575int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full)
 576{
 577	struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
 578	DEFINE_WAIT(wait);
 579	struct rb_irq_work *work;
 580	int ret = 0;
 581
 582	/*
 583	 * Depending on what the caller is waiting for, either any
 584	 * data in any cpu buffer, or a specific buffer, put the
 585	 * caller on the appropriate wait queue.
 586	 */
 587	if (cpu == RING_BUFFER_ALL_CPUS) {
 588		work = &buffer->irq_work;
 589		/* Full only makes sense on per cpu reads */
 590		full = 0;
 591	} else {
 592		if (!cpumask_test_cpu(cpu, buffer->cpumask))
 593			return -ENODEV;
 594		cpu_buffer = buffer->buffers[cpu];
 595		work = &cpu_buffer->irq_work;
 596	}
 597
 598
 599	while (true) {
 600		if (full)
 601			prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
 602		else
 603			prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
 604
 605		/*
 606		 * The events can happen in critical sections where
 607		 * checking a work queue can cause deadlocks.
 608		 * After adding a task to the queue, this flag is set
 609		 * only to notify events to try to wake up the queue
 610		 * using irq_work.
 611		 *
 612		 * We don't clear it even if the buffer is no longer
 613		 * empty. The flag only causes the next event to run
  614		 * irq_work to do the work queue wake up. The worst
 615		 * that can happen if we race with !trace_empty() is that
 616		 * an event will cause an irq_work to try to wake up
 617		 * an empty queue.
 618		 *
 619		 * There's no reason to protect this flag either, as
 620		 * the work queue and irq_work logic will do the necessary
 621		 * synchronization for the wake ups. The only thing
 622		 * that is necessary is that the wake up happens after
 623		 * a task has been queued. It's OK for spurious wake ups.
 624		 */
 625		if (full)
 626			work->full_waiters_pending = true;
 627		else
 628			work->waiters_pending = true;
 629
 630		if (signal_pending(current)) {
 631			ret = -EINTR;
 632			break;
 633		}
 634
 635		if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
 636			break;
 637
 638		if (cpu != RING_BUFFER_ALL_CPUS &&
 639		    !ring_buffer_empty_cpu(buffer, cpu)) {
 640			unsigned long flags;
 641			bool pagebusy;
 642			size_t nr_pages;
 643			size_t dirty;
 644
 645			if (!full)
 646				break;
 647
 648			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 649			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
 650			nr_pages = cpu_buffer->nr_pages;
 651			dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
 652			if (!cpu_buffer->shortest_full ||
  653			    cpu_buffer->shortest_full > full)
 654				cpu_buffer->shortest_full = full;
 655			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 656			if (!pagebusy &&
 657			    (!nr_pages || (dirty * 100) > full * nr_pages))
 658				break;
 659		}
 660
 661		schedule();
 662	}
 663
 664	if (full)
 665		finish_wait(&work->full_waiters, &wait);
 666	else
 667		finish_wait(&work->waiters, &wait);
 668
 669	return ret;
 670}
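
/*
 * Sketch of the integer percent test used above: "is the buffer at
 * least @full percent dirty?" computed without division. A local model
 * under the same assumptions as ring_buffer_wait().
 */
#include <stddef.h>
#include <stdbool.h>

static bool demo_percent_full(size_t dirty, size_t nr_pages, int full)
{
	/* a buffer with no pages counts as full, so waiters never strand */
	return !nr_pages || dirty * 100 > (size_t)full * nr_pages;
}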
 671
 672/**
 673 * ring_buffer_poll_wait - poll on buffer input
 674 * @buffer: buffer to wait on
 675 * @cpu: the cpu buffer to wait on
 676 * @filp: the file descriptor
 677 * @poll_table: The poll descriptor
 678 *
 679 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 680 * as data is added to any of the @buffer's cpu buffers. Otherwise
 681 * it will wait for data to be added to a specific cpu buffer.
 682 *
 683 * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
 684 * zero otherwise.
 685 */
 686__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
 687			  struct file *filp, poll_table *poll_table)
 688{
 689	struct ring_buffer_per_cpu *cpu_buffer;
 690	struct rb_irq_work *work;
 691
 692	if (cpu == RING_BUFFER_ALL_CPUS)
 693		work = &buffer->irq_work;
 694	else {
 695		if (!cpumask_test_cpu(cpu, buffer->cpumask))
 696			return -EINVAL;
 697
 698		cpu_buffer = buffer->buffers[cpu];
 699		work = &cpu_buffer->irq_work;
 700	}
 701
 702	poll_wait(filp, &work->waiters, poll_table);
 703	work->waiters_pending = true;
 704	/*
 705	 * There's a tight race between setting the waiters_pending and
 706	 * checking if the ring buffer is empty.  Once the waiters_pending bit
 707	 * is set, the next event will wake the task up, but we can get stuck
 708	 * if there's only a single event in.
 709	 *
 710	 * FIXME: Ideally, we need a memory barrier on the writer side as well,
 711	 * but adding a memory barrier to all events will cause too much of a
 712	 * performance hit in the fast path.  We only need a memory barrier when
 713	 * the buffer goes from empty to having content.  But as this race is
 714	 * extremely small, and it's not a problem if another event comes in, we
 715	 * will fix it later.
 716	 */
 717	smp_mb();
 718
 719	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
 720	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
 721		return EPOLLIN | EPOLLRDNORM;
 722	return 0;
 723}
 724
 725/* buffer may be either ring_buffer or ring_buffer_per_cpu */
 726#define RB_WARN_ON(b, cond)						\
 727	({								\
 728		int _____ret = unlikely(cond);				\
 729		if (_____ret) {						\
 730			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
 731				struct ring_buffer_per_cpu *__b =	\
 732					(void *)b;			\
 733				atomic_inc(&__b->buffer->record_disabled); \
 734			} else						\
 735				atomic_inc(&b->record_disabled);	\
 736			WARN_ON(1);					\
 737		}							\
 738		_____ret;						\
 739	})
 740
 741/* Up this if you want to test the TIME_EXTENTS and normalization */
 742#define DEBUG_SHIFT 0
 743
 744static inline u64 rb_time_stamp(struct ring_buffer *buffer)
 745{
 746	/* shift to debug/test normalization and TIME_EXTENTS */
 747	return buffer->clock() << DEBUG_SHIFT;
 748}
 749
 750u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
 751{
 752	u64 time;
 753
 754	preempt_disable_notrace();
 755	time = rb_time_stamp(buffer);
 756	preempt_enable_notrace();
 757
 758	return time;
 759}
 760EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
 761
 762void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
 763				      int cpu, u64 *ts)
 764{
 765	/* Just stupid testing the normalize function and deltas */
 766	*ts >>= DEBUG_SHIFT;
 767}
 768EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
 769
 770/*
 771 * Making the ring buffer lockless makes things tricky.
  772 * Writes only happen on the CPU that they are on, so they only
  773 * need to worry about interrupts. Reads, however, can
  774 * happen on any CPU.
 775 *
 776 * The reader page is always off the ring buffer, but when the
 777 * reader finishes with a page, it needs to swap its page with
 778 * a new one from the buffer. The reader needs to take from
 779 * the head (writes go to the tail). But if a writer is in overwrite
 780 * mode and wraps, it must push the head page forward.
 781 *
 782 * Here lies the problem.
 783 *
 784 * The reader must be careful to replace only the head page, and
 785 * not another one. As described at the top of the file in the
 786 * ASCII art, the reader sets its old page to point to the next
 787 * page after head. It then sets the page after head to point to
 788 * the old reader page. But if the writer moves the head page
 789 * during this operation, the reader could end up with the tail.
 790 *
 791 * We use cmpxchg to help prevent this race. We also do something
 792 * special with the page before head. We set the LSB to 1.
 793 *
 794 * When the writer must push the page forward, it will clear the
 795 * bit that points to the head page, move the head, and then set
 796 * the bit that points to the new head page.
 797 *
 798 * We also don't want an interrupt coming in and moving the head
 799 * page on another writer. Thus we use the second LSB to catch
 800 * that too. Thus:
 801 *
 802 * head->list->prev->next        bit 1          bit 0
 803 *                              -------        -------
 804 * Normal page                     0              0
 805 * Points to head page             0              1
 806 * New head page                   1              0
 807 *
 808 * Note we can not trust the prev pointer of the head page, because:
 809 *
 810 * +----+       +-----+        +-----+
 811 * |    |------>|  T  |---X--->|  N  |
 812 * |    |<------|     |        |     |
 813 * +----+       +-----+        +-----+
 814 *   ^                           ^ |
 815 *   |          +-----+          | |
 816 *   +----------|  R  |----------+ |
 817 *              |     |<-----------+
 818 *              +-----+
 819 *
 820 * Key:  ---X-->  HEAD flag set in pointer
 821 *         T      Tail page
 822 *         R      Reader page
 823 *         N      Next page
 824 *
 825 * (see __rb_reserve_next() to see where this happens)
 826 *
 827 *  What the above shows is that the reader just swapped out
 828 *  the reader page with a page in the buffer, but before it
 829 *  could make the new header point back to the new page added
 830 *  it was preempted by a writer. The writer moved forward onto
 831 *  the new page added by the reader and is about to move forward
 832 *  again.
 833 *
 834 *  You can see, it is legitimate for the previous pointer of
 835 *  the head (or any page) not to point back to itself. But only
 836 *  temporarily.
 837 */
 838
 839#define RB_PAGE_NORMAL		0UL
 840#define RB_PAGE_HEAD		1UL
 841#define RB_PAGE_UPDATE		2UL
 842
 843
 844#define RB_FLAG_MASK		3UL
 845
 846/* PAGE_MOVED is not part of the mask */
 847#define RB_PAGE_MOVED		4UL
 848
 849/*
 850 * rb_list_head - remove any bit
 851 */
 852static struct list_head *rb_list_head(struct list_head *list)
 853{
 854	unsigned long val = (unsigned long)list;
 855
 856	return (struct list_head *)(val & ~RB_FLAG_MASK);
 857}
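
/*
 * Userspace sketch of the tagged-pointer trick above (assumes nodes
 * aligned to at least 4 bytes and GCC/Clang __atomic builtins; not the
 * kernel implementation): the two low bits of a next pointer carry the
 * HEAD/UPDATE flags and are flipped atomically.
 */
#include <stdint.h>
#include <stdbool.h>

#define DEMO_FLAG_MASK 3UL

static bool demo_set_flag(uintptr_t *slot, uintptr_t target,
			  unsigned long old_flag, unsigned long new_flag)
{
	uintptr_t expected = (target & ~DEMO_FLAG_MASK) | old_flag;
	uintptr_t desired  = (target & ~DEMO_FLAG_MASK) | new_flag;

	/* fails (returns false) if a racing updater got there first */
	return __atomic_compare_exchange_n(slot, &expected, desired, false,
					   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}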
 858
 859/*
 860 * rb_is_head_page - test if the given page is the head page
 861 *
 862 * Because the reader may move the head_page pointer, we can
 863 * not trust what the head page is (it may be pointing to
 864 * the reader page). But if the next page is a header page,
 865 * its flags will be non zero.
 866 */
 867static inline int
 868rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
 869		struct buffer_page *page, struct list_head *list)
 870{
 871	unsigned long val;
 872
 873	val = (unsigned long)list->next;
 874
 875	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
 876		return RB_PAGE_MOVED;
 877
 878	return val & RB_FLAG_MASK;
 879}
 880
 881/*
 882 * rb_is_reader_page
 883 *
  884 * The unique thing about the reader page is that, if the
 885 * writer is ever on it, the previous pointer never points
 886 * back to the reader page.
 887 */
 888static bool rb_is_reader_page(struct buffer_page *page)
 889{
 890	struct list_head *list = page->list.prev;
 891
 892	return rb_list_head(list->next) != &page->list;
 893}
 894
 895/*
 896 * rb_set_list_to_head - set a list_head to be pointing to head.
 897 */
 898static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
 899				struct list_head *list)
 900{
 901	unsigned long *ptr;
 902
 903	ptr = (unsigned long *)&list->next;
 904	*ptr |= RB_PAGE_HEAD;
 905	*ptr &= ~RB_PAGE_UPDATE;
 906}
 907
 908/*
 909 * rb_head_page_activate - sets up head page
 910 */
 911static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
 912{
 913	struct buffer_page *head;
 914
 915	head = cpu_buffer->head_page;
 916	if (!head)
 917		return;
 918
 919	/*
 920	 * Set the previous list pointer to have the HEAD flag.
 921	 */
 922	rb_set_list_to_head(cpu_buffer, head->list.prev);
 923}
 924
 925static void rb_list_head_clear(struct list_head *list)
 926{
 927	unsigned long *ptr = (unsigned long *)&list->next;
 928
 929	*ptr &= ~RB_FLAG_MASK;
 930}
 931
 932/*
 933 * rb_head_page_deactivate - clears head page ptr (for free list)
 934 */
 935static void
 936rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
 937{
 938	struct list_head *hd;
 939
 940	/* Go through the whole list and clear any pointers found. */
 941	rb_list_head_clear(cpu_buffer->pages);
 942
 943	list_for_each(hd, cpu_buffer->pages)
 944		rb_list_head_clear(hd);
 945}
 946
 947static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
 948			    struct buffer_page *head,
 949			    struct buffer_page *prev,
 950			    int old_flag, int new_flag)
 951{
 952	struct list_head *list;
 953	unsigned long val = (unsigned long)&head->list;
 954	unsigned long ret;
 955
 956	list = &prev->list;
 957
 958	val &= ~RB_FLAG_MASK;
 959
 960	ret = cmpxchg((unsigned long *)&list->next,
 961		      val | old_flag, val | new_flag);
 962
 963	/* check if the reader took the page */
 964	if ((ret & ~RB_FLAG_MASK) != val)
 965		return RB_PAGE_MOVED;
 966
 967	return ret & RB_FLAG_MASK;
 968}
 969
 970static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
 971				   struct buffer_page *head,
 972				   struct buffer_page *prev,
 973				   int old_flag)
 974{
 975	return rb_head_page_set(cpu_buffer, head, prev,
 976				old_flag, RB_PAGE_UPDATE);
 977}
 978
 979static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
 980				 struct buffer_page *head,
 981				 struct buffer_page *prev,
 982				 int old_flag)
 983{
 984	return rb_head_page_set(cpu_buffer, head, prev,
 985				old_flag, RB_PAGE_HEAD);
 986}
 987
 988static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
 989				   struct buffer_page *head,
 990				   struct buffer_page *prev,
 991				   int old_flag)
 992{
 993	return rb_head_page_set(cpu_buffer, head, prev,
 994				old_flag, RB_PAGE_NORMAL);
 995}
 996
 997static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
 998			       struct buffer_page **bpage)
 999{
1000	struct list_head *p = rb_list_head((*bpage)->list.next);
1001
1002	*bpage = list_entry(p, struct buffer_page, list);
1003}
1004
1005static struct buffer_page *
1006rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
1007{
1008	struct buffer_page *head;
1009	struct buffer_page *page;
1010	struct list_head *list;
1011	int i;
1012
1013	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
1014		return NULL;
1015
1016	/* sanity check */
1017	list = cpu_buffer->pages;
1018	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
1019		return NULL;
1020
1021	page = head = cpu_buffer->head_page;
1022	/*
1023	 * It is possible that the writer moves the header behind
1024	 * where we started, and we miss in one loop.
1025	 * A second loop should grab the header, but we'll do
1026	 * three loops just because I'm paranoid.
1027	 */
1028	for (i = 0; i < 3; i++) {
1029		do {
1030			if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
1031				cpu_buffer->head_page = page;
1032				return page;
1033			}
1034			rb_inc_page(cpu_buffer, &page);
1035		} while (page != head);
1036	}
1037
1038	RB_WARN_ON(cpu_buffer, 1);
1039
1040	return NULL;
1041}
1042
1043static int rb_head_page_replace(struct buffer_page *old,
1044				struct buffer_page *new)
1045{
1046	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
1047	unsigned long val;
1048	unsigned long ret;
1049
1050	val = *ptr & ~RB_FLAG_MASK;
1051	val |= RB_PAGE_HEAD;
1052
1053	ret = cmpxchg(ptr, val, (unsigned long)&new->list);
1054
1055	return ret == val;
1056}
1057
1058/*
1059 * rb_tail_page_update - move the tail page forward
1060 */
1061static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
1062			       struct buffer_page *tail_page,
1063			       struct buffer_page *next_page)
1064{
1065	unsigned long old_entries;
1066	unsigned long old_write;
1067
1068	/*
1069	 * The tail page now needs to be moved forward.
1070	 *
1071	 * We need to reset the tail page, but without messing
1072	 * with possible erasing of data brought in by interrupts
1073	 * that have moved the tail page and are currently on it.
1074	 *
1075	 * We add a counter to the write field to denote this.
1076	 */
1077	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
1078	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
1079
1080	local_inc(&cpu_buffer->pages_touched);
1081	/*
1082	 * Just make sure we have seen our old_write and synchronize
1083	 * with any interrupts that come in.
1084	 */
1085	barrier();
1086
1087	/*
1088	 * If the tail page is still the same as what we think
1089	 * it is, then it is up to us to update the tail
1090	 * pointer.
1091	 */
1092	if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
1093		/* Zero the write counter */
1094		unsigned long val = old_write & ~RB_WRITE_MASK;
1095		unsigned long eval = old_entries & ~RB_WRITE_MASK;
1096
1097		/*
1098		 * This will only succeed if an interrupt did
1099		 * not come in and change it. In which case, we
1100		 * do not want to modify it.
1101		 *
1102		 * We add (void) to let the compiler know that we do not care
1103		 * about the return value of these functions. We use the
1104		 * cmpxchg to only update if an interrupt did not already
1105		 * do it for us. If the cmpxchg fails, we don't care.
1106		 */
1107		(void)local_cmpxchg(&next_page->write, old_write, val);
1108		(void)local_cmpxchg(&next_page->entries, old_entries, eval);
1109
1110		/*
1111		 * No need to worry about races with clearing out the commit.
1112		 * it only can increment when a commit takes place. But that
1113		 * only happens in the outer most nested commit.
1114		 */
1115		local_set(&next_page->page->commit, 0);
1116
1117		/* Again, either we update tail_page or an interrupt does */
1118		(void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
1119	}
1120}
1121
1122static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
1123			  struct buffer_page *bpage)
1124{
1125	unsigned long val = (unsigned long)bpage;
1126
1127	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
1128		return 1;
1129
1130	return 0;
1131}
1132
1133/**
1134 * rb_check_list - make sure a pointer to a list has the last bits zero
1135 */
1136static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
1137			 struct list_head *list)
1138{
1139	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
1140		return 1;
1141	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
1142		return 1;
1143	return 0;
1144}
1145
1146/**
1147 * rb_check_pages - integrity check of buffer pages
1148 * @cpu_buffer: CPU buffer with pages to test
1149 *
1150 * As a safety measure we check to make sure the data pages have not
1151 * been corrupted.
1152 */
1153static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
1154{
1155	struct list_head *head = cpu_buffer->pages;
1156	struct buffer_page *bpage, *tmp;
1157
1158	/* Reset the head page if it exists */
1159	if (cpu_buffer->head_page)
1160		rb_set_head_page(cpu_buffer);
1161
1162	rb_head_page_deactivate(cpu_buffer);
1163
1164	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
1165		return -1;
1166	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
1167		return -1;
1168
1169	if (rb_check_list(cpu_buffer, head))
1170		return -1;
1171
1172	list_for_each_entry_safe(bpage, tmp, head, list) {
1173		if (RB_WARN_ON(cpu_buffer,
1174			       bpage->list.next->prev != &bpage->list))
1175			return -1;
1176		if (RB_WARN_ON(cpu_buffer,
1177			       bpage->list.prev->next != &bpage->list))
1178			return -1;
1179		if (rb_check_list(cpu_buffer, &bpage->list))
1180			return -1;
1181	}
1182
1183	rb_head_page_activate(cpu_buffer);
1184
1185	return 0;
1186}
1187
1188static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
1189{
1190	struct buffer_page *bpage, *tmp;
1191	bool user_thread = current->mm != NULL;
1192	gfp_t mflags;
1193	long i;
1194
1195	/*
1196	 * Check if the available memory is there first.
1197	 * Note, si_mem_available() only gives us a rough estimate of available
1198	 * memory. It may not be accurate. But we don't care, we just want
1199	 * to prevent doing any allocation when it is obvious that it is
1200	 * not going to succeed.
1201	 */
1202	i = si_mem_available();
1203	if (i < nr_pages)
1204		return -ENOMEM;
1205
1206	/*
1207	 * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
1208	 * gracefully without invoking oom-killer and the system is not
1209	 * destabilized.
1210	 */
1211	mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL;
1212
1213	/*
 1214	 * If a user thread allocates too much, si_mem_available() may
 1215	 * report there's enough memory even though there is not.
 1216	 * Make sure the OOM killer kills this thread. This can happen
1217	 * even with RETRY_MAYFAIL because another task may be doing
1218	 * an allocation after this task has taken all memory.
1219	 * This is the task the OOM killer needs to take out during this
1220	 * loop, even if it was triggered by an allocation somewhere else.
1221	 */
1222	if (user_thread)
1223		set_current_oom_origin();
1224	for (i = 0; i < nr_pages; i++) {
1225		struct page *page;
1226
1227		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1228				    mflags, cpu_to_node(cpu));
1229		if (!bpage)
1230			goto free_pages;
1231
1232		list_add(&bpage->list, pages);
1233
1234		page = alloc_pages_node(cpu_to_node(cpu), mflags, 0);
1235		if (!page)
1236			goto free_pages;
1237		bpage->page = page_address(page);
1238		rb_init_page(bpage->page);
1239
1240		if (user_thread && fatal_signal_pending(current))
1241			goto free_pages;
1242	}
1243	if (user_thread)
1244		clear_current_oom_origin();
1245
1246	return 0;
1247
1248free_pages:
1249	list_for_each_entry_safe(bpage, tmp, pages, list) {
1250		list_del_init(&bpage->list);
1251		free_buffer_page(bpage);
1252	}
1253	if (user_thread)
1254		clear_current_oom_origin();
1255
1256	return -ENOMEM;
1257}
1258
1259static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1260			     unsigned long nr_pages)
1261{
1262	LIST_HEAD(pages);
1263
1264	WARN_ON(!nr_pages);
1265
1266	if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
1267		return -ENOMEM;
1268
1269	/*
1270	 * The ring buffer page list is a circular list that does not
1271	 * start and end with a list head. All page list items point to
1272	 * other pages.
1273	 */
1274	cpu_buffer->pages = pages.next;
1275	list_del(&pages);
1276
1277	cpu_buffer->nr_pages = nr_pages;
1278
1279	rb_check_pages(cpu_buffer);
1280
1281	return 0;
1282}
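
/*
 * Minimal sketch of the "headless" circular list built above (a local
 * model, not kernel code): pages are gathered on a temporary list
 * head, then the head itself is unlinked so that every node left on
 * the ring is a real buffer page.
 */
#include <stddef.h>

struct demo_node {
	struct demo_node *next, *prev;
};

static struct demo_node *demo_detach_head(struct demo_node *head)
{
	struct demo_node *first = head->next;

	/* unlink the temporary head; the ring stays circular */
	head->prev->next = head->next;
	head->next->prev = head->prev;
	return first;	/* analogous to cpu_buffer->pages */
}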
1283
1284static struct ring_buffer_per_cpu *
1285rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
1286{
1287	struct ring_buffer_per_cpu *cpu_buffer;
1288	struct buffer_page *bpage;
1289	struct page *page;
1290	int ret;
1291
1292	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1293				  GFP_KERNEL, cpu_to_node(cpu));
1294	if (!cpu_buffer)
1295		return NULL;
1296
1297	cpu_buffer->cpu = cpu;
1298	cpu_buffer->buffer = buffer;
1299	raw_spin_lock_init(&cpu_buffer->reader_lock);
1300	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1301	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1302	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
1303	init_completion(&cpu_buffer->update_done);
1304	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
1305	init_waitqueue_head(&cpu_buffer->irq_work.waiters);
1306	init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);
1307
1308	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1309			    GFP_KERNEL, cpu_to_node(cpu));
1310	if (!bpage)
1311		goto fail_free_buffer;
1312
1313	rb_check_bpage(cpu_buffer, bpage);
1314
1315	cpu_buffer->reader_page = bpage;
1316	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
1317	if (!page)
1318		goto fail_free_reader;
1319	bpage->page = page_address(page);
1320	rb_init_page(bpage->page);
1321
1322	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1323	INIT_LIST_HEAD(&cpu_buffer->new_pages);
1324
1325	ret = rb_allocate_pages(cpu_buffer, nr_pages);
1326	if (ret < 0)
1327		goto fail_free_reader;
1328
1329	cpu_buffer->head_page
1330		= list_entry(cpu_buffer->pages, struct buffer_page, list);
1331	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1332
1333	rb_head_page_activate(cpu_buffer);
1334
1335	return cpu_buffer;
1336
1337 fail_free_reader:
1338	free_buffer_page(cpu_buffer->reader_page);
1339
1340 fail_free_buffer:
1341	kfree(cpu_buffer);
1342	return NULL;
1343}
1344
1345static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1346{
1347	struct list_head *head = cpu_buffer->pages;
1348	struct buffer_page *bpage, *tmp;
1349
1350	free_buffer_page(cpu_buffer->reader_page);
1351
1352	rb_head_page_deactivate(cpu_buffer);
1353
1354	if (head) {
1355		list_for_each_entry_safe(bpage, tmp, head, list) {
1356			list_del_init(&bpage->list);
1357			free_buffer_page(bpage);
1358		}
1359		bpage = list_entry(head, struct buffer_page, list);
1360		free_buffer_page(bpage);
1361	}
1362
1363	kfree(cpu_buffer);
1364}
1365
1366/**
1367 * __ring_buffer_alloc - allocate a new ring_buffer
1368 * @size: the size in bytes per cpu that is needed.
1369 * @flags: attributes to set for the ring buffer.
1370 *
1371 * Currently the only flag that is available is the RB_FL_OVERWRITE
1372 * flag. This flag means that the buffer will overwrite old data
1373 * when the buffer wraps. If this flag is not set, the buffer will
1374 * drop data when the tail hits the head.
1375 */
1376struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1377					struct lock_class_key *key)
1378{
1379	struct ring_buffer *buffer;
1380	long nr_pages;
1381	int bsize;
1382	int cpu;
1383	int ret;
1384
1385	/* keep it in its own cache line */
1386	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1387			 GFP_KERNEL);
1388	if (!buffer)
1389		return NULL;
1390
1391	if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1392		goto fail_free_buffer;
1393
1394	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1395	buffer->flags = flags;
1396	buffer->clock = trace_clock_local;
1397	buffer->reader_lock_key = key;
1398
1399	init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
1400	init_waitqueue_head(&buffer->irq_work.waiters);
1401
1402	/* need at least two pages */
1403	if (nr_pages < 2)
1404		nr_pages = 2;
1405
1406	buffer->cpus = nr_cpu_ids;
1407
1408	bsize = sizeof(void *) * nr_cpu_ids;
1409	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1410				  GFP_KERNEL);
1411	if (!buffer->buffers)
1412		goto fail_free_cpumask;
1413
1414	cpu = raw_smp_processor_id();
1415	cpumask_set_cpu(cpu, buffer->cpumask);
1416	buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
1417	if (!buffer->buffers[cpu])
1418		goto fail_free_buffers;
1419
1420	ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
1421	if (ret < 0)
1422		goto fail_free_buffers;
1423
1424	mutex_init(&buffer->mutex);
1425
1426	return buffer;
1427
1428 fail_free_buffers:
1429	for_each_buffer_cpu(buffer, cpu) {
1430		if (buffer->buffers[cpu])
1431			rb_free_cpu_buffer(buffer->buffers[cpu]);
1432	}
1433	kfree(buffer->buffers);
1434
1435 fail_free_cpumask:
1436	free_cpumask_var(buffer->cpumask);
1437
1438 fail_free_buffer:
1439	kfree(buffer);
1440	return NULL;
1441}
1442EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
1443
1444/**
1445 * ring_buffer_free - free a ring buffer.
1446 * @buffer: the buffer to free.
1447 */
1448void
1449ring_buffer_free(struct ring_buffer *buffer)
1450{
1451	int cpu;
1452
1453	cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
1454
1455	for_each_buffer_cpu(buffer, cpu)
1456		rb_free_cpu_buffer(buffer->buffers[cpu]);
1457
1458	kfree(buffer->buffers);
1459	free_cpumask_var(buffer->cpumask);
1460
1461	kfree(buffer);
1462}
1463EXPORT_SYMBOL_GPL(ring_buffer_free);
1464
1465void ring_buffer_set_clock(struct ring_buffer *buffer,
1466			   u64 (*clock)(void))
1467{
1468	buffer->clock = clock;
1469}
1470
1471void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs)
1472{
1473	buffer->time_stamp_abs = abs;
1474}
1475
1476bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer)
1477{
1478	return buffer->time_stamp_abs;
1479}
1480
1481static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1482
1483static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1484{
1485	return local_read(&bpage->entries) & RB_WRITE_MASK;
1486}
1487
1488static inline unsigned long rb_page_write(struct buffer_page *bpage)
1489{
1490	return local_read(&bpage->write) & RB_WRITE_MASK;
1491}
1492
1493static int
1494rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
1495{
1496	struct list_head *tail_page, *to_remove, *next_page;
1497	struct buffer_page *to_remove_page, *tmp_iter_page;
1498	struct buffer_page *last_page, *first_page;
1499	unsigned long nr_removed;
1500	unsigned long head_bit;
1501	int page_entries;
1502
1503	head_bit = 0;
1504
1505	raw_spin_lock_irq(&cpu_buffer->reader_lock);
1506	atomic_inc(&cpu_buffer->record_disabled);
1507	/*
1508	 * We don't race with the readers since we have acquired the reader
1509	 * lock. We also don't race with writers after disabling recording.
1510	 * This makes it easy to figure out the first and the last page to be
1511	 * removed from the list. We unlink all the pages in between including
1512	 * the first and last pages. This is done in a busy loop so that we
1513	 * lose the least number of traces.
1514	 * The pages are freed after we restart recording and unlock readers.
1515	 */
1516	tail_page = &cpu_buffer->tail_page->list;
1517
1518	/*
1519	 * tail page might be on reader page, we remove the next page
1520	 * from the ring buffer
1521	 */
1522	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1523		tail_page = rb_list_head(tail_page->next);
1524	to_remove = tail_page;
1525
1526	/* start of pages to remove */
1527	first_page = list_entry(rb_list_head(to_remove->next),
1528				struct buffer_page, list);
1529
1530	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
1531		to_remove = rb_list_head(to_remove)->next;
1532		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
1533	}
1534
1535	next_page = rb_list_head(to_remove)->next;
1536
1537	/*
1538	 * Now we remove all pages between tail_page and next_page.
1539	 * Make sure that we have head_bit value preserved for the
1540	 * next page
1541	 */
1542	tail_page->next = (struct list_head *)((unsigned long)next_page |
1543						head_bit);
1544	next_page = rb_list_head(next_page);
1545	next_page->prev = tail_page;
1546
1547	/* make sure pages points to a valid page in the ring buffer */
1548	cpu_buffer->pages = next_page;
1549
1550	/* update head page */
1551	if (head_bit)
1552		cpu_buffer->head_page = list_entry(next_page,
1553						struct buffer_page, list);
1554
1555	/*
1556	 * change read pointer to make sure any read iterators reset
1557	 * themselves
1558	 */
1559	cpu_buffer->read = 0;
1560
1561	/* pages are removed, resume tracing and then free the pages */
1562	atomic_dec(&cpu_buffer->record_disabled);
1563	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1564
1565	RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
1566
1567	/* last buffer page to remove */
1568	last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
1569				list);
1570	tmp_iter_page = first_page;
1571
1572	do {
1573		cond_resched();
1574
1575		to_remove_page = tmp_iter_page;
1576		rb_inc_page(cpu_buffer, &tmp_iter_page);
1577
1578		/* update the counters */
1579		page_entries = rb_page_entries(to_remove_page);
1580		if (page_entries) {
1581			/*
1582			 * If something was added to this page, it was full
1583			 * since it is not the tail page. So we deduct the
1584			 * bytes consumed in ring buffer from here.
1585			 * Increment overrun to account for the lost events.
1586			 */
1587			local_add(page_entries, &cpu_buffer->overrun);
1588			local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1589		}
1590
1591		/*
1592		 * We have already removed references to this list item, just
1593		 * free up the buffer_page and its page
1594		 */
1595		free_buffer_page(to_remove_page);
1596		nr_removed--;
1597
1598	} while (to_remove_page != last_page);
1599
1600	RB_WARN_ON(cpu_buffer, nr_removed);
1601
1602	return nr_removed == 0;
1603}
1604
1605static int
1606rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
1607{
1608	struct list_head *pages = &cpu_buffer->new_pages;
1609	int retries, success;
1610
1611	raw_spin_lock_irq(&cpu_buffer->reader_lock);
1612	/*
1613	 * We are holding the reader lock, so the reader page won't be swapped
1614	 * in the ring buffer. Now we are racing with the writer trying to
1615	 * move head page and the tail page.
1616	 * We are going to adapt the reader page update process where:
1617	 * 1. We first splice the start and end of list of new pages between
1618	 *    the head page and its previous page.
1619	 * 2. We cmpxchg the prev_page->next to point from head page to the
1620	 *    start of new pages list.
1621	 * 3. Finally, we update the head->prev to the end of new list.
1622	 *
1623	 * We will try this process 10 times, to make sure that we don't keep
1624	 * spinning.
1625	 */
1626	retries = 10;
1627	success = 0;
1628	while (retries--) {
1629		struct list_head *head_page, *prev_page, *r;
1630		struct list_head *last_page, *first_page;
1631		struct list_head *head_page_with_bit;
1632
1633		head_page = &rb_set_head_page(cpu_buffer)->list;
1634		if (!head_page)
1635			break;
1636		prev_page = head_page->prev;
1637
1638		first_page = pages->next;
1639		last_page  = pages->prev;
1640
1641		head_page_with_bit = (struct list_head *)
1642				     ((unsigned long)head_page | RB_PAGE_HEAD);
1643
1644		last_page->next = head_page_with_bit;
1645		first_page->prev = prev_page;
1646
1647		r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
1648
1649		if (r == head_page_with_bit) {
1650			/*
1651			 * yay, we replaced the page pointer to our new list,
 1652			 * now we just have to update the head page's prev
 1653			 * pointer to point to the end of the list
1654			 */
1655			head_page->prev = last_page;
1656			success = 1;
1657			break;
1658		}
1659	}
1660
1661	if (success)
1662		INIT_LIST_HEAD(pages);
1663	/*
1664	 * If we weren't successful in adding in new pages, warn and stop
1665	 * tracing
1666	 */
1667	RB_WARN_ON(cpu_buffer, !success);
1668	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1669
1670	/* free pages if they weren't inserted */
1671	if (!success) {
1672		struct buffer_page *bpage, *tmp;
1673		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1674					 list) {
1675			list_del_init(&bpage->list);
1676			free_buffer_page(bpage);
1677		}
1678	}
1679	return success;
1680}
1681
1682static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
1683{
1684	int success;
1685
1686	if (cpu_buffer->nr_pages_to_update > 0)
1687		success = rb_insert_pages(cpu_buffer);
1688	else
1689		success = rb_remove_pages(cpu_buffer,
1690					-cpu_buffer->nr_pages_to_update);
1691
1692	if (success)
1693		cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
1694}
1695
1696static void update_pages_handler(struct work_struct *work)
1697{
1698	struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
1699			struct ring_buffer_per_cpu, update_pages_work);
1700	rb_update_pages(cpu_buffer);
1701	complete(&cpu_buffer->update_done);
1702}
1703
1704/**
1705 * ring_buffer_resize - resize the ring buffer
1706 * @buffer: the buffer to resize.
1707 * @size: the new size.
1708 * @cpu_id: the cpu buffer to resize
1709 *
1710 * Minimum size is 2 * BUF_PAGE_SIZE.
1711 *
1712 * Returns 0 on success and < 0 on failure.
1713 */
1714int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1715			int cpu_id)
1716{
1717	struct ring_buffer_per_cpu *cpu_buffer;
1718	unsigned long nr_pages;
1719	int cpu, err = 0;
1720
1721	/*
1722	 * Always succeed at resizing a non-existent buffer:
1723	 */
1724	if (!buffer)
1725		return size;
1726
1727	/* Make sure the requested buffer exists */
1728	if (cpu_id != RING_BUFFER_ALL_CPUS &&
1729	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
1730		return size;
1731
1732	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1733
1734	/* we need a minimum of two pages */
1735	if (nr_pages < 2)
1736		nr_pages = 2;
1737
1738	size = nr_pages * BUF_PAGE_SIZE;
1739
1740	/*
1741	 * Don't succeed if resizing is disabled, as a reader might be
1742	 * manipulating the ring buffer and is expecting a sane state while
1743	 * this is true.
1744	 */
1745	if (atomic_read(&buffer->resize_disabled))
1746		return -EBUSY;
1747
1748	/* prevent another thread from changing buffer sizes */
1749	mutex_lock(&buffer->mutex);
1750
1751	if (cpu_id == RING_BUFFER_ALL_CPUS) {
1752		/* calculate the pages to update */
1753		for_each_buffer_cpu(buffer, cpu) {
1754			cpu_buffer = buffer->buffers[cpu];
1755
1756			cpu_buffer->nr_pages_to_update = nr_pages -
1757							cpu_buffer->nr_pages;
1758			/*
 1759			 * nothing more to do when removing pages, or if there is no update
1760			 */
1761			if (cpu_buffer->nr_pages_to_update <= 0)
1762				continue;
1763			/*
1764			 * to add pages, make sure all new pages can be
1765			 * allocated without receiving ENOMEM
1766			 */
1767			INIT_LIST_HEAD(&cpu_buffer->new_pages);
1768			if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1769						&cpu_buffer->new_pages, cpu)) {
1770				/* not enough memory for new pages */
1771				err = -ENOMEM;
1772				goto out_err;
1773			}
1774		}
1775
1776		get_online_cpus();
1777		/*
1778		 * Fire off all the required work handlers
1779		 * We can't schedule on offline CPUs, but it's not necessary
1780		 * since we can change their buffer sizes without any race.
1781		 */
1782		for_each_buffer_cpu(buffer, cpu) {
1783			cpu_buffer = buffer->buffers[cpu];
1784			if (!cpu_buffer->nr_pages_to_update)
1785				continue;
1786
1787			/* Can't run something on an offline CPU. */
1788			if (!cpu_online(cpu)) {
1789				rb_update_pages(cpu_buffer);
1790				cpu_buffer->nr_pages_to_update = 0;
1791			} else {
1792				schedule_work_on(cpu,
1793						&cpu_buffer->update_pages_work);
1794			}
1795		}
1796
1797		/* wait for all the updates to complete */
1798		for_each_buffer_cpu(buffer, cpu) {
1799			cpu_buffer = buffer->buffers[cpu];
1800			if (!cpu_buffer->nr_pages_to_update)
1801				continue;
1802
1803			if (cpu_online(cpu))
1804				wait_for_completion(&cpu_buffer->update_done);
1805			cpu_buffer->nr_pages_to_update = 0;
1806		}
1807
1808		put_online_cpus();
1809	} else {
1810		/* Make sure this CPU has been initialized */
1811		if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
1812			goto out;
1813
1814		cpu_buffer = buffer->buffers[cpu_id];
1815
1816		if (nr_pages == cpu_buffer->nr_pages)
1817			goto out;
1818
1819		cpu_buffer->nr_pages_to_update = nr_pages -
1820						cpu_buffer->nr_pages;
1821
1822		INIT_LIST_HEAD(&cpu_buffer->new_pages);
1823		if (cpu_buffer->nr_pages_to_update > 0 &&
1824			__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1825					    &cpu_buffer->new_pages, cpu_id)) {
1826			err = -ENOMEM;
1827			goto out_err;
1828		}
1829
1830		get_online_cpus();
1831
1832		/* Can't run something on an offline CPU. */
1833		if (!cpu_online(cpu_id))
1834			rb_update_pages(cpu_buffer);
1835		else {
1836			schedule_work_on(cpu_id,
1837					 &cpu_buffer->update_pages_work);
1838			wait_for_completion(&cpu_buffer->update_done);
1839		}
1840
1841		cpu_buffer->nr_pages_to_update = 0;
1842		put_online_cpus();
1843	}
1844
1845 out:
1846	/*
1847	 * The ring buffer resize can happen with the ring buffer
1848	 * enabled, so that the update disturbs the tracing as little
1849	 * as possible. But if the buffer is disabled, we do not need
1850	 * to worry about that, and we can take the time to verify
1851	 * that the buffer is not corrupt.
1852	 */
1853	if (atomic_read(&buffer->record_disabled)) {
1854		atomic_inc(&buffer->record_disabled);
1855		/*
1856		 * Even though the buffer was disabled, we must make sure
1857		 * that it is truly disabled before calling rb_check_pages.
1858		 * There could have been a race between checking
1859		 * record_disabled and incrementing it.
1860		 */
1861		synchronize_rcu();
1862		for_each_buffer_cpu(buffer, cpu) {
1863			cpu_buffer = buffer->buffers[cpu];
1864			rb_check_pages(cpu_buffer);
1865		}
1866		atomic_dec(&buffer->record_disabled);
1867	}
1868
1869	mutex_unlock(&buffer->mutex);
1870	return size;
1871
1872 out_err:
1873	for_each_buffer_cpu(buffer, cpu) {
1874		struct buffer_page *bpage, *tmp;
1875
1876		cpu_buffer = buffer->buffers[cpu];
1877		cpu_buffer->nr_pages_to_update = 0;
1878
1879		if (list_empty(&cpu_buffer->new_pages))
1880			continue;
1881
1882		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1883					list) {
1884			list_del_init(&bpage->list);
1885			free_buffer_page(bpage);
1886		}
1887	}
1888	mutex_unlock(&buffer->mutex);
1889	return err;
1890}
1891EXPORT_SYMBOL_GPL(ring_buffer_resize);
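/*
 * Example (illustrative sketch, not part of the original file): a
 * hypothetical caller doubling every per-CPU buffer to 2 MiB, where
 * "buffer" is assumed to be a previously allocated ring buffer. Note
 * that on success the call returns the page-rounded size, not 0.
 */
#if 0
	int ret;

	ret = ring_buffer_resize(buffer, 2 * 1024 * 1024,
				 RING_BUFFER_ALL_CPUS);
	if (ret < 0)
		pr_warn("ring buffer resize failed: %d\n", ret);
#endif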
1892
1893void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
1894{
1895	mutex_lock(&buffer->mutex);
1896	if (val)
1897		buffer->flags |= RB_FL_OVERWRITE;
1898	else
1899		buffer->flags &= ~RB_FL_OVERWRITE;
1900	mutex_unlock(&buffer->mutex);
1901}
1902EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
1903
1904static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
1905{
1906	return bpage->page->data + index;
1907}
1908
1909static __always_inline struct ring_buffer_event *
1910rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
1911{
1912	return __rb_page_index(cpu_buffer->reader_page,
1913			       cpu_buffer->reader_page->read);
1914}
1915
1916static __always_inline struct ring_buffer_event *
1917rb_iter_head_event(struct ring_buffer_iter *iter)
1918{
1919	return __rb_page_index(iter->head_page, iter->head);
1920}
1921
1922static __always_inline unsigned rb_page_commit(struct buffer_page *bpage)
1923{
1924	return local_read(&bpage->page->commit);
1925}
1926
1927/* Size is determined by what has been committed */
1928static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
1929{
1930	return rb_page_commit(bpage);
1931}
1932
1933static __always_inline unsigned
1934rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
1935{
1936	return rb_page_commit(cpu_buffer->commit_page);
1937}
1938
1939static __always_inline unsigned
1940rb_event_index(struct ring_buffer_event *event)
1941{
1942	unsigned long addr = (unsigned long)event;
1943
1944	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
1945}
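/*
 * Worked example (illustrative): assuming 4K pages and a 16 byte
 * buffer_data_page header (BUF_PAGE_HDR_SIZE on 64-bit), an event at
 * page offset 0x30 has index 0x30 - 0x10 = 0x20 into the page's
 * data[] area.
 */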
1946
1947static void rb_inc_iter(struct ring_buffer_iter *iter)
1948{
1949	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1950
1951	/*
1952	 * The iterator could be on the reader page (it starts there).
1953	 * But the head could have moved, since the reader was
1954	 * found. Check for this case and assign the iterator
1955	 * to the head page instead of next.
1956	 */
1957	if (iter->head_page == cpu_buffer->reader_page)
1958		iter->head_page = rb_set_head_page(cpu_buffer);
1959	else
1960		rb_inc_page(cpu_buffer, &iter->head_page);
1961
1962	iter->read_stamp = iter->head_page->page->time_stamp;
1963	iter->head = 0;
1964}
1965
1966/*
1967 * rb_handle_head_page - writer hit the head page
1968 *
1969 * Returns: +1 to retry page
1970 *           0 to continue
1971 *          -1 on error
1972 */
1973static int
1974rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
1975		    struct buffer_page *tail_page,
1976		    struct buffer_page *next_page)
1977{
1978	struct buffer_page *new_head;
1979	int entries;
1980	int type;
1981	int ret;
1982
1983	entries = rb_page_entries(next_page);
1984
1985	/*
1986	 * The hard part is here. We need to move the head
1987	 * forward, and protect against both readers on
1988	 * other CPUs and writers coming in via interrupts.
1989	 */
1990	type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
1991				       RB_PAGE_HEAD);
1992
1993	/*
1994	 * type can be one of four:
1995	 *  NORMAL - an interrupt already moved it for us
1996	 *  HEAD   - we are the first to get here.
1997	 *  UPDATE - we are the interrupt interrupting
1998	 *           a current move.
1999	 *  MOVED  - a reader on another CPU moved the next
2000	 *           pointer to its reader page. Give up
2001	 *           and try again.
2002	 */
2003
2004	switch (type) {
2005	case RB_PAGE_HEAD:
2006		/*
2007		 * We changed the head to UPDATE, thus
2008		 * it is our responsibility to update
2009		 * the counters.
2010		 */
2011		local_add(entries, &cpu_buffer->overrun);
2012		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
2013
2014		/*
2015		 * The entries will be zeroed out when we move the
2016		 * tail page.
2017		 */
2018
2019		/* still more to do */
2020		break;
2021
2022	case RB_PAGE_UPDATE:
2023		/*
2024		 * This is an interrupt that interrupted the
2025		 * previous update. Still more to do.
2026		 */
2027		break;
2028	case RB_PAGE_NORMAL:
2029		/*
2030		 * An interrupt came in before the update
2031		 * and processed this for us.
2032		 * Nothing left to do.
2033		 */
2034		return 1;
2035	case RB_PAGE_MOVED:
2036		/*
2037		 * The reader is on another CPU and just did
2038		 * a swap with our next_page.
2039		 * Try again.
2040		 */
2041		return 1;
2042	default:
2043		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
2044		return -1;
2045	}
2046
2047	/*
2048	 * Now that we are here, the old head pointer is
2049	 * set to UPDATE. This will keep the reader from
2050	 * swapping the head page with the reader page.
2051	 * The reader (on another CPU) will spin till
2052	 * we are finished.
2053	 *
2054	 * We just need to protect against interrupts
2055	 * doing the job. We will set the next pointer
2056	 * to HEAD. After that, we set the old pointer
2057	 * to NORMAL, but only if it was HEAD before;
2058	 * otherwise we are an interrupt, and only
2059	 * want the outermost commit to reset it.
2060	 */
2061	new_head = next_page;
2062	rb_inc_page(cpu_buffer, &new_head);
2063
2064	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
2065				    RB_PAGE_NORMAL);
2066
2067	/*
2068	 * Valid returns are:
2069	 *  HEAD   - an interrupt came in and already set it.
2070	 *  NORMAL - One of two things:
2071	 *            1) We really set it.
2072	 *            2) A bunch of interrupts came in and moved
2073	 *               the page forward again.
2074	 */
2075	switch (ret) {
2076	case RB_PAGE_HEAD:
2077	case RB_PAGE_NORMAL:
2078		/* OK */
2079		break;
2080	default:
2081		RB_WARN_ON(cpu_buffer, 1);
2082		return -1;
2083	}
2084
2085	/*
2086	 * It is possible that an interrupt came in,
2087	 * set the head up, then more interrupts came in
2088	 * and moved it again. When we get back here,
2089	 * the page would have been set to NORMAL but we
2090	 * just set it back to HEAD.
2091	 *
2092	 * How do you detect this? Well, if that happened
2093	 * the tail page would have moved.
2094	 */
2095	if (ret == RB_PAGE_NORMAL) {
2096		struct buffer_page *buffer_tail_page;
2097
2098		buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
2099		/*
2100		 * If the tail has moved past next, then we need
2101		 * to reset the pointer.
2102		 */
2103		if (buffer_tail_page != tail_page &&
2104		    buffer_tail_page != next_page)
2105			rb_head_page_set_normal(cpu_buffer, new_head,
2106						next_page,
2107						RB_PAGE_HEAD);
2108	}
2109
2110	/*
2111	 * If this was the outermost commit (the one that
2112	 * changed the original pointer from HEAD to UPDATE),
2113	 * then it is up to us to reset it to NORMAL.
2114	 */
2115	if (type == RB_PAGE_HEAD) {
2116		ret = rb_head_page_set_normal(cpu_buffer, next_page,
2117					      tail_page,
2118					      RB_PAGE_UPDATE);
2119		if (RB_WARN_ON(cpu_buffer,
2120			       ret != RB_PAGE_UPDATE))
2121			return -1;
2122	}
2123
2124	return 0;
2125}
2126
2127static inline void
2128rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2129	      unsigned long tail, struct rb_event_info *info)
2130{
2131	struct buffer_page *tail_page = info->tail_page;
2132	struct ring_buffer_event *event;
2133	unsigned long length = info->length;
2134
2135	/*
2136	 * Only the event that crossed the page boundary
2137	 * must fill the old tail_page with padding.
2138	 */
2139	if (tail >= BUF_PAGE_SIZE) {
2140		/*
2141		 * If the page was filled, then we still need
2142		 * to update the real_end. Reset it to zero
2143		 * and the reader will ignore it.
2144		 */
2145		if (tail == BUF_PAGE_SIZE)
2146			tail_page->real_end = 0;
2147
2148		local_sub(length, &tail_page->write);
2149		return;
2150	}
2151
2152	event = __rb_page_index(tail_page, tail);
2153
2154	/* account for padding bytes */
2155	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2156
2157	/*
2158	 * Save the original length in the metadata.
2159	 * The reader will use it to update the lost event
2160	 * counter.
2161	 */
2162	tail_page->real_end = tail;
2163
2164	/*
2165	 * If this event is bigger than the minimum size, then
2166	 * we need to be careful that we don't subtract the
2167	 * write counter enough to allow another writer to slip
2168	 * in on this page.
2169	 * We put in a discarded commit instead, to make sure
2170	 * that this space is not used again.
2171	 *
2172	 * If we are less than the minimum size, we don't need to
2173	 * worry about it.
2174	 */
2175	if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
2176		/* No room for any events */
2177
2178		/* Mark the rest of the page with padding */
2179		rb_event_set_padding(event);
2180
2181		/* Set the write back to the previous setting */
2182		local_sub(length, &tail_page->write);
2183		return;
2184	}
2185
2186	/* Put in a discarded event */
2187	event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
2188	event->type_len = RINGBUF_TYPE_PADDING;
2189	/* time delta must be non zero */
2190	event->time_delta = 1;
2191
2192	/* Set write to end of buffer */
2193	length = (tail + length) - BUF_PAGE_SIZE;
2194	local_sub(length, &tail_page->write);
2195}
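/*
 * Worked example (illustrative, assuming 4K pages so BUF_PAGE_SIZE is
 * 4080 and RB_EVNT_HDR_SIZE is 4): a 32 byte event reserved at
 * tail == 4064 makes write == 4096 > BUF_PAGE_SIZE. The discarded
 * padding event gets array[0] = (4080 - 4064) - 4 = 12, covering the
 * last 16 bytes of the page, and write is pulled back by
 * (4064 + 32) - 4080 = 16 to land exactly on the page end.
 */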
2196
2197static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
2198
2199/*
2200 * This is the slow path, force gcc not to inline it.
2201 */
2202static noinline struct ring_buffer_event *
2203rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2204	     unsigned long tail, struct rb_event_info *info)
2205{
2206	struct buffer_page *tail_page = info->tail_page;
2207	struct buffer_page *commit_page = cpu_buffer->commit_page;
2208	struct ring_buffer *buffer = cpu_buffer->buffer;
2209	struct buffer_page *next_page;
2210	int ret;
2211
2212	next_page = tail_page;
2213
2214	rb_inc_page(cpu_buffer, &next_page);
2215
2216	/*
2217	 * If for some reason, we had an interrupt storm that made
2218	 * it all the way around the buffer, bail, and warn
2219	 * about it.
2220	 */
2221	if (unlikely(next_page == commit_page)) {
2222		local_inc(&cpu_buffer->commit_overrun);
2223		goto out_reset;
2224	}
2225
2226	/*
2227	 * This is where the fun begins!
2228	 *
2229	 * We are fighting against races between a reader that
2230	 * could be on another CPU trying to swap its reader
2231	 * page with the buffer head.
2232	 *
2233	 * We are also fighting against interrupts coming in and
2234	 * moving the head or tail on us as well.
2235	 *
2236	 * If the next page is the head page then we have filled
2237	 * the buffer, unless the commit page is still on the
2238	 * reader page.
2239	 */
2240	if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
2241
2242		/*
2243		 * If the commit is not on the reader page, then
2244		 * move the header page.
2245		 */
2246		if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2247			/*
2248			 * If we are not in overwrite mode,
2249			 * this is easy, just stop here.
2250			 */
2251			if (!(buffer->flags & RB_FL_OVERWRITE)) {
2252				local_inc(&cpu_buffer->dropped_events);
2253				goto out_reset;
2254			}
2255
2256			ret = rb_handle_head_page(cpu_buffer,
2257						  tail_page,
2258						  next_page);
2259			if (ret < 0)
2260				goto out_reset;
2261			if (ret)
2262				goto out_again;
2263		} else {
2264			/*
2265			 * We need to be careful here too. The
2266			 * commit page could still be on the reader
2267			 * page. We could have a small buffer, and
2268			 * have filled up the buffer with events
2269			 * from interrupts and such, and wrapped.
2270			 *
2271			 * Note, if the tail page is also on the
2272			 * reader_page, we let it move out.
2273			 */
2274			if (unlikely((cpu_buffer->commit_page !=
2275				      cpu_buffer->tail_page) &&
2276				     (cpu_buffer->commit_page ==
2277				      cpu_buffer->reader_page))) {
2278				local_inc(&cpu_buffer->commit_overrun);
2279				goto out_reset;
2280			}
2281		}
2282	}
2283
2284	rb_tail_page_update(cpu_buffer, tail_page, next_page);
2285
2286 out_again:
2287
2288	rb_reset_tail(cpu_buffer, tail, info);
2289
2290	/* Commit what we have for now. */
2291	rb_end_commit(cpu_buffer);
2292	/* rb_end_commit() decs committing */
2293	local_inc(&cpu_buffer->committing);
2294
2295	/* fail and let the caller try again */
2296	return ERR_PTR(-EAGAIN);
2297
2298 out_reset:
2299	/* reset write */
2300	rb_reset_tail(cpu_buffer, tail, info);
2301
2302	return NULL;
2303}
2304
2305/* Slow path, do not inline */
2306static noinline struct ring_buffer_event *
2307rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs)
2308{
2309	if (abs)
2310		event->type_len = RINGBUF_TYPE_TIME_STAMP;
2311	else
2312		event->type_len = RINGBUF_TYPE_TIME_EXTEND;
2313
2314	/* Not the first event on the page, or an absolute timestamp? */
2315	if (abs || rb_event_index(event)) {
2316		event->time_delta = delta & TS_MASK;
2317		event->array[0] = delta >> TS_SHIFT;
2318	} else {
2319		/* nope, just zero it */
2320		event->time_delta = 0;
2321		event->array[0] = 0;
2322	}
2323
2324	return skip_time_extend(event);
2325}
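/*
 * Worked example (illustrative): TS_SHIFT is 27, so a delta of
 * 2^32 + 5 is stored as time_delta = 5 (the low 27 bits) and
 * array[0] = (2^32 + 5) >> 27 = 32. The reader reconstructs the
 * delta as (array[0] << TS_SHIFT) + time_delta.
 */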
2326
2327static inline bool rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
2328				     struct ring_buffer_event *event);
2329
2330/**
2331 * rb_update_event - update event type and data
2332 * @cpu_buffer: The per CPU buffer the event is on
2333 * @event: the event to update
2334 * @info: The info holding the event length and time delta
2335 *
2336 * Update the type and data fields of the event. The length
2337 * is the actual size that is written to the ring buffer,
2338 * and with this, we can determine what to place into the
2339 * data field.
2340 */
2341static void
2342rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
2343		struct ring_buffer_event *event,
2344		struct rb_event_info *info)
2345{
2346	unsigned length = info->length;
2347	u64 delta = info->delta;
2348
2349	/* Only a commit updates the timestamp */
2350	if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
2351		delta = 0;
2352
2353	/*
2354	 * If we need to add a timestamp, then we
2355	 * add it to the start of the reserved space.
2356	 */
2357	if (unlikely(info->add_timestamp)) {
2358		bool abs = ring_buffer_time_stamp_abs(cpu_buffer->buffer);
2359
2360		event = rb_add_time_stamp(event, info->delta, abs);
2361		length -= RB_LEN_TIME_EXTEND;
2362		delta = 0;
2363	}
2364
2365	event->time_delta = delta;
2366	length -= RB_EVNT_HDR_SIZE;
2367	if (length > RB_MAX_SMALL_DATA) {
2368		event->type_len = 0;
2369		event->array[0] = length;
2370	} else
2371		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
2372}
2373
2374static unsigned rb_calculate_event_length(unsigned length)
2375{
2376	struct ring_buffer_event event; /* Used only for sizeof array */
2377
2378	/* zero length can cause confusion */
2379	if (!length)
2380		length++;
2381
2382	if (length > RB_MAX_SMALL_DATA)
2383		length += sizeof(event.array[0]);
2384
2385	length += RB_EVNT_HDR_SIZE;
2386	length = ALIGN(length, RB_ALIGNMENT);
2387
2388	/*
2389	 * In case the time delta is larger than the 27 bits for it
2390	 * in the header, we need to add a timestamp. If another
2391	 * event comes in when trying to discard this one to increase
2392	 * the length, then the timestamp will be added in the allocated
2393	 * space of this event. If length is bigger than the size needed
2394	 * for the TIME_EXTEND, then padding has to be used. The events
2395	 * length must be either RB_LEN_TIME_EXTEND, or greater than or equal
2396	 * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding.
2397	 * As length is a multiple of 4, we only need to worry if it
2398	 * is 12 (RB_LEN_TIME_EXTEND + 4).
2399	 */
2400	if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
2401		length += RB_ALIGNMENT;
2402
2403	return length;
2404}
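/*
 * Worked examples (illustrative, with RB_ALIGNMENT = 4 and
 * RB_EVNT_HDR_SIZE = 4): a 3 byte request becomes 3 + 4 = 7, aligned
 * up to 8. A 5 byte request becomes 5 + 4 = 9, aligned up to
 * 12 == RB_LEN_TIME_EXTEND + 4, so the check above bumps it to 16.
 * A request above RB_MAX_SMALL_DATA also gains the array[0] length
 * word: 120 bytes becomes 120 + 4 + 4 = 128.
 */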
2405
2406#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2407static inline bool sched_clock_stable(void)
2408{
2409	return true;
2410}
2411#endif
2412
2413static inline int
2414rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2415		  struct ring_buffer_event *event)
2416{
2417	unsigned long new_index, old_index;
2418	struct buffer_page *bpage;
2419	unsigned long index;
2420	unsigned long addr;
2421
2422	new_index = rb_event_index(event);
2423	old_index = new_index + rb_event_ts_length(event);
2424	addr = (unsigned long)event;
2425	addr &= PAGE_MASK;
2426
2427	bpage = READ_ONCE(cpu_buffer->tail_page);
2428
2429	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2430		unsigned long write_mask =
2431			local_read(&bpage->write) & ~RB_WRITE_MASK;
2432		unsigned long event_length = rb_event_length(event);
2433		/*
2434		 * This is on the tail page. It is possible that
2435		 * a write could come in and move the tail page
2436		 * and write to the next page. That is fine
2437		 * because we just shorten what is on this page.
2438		 */
2439		old_index += write_mask;
2440		new_index += write_mask;
2441		index = local_cmpxchg(&bpage->write, old_index, new_index);
2442		if (index == old_index) {
2443			/* update counters */
2444			local_sub(event_length, &cpu_buffer->entries_bytes);
2445			return 1;
2446		}
2447	}
2448
2449	/* could not discard */
2450	return 0;
2451}
2452
2453static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2454{
2455	local_inc(&cpu_buffer->committing);
2456	local_inc(&cpu_buffer->commits);
2457}
2458
2459static __always_inline void
2460rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
2461{
2462	unsigned long max_count;
2463
2464	/*
2465	 * We only race with interrupts and NMIs on this CPU.
2466	 * If we own the commit event, then we can commit
2467	 * all others that interrupted us, since the interruptions
2468	 * are in stack format (they finish before they come
2469	 * back to us). This allows us to do a simple loop to
2470	 * assign the commit to the tail.
2471	 */
2472 again:
2473	max_count = cpu_buffer->nr_pages * 100;
2474
2475	while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
2476		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
2477			return;
2478		if (RB_WARN_ON(cpu_buffer,
2479			       rb_is_reader_page(cpu_buffer->tail_page)))
2480			return;
2481		local_set(&cpu_buffer->commit_page->page->commit,
2482			  rb_page_write(cpu_buffer->commit_page));
2483		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
2484		/* Only update the write stamp if the page has an event */
2485		if (rb_page_write(cpu_buffer->commit_page))
2486			cpu_buffer->write_stamp =
2487				cpu_buffer->commit_page->page->time_stamp;
2488		/* add barrier to keep gcc from optimizing too much */
2489		barrier();
2490	}
2491	while (rb_commit_index(cpu_buffer) !=
2492	       rb_page_write(cpu_buffer->commit_page)) {
2493
2494		local_set(&cpu_buffer->commit_page->page->commit,
2495			  rb_page_write(cpu_buffer->commit_page));
2496		RB_WARN_ON(cpu_buffer,
2497			   local_read(&cpu_buffer->commit_page->page->commit) &
2498			   ~RB_WRITE_MASK);
2499		barrier();
2500	}
2501
2502	/* again, keep gcc from optimizing */
2503	barrier();
2504
2505	/*
2506	 * If an interrupt came in just after the first while loop
2507	 * and pushed the tail page forward, we will be left with
2508	 * a dangling commit that will never go forward.
2509	 */
2510	if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
2511		goto again;
2512}
2513
2514static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2515{
2516	unsigned long commits;
2517
2518	if (RB_WARN_ON(cpu_buffer,
2519		       !local_read(&cpu_buffer->committing)))
2520		return;
2521
2522 again:
2523	commits = local_read(&cpu_buffer->commits);
2524	/* synchronize with interrupts */
2525	barrier();
2526	if (local_read(&cpu_buffer->committing) == 1)
2527		rb_set_commit_to_write(cpu_buffer);
2528
2529	local_dec(&cpu_buffer->committing);
2530
2531	/* synchronize with interrupts */
2532	barrier();
2533
2534	/*
2535	 * Need to account for interrupts coming in between the
2536	 * updating of the commit page and the clearing of the
2537	 * committing counter.
2538	 */
2539	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2540	    !local_read(&cpu_buffer->committing)) {
2541		local_inc(&cpu_buffer->committing);
2542		goto again;
2543	}
2544}
2545
2546static inline void rb_event_discard(struct ring_buffer_event *event)
2547{
2548	if (extended_time(event))
2549		event = skip_time_extend(event);
2550
2551	/* array[0] holds the actual length for the discarded event */
2552	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2553	event->type_len = RINGBUF_TYPE_PADDING;
2554	/* time delta must be non zero */
2555	if (!event->time_delta)
2556		event->time_delta = 1;
2557}
2558
2559static __always_inline bool
2560rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
2561		   struct ring_buffer_event *event)
2562{
2563	unsigned long addr = (unsigned long)event;
2564	unsigned long index;
2565
2566	index = rb_event_index(event);
2567	addr &= PAGE_MASK;
2568
2569	return cpu_buffer->commit_page->page == (void *)addr &&
2570		rb_commit_index(cpu_buffer) == index;
2571}
2572
2573static __always_inline void
2574rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2575		      struct ring_buffer_event *event)
2576{
2577	u64 delta;
2578
2579	/*
2580	 * The first event in the commit queue updates the
2581	 * time stamp.
2582	 */
2583	if (rb_event_is_commit(cpu_buffer, event)) {
2584		/*
2585		 * A commit event that is first on a page
2586		 * updates the write timestamp with the page stamp
2587		 */
2588		if (!rb_event_index(event))
2589			cpu_buffer->write_stamp =
2590				cpu_buffer->commit_page->page->time_stamp;
2591		else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
2592			delta = ring_buffer_event_time_stamp(event);
2593			cpu_buffer->write_stamp += delta;
2594		} else if (event->type_len == RINGBUF_TYPE_TIME_STAMP) {
2595			delta = ring_buffer_event_time_stamp(event);
2596			cpu_buffer->write_stamp = delta;
2597		} else
2598			cpu_buffer->write_stamp += event->time_delta;
2599	}
2600}
2601
2602static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2603		      struct ring_buffer_event *event)
2604{
2605	local_inc(&cpu_buffer->entries);
2606	rb_update_write_stamp(cpu_buffer, event);
2607	rb_end_commit(cpu_buffer);
2608}
2609
2610static __always_inline void
2611rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
2612{
2613	size_t nr_pages;
2614	size_t dirty;
2615	size_t full;
2616
2617	if (buffer->irq_work.waiters_pending) {
2618		buffer->irq_work.waiters_pending = false;
2619		/* irq_work_queue() supplies its own memory barriers */
2620		irq_work_queue(&buffer->irq_work.work);
2621	}
2622
2623	if (cpu_buffer->irq_work.waiters_pending) {
2624		cpu_buffer->irq_work.waiters_pending = false;
2625		/* irq_work_queue() supplies its own memory barriers */
2626		irq_work_queue(&cpu_buffer->irq_work.work);
2627	}
2628
2629	if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched))
2630		return;
2631
2632	if (cpu_buffer->reader_page == cpu_buffer->commit_page)
2633		return;
2634
2635	if (!cpu_buffer->irq_work.full_waiters_pending)
2636		return;
2637
2638	cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
2639
2640	full = cpu_buffer->shortest_full;
2641	nr_pages = cpu_buffer->nr_pages;
2642	dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu);
2643	if (full && nr_pages && (dirty * 100) <= full * nr_pages)
2644		return;
2645
2646	cpu_buffer->irq_work.wakeup_full = true;
2647	cpu_buffer->irq_work.full_waiters_pending = false;
2648	/* irq_work_queue() supplies its own memory barriers */
2649	irq_work_queue(&cpu_buffer->irq_work.work);
2650}
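/*
 * Worked example (illustrative): with nr_pages = 16 and a waiter that
 * asked for shortest_full = 25 (percent), dirty = 3 gives
 * 3 * 100 = 300 <= 25 * 16 = 400, so the wakeup is deferred; once
 * dirty reaches 5, 500 > 400 and the full waiters are woken.
 */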
2651
2652/*
2653 * The lock and unlock are done within a preempt disable section.
2654 * The current_context per_cpu variable can only be modified
2655 * by the current task between lock and unlock. But it can
2656 * be modified more than once via an interrupt. To pass this
2657 * information from the lock to the unlock without having to
2658 * access the 'in_interrupt()' functions again (which do show
2659 * a bit of overhead in something as critical as function tracing),
2660 * we use a bitmask trick.
2661 *
2662 *  bit 0 =  NMI context
2663 *  bit 1 =  IRQ context
2664 *  bit 2 =  SoftIRQ context
2665 *  bit 3 =  normal context.
2666 *
2667 * This works because this is the order of contexts that can
2668 * preempt other contexts. A SoftIRQ never preempts an IRQ
2669 * context.
2670 *
2671 * When the context is determined, the corresponding bit is
2672 * checked and set (if it was set, then a recursion of that context
2673 * happened).
2674 *
2675 * On unlock, we need to clear this bit. To do so, just subtract
2676 * 1 from the current_context and AND it to itself.
2677 *
2678 * (binary)
2679 *  101 - 1 = 100
2680 *  101 & 100 = 100 (clearing bit zero)
2681 *
2682 *  1010 - 1 = 1001
2683 *  1010 & 1001 = 1000 (clearing bit 1)
2684 *
2685 * The least significant bit can be cleared this way, and it
2686 * just so happens that it is the same bit corresponding to
2687 * the current context.
2688 */
2689
2690static __always_inline int
2691trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
2692{
2693	unsigned int val = cpu_buffer->current_context;
2694	unsigned long pc = preempt_count();
2695	int bit;
2696
2697	if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
2698		bit = RB_CTX_NORMAL;
2699	else
2700		bit = pc & NMI_MASK ? RB_CTX_NMI :
2701			pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
2702
2703	if (unlikely(val & (1 << (bit + cpu_buffer->nest))))
2704		return 1;
2705
2706	val |= (1 << (bit + cpu_buffer->nest));
2707	cpu_buffer->current_context = val;
2708
2709	return 0;
2710}
2711
2712static __always_inline void
2713trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
2714{
2715	cpu_buffer->current_context &=
2716		cpu_buffer->current_context - (1 << cpu_buffer->nest);
2717}
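/*
 * Worked example (illustrative, nest == 0): a write in normal context
 * sets bit 3, current_context = 1000 (binary). An IRQ that traces
 * before the write finishes sets bit 1: 1010. The IRQ's unlock
 * computes 1010 & (1010 - 1) = 1000, clearing only its own bit; the
 * interrupted writer's unlock then clears bit 3 the same way.
 */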
2718
2719/* The recursive locking above uses 4 bits */
2720#define NESTED_BITS 4
2721
2722/**
2723 * ring_buffer_nest_start - Allow tracing while nested
2724 * @buffer: The ring buffer to modify
2725 *
2726 * The ring buffer has a safety mechanism to prevent recursion.
2727 * But there may be a case where a trace needs to be done while
2728 * tracing something else. In this case, calling this function
2729 * will allow another ring_buffer_lock_reserve() to nest within a
2730 * currently active one.
2731 *
2732 * Call this function before calling another ring_buffer_lock_reserve() and
2733 * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
2734 */
2735void ring_buffer_nest_start(struct ring_buffer *buffer)
2736{
2737	struct ring_buffer_per_cpu *cpu_buffer;
2738	int cpu;
2739
2740	/* Enabled by ring_buffer_nest_end() */
2741	preempt_disable_notrace();
2742	cpu = raw_smp_processor_id();
2743	cpu_buffer = buffer->buffers[cpu];
2744	/* This is the shift value for the above recursive locking */
2745	cpu_buffer->nest += NESTED_BITS;
2746}
2747
2748/**
2749 * ring_buffer_nest_end - End allowing tracing while nested
2750 * @buffer: The ring buffer to modify
2751 *
2752 * Must be called after ring_buffer_nest_start() and after the
2753 * ring_buffer_unlock_commit().
2754 */
2755void ring_buffer_nest_end(struct ring_buffer *buffer)
2756{
2757	struct ring_buffer_per_cpu *cpu_buffer;
2758	int cpu;
2759
2760	/* disabled by ring_buffer_nest_start() */
2761	cpu = raw_smp_processor_id();
2762	cpu_buffer = buffer->buffers[cpu];
2763	/* This is the shift value for the above recursive locking */
2764	cpu_buffer->nest -= NESTED_BITS;
2765	preempt_enable_notrace();
2766}
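/*
 * Example (illustrative sketch, not part of the original file): a
 * hypothetical callback that must emit an event while a
 * ring_buffer_lock_reserve() is already active on this CPU.
 */
#if 0
	ring_buffer_nest_start(buffer);
	event = ring_buffer_lock_reserve(buffer, sizeof(u32));
	if (event) {
		*(u32 *)ring_buffer_event_data(event) = 1;	/* payload */
		ring_buffer_unlock_commit(buffer, event);
	}
	ring_buffer_nest_end(buffer);
#endif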
2767
2768/**
2769 * ring_buffer_unlock_commit - commit a reserved event
2770 * @buffer: The buffer to commit to
2771 * @event: The event pointer to commit.
2772 *
2773 * This commits the data to the ring buffer, and releases any locks held.
2774 *
2775 * Must be paired with ring_buffer_lock_reserve.
2776 */
2777int ring_buffer_unlock_commit(struct ring_buffer *buffer,
2778			      struct ring_buffer_event *event)
2779{
2780	struct ring_buffer_per_cpu *cpu_buffer;
2781	int cpu = raw_smp_processor_id();
2782
2783	cpu_buffer = buffer->buffers[cpu];
2784
2785	rb_commit(cpu_buffer, event);
2786
2787	rb_wakeups(buffer, cpu_buffer);
2788
2789	trace_recursive_unlock(cpu_buffer);
2790
2791	preempt_enable_notrace();
2792
2793	return 0;
2794}
2795EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
2796
2797static noinline void
2798rb_handle_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2799		    struct rb_event_info *info)
2800{
2801	WARN_ONCE(info->delta > (1ULL << 59),
2802		  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
2803		  (unsigned long long)info->delta,
2804		  (unsigned long long)info->ts,
2805		  (unsigned long long)cpu_buffer->write_stamp,
2806		  sched_clock_stable() ? "" :
2807		  "If you just came from a suspend/resume,\n"
2808		  "please switch to the trace global clock:\n"
2809		  "  echo global > /sys/kernel/debug/tracing/trace_clock\n"
2810		  "or add trace_clock=global to the kernel command line\n");
2811	info->add_timestamp = 1;
2812}
2813
2814static struct ring_buffer_event *
2815__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
2816		  struct rb_event_info *info)
2817{
2818	struct ring_buffer_event *event;
2819	struct buffer_page *tail_page;
2820	unsigned long tail, write;
2821
2822	/*
2823	 * If the time delta since the last event is too big to
2824	 * hold in the time field of the event, then we append a
2825	 * TIME EXTEND event ahead of the data event.
2826	 */
2827	if (unlikely(info->add_timestamp))
2828		info->length += RB_LEN_TIME_EXTEND;
2829
2830	/* Don't let the compiler play games with cpu_buffer->tail_page */
2831	tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
2832	write = local_add_return(info->length, &tail_page->write);
2833
2834	/* set write to only the index of the write */
2835	write &= RB_WRITE_MASK;
2836	tail = write - info->length;
2837
2838	/*
2839	 * If this is the first commit on the page, then it has the same
2840	 * timestamp as the page itself.
2841	 */
2842	if (!tail && !ring_buffer_time_stamp_abs(cpu_buffer->buffer))
2843		info->delta = 0;
2844
2845	/* See if we shot past the end of this buffer page */
2846	if (unlikely(write > BUF_PAGE_SIZE))
2847		return rb_move_tail(cpu_buffer, tail, info);
2848
2849	/* We reserved something on the buffer */
2850
2851	event = __rb_page_index(tail_page, tail);
2852	rb_update_event(cpu_buffer, event, info);
2853
2854	local_inc(&tail_page->entries);
2855
2856	/*
2857	 * If this is the first commit on the page, then update
2858	 * its timestamp.
2859	 */
2860	if (!tail)
2861		tail_page->page->time_stamp = info->ts;
2862
2863	/* account for these added bytes */
2864	local_add(info->length, &cpu_buffer->entries_bytes);
2865
2866	return event;
2867}
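/*
 * Worked example (illustrative, 4K pages so BUF_PAGE_SIZE is 4080):
 * with tail_page->write at 4060, reserving a 32 byte event returns
 * write = 4092 and tail = 4060. Since 4092 > 4080 the event crossed
 * the page boundary, so rb_move_tail() pads out the old page and the
 * caller retries on the next page.
 */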
2868
2869static __always_inline struct ring_buffer_event *
2870rb_reserve_next_event(struct ring_buffer *buffer,
2871		      struct ring_buffer_per_cpu *cpu_buffer,
2872		      unsigned long length)
2873{
2874	struct ring_buffer_event *event;
2875	struct rb_event_info info;
2876	int nr_loops = 0;
2877	u64 diff;
2878
2879	rb_start_commit(cpu_buffer);
2880
2881#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2882	/*
2883	 * Due to the ability to swap a cpu buffer out of a ring buffer,
2884	 * it is possible it was swapped before we committed.
2885	 * (committing stops a swap). We check for it here and
2886	 * if it happened, we have to fail the write.
2887	 */
2888	barrier();
2889	if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) {
2890		local_dec(&cpu_buffer->committing);
2891		local_dec(&cpu_buffer->commits);
2892		return NULL;
2893	}
2894#endif
2895
2896	info.length = rb_calculate_event_length(length);
2897 again:
2898	info.add_timestamp = 0;
2899	info.delta = 0;
2900
2901	/*
2902	 * We allow for interrupts to reenter here and do a trace.
2903	 * If one does, it will cause this original code to loop
2904	 * back here. Even with heavy interrupts happening, this
2905	 * should only happen a few times in a row. If this happens
2906	 * 1000 times in a row, there must be either an interrupt
2907	 * storm or we have something buggy.
2908	 * Bail!
2909	 */
2910	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2911		goto out_fail;
2912
2913	info.ts = rb_time_stamp(cpu_buffer->buffer);
2914	diff = info.ts - cpu_buffer->write_stamp;
2915
2916	/* make sure this diff is calculated here */
2917	barrier();
2918
2919	if (ring_buffer_time_stamp_abs(buffer)) {
2920		info.delta = info.ts;
2921		rb_handle_timestamp(cpu_buffer, &info);
2922	} else if (likely(info.ts >= cpu_buffer->write_stamp)) {
2923		/* Did the write stamp get updated already? */
2924		info.delta = diff;
2925		if (unlikely(test_time_stamp(info.delta)))
2926			rb_handle_timestamp(cpu_buffer, &info);
2927	}
2928
2929	event = __rb_reserve_next(cpu_buffer, &info);
2930
2931	if (unlikely(PTR_ERR(event) == -EAGAIN)) {
2932		if (info.add_timestamp)
2933			info.length -= RB_LEN_TIME_EXTEND;
2934		goto again;
2935	}
2936
2937	if (!event)
2938		goto out_fail;
2939
2940	return event;
2941
2942 out_fail:
2943	rb_end_commit(cpu_buffer);
2944	return NULL;
2945}
2946
2947/**
2948 * ring_buffer_lock_reserve - reserve a part of the buffer
2949 * @buffer: the ring buffer to reserve from
2950 * @length: the length of the data to reserve (excluding event header)
2951 *
2952 * Returns a reserved event on the ring buffer to copy directly to.
2953 * The user of this interface will need to get the body to write into
2954 * and can use the ring_buffer_event_data() interface.
2955 *
2956 * The length is the length of the data needed, not the event length
2957 * which also includes the event header.
2958 *
2959 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
2960 * If NULL is returned, then nothing has been allocated or locked.
2961 */
2962struct ring_buffer_event *
2963ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
2964{
2965	struct ring_buffer_per_cpu *cpu_buffer;
2966	struct ring_buffer_event *event;
2967	int cpu;
2968
2969	/* If we are tracing schedule, we don't want to recurse */
2970	preempt_disable_notrace();
2971
2972	if (unlikely(atomic_read(&buffer->record_disabled)))
2973		goto out;
2974
2975	cpu = raw_smp_processor_id();
2976
2977	if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
2978		goto out;
2979
2980	cpu_buffer = buffer->buffers[cpu];
2981
2982	if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
2983		goto out;
2984
2985	if (unlikely(length > BUF_MAX_DATA_SIZE))
2986		goto out;
2987
2988	if (unlikely(trace_recursive_lock(cpu_buffer)))
2989		goto out;
2990
2991	event = rb_reserve_next_event(buffer, cpu_buffer, length);
2992	if (!event)
2993		goto out_unlock;
2994
2995	return event;
2996
2997 out_unlock:
2998	trace_recursive_unlock(cpu_buffer);
2999 out:
3000	preempt_enable_notrace();
3001	return NULL;
3002}
3003EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
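/*
 * Example (illustrative sketch, not part of the original file): the
 * reserve/commit pairing described above, writing one hypothetical
 * u64 payload from a context where "buffer" is already set up.
 */
#if 0
	struct ring_buffer_event *event;
	u64 *body;

	event = ring_buffer_lock_reserve(buffer, sizeof(*body));
	if (event) {
		body = ring_buffer_event_data(event);
		*body = 42;
		ring_buffer_unlock_commit(buffer, event);
	}
#endif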
3004
3005/*
3006 * Decrement the entry count of the page that an event is on.
3007 * The event does not even need to exist, only the pointer
3008 * to the page it is on. This may only be called before the commit
3009 * takes place.
3010 */
3011static inline void
3012rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
3013		   struct ring_buffer_event *event)
3014{
3015	unsigned long addr = (unsigned long)event;
3016	struct buffer_page *bpage = cpu_buffer->commit_page;
3017	struct buffer_page *start;
3018
3019	addr &= PAGE_MASK;
3020
3021	/* Do the likely case first */
3022	if (likely(bpage->page == (void *)addr)) {
3023		local_dec(&bpage->entries);
3024		return;
3025	}
3026
3027	/*
3028	 * Because the commit page may be on the reader page we
3029	 * start with the next page and check the end loop there.
3030	 */
3031	rb_inc_page(cpu_buffer, &bpage);
3032	start = bpage;
3033	do {
3034		if (bpage->page == (void *)addr) {
3035			local_dec(&bpage->entries);
3036			return;
3037		}
3038		rb_inc_page(cpu_buffer, &bpage);
3039	} while (bpage != start);
3040
3041	/* commit not part of this buffer?? */
3042	RB_WARN_ON(cpu_buffer, 1);
3043}
3044
3045/**
3046 * ring_buffer_commit_discard - discard an event that has not been committed
3047 * @buffer: the ring buffer
3048 * @event: non committed event to discard
3049 *
3050 * Sometimes an event that is in the ring buffer needs to be ignored.
3051 * This function lets the user discard an event in the ring buffer
3052 * and then that event will not be read later.
3053 *
3054 * This function only works if it is called before the item has been
3055 * committed. It will try to free the event from the ring buffer
3056 * if another event has not been added behind it.
3057 *
3058 * If another event has been added behind it, it will set the event
3059 * up as discarded, and perform the commit.
3060 *
3061 * If this function is called, do not call ring_buffer_unlock_commit on
3062 * the event.
3063 */
3064void ring_buffer_discard_commit(struct ring_buffer *buffer,
3065				struct ring_buffer_event *event)
3066{
3067	struct ring_buffer_per_cpu *cpu_buffer;
3068	int cpu;
3069
3070	/* The event is discarded regardless */
3071	rb_event_discard(event);
3072
3073	cpu = smp_processor_id();
3074	cpu_buffer = buffer->buffers[cpu];
3075
3076	/*
3077	 * This must only be called if the event has not been
3078	 * committed yet. Thus we can assume that preemption
3079	 * is still disabled.
3080	 */
3081	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
3082
3083	rb_decrement_entry(cpu_buffer, event);
3084	if (rb_try_to_discard(cpu_buffer, event))
3085		goto out;
3086
3087	/*
3088	 * The commit is still visible by the reader, so we
3089	 * must still update the timestamp.
3090	 */
3091	rb_update_write_stamp(cpu_buffer, event);
3092 out:
3093	rb_end_commit(cpu_buffer);
3094
3095	trace_recursive_unlock(cpu_buffer);
3096
3097	preempt_enable_notrace();
3098
3099}
3100EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
3101
3102/**
3103 * ring_buffer_write - write data to the buffer without reserving
3104 * @buffer: The ring buffer to write to.
3105 * @length: The length of the data being written (excluding the event header)
3106 * @data: The data to write to the buffer.
3107 *
3108 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
3109 * one function. If you already have the data to write to the buffer, it
3110 * may be easier to simply call this function.
3111 *
3112 * Note, like ring_buffer_lock_reserve, the length is the length of the data
3113 * and not the length of the event which would hold the header.
3114 */
3115int ring_buffer_write(struct ring_buffer *buffer,
3116		      unsigned long length,
3117		      void *data)
3118{
3119	struct ring_buffer_per_cpu *cpu_buffer;
3120	struct ring_buffer_event *event;
3121	void *body;
3122	int ret = -EBUSY;
3123	int cpu;
3124
3125	preempt_disable_notrace();
3126
3127	if (atomic_read(&buffer->record_disabled))
3128		goto out;
3129
3130	cpu = raw_smp_processor_id();
3131
3132	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3133		goto out;
3134
3135	cpu_buffer = buffer->buffers[cpu];
3136
3137	if (atomic_read(&cpu_buffer->record_disabled))
3138		goto out;
3139
3140	if (length > BUF_MAX_DATA_SIZE)
3141		goto out;
3142
3143	if (unlikely(trace_recursive_lock(cpu_buffer)))
3144		goto out;
3145
3146	event = rb_reserve_next_event(buffer, cpu_buffer, length);
3147	if (!event)
3148		goto out_unlock;
3149
3150	body = rb_event_data(event);
3151
3152	memcpy(body, data, length);
3153
3154	rb_commit(cpu_buffer, event);
3155
3156	rb_wakeups(buffer, cpu_buffer);
3157
3158	ret = 0;
3159
3160 out_unlock:
3161	trace_recursive_unlock(cpu_buffer);
3162
3163 out:
3164	preempt_enable_notrace();
3165
3166	return ret;
3167}
3168EXPORT_SYMBOL_GPL(ring_buffer_write);
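/*
 * Example (illustrative sketch): the one-shot counterpart of the
 * reserve/commit pair above, for data that already exists.
 */
#if 0
	u64 stamp = 42;		/* hypothetical payload */

	if (ring_buffer_write(buffer, sizeof(stamp), &stamp))
		;		/* -EBUSY: recording disabled or no room */
#endif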
3169
3170static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
3171{
3172	struct buffer_page *reader = cpu_buffer->reader_page;
3173	struct buffer_page *head = rb_set_head_page(cpu_buffer);
3174	struct buffer_page *commit = cpu_buffer->commit_page;
3175
3176	/* In case of error, head will be NULL */
3177	if (unlikely(!head))
3178		return true;
3179
3180	return reader->read == rb_page_commit(reader) &&
3181		(commit == reader ||
3182		 (commit == head &&
3183		  head->read == rb_page_commit(commit)));
3184}
3185
3186/**
3187 * ring_buffer_record_disable - stop all writes into the buffer
3188 * @buffer: The ring buffer to stop writes to.
3189 *
3190 * This prevents all writes to the buffer. Any attempt to write
3191 * to the buffer after this will fail and return NULL.
3192 *
3193 * The caller should call synchronize_rcu() after this.
3194 */
3195void ring_buffer_record_disable(struct ring_buffer *buffer)
3196{
3197	atomic_inc(&buffer->record_disabled);
3198}
3199EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
3200
3201/**
3202 * ring_buffer_record_enable - enable writes to the buffer
3203 * @buffer: The ring buffer to enable writes
3204 *
3205 * Note, multiple disables will need the same number of enables
3206 * to truly enable the writing (much like preempt_disable).
3207 */
3208void ring_buffer_record_enable(struct ring_buffer *buffer)
3209{
3210	atomic_dec(&buffer->record_disabled);
3211}
3212EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
3213
3214/**
3215 * ring_buffer_record_off - stop all writes into the buffer
3216 * @buffer: The ring buffer to stop writes to.
3217 *
3218 * This prevents all writes to the buffer. Any attempt to write
3219 * to the buffer after this will fail and return NULL.
3220 *
3221 * This is different than ring_buffer_record_disable() as
3222 * it works like an on/off switch, whereas the disable() version
3223 * must be paired with an enable().
3224 */
3225void ring_buffer_record_off(struct ring_buffer *buffer)
3226{
3227	unsigned int rd;
3228	unsigned int new_rd;
3229
3230	do {
3231		rd = atomic_read(&buffer->record_disabled);
3232		new_rd = rd | RB_BUFFER_OFF;
3233	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3234}
3235EXPORT_SYMBOL_GPL(ring_buffer_record_off);
3236
3237/**
3238 * ring_buffer_record_on - restart writes into the buffer
3239 * @buffer: The ring buffer to start writes to.
3240 *
3241 * This enables all writes to the buffer that was disabled by
3242 * ring_buffer_record_off().
3243 *
3244 * This is different than ring_buffer_record_enable() as
3245 * it works like an on/off switch, whereas the enable() version
3246 * must be paired with a disable().
3247 */
3248void ring_buffer_record_on(struct ring_buffer *buffer)
3249{
3250	unsigned int rd;
3251	unsigned int new_rd;
3252
3253	do {
3254		rd = atomic_read(&buffer->record_disabled);
3255		new_rd = rd & ~RB_BUFFER_OFF;
3256	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3257}
3258EXPORT_SYMBOL_GPL(ring_buffer_record_on);
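/*
 * The cmpxchg loops above are needed because record_disabled is both
 * a counter (atomic_inc/atomic_dec from the disable()/enable() pairs)
 * and a flag word (RB_BUFFER_OFF). The loop retries until the OR/AND
 * lands on an unchanged value, so a concurrent
 * ring_buffer_record_disable() increment is never lost.
 */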
3259
3260/**
3261 * ring_buffer_record_is_on - return true if the ring buffer can write
3262 * @buffer: The ring buffer to see if write is enabled
3263 *
3264 * Returns true if the ring buffer is in a state that it accepts writes.
3265 */
3266bool ring_buffer_record_is_on(struct ring_buffer *buffer)
3267{
3268	return !atomic_read(&buffer->record_disabled);
3269}
3270
3271/**
3272 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
3273 * @buffer: The ring buffer to see if write is set enabled
3274 *
3275 * Returns true if the ring buffer is set writable by ring_buffer_record_on().
3276 * Note that this does NOT mean it is in a writable state.
3277 *
3278 * It may return true when the ring buffer has been disabled by
3279 * ring_buffer_record_disable(), as that is a temporary disabling of
3280 * the ring buffer.
3281 */
3282bool ring_buffer_record_is_set_on(struct ring_buffer *buffer)
3283{
3284	return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
3285}
3286
3287/**
3288 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
3289 * @buffer: The ring buffer to stop writes to.
3290 * @cpu: The CPU buffer to stop
3291 *
3292 * This prevents all writes to the buffer. Any attempt to write
3293 * to the buffer after this will fail and return NULL.
3294 *
3295 * The caller should call synchronize_rcu() after this.
3296 */
3297void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
3298{
3299	struct ring_buffer_per_cpu *cpu_buffer;
3300
3301	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3302		return;
3303
3304	cpu_buffer = buffer->buffers[cpu];
3305	atomic_inc(&cpu_buffer->record_disabled);
3306}
3307EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
3308
3309/**
3310 * ring_buffer_record_enable_cpu - enable writes to the buffer
3311 * @buffer: The ring buffer to enable writes
3312 * @cpu: The CPU to enable.
3313 *
3314 * Note, multiple disables will need the same number of enables
3315 * to truly enable the writing (much like preempt_disable).
3316 */
3317void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
3318{
3319	struct ring_buffer_per_cpu *cpu_buffer;
3320
3321	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3322		return;
3323
3324	cpu_buffer = buffer->buffers[cpu];
3325	atomic_dec(&cpu_buffer->record_disabled);
3326}
3327EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
3328
3329/*
3330 * The total entries in the ring buffer is the running counter
3331 * of entries entered into the ring buffer, minus the sum of
3332 * the entries read from the ring buffer and the number of
3333 * entries that were overwritten.
3334 */
3335static inline unsigned long
3336rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
3337{
3338	return local_read(&cpu_buffer->entries) -
3339		(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
3340}
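/*
 * Worked example (illustrative): if 1000 entries were written, 100
 * were overwritten and 250 already read, 1000 - (100 + 250) = 650
 * entries remain.
 */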
3341
3342/**
3343 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
3344 * @buffer: The ring buffer
3345 * @cpu: The per CPU buffer to read from.
3346 */
3347u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
3348{
3349	unsigned long flags;
3350	struct ring_buffer_per_cpu *cpu_buffer;
3351	struct buffer_page *bpage;
3352	u64 ret = 0;
3353
3354	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3355		return 0;
3356
3357	cpu_buffer = buffer->buffers[cpu];
3358	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3359	/*
3360	 * If the tail is on the reader_page, the oldest time stamp is on
3361	 * the reader page.
3362	 */
3363	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
3364		bpage = cpu_buffer->reader_page;
3365	else
3366		bpage = rb_set_head_page(cpu_buffer);
3367	if (bpage)
3368		ret = bpage->page->time_stamp;
3369	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3370
3371	return ret;
3372}
3373EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
3374
3375/**
3376 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
3377 * @buffer: The ring buffer
3378 * @cpu: The per CPU buffer to read from.
3379 */
3380unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
3381{
3382	struct ring_buffer_per_cpu *cpu_buffer;
3383	unsigned long ret;
3384
3385	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3386		return 0;
3387
3388	cpu_buffer = buffer->buffers[cpu];
3389	ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
3390
3391	return ret;
3392}
3393EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
3394
3395/**
3396 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
3397 * @buffer: The ring buffer
3398 * @cpu: The per CPU buffer to get the entries from.
3399 */
3400unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
3401{
3402	struct ring_buffer_per_cpu *cpu_buffer;
3403
3404	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3405		return 0;
3406
3407	cpu_buffer = buffer->buffers[cpu];
3408
3409	return rb_num_of_entries(cpu_buffer);
3410}
3411EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
3412
3413/**
3414 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
3415 * buffer wrapping around (only if RB_FL_OVERWRITE is on).
3416 * @buffer: The ring buffer
3417 * @cpu: The per CPU buffer to get the number of overruns from
3418 */
3419unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
3420{
3421	struct ring_buffer_per_cpu *cpu_buffer;
3422	unsigned long ret;
3423
3424	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3425		return 0;
3426
3427	cpu_buffer = buffer->buffers[cpu];
3428	ret = local_read(&cpu_buffer->overrun);
3429
3430	return ret;
3431}
3432EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
3433
3434/**
3435 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
3436 * commits failing due to the buffer wrapping around while there are uncommitted
3437 * events, such as during an interrupt storm.
3438 * @buffer: The ring buffer
3439 * @cpu: The per CPU buffer to get the number of overruns from
3440 */
3441unsigned long
3442ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
3443{
3444	struct ring_buffer_per_cpu *cpu_buffer;
3445	unsigned long ret;
3446
3447	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3448		return 0;
3449
3450	cpu_buffer = buffer->buffers[cpu];
3451	ret = local_read(&cpu_buffer->commit_overrun);
3452
3453	return ret;
3454}
3455EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
3456
3457/**
3458 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
3459 * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
3460 * @buffer: The ring buffer
3461 * @cpu: The per CPU buffer to get the number of overruns from
3462 */
3463unsigned long
3464ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
3465{
3466	struct ring_buffer_per_cpu *cpu_buffer;
3467	unsigned long ret;
3468
3469	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3470		return 0;
3471
3472	cpu_buffer = buffer->buffers[cpu];
3473	ret = local_read(&cpu_buffer->dropped_events);
3474
3475	return ret;
3476}
3477EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
3478
3479/**
3480 * ring_buffer_read_events_cpu - get the number of events successfully read
3481 * @buffer: The ring buffer
3482 * @cpu: The per CPU buffer to get the number of events read
3483 */
3484unsigned long
3485ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
3486{
3487	struct ring_buffer_per_cpu *cpu_buffer;
3488
3489	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3490		return 0;
3491
3492	cpu_buffer = buffer->buffers[cpu];
3493	return cpu_buffer->read;
3494}
3495EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
3496
3497/**
3498 * ring_buffer_entries - get the number of entries in a buffer
3499 * @buffer: The ring buffer
3500 *
3501 * Returns the total number of entries in the ring buffer
3502 * (all CPU entries)
3503 */
3504unsigned long ring_buffer_entries(struct ring_buffer *buffer)
3505{
3506	struct ring_buffer_per_cpu *cpu_buffer;
3507	unsigned long entries = 0;
3508	int cpu;
3509
3510	/* if you care about this being correct, lock the buffer */
3511	for_each_buffer_cpu(buffer, cpu) {
3512		cpu_buffer = buffer->buffers[cpu];
3513		entries += rb_num_of_entries(cpu_buffer);
3514	}
3515
3516	return entries;
3517}
3518EXPORT_SYMBOL_GPL(ring_buffer_entries);
3519
3520/**
3521 * ring_buffer_overruns - get the number of overruns in buffer
3522 * @buffer: The ring buffer
3523 *
3524 * Returns the total number of overruns in the ring buffer
3525 * (all CPU entries)
3526 */
3527unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
3528{
3529	struct ring_buffer_per_cpu *cpu_buffer;
3530	unsigned long overruns = 0;
3531	int cpu;
3532
3533	/* if you care about this being correct, lock the buffer */
3534	for_each_buffer_cpu(buffer, cpu) {
3535		cpu_buffer = buffer->buffers[cpu];
3536		overruns += local_read(&cpu_buffer->overrun);
3537	}
3538
3539	return overruns;
3540}
3541EXPORT_SYMBOL_GPL(ring_buffer_overruns);
3542
3543static void rb_iter_reset(struct ring_buffer_iter *iter)
3544{
3545	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3546
3547	/* Iterator usage is expected to have recording disabled */
3548	iter->head_page = cpu_buffer->reader_page;
3549	iter->head = cpu_buffer->reader_page->read;
3550
3551	iter->cache_reader_page = iter->head_page;
3552	iter->cache_read = cpu_buffer->read;
3553
3554	if (iter->head)
3555		iter->read_stamp = cpu_buffer->read_stamp;
3556	else
3557		iter->read_stamp = iter->head_page->page->time_stamp;
3558}
3559
3560/**
3561 * ring_buffer_iter_reset - reset an iterator
3562 * @iter: The iterator to reset
3563 *
3564 * Resets the iterator, so that it will start from the beginning
3565 * again.
3566 */
3567void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
3568{
3569	struct ring_buffer_per_cpu *cpu_buffer;
3570	unsigned long flags;
3571
3572	if (!iter)
3573		return;
3574
3575	cpu_buffer = iter->cpu_buffer;
3576
3577	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3578	rb_iter_reset(iter);
3579	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3580}
3581EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
3582
3583/**
3584 * ring_buffer_iter_empty - check if an iterator has no more to read
3585 * @iter: The iterator to check
3586 */
3587int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
3588{
3589	struct ring_buffer_per_cpu *cpu_buffer;
3590	struct buffer_page *reader;
3591	struct buffer_page *head_page;
3592	struct buffer_page *commit_page;
3593	unsigned commit;
3594
3595	cpu_buffer = iter->cpu_buffer;
3596
3597	/* Remember, trace recording is off when iterator is in use */
3598	reader = cpu_buffer->reader_page;
3599	head_page = cpu_buffer->head_page;
3600	commit_page = cpu_buffer->commit_page;
3601	commit = rb_page_commit(commit_page);
3602
3603	return ((iter->head_page == commit_page && iter->head == commit) ||
3604		(iter->head_page == reader && commit_page == head_page &&
3605		 head_page->read == commit &&
3606		 iter->head == rb_page_commit(cpu_buffer->reader_page)));
3607}
3608EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
3609
3610static void
3611rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
3612		     struct ring_buffer_event *event)
3613{
3614	u64 delta;
3615
3616	switch (event->type_len) {
3617	case RINGBUF_TYPE_PADDING:
3618		return;
3619
3620	case RINGBUF_TYPE_TIME_EXTEND:
3621		delta = ring_buffer_event_time_stamp(event);
3622		cpu_buffer->read_stamp += delta;
3623		return;
3624
3625	case RINGBUF_TYPE_TIME_STAMP:
3626		delta = ring_buffer_event_time_stamp(event);
3627		cpu_buffer->read_stamp = delta;
3628		return;
3629
3630	case RINGBUF_TYPE_DATA:
3631		cpu_buffer->read_stamp += event->time_delta;
3632		return;
3633
3634	default:
3635		BUG();
3636	}
3637	return;
3638}
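
/*
 * A worked example of the bookkeeping above, with invented values:
 * starting from read_stamp == 1000,
 *
 *	TIME_EXTEND carrying delta 5000   ->  read_stamp = 6000
 *	DATA with time_delta 12           ->  read_stamp = 6012
 *	TIME_STAMP carrying 90000         ->  read_stamp = 90000 (absolute)
 *	PADDING                           ->  read_stamp unchanged
 *
 * rb_update_iter_read_stamp() below applies the same rules to an
 * iterator's private read_stamp.
 */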
3639
3640static void
3641rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
3642			  struct ring_buffer_event *event)
3643{
3644	u64 delta;
3645
3646	switch (event->type_len) {
3647	case RINGBUF_TYPE_PADDING:
3648		return;
3649
3650	case RINGBUF_TYPE_TIME_EXTEND:
3651		delta = ring_buffer_event_time_stamp(event);
3652		iter->read_stamp += delta;
3653		return;
3654
3655	case RINGBUF_TYPE_TIME_STAMP:
3656		delta = ring_buffer_event_time_stamp(event);
3657		iter->read_stamp = delta;
3658		return;
3659
3660	case RINGBUF_TYPE_DATA:
3661		iter->read_stamp += event->time_delta;
3662		return;
3663
3664	default:
3665		BUG();
3666	}
3667	return;
3668}
3669
3670static struct buffer_page *
3671rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3672{
3673	struct buffer_page *reader = NULL;
3674	unsigned long overwrite;
3675	unsigned long flags;
3676	int nr_loops = 0;
3677	int ret;
3678
3679	local_irq_save(flags);
3680	arch_spin_lock(&cpu_buffer->lock);
3681
3682 again:
3683	/*
3684	 * This should normally only loop twice. But because the
3685	 * start of the reader inserts an empty page, it causes
3686	 * a case where we will loop three times. There should be no
3687	 * reason to loop four times (that I know of).
3688	 */
3689	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
3690		reader = NULL;
3691		goto out;
3692	}
3693
3694	reader = cpu_buffer->reader_page;
3695
3696	/* If there's more to read, return this page */
3697	if (cpu_buffer->reader_page->read < rb_page_size(reader))
3698		goto out;
3699
3700	/* Never should we have an index greater than the size */
3701	if (RB_WARN_ON(cpu_buffer,
3702		       cpu_buffer->reader_page->read > rb_page_size(reader)))
3703		goto out;
3704
3705	/* check if we caught up to the tail */
3706	reader = NULL;
3707	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
3708		goto out;
3709
3710	/* Don't bother swapping if the ring buffer is empty */
3711	if (rb_num_of_entries(cpu_buffer) == 0)
3712		goto out;
3713
3714	/*
3715	 * Reset the reader page to size zero.
3716	 */
3717	local_set(&cpu_buffer->reader_page->write, 0);
3718	local_set(&cpu_buffer->reader_page->entries, 0);
3719	local_set(&cpu_buffer->reader_page->page->commit, 0);
3720	cpu_buffer->reader_page->real_end = 0;
3721
3722 spin:
3723	/*
3724	 * Splice the empty reader page into the list around the head.
3725	 */
3726	reader = rb_set_head_page(cpu_buffer);
3727	if (!reader)
3728		goto out;
3729	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
3730	cpu_buffer->reader_page->list.prev = reader->list.prev;
3731
3732	/*
3733	 * cpu_buffer->pages just needs to point to the buffer; it
3734	 *  has no specific buffer page to point to. Let's move it out
3735	 *  of our way so we don't accidentally swap it.
3736	 */
3737	cpu_buffer->pages = reader->list.prev;
3738
3739	/* The reader page will be pointing to the new head */
3740	rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
3741
3742	/*
3743	 * We want to make sure we read the overruns after we set up our
3744	 * pointers to the next object. The writer side does a
3745	 * cmpxchg to cross pages which acts as the mb on the writer
3746	 * side. Note, the reader will constantly fail the swap
3747	 * while the writer is updating the pointers, so this
3748	 * guarantees that the overwrite recorded here is the one we
3749	 * want to compare with the last_overrun.
3750	 */
3751	smp_mb();
3752	overwrite = local_read(&(cpu_buffer->overrun));
3753
3754	/*
3755	 * Here's the tricky part.
3756	 *
3757	 * We need to move the pointer past the header page.
3758	 * But we can only do that if a writer is not currently
3759	 * moving it. The page before the header page has the
3760	 * flag bit '1' set if it is pointing to the page we want,
3761	 * but if the writer is in the process of moving it
3762	 * then it will be '2', or '0' if it has already moved.
3763	 */
3764
3765	ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
3766
3767	/*
3768	 * If we did not convert it, then we must try again.
3769	 */
3770	if (!ret)
3771		goto spin;
3772
3773	/*
3774	 * Yay! We succeeded in replacing the page.
3775	 *
3776	 * Now make the new head point back to the reader page.
3777	 */
3778	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
3779	rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
3780
3781	local_inc(&cpu_buffer->pages_read);
3782
3783	/* Finally update the reader page to the new head */
3784	cpu_buffer->reader_page = reader;
3785	cpu_buffer->reader_page->read = 0;
3786
3787	if (overwrite != cpu_buffer->last_overrun) {
3788		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
3789		cpu_buffer->last_overrun = overwrite;
3790	}
3791
3792	goto again;
3793
3794 out:
3795	/* Update the read_stamp on the first event */
3796	if (reader && reader->read == 0)
3797		cpu_buffer->read_stamp = reader->page->time_stamp;
3798
3799	arch_spin_unlock(&cpu_buffer->lock);
3800	local_irq_restore(flags);
3801
3802	return reader;
3803}
3804
3805static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
3806{
3807	struct ring_buffer_event *event;
3808	struct buffer_page *reader;
3809	unsigned length;
3810
3811	reader = rb_get_reader_page(cpu_buffer);
3812
3813	/* This function should not be called when buffer is empty */
3814	if (RB_WARN_ON(cpu_buffer, !reader))
3815		return;
3816
3817	event = rb_reader_event(cpu_buffer);
3818
3819	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
3820		cpu_buffer->read++;
3821
3822	rb_update_read_stamp(cpu_buffer, event);
3823
3824	length = rb_event_length(event);
3825	cpu_buffer->reader_page->read += length;
3826}
3827
3828static void rb_advance_iter(struct ring_buffer_iter *iter)
3829{
3830	struct ring_buffer_per_cpu *cpu_buffer;
3831	struct ring_buffer_event *event;
3832	unsigned length;
3833
3834	cpu_buffer = iter->cpu_buffer;
3835
3836	/*
3837	 * Check if we are at the end of the buffer.
3838	 */
3839	if (iter->head >= rb_page_size(iter->head_page)) {
3840		/* discarded commits can make the page empty */
3841		if (iter->head_page == cpu_buffer->commit_page)
3842			return;
3843		rb_inc_iter(iter);
3844		return;
3845	}
3846
3847	event = rb_iter_head_event(iter);
3848
3849	length = rb_event_length(event);
3850
3851	/*
3852	 * This should not be called to advance the header if we are
3853	 * at the tail of the buffer.
3854	 */
3855	if (RB_WARN_ON(cpu_buffer,
3856		       (iter->head_page == cpu_buffer->commit_page) &&
3857		       (iter->head + length > rb_commit_index(cpu_buffer))))
3858		return;
3859
3860	rb_update_iter_read_stamp(iter, event);
3861
3862	iter->head += length;
3863
3864	/* check for end of page padding */
3865	if ((iter->head >= rb_page_size(iter->head_page)) &&
3866	    (iter->head_page != cpu_buffer->commit_page))
3867		rb_inc_iter(iter);
3868}
3869
3870static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
3871{
3872	return cpu_buffer->lost_events;
3873}
3874
3875static struct ring_buffer_event *
3876rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
3877	       unsigned long *lost_events)
3878{
3879	struct ring_buffer_event *event;
3880	struct buffer_page *reader;
3881	int nr_loops = 0;
3882
3883	if (ts)
3884		*ts = 0;
3885 again:
3886	/*
3887	 * We repeat when a time extend is encountered.
3888	 * Since the time extend is always attached to a data event,
3889	 * we should never loop more than once.
3890	 * (We never hit the following condition more than twice).
3891	 */
3892	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3893		return NULL;
3894
3895	reader = rb_get_reader_page(cpu_buffer);
3896	if (!reader)
3897		return NULL;
3898
3899	event = rb_reader_event(cpu_buffer);
3900
3901	switch (event->type_len) {
3902	case RINGBUF_TYPE_PADDING:
3903		if (rb_null_event(event))
3904			RB_WARN_ON(cpu_buffer, 1);
3905		/*
3906		 * Because the writer could be discarding every
3907		 * event it creates (which would probably be bad)
3908		 * if we were to go back to "again" then we may never
3909		 * catch up, and will trigger the warn on, or lock
3910		 * the box. Return the padding, and we will release
3911		 * the current locks, and try again.
3912		 */
3913		return event;
3914
3915	case RINGBUF_TYPE_TIME_EXTEND:
3916		/* Internal data, OK to advance */
3917		rb_advance_reader(cpu_buffer);
3918		goto again;
3919
3920	case RINGBUF_TYPE_TIME_STAMP:
3921		if (ts) {
3922			*ts = ring_buffer_event_time_stamp(event);
3923			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3924							 cpu_buffer->cpu, ts);
3925		}
3926		/* Internal data, OK to advance */
3927		rb_advance_reader(cpu_buffer);
3928		goto again;
3929
3930	case RINGBUF_TYPE_DATA:
3931		if (ts && !(*ts)) {
3932			*ts = cpu_buffer->read_stamp + event->time_delta;
3933			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3934							 cpu_buffer->cpu, ts);
3935		}
3936		if (lost_events)
3937			*lost_events = rb_lost_events(cpu_buffer);
3938		return event;
3939
3940	default:
3941		BUG();
3942	}
3943
3944	return NULL;
3945}
3946EXPORT_SYMBOL_GPL(ring_buffer_peek);
3947
3948static struct ring_buffer_event *
3949rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3950{
3951	struct ring_buffer *buffer;
3952	struct ring_buffer_per_cpu *cpu_buffer;
3953	struct ring_buffer_event *event;
3954	int nr_loops = 0;
3955
3956	if (ts)
3957		*ts = 0;
3958
3959	cpu_buffer = iter->cpu_buffer;
3960	buffer = cpu_buffer->buffer;
3961
3962	/*
3963	 * Check if someone performed a consuming read to
3964	 * the buffer. A consuming read invalidates the iterator
3965	 * and we need to reset the iterator in this case.
3966	 */
3967	if (unlikely(iter->cache_read != cpu_buffer->read ||
3968		     iter->cache_reader_page != cpu_buffer->reader_page))
3969		rb_iter_reset(iter);
3970
3971 again:
3972	if (ring_buffer_iter_empty(iter))
3973		return NULL;
3974
3975	/*
3976	 * We repeat when a time extend is encountered or we hit
3977	 * the end of the page. Since the time extend is always attached
3978	 * to a data event, we should never loop more than three times.
3979	 * Once for going to next page, once on time extend, and
3980	 * finally once to get the event.
3981	 * (We never hit the following condition more than thrice).
3982	 */
3983	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3))
3984		return NULL;
3985
3986	if (rb_per_cpu_empty(cpu_buffer))
3987		return NULL;
3988
3989	if (iter->head >= rb_page_size(iter->head_page)) {
3990		rb_inc_iter(iter);
3991		goto again;
3992	}
3993
3994	event = rb_iter_head_event(iter);
3995
3996	switch (event->type_len) {
3997	case RINGBUF_TYPE_PADDING:
3998		if (rb_null_event(event)) {
3999			rb_inc_iter(iter);
4000			goto again;
4001		}
4002		rb_advance_iter(iter);
4003		return event;
4004
4005	case RINGBUF_TYPE_TIME_EXTEND:
4006		/* Internal data, OK to advance */
4007		rb_advance_iter(iter);
4008		goto again;
4009
4010	case RINGBUF_TYPE_TIME_STAMP:
4011		if (ts) {
4012			*ts = ring_buffer_event_time_stamp(event);
4013			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4014							 cpu_buffer->cpu, ts);
4015		}
4016		/* Internal data, OK to advance */
4017		rb_advance_iter(iter);
4018		goto again;
4019
4020	case RINGBUF_TYPE_DATA:
4021		if (ts && !(*ts)) {
4022			*ts = iter->read_stamp + event->time_delta;
4023			ring_buffer_normalize_time_stamp(buffer,
4024							 cpu_buffer->cpu, ts);
4025		}
4026		return event;
4027
4028	default:
4029		BUG();
4030	}
4031
4032	return NULL;
4033}
4034EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
4035
4036static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
4037{
4038	if (likely(!in_nmi())) {
4039		raw_spin_lock(&cpu_buffer->reader_lock);
4040		return true;
4041	}
4042
4043	/*
4044	 * If an NMI die dumps out the content of the ring buffer,
4045	 * trylock must be used to prevent a deadlock if the NMI
4046	 * preempted a task that holds the ring buffer locks. If
4047	 * we get the lock then all is fine; if not, then continue
4048	 * to do the read, but this can corrupt the ring buffer,
4049	 * so it must be permanently disabled from future writes.
4050	 * Reading from an NMI is a one-shot deal.
4051	 */
4052	if (raw_spin_trylock(&cpu_buffer->reader_lock))
4053		return true;
4054
4055	/* Continue without locking, but disable the ring buffer */
4056	atomic_inc(&cpu_buffer->record_disabled);
4057	return false;
4058}
4059
4060static inline void
4061rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
4062{
4063	if (likely(locked))
4064		raw_spin_unlock(&cpu_buffer->reader_lock);
4065	return;
4066}
4067
4068/**
4069 * ring_buffer_peek - peek at the next event to be read
4070 * @buffer: The ring buffer to read
4071 * @cpu: The CPU to peek at
4072 * @ts: The timestamp counter of this event.
4073 * @lost_events: a variable to store if events were lost (may be NULL)
4074 *
4075 * This will return the event that will be read next, but does
4076 * not consume the data.
4077 */
4078struct ring_buffer_event *
4079ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
4080		 unsigned long *lost_events)
4081{
4082	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4083	struct ring_buffer_event *event;
4084	unsigned long flags;
4085	bool dolock;
4086
4087	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4088		return NULL;
4089
4090 again:
4091	local_irq_save(flags);
4092	dolock = rb_reader_lock(cpu_buffer);
4093	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
4094	if (event && event->type_len == RINGBUF_TYPE_PADDING)
4095		rb_advance_reader(cpu_buffer);
4096	rb_reader_unlock(cpu_buffer, dolock);
4097	local_irq_restore(flags);
4098
4099	if (event && event->type_len == RINGBUF_TYPE_PADDING)
4100		goto again;
4101
4102	return event;
4103}
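
/*
 * A minimal sketch of peeking without consuming; "buffer" and "cpu"
 * are assumed to be valid. The peeked event may still be consumed by
 * a parallel reader before any later access, so this is only safe
 * when the caller serializes readers itself.
 */
static void __maybe_unused rb_sketch_peek(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	unsigned long lost = 0;
	u64 ts;

	event = ring_buffer_peek(buffer, cpu, &ts, &lost);
	if (event)
		pr_info("next event: len %u ts %llu lost %lu\n",
			ring_buffer_event_length(event),
			(unsigned long long)ts, lost);
}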
4104
4105/**
4106 * ring_buffer_iter_peek - peek at the next event to be read
4107 * @iter: The ring buffer iterator
4108 * @ts: The timestamp counter of this event.
4109 *
4110 * This will return the event that will be read next, but does
4111 * not increment the iterator.
4112 */
4113struct ring_buffer_event *
4114ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
4115{
4116	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4117	struct ring_buffer_event *event;
4118	unsigned long flags;
4119
4120 again:
4121	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4122	event = rb_iter_peek(iter, ts);
4123	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4124
4125	if (event && event->type_len == RINGBUF_TYPE_PADDING)
4126		goto again;
4127
4128	return event;
4129}
4130
4131/**
4132 * ring_buffer_consume - return an event and consume it
4133 * @buffer: The ring buffer to get the next event from
4134 * @cpu: the cpu to read the buffer from
4135 * @ts: a variable to store the timestamp (may be NULL)
4136 * @lost_events: a variable to store if events were lost (may be NULL)
4137 *
4138 * Returns the next event in the ring buffer, and that event is consumed.
4139 * This means that sequential reads will keep returning a different event,
4140 * and eventually empty the ring buffer if the producer is slower.
4141 */
4142struct ring_buffer_event *
4143ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
4144		    unsigned long *lost_events)
4145{
4146	struct ring_buffer_per_cpu *cpu_buffer;
4147	struct ring_buffer_event *event = NULL;
4148	unsigned long flags;
4149	bool dolock;
4150
4151 again:
4152	/* might be called in atomic */
4153	preempt_disable();
4154
4155	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4156		goto out;
4157
4158	cpu_buffer = buffer->buffers[cpu];
4159	local_irq_save(flags);
4160	dolock = rb_reader_lock(cpu_buffer);
4161
4162	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
4163	if (event) {
4164		cpu_buffer->lost_events = 0;
4165		rb_advance_reader(cpu_buffer);
4166	}
4167
4168	rb_reader_unlock(cpu_buffer, dolock);
4169	local_irq_restore(flags);
4170
4171 out:
4172	preempt_enable();
4173
4174	if (event && event->type_len == RINGBUF_TYPE_PADDING)
4175		goto again;
4176
4177	return event;
4178}
4179EXPORT_SYMBOL_GPL(ring_buffer_consume);
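
/*
 * A minimal consuming-read sketch: drain whatever is currently in one
 * CPU's buffer. "buffer" is assumed valid; process_event() is a
 * hypothetical callback standing in for real handling.
 */
static void __maybe_unused
rb_sketch_drain_cpu(struct ring_buffer *buffer, int cpu,
		    void (*process_event)(void *data, unsigned int len))
{
	struct ring_buffer_event *event;
	unsigned long lost;

	while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
		if (lost)
			pr_warn("lost %lu events\n", lost);
		process_event(ring_buffer_event_data(event),
			      ring_buffer_event_length(event));
	}
}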
4180
4181/**
4182 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
4183 * @buffer: The ring buffer to read from
4184 * @cpu: The cpu buffer to iterate over
4185 * @flags: gfp flags to use for memory allocation
4186 *
4187 * This performs the initial preparations necessary to iterate
4188 * through the buffer.  Memory is allocated, buffer recording
4189 * is disabled, and the iterator pointer is returned to the caller.
4190 *
4191 * Disabling buffer recording prevents the reading from being
4192 * corrupted. This is not a consuming read, so a producer is not
4193 * expected.
4194 *
4195 * After a sequence of ring_buffer_read_prepare calls, the user is
4196 * expected to make at least one call to ring_buffer_read_prepare_sync.
4197 * Afterwards, ring_buffer_read_start is invoked to get things going
4198 * for real.
4199 *
4200 * Overall, this must be paired with ring_buffer_read_finish.
4201 */
4202struct ring_buffer_iter *
4203ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags)
4204{
4205	struct ring_buffer_per_cpu *cpu_buffer;
4206	struct ring_buffer_iter *iter;
4207
4208	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4209		return NULL;
4210
4211	iter = kmalloc(sizeof(*iter), flags);
4212	if (!iter)
4213		return NULL;
4214
4215	cpu_buffer = buffer->buffers[cpu];
4216
4217	iter->cpu_buffer = cpu_buffer;
4218
4219	atomic_inc(&buffer->resize_disabled);
4220	atomic_inc(&cpu_buffer->record_disabled);
4221
4222	return iter;
4223}
4224EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
4225
4226/**
4227 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
4228 *
4229 * All previously invoked ring_buffer_read_prepare calls to prepare
4230 * iterators will be synchronized.  Afterwards, ring_buffer_read_start
4231 * calls on those iterators are allowed.
4232 */
4233void
4234ring_buffer_read_prepare_sync(void)
4235{
4236	synchronize_rcu();
4237}
4238EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
4239
4240/**
4241 * ring_buffer_read_start - start a non consuming read of the buffer
4242 * @iter: The iterator returned by ring_buffer_read_prepare
4243 *
4244 * This finalizes the startup of an iteration through the buffer.
4245 * The iterator comes from a call to ring_buffer_read_prepare and
4246 * an intervening ring_buffer_read_prepare_sync must have been
4247 * performed.
4248 *
4249 * Must be paired with ring_buffer_read_finish.
4250 */
4251void
4252ring_buffer_read_start(struct ring_buffer_iter *iter)
4253{
4254	struct ring_buffer_per_cpu *cpu_buffer;
4255	unsigned long flags;
4256
4257	if (!iter)
4258		return;
4259
4260	cpu_buffer = iter->cpu_buffer;
4261
4262	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4263	arch_spin_lock(&cpu_buffer->lock);
4264	rb_iter_reset(iter);
4265	arch_spin_unlock(&cpu_buffer->lock);
4266	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4267}
4268EXPORT_SYMBOL_GPL(ring_buffer_read_start);
4269
4270/**
4271 * ring_buffer_read_finish - finish reading the iterator of the buffer
4272 * @iter: The iterator retrieved by ring_buffer_read_prepare
4273 *
4274 * This re-enables the recording to the buffer, and frees the
4275 * iterator.
4276 */
4277void
4278ring_buffer_read_finish(struct ring_buffer_iter *iter)
4279{
4280	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4281	unsigned long flags;
4282
4283	/*
4284	 * Ring buffer is disabled from recording; here's a good place
4285	 * to check the integrity of the ring buffer.
4286	 * Must prevent readers from trying to read, as the check
4287	 * clears the HEAD page and readers require it.
4288	 */
4289	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4290	rb_check_pages(cpu_buffer);
4291	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4292
4293	atomic_dec(&cpu_buffer->record_disabled);
4294	atomic_dec(&cpu_buffer->buffer->resize_disabled);
4295	kfree(iter);
4296}
4297EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
4298
4299/**
4300 * ring_buffer_read - read the next item in the ring buffer by the iterator
4301 * @iter: The ring buffer iterator
4302 * @ts: The time stamp of the event read.
4303 *
4304 * This reads the next event in the ring buffer and increments the iterator.
4305 */
4306struct ring_buffer_event *
4307ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
4308{
4309	struct ring_buffer_event *event;
4310	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4311	unsigned long flags;
4312
4313	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4314 again:
4315	event = rb_iter_peek(iter, ts);
4316	if (!event)
4317		goto out;
4318
4319	if (event->type_len == RINGBUF_TYPE_PADDING)
4320		goto again;
4321
4322	rb_advance_iter(iter);
4323 out:
4324	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4325
4326	return event;
4327}
4328EXPORT_SYMBOL_GPL(ring_buffer_read);
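
/*
 * A minimal sketch of the full non-consuming read protocol described
 * above: prepare, sync once, start, iterate, finish. "buffer" and
 * "cpu" are assumed valid; errors are simply propagated.
 */
static int __maybe_unused rb_sketch_iterate(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	ring_buffer_read_prepare_sync();
	ring_buffer_read_start(iter);

	while ((event = ring_buffer_read(iter, &ts)))
		pr_info("event at %llu, len %u\n", (unsigned long long)ts,
			ring_buffer_event_length(event));

	ring_buffer_read_finish(iter);
	return 0;
}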
4329
4330/**
4331 * ring_buffer_size - return the size of the ring buffer (in bytes)
4332 * @buffer: The ring buffer.
 * @cpu: The CPU to get ring buffer size from.
4333 */
4334unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
4335{
4336	/*
4337	 * Earlier, this method returned
4338	 *	BUF_PAGE_SIZE * buffer->nr_pages
4339	 * Since the nr_pages field is now removed, we have converted this to
4340	 * return the per cpu buffer value.
4341	 */
4342	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4343		return 0;
4344
4345	return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
4346}
4347EXPORT_SYMBOL_GPL(ring_buffer_size);
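
/*
 * A small sketch following from the per-CPU semantics above: with no
 * global nr_pages anymore, a "whole buffer" size is just the sum of
 * the per-CPU sizes.
 */
static unsigned long __maybe_unused
rb_sketch_total_size(struct ring_buffer *buffer)
{
	unsigned long bytes = 0;
	int cpu;

	for_each_online_cpu(cpu)
		bytes += ring_buffer_size(buffer, cpu);

	return bytes;
}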
4348
4349static void
4350rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
4351{
4352	rb_head_page_deactivate(cpu_buffer);
4353
4354	cpu_buffer->head_page
4355		= list_entry(cpu_buffer->pages, struct buffer_page, list);
4356	local_set(&cpu_buffer->head_page->write, 0);
4357	local_set(&cpu_buffer->head_page->entries, 0);
4358	local_set(&cpu_buffer->head_page->page->commit, 0);
4359
4360	cpu_buffer->head_page->read = 0;
4361
4362	cpu_buffer->tail_page = cpu_buffer->head_page;
4363	cpu_buffer->commit_page = cpu_buffer->head_page;
4364
4365	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
4366	INIT_LIST_HEAD(&cpu_buffer->new_pages);
4367	local_set(&cpu_buffer->reader_page->write, 0);
4368	local_set(&cpu_buffer->reader_page->entries, 0);
4369	local_set(&cpu_buffer->reader_page->page->commit, 0);
4370	cpu_buffer->reader_page->read = 0;
4371
4372	local_set(&cpu_buffer->entries_bytes, 0);
4373	local_set(&cpu_buffer->overrun, 0);
4374	local_set(&cpu_buffer->commit_overrun, 0);
4375	local_set(&cpu_buffer->dropped_events, 0);
4376	local_set(&cpu_buffer->entries, 0);
4377	local_set(&cpu_buffer->committing, 0);
4378	local_set(&cpu_buffer->commits, 0);
4379	local_set(&cpu_buffer->pages_touched, 0);
4380	local_set(&cpu_buffer->pages_read, 0);
4381	cpu_buffer->last_pages_touch = 0;
4382	cpu_buffer->shortest_full = 0;
4383	cpu_buffer->read = 0;
4384	cpu_buffer->read_bytes = 0;
4385
4386	cpu_buffer->write_stamp = 0;
4387	cpu_buffer->read_stamp = 0;
4388
4389	cpu_buffer->lost_events = 0;
4390	cpu_buffer->last_overrun = 0;
4391
4392	rb_head_page_activate(cpu_buffer);
4393}
4394
4395/**
4396 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
4397 * @buffer: The ring buffer to reset a per cpu buffer of
4398 * @cpu: The CPU buffer to be reset
4399 */
4400void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
4401{
4402	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4403	unsigned long flags;
4404
4405	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4406		return;
4407
4408	atomic_inc(&buffer->resize_disabled);
4409	atomic_inc(&cpu_buffer->record_disabled);
4410
4411	/* Make sure all commits have finished */
4412	synchronize_rcu();
4413
4414	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4415
4416	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
4417		goto out;
4418
4419	arch_spin_lock(&cpu_buffer->lock);
4420
4421	rb_reset_cpu(cpu_buffer);
4422
4423	arch_spin_unlock(&cpu_buffer->lock);
4424
4425 out:
4426	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4427
4428	atomic_dec(&cpu_buffer->record_disabled);
4429	atomic_dec(&buffer->resize_disabled);
4430}
4431EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
4432
4433/**
4434 * ring_buffer_reset - reset a ring buffer
4435 * @buffer: The ring buffer to reset all cpu buffers
4436 */
4437void ring_buffer_reset(struct ring_buffer *buffer)
4438{
4439	int cpu;
4440
4441	for_each_buffer_cpu(buffer, cpu)
4442		ring_buffer_reset_cpu(buffer, cpu);
4443}
4444EXPORT_SYMBOL_GPL(ring_buffer_reset);
4445
4446/**
4447 * ring_buffer_empty - is the ring buffer empty?
4448 * @buffer: The ring buffer to test
4449 */
4450bool ring_buffer_empty(struct ring_buffer *buffer)
4451{
4452	struct ring_buffer_per_cpu *cpu_buffer;
4453	unsigned long flags;
4454	bool dolock;
4455	int cpu;
4456	int ret;
4457
4458	/* yes this is racy, but if you don't like the race, lock the buffer */
4459	for_each_buffer_cpu(buffer, cpu) {
4460		cpu_buffer = buffer->buffers[cpu];
4461		local_irq_save(flags);
4462		dolock = rb_reader_lock(cpu_buffer);
4463		ret = rb_per_cpu_empty(cpu_buffer);
4464		rb_reader_unlock(cpu_buffer, dolock);
4465		local_irq_restore(flags);
4466
4467		if (!ret)
4468			return false;
4469	}
4470
4471	return true;
4472}
4473EXPORT_SYMBOL_GPL(ring_buffer_empty);
4474
4475/**
4476 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
4477 * @buffer: The ring buffer
4478 * @cpu: The CPU buffer to test
4479 */
4480bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
4481{
4482	struct ring_buffer_per_cpu *cpu_buffer;
4483	unsigned long flags;
4484	bool dolock;
4485	int ret;
4486
4487	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4488		return true;
4489
4490	cpu_buffer = buffer->buffers[cpu];
4491	local_irq_save(flags);
4492	dolock = rb_reader_lock(cpu_buffer);
4493	ret = rb_per_cpu_empty(cpu_buffer);
4494	rb_reader_unlock(cpu_buffer, dolock);
4495	local_irq_restore(flags);
4496
4497	return ret;
4498}
4499EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
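
/*
 * A minimal sketch using the racy-but-safe emptiness check above:
 * sleep in short intervals until a CPU's buffer has something to
 * read. Real readers would typically use the ring buffer's wait/poll
 * support instead of polling like this.
 */
static void __maybe_unused
rb_sketch_wait_nonempty(struct ring_buffer *buffer, int cpu)
{
	while (ring_buffer_empty_cpu(buffer, cpu)) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ / 10);
	}
}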
4500
4501#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4502/**
4503 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
4504 * @buffer_a: One buffer to swap with
4505 * @buffer_b: The other buffer to swap with
 * @cpu: the CPU of the buffers to swap
4506 *
4507 * This function is useful for tracers that want to take a "snapshot"
4508 * of a CPU buffer and have another backup buffer lying around.
4509 * It is expected that the tracer handles the CPU buffer not being
4510 * used at the moment.
4511 */
4512int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
4513			 struct ring_buffer *buffer_b, int cpu)
4514{
4515	struct ring_buffer_per_cpu *cpu_buffer_a;
4516	struct ring_buffer_per_cpu *cpu_buffer_b;
4517	int ret = -EINVAL;
4518
4519	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
4520	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
4521		goto out;
4522
4523	cpu_buffer_a = buffer_a->buffers[cpu];
4524	cpu_buffer_b = buffer_b->buffers[cpu];
4525
4526	/* At least make sure the two buffers are somewhat the same */
4527	if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
4528		goto out;
4529
4530	ret = -EAGAIN;
4531
4532	if (atomic_read(&buffer_a->record_disabled))
4533		goto out;
4534
4535	if (atomic_read(&buffer_b->record_disabled))
4536		goto out;
4537
4538	if (atomic_read(&cpu_buffer_a->record_disabled))
4539		goto out;
4540
4541	if (atomic_read(&cpu_buffer_b->record_disabled))
4542		goto out;
4543
4544	/*
4545	 * We can't do a synchronize_rcu here because this
4546	 * function can be called in atomic context.
4547	 * Normally this will be called from the same CPU as cpu.
4548	 * If not it's up to the caller to protect this.
4549	 */
4550	atomic_inc(&cpu_buffer_a->record_disabled);
4551	atomic_inc(&cpu_buffer_b->record_disabled);
4552
4553	ret = -EBUSY;
4554	if (local_read(&cpu_buffer_a->committing))
4555		goto out_dec;
4556	if (local_read(&cpu_buffer_b->committing))
4557		goto out_dec;
4558
4559	buffer_a->buffers[cpu] = cpu_buffer_b;
4560	buffer_b->buffers[cpu] = cpu_buffer_a;
4561
4562	cpu_buffer_b->buffer = buffer_a;
4563	cpu_buffer_a->buffer = buffer_b;
4564
4565	ret = 0;
4566
4567out_dec:
4568	atomic_dec(&cpu_buffer_a->record_disabled);
4569	atomic_dec(&cpu_buffer_b->record_disabled);
4570out:
4571	return ret;
4572}
4573EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
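
/*
 * A minimal snapshot sketch along the lines described above: "live"
 * keeps recording while "snap", an idle buffer allocated elsewhere
 * with the same per-CPU size, receives the recorded pages for one
 * CPU. Both buffer pointers are assumptions of this sketch.
 */
static int __maybe_unused
rb_sketch_snapshot_cpu(struct ring_buffer *live, struct ring_buffer *snap,
		       int cpu)
{
	int ret;

	ret = ring_buffer_swap_cpu(live, snap, cpu);
	if (ret)	/* -EINVAL, -EAGAIN or -EBUSY */
		return ret;

	/* "snap" now holds what "live" had recorded for this CPU */
	return 0;
}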
4574#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
4575
4576/**
4577 * ring_buffer_alloc_read_page - allocate a page to read from buffer
4578 * @buffer: the buffer to allocate for.
4579 * @cpu: the cpu buffer to allocate.
4580 *
4581 * This function is used in conjunction with ring_buffer_read_page.
4582 * When reading a full page from the ring buffer, these functions
4583 * can be used to speed up the process. The calling function should
4584 * allocate a few pages first with this function. Then when it
4585 * needs to get pages from the ring buffer, it passes the result
4586 * of this function into ring_buffer_read_page, which will swap
4587 * the page that was allocated, with the read page of the buffer.
4588 *
4589 * Returns:
4590 *  The page allocated, or ERR_PTR
4591 */
4592void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
4593{
4594	struct ring_buffer_per_cpu *cpu_buffer;
4595	struct buffer_data_page *bpage = NULL;
4596	unsigned long flags;
4597	struct page *page;
4598
4599	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4600		return ERR_PTR(-ENODEV);
4601
4602	cpu_buffer = buffer->buffers[cpu];
4603	local_irq_save(flags);
4604	arch_spin_lock(&cpu_buffer->lock);
4605
4606	if (cpu_buffer->free_page) {
4607		bpage = cpu_buffer->free_page;
4608		cpu_buffer->free_page = NULL;
4609	}
4610
4611	arch_spin_unlock(&cpu_buffer->lock);
4612	local_irq_restore(flags);
4613
4614	if (bpage)
4615		goto out;
4616
4617	page = alloc_pages_node(cpu_to_node(cpu),
4618				GFP_KERNEL | __GFP_NORETRY, 0);
4619	if (!page)
4620		return ERR_PTR(-ENOMEM);
4621
4622	bpage = page_address(page);
4623
4624 out:
4625	rb_init_page(bpage);
4626
4627	return bpage;
4628}
4629EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
4630
4631/**
4632 * ring_buffer_free_read_page - free an allocated read page
4633 * @buffer: the buffer the page was allocated for
4634 * @cpu: the cpu buffer the page came from
4635 * @data: the page to free
4636 *
4637 * Free a page allocated from ring_buffer_alloc_read_page.
4638 */
4639void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data)
4640{
4641	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4642	struct buffer_data_page *bpage = data;
4643	struct page *page = virt_to_page(bpage);
4644	unsigned long flags;
4645
4646	/* If the page is still in use someplace else, we can't reuse it */
4647	if (page_ref_count(page) > 1)
4648		goto out;
4649
4650	local_irq_save(flags);
4651	arch_spin_lock(&cpu_buffer->lock);
4652
4653	if (!cpu_buffer->free_page) {
4654		cpu_buffer->free_page = bpage;
4655		bpage = NULL;
4656	}
4657
4658	arch_spin_unlock(&cpu_buffer->lock);
4659	local_irq_restore(flags);
4660
4661 out:
4662	free_page((unsigned long)bpage);
4663}
4664EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
4665
4666/**
4667 * ring_buffer_read_page - extract a page from the ring buffer
4668 * @buffer: buffer to extract from
4669 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
4670 * @len: amount to extract
4671 * @cpu: the cpu of the buffer to extract
4672 * @full: should the extraction only happen when the page is full.
4673 *
4674 * This function will pull out a page from the ring buffer and consume it.
4675 * @data_page must be the address of the variable that was returned
4676 * from ring_buffer_alloc_read_page. This is because the page might be used
4677 * to swap with a page in the ring buffer.
4678 *
4679 * for example:
4680 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
4681 *	if (IS_ERR(rpage))
4682 *		return PTR_ERR(rpage);
4683 *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
4684 *	if (ret >= 0)
4685 *		process_page(rpage, ret);
4686 *
4687 * When @full is set, the function will not succeed unless
4688 * the writer is off the reader page.
4689 *
4690 * Note: it is up to the calling functions to handle sleeps and wakeups.
4691 *  The ring buffer can be used anywhere in the kernel and can not
4692 *  blindly call wake_up. The layer that uses the ring buffer must be
4693 *  responsible for that.
4694 *
4695 * Returns:
4696 *  >=0 if data has been transferred, returns the offset of consumed data.
4697 *  <0 if no data has been transferred.
4698 */
4699int ring_buffer_read_page(struct ring_buffer *buffer,
4700			  void **data_page, size_t len, int cpu, int full)
4701{
4702	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4703	struct ring_buffer_event *event;
4704	struct buffer_data_page *bpage;
4705	struct buffer_page *reader;
4706	unsigned long missed_events;
4707	unsigned long flags;
4708	unsigned int commit;
4709	unsigned int read;
4710	u64 save_timestamp;
4711	int ret = -1;
4712
4713	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4714		goto out;
4715
4716	/*
4717	 * If len is not big enough to hold the page header, then
4718	 * we can not copy anything.
4719	 */
4720	if (len <= BUF_PAGE_HDR_SIZE)
4721		goto out;
4722
4723	len -= BUF_PAGE_HDR_SIZE;
4724
4725	if (!data_page)
4726		goto out;
4727
4728	bpage = *data_page;
4729	if (!bpage)
4730		goto out;
4731
4732	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4733
4734	reader = rb_get_reader_page(cpu_buffer);
4735	if (!reader)
4736		goto out_unlock;
4737
4738	event = rb_reader_event(cpu_buffer);
4739
4740	read = reader->read;
4741	commit = rb_page_commit(reader);
4742
4743	/* Check if any events were dropped */
4744	missed_events = cpu_buffer->lost_events;
4745
4746	/*
4747	 * If this page has been partially read or
4748	 * if len is not big enough to read the rest of the page or
4749	 * a writer is still on the page, then
4750	 * we must copy the data from the page to the buffer.
4751	 * Otherwise, we can simply swap the page with the one passed in.
4752	 */
4753	if (read || (len < (commit - read)) ||
4754	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
4755		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
4756		unsigned int rpos = read;
4757		unsigned int pos = 0;
4758		unsigned int size;
4759
4760		if (full)
4761			goto out_unlock;
4762
4763		if (len > (commit - read))
4764			len = (commit - read);
4765
4766		/* Always keep the time extend and data together */
4767		size = rb_event_ts_length(event);
4768
4769		if (len < size)
4770			goto out_unlock;
4771
4772		/* save the current timestamp, since the user will need it */
4773		save_timestamp = cpu_buffer->read_stamp;
4774
4775		/* Need to copy one event at a time */
4776		do {
4777			/* We need the size of one event, because
4778			 * rb_advance_reader only advances by one event,
4779			 * whereas rb_event_ts_length may include the size of
4780			 * one or two events.
4781			 * We have already ensured there's enough space if this
4782			 * is a time extend. */
4783			size = rb_event_length(event);
4784			memcpy(bpage->data + pos, rpage->data + rpos, size);
4785
4786			len -= size;
4787
4788			rb_advance_reader(cpu_buffer);
4789			rpos = reader->read;
4790			pos += size;
4791
4792			if (rpos >= commit)
4793				break;
4794
4795			event = rb_reader_event(cpu_buffer);
4796			/* Always keep the time extend and data together */
4797			size = rb_event_ts_length(event);
4798		} while (len >= size);
4799
4800		/* update bpage */
4801		local_set(&bpage->commit, pos);
4802		bpage->time_stamp = save_timestamp;
4803
4804		/* we copied everything to the beginning */
4805		read = 0;
4806	} else {
4807		/* update the entry counter */
4808		cpu_buffer->read += rb_page_entries(reader);
4809		cpu_buffer->read_bytes += BUF_PAGE_SIZE;
4810
4811		/* swap the pages */
4812		rb_init_page(bpage);
4813		bpage = reader->page;
4814		reader->page = *data_page;
4815		local_set(&reader->write, 0);
4816		local_set(&reader->entries, 0);
4817		reader->read = 0;
4818		*data_page = bpage;
4819
4820		/*
4821		 * Use the real_end for the data size.
4822		 * This gives us a chance to store the lost events
4823		 * on the page.
4824		 */
4825		if (reader->real_end)
4826			local_set(&bpage->commit, reader->real_end);
4827	}
4828	ret = read;
4829
4830	cpu_buffer->lost_events = 0;
4831
4832	commit = local_read(&bpage->commit);
4833	/*
4834	 * Set a flag in the commit field if we lost events
4835	 */
4836	if (missed_events) {
4837		/* If there is room at the end of the page to save the
4838		 * missed events, then record it there.
4839		 */
4840		if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
4841			memcpy(&bpage->data[commit], &missed_events,
4842			       sizeof(missed_events));
4843			local_add(RB_MISSED_STORED, &bpage->commit);
4844			commit += sizeof(missed_events);
4845		}
4846		local_add(RB_MISSED_EVENTS, &bpage->commit);
4847	}
4848
4849	/*
4850	 * This page may be off to user land. Zero it out here.
4851	 */
4852	if (commit < BUF_PAGE_SIZE)
4853		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
4854
4855 out_unlock:
4856	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4857
4858 out:
4859	return ret;
4860}
4861EXPORT_SYMBOL_GPL(ring_buffer_read_page);
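
/*
 * A minimal sketch of the alloc/read/free cycle described above,
 * including how the missed-events flags stored in the commit field
 * can be checked by a consumer. "buffer" and "cpu" are assumed valid;
 * a real user would hand the page to splice or copy it out rather
 * than just inspecting it.
 */
static void __maybe_unused
rb_sketch_read_full_page(struct ring_buffer *buffer, int cpu)
{
	struct buffer_data_page *bpage;
	unsigned long commit;
	void *page;
	int ret;

	page = ring_buffer_alloc_read_page(buffer, cpu);
	if (IS_ERR(page))
		return;

	ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0);
	if (ret >= 0) {
		bpage = page;
		commit = local_read(&bpage->commit);
		if (commit & RB_MISSED_EVENTS)
			pr_warn("events were lost%s\n",
				(commit & RB_MISSED_STORED) ?
				" (count stored after the data)" : "");
	}

	ring_buffer_free_read_page(buffer, cpu, page);
}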
4862
4863/*
4864 * We only allocate new buffers, never free them if the CPU goes down.
4865 * If we were to free the buffer, then the user would lose any trace that was in
4866 * the buffer.
4867 */
4868int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
4869{
4870	struct ring_buffer *buffer;
4871	long nr_pages_same;
4872	int cpu_i;
4873	unsigned long nr_pages;
4874
4875	buffer = container_of(node, struct ring_buffer, node);
4876	if (cpumask_test_cpu(cpu, buffer->cpumask))
4877		return 0;
4878
4879	nr_pages = 0;
4880	nr_pages_same = 1;
4881	/* check if all cpu sizes are same */
4882	for_each_buffer_cpu(buffer, cpu_i) {
4883		/* fill in the size from first enabled cpu */
4884		if (nr_pages == 0)
4885			nr_pages = buffer->buffers[cpu_i]->nr_pages;
4886		if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
4887			nr_pages_same = 0;
4888			break;
4889		}
4890	}
4891	/* allocate minimum pages, user can later expand it */
4892	if (!nr_pages_same)
4893		nr_pages = 2;
4894	buffer->buffers[cpu] =
4895		rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
4896	if (!buffer->buffers[cpu]) {
4897		WARN(1, "failed to allocate ring buffer on CPU %u\n",
4898		     cpu);
4899		return -ENOMEM;
4900	}
4901	smp_wmb();
4902	cpumask_set_cpu(cpu, buffer->cpumask);
4903	return 0;
4904}
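
/*
 * A sketch of how the callback above is typically wired up (the real
 * registration lives in the tracing init and buffer allocation paths,
 * not in this file), assuming the usual multi-instance hotplug
 * pattern:
 *
 *	cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE, "trace/RB:prepare",
 *				trace_rb_cpu_prepare, NULL);
 *	...
 *	cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
 *
 * The smp_wmb()/cpumask_set_cpu() pair at the end of the callback
 * publishes the new per-CPU buffer before the CPU is marked usable.
 */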
4905
4906#ifdef CONFIG_RING_BUFFER_STARTUP_TEST
4907/*
4908 * This is a basic integrity check of the ring buffer.
4909 * Late in the boot cycle this test will run when configured in.
4910 * It will kick off a thread per CPU that will go into a loop
4911 * writing to the per cpu ring buffer various sizes of data.
4912 * Some of the data will be large items, some small.
4913 *
4914 * Another thread is created that goes into a spin, sending out
4915 * IPIs to the other CPUs to also write into the ring buffer.
4916 * This is to test the nesting ability of the buffer.
4917 *
4918 * Basic stats are recorded and reported. If something in the
4919 * ring buffer should happen that's not expected, a big warning
4920 * is displayed and all ring buffers are disabled.
4921 */
4922static struct task_struct *rb_threads[NR_CPUS] __initdata;
4923
4924struct rb_test_data {
4925	struct ring_buffer	*buffer;
4926	unsigned long		events;
4927	unsigned long		bytes_written;
4928	unsigned long		bytes_alloc;
4929	unsigned long		bytes_dropped;
4930	unsigned long		events_nested;
4931	unsigned long		bytes_written_nested;
4932	unsigned long		bytes_alloc_nested;
4933	unsigned long		bytes_dropped_nested;
4934	int			min_size_nested;
4935	int			max_size_nested;
4936	int			max_size;
4937	int			min_size;
4938	int			cpu;
4939	int			cnt;
4940};
4941
4942static struct rb_test_data rb_data[NR_CPUS] __initdata;
4943
4944/* 1 meg per cpu */
4945#define RB_TEST_BUFFER_SIZE	1048576
4946
4947static char rb_string[] __initdata =
4948	"abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
4949	"?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
4950	"!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
4951
4952static bool rb_test_started __initdata;
4953
4954struct rb_item {
4955	int size;
4956	char str[];
4957};
4958
4959static __init int rb_write_something(struct rb_test_data *data, bool nested)
4960{
4961	struct ring_buffer_event *event;
4962	struct rb_item *item;
4963	bool started;
4964	int event_len;
4965	int size;
4966	int len;
4967	int cnt;
4968
4969	/* Have nested writes different than what is written */
4970	cnt = data->cnt + (nested ? 27 : 0);
4971
4972	/* Multiply cnt by ~e, to make some unique increment */
4973	size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
4974
4975	len = size + sizeof(struct rb_item);
4976
4977	started = rb_test_started;
4978	/* read rb_test_started before checking buffer enabled */
4979	smp_rmb();
4980
4981	event = ring_buffer_lock_reserve(data->buffer, len);
4982	if (!event) {
4983		/* Ignore dropped events before test starts. */
4984		if (started) {
4985			if (nested)
4986				data->bytes_dropped_nested += len;
4987			else
4988				data->bytes_dropped += len;
4989		}
4990		return len;
4991	}
4992
4993	event_len = ring_buffer_event_length(event);
4994
4995	if (RB_WARN_ON(data->buffer, event_len < len))
4996		goto out;
4997
4998	item = ring_buffer_event_data(event);
4999	item->size = size;
5000	memcpy(item->str, rb_string, size);
5001
5002	if (nested) {
5003		data->bytes_alloc_nested += event_len;
5004		data->bytes_written_nested += len;
5005		data->events_nested++;
5006		if (!data->min_size_nested || len < data->min_size_nested)
5007			data->min_size_nested = len;
5008		if (len > data->max_size_nested)
5009			data->max_size_nested = len;
5010	} else {
5011		data->bytes_alloc += event_len;
5012		data->bytes_written += len;
5013		data->events++;
5014		if (!data->min_size || len < data->min_size)
5015			data->min_size = len;
5016		if (len > data->max_size)
5017			data->max_size = len;
5018	}
5019
5020 out:
5021	ring_buffer_unlock_commit(data->buffer, event);
5022
5023	return 0;
5024}
5025
5026static __init int rb_test(void *arg)
5027{
5028	struct rb_test_data *data = arg;
5029
5030	while (!kthread_should_stop()) {
5031		rb_write_something(data, false);
5032		data->cnt++;
5033
5034		set_current_state(TASK_INTERRUPTIBLE);
5035		/* Now sleep between a min of 100-300us and a max of 1ms */
5036		usleep_range(((data->cnt % 3) + 1) * 100, 1000);
5037	}
5038
5039	return 0;
5040}
5041
5042static __init void rb_ipi(void *ignore)
5043{
5044	struct rb_test_data *data;
5045	int cpu = smp_processor_id();
5046
5047	data = &rb_data[cpu];
5048	rb_write_something(data, true);
5049}
5050
5051static __init int rb_hammer_test(void *arg)
5052{
5053	while (!kthread_should_stop()) {
5054
5055		/* Send an IPI to all cpus to write data! */
5056		smp_call_function(rb_ipi, NULL, 1);
5057		/* No sleep, but for non preempt, let others run */
5058		schedule();
5059	}
5060
5061	return 0;
5062}
5063
5064static __init int test_ringbuffer(void)
5065{
5066	struct task_struct *rb_hammer;
5067	struct ring_buffer *buffer;
5068	int cpu;
5069	int ret = 0;
5070
5071	pr_info("Running ring buffer tests...\n");
5072
5073	buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
5074	if (WARN_ON(!buffer))
5075		return 0;
5076
5077	/* Disable buffer so that threads can't write to it yet */
5078	ring_buffer_record_off(buffer);
5079
5080	for_each_online_cpu(cpu) {
5081		rb_data[cpu].buffer = buffer;
5082		rb_data[cpu].cpu = cpu;
5083		rb_data[cpu].cnt = cpu;
5084		rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
5085						 "rbtester/%d", cpu);
5086		if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
5087			pr_cont("FAILED\n");
5088			ret = PTR_ERR(rb_threads[cpu]);
5089			goto out_free;
5090		}
5091
5092		kthread_bind(rb_threads[cpu], cpu);
5093 		wake_up_process(rb_threads[cpu]);
5094	}
5095
5096	/* Now create the rb hammer! */
5097	rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
5098	if (WARN_ON(IS_ERR(rb_hammer))) {
5099		pr_cont("FAILED\n");
5100		ret = PTR_ERR(rb_hammer);
5101		goto out_free;
5102	}
5103
5104	ring_buffer_record_on(buffer);
5105	/*
5106	 * Show buffer is enabled before setting rb_test_started.
5107	 * Yes there's a small race window where events could be
5108 * dropped and the thread won't catch it. But when a ring
5109	 * buffer gets enabled, there will always be some kind of
5110	 * delay before other CPUs see it. Thus, we don't care about
5111	 * those dropped events. We care about events dropped after
5112	 * the threads see that the buffer is active.
5113	 */
5114	smp_wmb();
5115	rb_test_started = true;
5116
5117	set_current_state(TASK_INTERRUPTIBLE);
5118	/* Just run for 10 seconds */
5119	schedule_timeout(10 * HZ);
5120
5121	kthread_stop(rb_hammer);
5122
5123 out_free:
5124	for_each_online_cpu(cpu) {
5125		if (!rb_threads[cpu])
5126			break;
5127		kthread_stop(rb_threads[cpu]);
5128	}
5129	if (ret) {
5130		ring_buffer_free(buffer);
5131		return ret;
5132	}
5133
5134	/* Report! */
5135	pr_info("finished\n");
5136	for_each_online_cpu(cpu) {
5137		struct ring_buffer_event *event;
5138		struct rb_test_data *data = &rb_data[cpu];
5139		struct rb_item *item;
5140		unsigned long total_events;
5141		unsigned long total_dropped;
5142		unsigned long total_written;
5143		unsigned long total_alloc;
5144		unsigned long total_read = 0;
5145		unsigned long total_size = 0;
5146		unsigned long total_len = 0;
5147		unsigned long total_lost = 0;
5148		unsigned long lost;
5149		int big_event_size;
5150		int small_event_size;
5151
5152		ret = -1;
5153
5154		total_events = data->events + data->events_nested;
5155		total_written = data->bytes_written + data->bytes_written_nested;
5156		total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
5157		total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
5158
5159		big_event_size = data->max_size + data->max_size_nested;
5160		small_event_size = data->min_size + data->min_size_nested;
5161
5162		pr_info("CPU %d:\n", cpu);
5163		pr_info("              events:    %ld\n", total_events);
5164		pr_info("       dropped bytes:    %ld\n", total_dropped);
5165		pr_info("       alloced bytes:    %ld\n", total_alloc);
5166		pr_info("       written bytes:    %ld\n", total_written);
5167		pr_info("       biggest event:    %d\n", big_event_size);
5168		pr_info("      smallest event:    %d\n", small_event_size);
5169
5170		if (RB_WARN_ON(buffer, total_dropped))
5171			break;
5172
5173		ret = 0;
5174
5175		while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
5176			total_lost += lost;
5177			item = ring_buffer_event_data(event);
5178			total_len += ring_buffer_event_length(event);
5179			total_size += item->size + sizeof(struct rb_item);
5180			if (memcmp(&item->str[0], rb_string, item->size) != 0) {
5181				pr_info("FAILED!\n");
5182				pr_info("buffer had: %.*s\n", item->size, item->str);
5183				pr_info("expected:   %.*s\n", item->size, rb_string);
5184				RB_WARN_ON(buffer, 1);
5185				ret = -1;
5186				break;
5187			}
5188			total_read++;
5189		}
5190		if (ret)
5191			break;
5192
5193		ret = -1;
5194
5195		pr_info("         read events:   %ld\n", total_read);
5196		pr_info("         lost events:   %ld\n", total_lost);
5197		pr_info("        total events:   %ld\n", total_lost + total_read);
5198		pr_info("  recorded len bytes:   %ld\n", total_len);
5199		pr_info(" recorded size bytes:   %ld\n", total_size);
5200		if (total_lost)
5201			pr_info(" With dropped events, record len and size may not match\n"
5202				" alloced and written from above\n");
5203		if (!total_lost) {
5204			if (RB_WARN_ON(buffer, total_len != total_alloc ||
5205				       total_size != total_written))
5206				break;
5207		}
5208		if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
5209			break;
5210
5211		ret = 0;
5212	}
5213	if (!ret)
5214		pr_info("Ring buffer PASSED!\n");
5215
5216	ring_buffer_free(buffer);
5217	return 0;
5218}
5219
5220late_initcall(test_ringbuffer);
5221#endif /* CONFIG_RING_BUFFER_STARTUP_TEST */
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Generic ring buffer
   4 *
   5 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
   6 */
   7#include <linux/trace_recursion.h>
   8#include <linux/trace_events.h>
   9#include <linux/ring_buffer.h>
  10#include <linux/trace_clock.h>
  11#include <linux/sched/clock.h>
  12#include <linux/trace_seq.h>
  13#include <linux/spinlock.h>
  14#include <linux/irq_work.h>
  15#include <linux/security.h>
  16#include <linux/uaccess.h>
  17#include <linux/hardirq.h>
  18#include <linux/kthread.h>	/* for self test */
  19#include <linux/module.h>
  20#include <linux/percpu.h>
  21#include <linux/mutex.h>
  22#include <linux/delay.h>
  23#include <linux/slab.h>
  24#include <linux/init.h>
  25#include <linux/hash.h>
  26#include <linux/list.h>
  27#include <linux/cpu.h>
  28#include <linux/oom.h>
  29
  30#include <asm/local.h>
  31
  32static void update_pages_handler(struct work_struct *work);
  33
  34/*
  35 * The ring buffer header is special. We must manually up keep it.
  36 */
  37int ring_buffer_print_entry_header(struct trace_seq *s)
  38{
  39	trace_seq_puts(s, "# compressed entry header\n");
  40	trace_seq_puts(s, "\ttype_len    :    5 bits\n");
  41	trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
  42	trace_seq_puts(s, "\tarray       :   32 bits\n");
  43	trace_seq_putc(s, '\n');
  44	trace_seq_printf(s, "\tpadding     : type == %d\n",
  45			 RINGBUF_TYPE_PADDING);
  46	trace_seq_printf(s, "\ttime_extend : type == %d\n",
  47			 RINGBUF_TYPE_TIME_EXTEND);
  48	trace_seq_printf(s, "\ttime_stamp : type == %d\n",
  49			 RINGBUF_TYPE_TIME_STAMP);
  50	trace_seq_printf(s, "\tdata max type_len  == %d\n",
  51			 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
  52
  53	return !trace_seq_has_overflowed(s);
  54}
  55
  56/*
  57 * The ring buffer is made up of a list of pages. A separate list of pages is
  58 * allocated for each CPU. A writer may only write to a buffer that is
  59 * associated with the CPU it is currently executing on.  A reader may read
  60 * from any per cpu buffer.
  61 *
  62 * The reader is special. For each per cpu buffer, the reader has its own
  63 * reader page. When a reader has read the entire reader page, this reader
  64 * page is swapped with another page in the ring buffer.
  65 *
  66 * Now, as long as the writer is off the reader page, the reader can do what
  67 * ever it wants with that page. The writer will never write to that page
  68 * again (as long as it is out of the ring buffer).
  69 *
  70 * Here's some silly ASCII art.
  71 *
  72 *   +------+
  73 *   |reader|          RING BUFFER
  74 *   |page  |
  75 *   +------+        +---+   +---+   +---+
  76 *                   |   |-->|   |-->|   |
  77 *                   +---+   +---+   +---+
  78 *                     ^               |
  79 *                     |               |
  80 *                     +---------------+
  81 *
  82 *
  83 *   +------+
  84 *   |reader|          RING BUFFER
  85 *   |page  |------------------v
  86 *   +------+        +---+   +---+   +---+
  87 *                   |   |-->|   |-->|   |
  88 *                   +---+   +---+   +---+
  89 *                     ^               |
  90 *                     |               |
  91 *                     +---------------+
  92 *
  93 *
  94 *   +------+
  95 *   |reader|          RING BUFFER
  96 *   |page  |------------------v
  97 *   +------+        +---+   +---+   +---+
  98 *      ^            |   |-->|   |-->|   |
  99 *      |            +---+   +---+   +---+
 100 *      |                              |
 101 *      |                              |
 102 *      +------------------------------+
 103 *
 104 *
 105 *   +------+
 106 *   |buffer|          RING BUFFER
 107 *   |page  |------------------v
 108 *   +------+        +---+   +---+   +---+
 109 *      ^            |   |   |   |-->|   |
 110 *      |   New      +---+   +---+   +---+
 111 *      |  Reader------^               |
 112 *      |   page                       |
 113 *      +------------------------------+
 114 *
 115 *
 116 * After we make this swap, the reader can hand this page off to the splice
 117 * code and be done with it. It can even allocate a new page if it needs to
 118 * and swap that into the ring buffer.
 119 *
 120 * We will be using cmpxchg soon to make all this lockless.
 121 *
 122 */
 123
 124/* Used for individual buffers (after the counter) */
 125#define RB_BUFFER_OFF		(1 << 20)
 126
 127#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
 128
 129#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
 130#define RB_ALIGNMENT		4U
 131#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 132#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */
 133
 134#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
 135# define RB_FORCE_8BYTE_ALIGNMENT	0
 136# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
 137#else
 138# define RB_FORCE_8BYTE_ALIGNMENT	1
 139# define RB_ARCH_ALIGNMENT		8U
 140#endif
 141
 142#define RB_ALIGN_DATA		__aligned(RB_ARCH_ALIGNMENT)
 143
 144/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
 145#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
 146
 147enum {
 148	RB_LEN_TIME_EXTEND = 8,
 149	RB_LEN_TIME_STAMP =  8,
 150};
 151
 152#define skip_time_extend(event) \
 153	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
 154
 155#define extended_time(event) \
 156	(event->type_len >= RINGBUF_TYPE_TIME_EXTEND)
 157
 158static inline int rb_null_event(struct ring_buffer_event *event)
 159{
 160	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
 161}
 162
 163static void rb_event_set_padding(struct ring_buffer_event *event)
 164{
 165	/* padding has a NULL time_delta */
 166	event->type_len = RINGBUF_TYPE_PADDING;
 167	event->time_delta = 0;
 168}
 169
 170static unsigned
 171rb_event_data_length(struct ring_buffer_event *event)
 172{
 173	unsigned length;
 174
 175	if (event->type_len)
 176		length = event->type_len * RB_ALIGNMENT;
 177	else
 178		length = event->array[0];
 179	return length + RB_EVNT_HDR_SIZE;
 180}
 181
 182/*
 183 * Return the length of the given event. Will return
 184 * the length of the time extend if the event is a
 185 * time extend.
 186 */
 187static inline unsigned
 188rb_event_length(struct ring_buffer_event *event)
 189{
 190	switch (event->type_len) {
 191	case RINGBUF_TYPE_PADDING:
 192		if (rb_null_event(event))
 193			/* undefined */
 194			return -1;
 195		return  event->array[0] + RB_EVNT_HDR_SIZE;
 196
 197	case RINGBUF_TYPE_TIME_EXTEND:
 198		return RB_LEN_TIME_EXTEND;
 199
 200	case RINGBUF_TYPE_TIME_STAMP:
 201		return RB_LEN_TIME_STAMP;
 202
 203	case RINGBUF_TYPE_DATA:
 204		return rb_event_data_length(event);
 205	default:
 206		WARN_ON_ONCE(1);
 207	}
 208	/* not hit */
 209	return 0;
 210}
 211
 212/*
 213 * Return total length of time extend and data,
 214 *   or just the event length for all other events.
 215 */
 216static inline unsigned
 217rb_event_ts_length(struct ring_buffer_event *event)
 218{
 219	unsigned len = 0;
 220
 221	if (extended_time(event)) {
 222		/* time extends include the data event after it */
 223		len = RB_LEN_TIME_EXTEND;
 224		event = skip_time_extend(event);
 225	}
 226	return len + rb_event_length(event);
 227}
 228
 229/**
 230 * ring_buffer_event_length - return the length of the event
 231 * @event: the event to get the length of
 232 *
 233 * Returns the size of the data load of a data event.
 234 * If the event is something other than a data event, it
 235 * returns the size of the event itself. The exception is
 236 * a TIME EXTEND, for which it still returns the size of the
 237 * data load of the data event that follows it.
 238 */
 239unsigned ring_buffer_event_length(struct ring_buffer_event *event)
 240{
 241	unsigned length;
 242
 243	if (extended_time(event))
 244		event = skip_time_extend(event);
 245
 246	length = rb_event_length(event);
 247	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 248		return length;
 249	length -= RB_EVNT_HDR_SIZE;
 250	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
 251		length -= sizeof(event->array[0]);
 252	return length;
 253}
 254EXPORT_SYMBOL_GPL(ring_buffer_event_length);
 255
 256/* inline for ring buffer fast paths */
 257static __always_inline void *
 258rb_event_data(struct ring_buffer_event *event)
 259{
 260	if (extended_time(event))
 261		event = skip_time_extend(event);
 262	WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
 263	/* If length is in len field, then array[0] has the data */
 264	if (event->type_len)
 265		return (void *)&event->array[0];
 266	/* Otherwise length is in array[0] and array[1] has the data */
 267	return (void *)&event->array[1];
 268}
 269
 270/**
 271 * ring_buffer_event_data - return the data of the event
 272 * @event: the event to get the data from
 273 */
 274void *ring_buffer_event_data(struct ring_buffer_event *event)
 275{
 276	return rb_event_data(event);
 277}
 278EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 279
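/*
 * Illustrative sketch: together, the two accessors above let a caller
 * handle an event's payload without knowing the header layout. Assumes
 * @event came from a consuming read such as ring_buffer_consume().
 *
 *	unsigned int len = ring_buffer_event_length(event);
 *	void *data = ring_buffer_event_data(event);
 *
 *	print_hex_dump_bytes("evt: ", DUMP_PREFIX_OFFSET, data, len);
 */
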
 280#define for_each_buffer_cpu(buffer, cpu)		\
 281	for_each_cpu(cpu, buffer->cpumask)
 282
 283#define for_each_online_buffer_cpu(buffer, cpu)		\
 284	for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
 285
 286#define TS_SHIFT	27
 287#define TS_MASK		((1ULL << TS_SHIFT) - 1)
 288#define TS_DELTA_TEST	(~TS_MASK)
 289
 290static u64 rb_event_time_stamp(struct ring_buffer_event *event)
 291{
 292	u64 ts;
 293
 294	ts = event->array[0];
 295	ts <<= TS_SHIFT;
 296	ts += event->time_delta;
 297
 298	return ts;
 299}
 300
 301/* Flag when events were overwritten */
 302#define RB_MISSED_EVENTS	(1 << 31)
 303/* Missed count stored at end */
 304#define RB_MISSED_STORED	(1 << 30)
 305
 306struct buffer_data_page {
 307	u64		 time_stamp;	/* page time stamp */
 308	local_t		 commit;	/* write committed index */
 309	unsigned char	 data[] RB_ALIGN_DATA;	/* data of buffer page */
 310};
 311
 312/*
 313 * Note, the buffer_page list must be first. The buffer pages
 314 * are allocated in cache lines, which means that each buffer
 315 * page will be at the beginning of a cache line, and thus
 316 * the least significant bits will be zero. We use this to
 317 * add flags in the list struct pointers, to make the ring buffer
 318 * lockless.
 319 */
 320struct buffer_page {
 321	struct list_head list;		/* list of buffer pages */
 322	local_t		 write;		/* index for next write */
 323	unsigned	 read;		/* index for next read */
 324	local_t		 entries;	/* entries on this page */
 325	unsigned long	 real_end;	/* real end of data */
 326	struct buffer_data_page *page;	/* Actual data page */
 327};
 328
 329/*
 330 * The buffer page counters, write and entries, must be reset
 331 * atomically when crossing page boundaries. To synchronize this
 332 * update, two counters are inserted into the number. One is
 333 * the actual counter for the write position or count on the page.
 334 *
 335 * The other is a counter of updaters. Before an update happens
 336 * the update partition of the counter is incremented. This will
 337 * allow the updater to update the counter atomically.
 338 *
 339 * The counter is 20 bits, and the state data is 12.
 340 */
 341#define RB_WRITE_MASK		0xfffff
 342#define RB_WRITE_INTCNT		(1 << 20)
 343
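/*
 * For example, a raw ->write value of 0x00300123 decodes as:
 *
 *	index on page: 0x00300123 & RB_WRITE_MASK = 0x123
 *	updater count: 0x00300123 >> 20           = 3
 *
 * An updater bumps the count part with
 * local_add_return(RB_WRITE_INTCNT, &bpage->write) so that interrupted
 * updates can be detected (see rb_tail_page_update() below).
 */
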
 344static void rb_init_page(struct buffer_data_page *bpage)
 345{
 346	local_set(&bpage->commit, 0);
 347}
 348
 349/*
 350 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 351 * this issue out.
 352 */
 353static void free_buffer_page(struct buffer_page *bpage)
 354{
 355	free_page((unsigned long)bpage->page);
 356	kfree(bpage);
 357}
 358
 359/*
 360 * We need to fit the time_stamp delta into 27 bits.
 361 */
 362static inline int test_time_stamp(u64 delta)
 363{
 364	if (delta & TS_DELTA_TEST)
 365		return 1;
 366	return 0;
 367}
 368
 369#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
 370
 371/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
 372#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
 373
 374int ring_buffer_print_page_header(struct trace_seq *s)
 375{
 376	struct buffer_data_page field;
 377
 378	trace_seq_printf(s, "\tfield: u64 timestamp;\t"
 379			 "offset:0;\tsize:%u;\tsigned:%u;\n",
 380			 (unsigned int)sizeof(field.time_stamp),
 381			 (unsigned int)is_signed_type(u64));
 382
 383	trace_seq_printf(s, "\tfield: local_t commit;\t"
 384			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
 385			 (unsigned int)offsetof(typeof(field), commit),
 386			 (unsigned int)sizeof(field.commit),
 387			 (unsigned int)is_signed_type(long));
 388
 389	trace_seq_printf(s, "\tfield: int overwrite;\t"
 390			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
 391			 (unsigned int)offsetof(typeof(field), commit),
 392			 1,
 393			 (unsigned int)is_signed_type(long));
 394
 395	trace_seq_printf(s, "\tfield: char data;\t"
 396			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
 397			 (unsigned int)offsetof(typeof(field), data),
 398			 (unsigned int)BUF_PAGE_SIZE,
 399			 (unsigned int)is_signed_type(char));
 400
 401	return !trace_seq_has_overflowed(s);
 402}
 403
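/*
 * On a typical x86-64 build (4K pages, so BUF_PAGE_SIZE == 4080, and a
 * signed char), the header printed above comes out roughly as:
 *
 *	field: u64 timestamp;	offset:0;	size:8;	signed:0;
 *	field: local_t commit;	offset:8;	size:8;	signed:1;
 *	field: int overwrite;	offset:8;	size:1;	signed:1;
 *	field: char data;	offset:16;	size:4080;	signed:1;
 */
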
 404struct rb_irq_work {
 405	struct irq_work			work;
 406	wait_queue_head_t		waiters;
 407	wait_queue_head_t		full_waiters;
 408	bool				waiters_pending;
 409	bool				full_waiters_pending;
 410	bool				wakeup_full;
 411};
 412
 413/*
 414 * Structure to hold event state and handle nested events.
 415 */
 416struct rb_event_info {
 417	u64			ts;
 418	u64			delta;
 419	u64			before;
 420	u64			after;
 421	unsigned long		length;
 422	struct buffer_page	*tail_page;
 423	int			add_timestamp;
 424};
 425
 426/*
 427 * Used for the add_timestamp
 428 *  NONE
 429 *  EXTEND - wants a time extend
 430 *  ABSOLUTE - the buffer requests all events to have absolute time stamps
 431 *  FORCE - force a full time stamp.
 432 */
 433enum {
 434	RB_ADD_STAMP_NONE		= 0,
 435	RB_ADD_STAMP_EXTEND		= BIT(1),
 436	RB_ADD_STAMP_ABSOLUTE		= BIT(2),
 437	RB_ADD_STAMP_FORCE		= BIT(3)
 438};
 439/*
 440 * Used for which event context the event is in.
 441 *  TRANSITION = 0
 442 *  NMI     = 1
 443 *  IRQ     = 2
 444 *  SOFTIRQ = 3
 445 *  NORMAL  = 4
 446 *
 447 * See trace_recursive_lock() comment below for more details.
 448 */
 449enum {
 450	RB_CTX_TRANSITION,
 451	RB_CTX_NMI,
 452	RB_CTX_IRQ,
 453	RB_CTX_SOFTIRQ,
 454	RB_CTX_NORMAL,
 455	RB_CTX_MAX
 456};
 457
 458#if BITS_PER_LONG == 32
 459#define RB_TIME_32
 460#endif
 461
 462/* To test on 64 bit machines */
 463//#define RB_TIME_32
 464
 465#ifdef RB_TIME_32
 466
 467struct rb_time_struct {
 468	local_t		cnt;
 469	local_t		top;
 470	local_t		bottom;
 471};
 472#else
 473#include <asm/local64.h>
 474struct rb_time_struct {
 475	local64_t	time;
 476};
 477#endif
 478typedef struct rb_time_struct rb_time_t;
 479
 480#define MAX_NEST	5
 481
 482/*
 483 * head_page == tail_page && head == tail then buffer is empty.
 484 */
 485struct ring_buffer_per_cpu {
 486	int				cpu;
 487	atomic_t			record_disabled;
 488	atomic_t			resize_disabled;
 489	struct trace_buffer	*buffer;
 490	raw_spinlock_t			reader_lock;	/* serialize readers */
 491	arch_spinlock_t			lock;
 492	struct lock_class_key		lock_key;
 493	struct buffer_data_page		*free_page;
 494	unsigned long			nr_pages;
 495	unsigned int			current_context;
 496	struct list_head		*pages;
 497	struct buffer_page		*head_page;	/* read from head */
 498	struct buffer_page		*tail_page;	/* write to tail */
 499	struct buffer_page		*commit_page;	/* committed pages */
 500	struct buffer_page		*reader_page;
 501	unsigned long			lost_events;
 502	unsigned long			last_overrun;
 503	unsigned long			nest;
 504	local_t				entries_bytes;
 505	local_t				entries;
 506	local_t				overrun;
 507	local_t				commit_overrun;
 508	local_t				dropped_events;
 509	local_t				committing;
 510	local_t				commits;
 511	local_t				pages_touched;
 512	local_t				pages_read;
 513	long				last_pages_touch;
 514	size_t				shortest_full;
 515	unsigned long			read;
 516	unsigned long			read_bytes;
 517	rb_time_t			write_stamp;
 518	rb_time_t			before_stamp;
 519	u64				event_stamp[MAX_NEST];
 520	u64				read_stamp;
 521	/* ring buffer pages to update, > 0 to add, < 0 to remove */
 522	long				nr_pages_to_update;
 523	struct list_head		new_pages; /* new pages to add */
 524	struct work_struct		update_pages_work;
 525	struct completion		update_done;
 526
 527	struct rb_irq_work		irq_work;
 528};
 529
 530struct trace_buffer {
 531	unsigned			flags;
 532	int				cpus;
 533	atomic_t			record_disabled;
 534	cpumask_var_t			cpumask;
 535
 536	struct lock_class_key		*reader_lock_key;
 537
 538	struct mutex			mutex;
 539
 540	struct ring_buffer_per_cpu	**buffers;
 541
 542	struct hlist_node		node;
 543	u64				(*clock)(void);
 544
 545	struct rb_irq_work		irq_work;
 546	bool				time_stamp_abs;
 547};
 548
 549struct ring_buffer_iter {
 550	struct ring_buffer_per_cpu	*cpu_buffer;
 551	unsigned long			head;
 552	unsigned long			next_event;
 553	struct buffer_page		*head_page;
 554	struct buffer_page		*cache_reader_page;
 555	unsigned long			cache_read;
 556	u64				read_stamp;
 557	u64				page_stamp;
 558	struct ring_buffer_event	*event;
 559	int				missed_events;
 560};
 561
 562#ifdef RB_TIME_32
 563
 564/*
 565 * On 32 bit machines, local64_t is very expensive. As the ring
 566 * buffer doesn't need all the features of a true 64 bit atomic,
 567 * on 32 bit, it uses these functions (64 still uses local64_t).
 568 *
 569 * For the ring buffer, the 64 bit time operations required are
 570 * the following:
 571 *
 572 *  - Only 59 bits are needed (60 are used to make it even).
 573 *  - A read may fail if it interrupted a modification of the time stamp.
 574 *      It will succeed if it did not interrupt another write, even if
 575 *      the read itself is interrupted by a write.
 576 *      It returns whether or not it was successful.
 577 *
 578 *  - Writes always succeed and will overwrite other writes and writes
 579 *      that were done by events interrupting the current write.
 580 *
 581 *  - A write followed by a read of the same time stamp will always succeed,
 582 *      but may not contain the same value.
 583 *
 584 *  - A cmpxchg will fail if it interrupted another write or cmpxchg.
 585 *      Other than that, it acts like a normal cmpxchg.
 586 *
 587 * The 60 bit time stamp is broken up by 30 bits in a top and bottom half
 588 *  (bottom being the least significant 30 bits of the 60 bit time stamp).
 589 *
 590 * The two most significant bits of each half hold a 2-bit counter (0-3).
 591 * Each update will increment this counter by one.
 592 * When reading the top and bottom, if the two counter bits match then the
 593 *  top and bottom together make a valid 60 bit number.
 594 */
 595#define RB_TIME_SHIFT	30
 596#define RB_TIME_VAL_MASK ((1 << RB_TIME_SHIFT) - 1)
 597
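/*
 * Worked example of the split: storing ts = 0x123456789abc while the
 * update counter is at 2 gives
 *
 *	top    = (ts >> 30) & RB_TIME_VAL_MASK = 0x48d1
 *	bottom =  ts        & RB_TIME_VAL_MASK = 0x16789abc
 *
 * so t->top holds 0x800048d1 and t->bottom holds 0x96789abc (the
 * counter lives in bits 31-30 of each half). A reader that sees the
 * two counters match reassembles
 *
 *	(0x48d1 << 30) | 0x16789abc == 0x123456789abc
 */
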
 598static inline int rb_time_cnt(unsigned long val)
 599{
 600	return (val >> RB_TIME_SHIFT) & 3;
 601}
 602
 603static inline u64 rb_time_val(unsigned long top, unsigned long bottom)
 604{
 605	u64 val;
 606
 607	val = top & RB_TIME_VAL_MASK;
 608	val <<= RB_TIME_SHIFT;
 609	val |= bottom & RB_TIME_VAL_MASK;
 610
 611	return val;
 612}
 613
 614static inline bool __rb_time_read(rb_time_t *t, u64 *ret, unsigned long *cnt)
 615{
 616	unsigned long top, bottom;
 617	unsigned long c;
 618
 619	/*
 620	 * If the read is interrupted by a write, then the cnt will
 621	 * be different. Loop until both top and bottom have been read
 622	 * without interruption.
 623	 */
 624	do {
 625		c = local_read(&t->cnt);
 626		top = local_read(&t->top);
 627		bottom = local_read(&t->bottom);
 628	} while (c != local_read(&t->cnt));
 629
 630	*cnt = rb_time_cnt(top);
 631
 632	/* If top and bottom counts don't match, this interrupted a write */
 633	if (*cnt != rb_time_cnt(bottom))
 634		return false;
 635
 636	*ret = rb_time_val(top, bottom);
 637	return true;
 638}
 639
 640static bool rb_time_read(rb_time_t *t, u64 *ret)
 641{
 642	unsigned long cnt;
 643
 644	return __rb_time_read(t, ret, &cnt);
 645}
 646
 647static inline unsigned long rb_time_val_cnt(unsigned long val, unsigned long cnt)
 648{
 649	return (val & RB_TIME_VAL_MASK) | ((cnt & 3) << RB_TIME_SHIFT);
 650}
 651
 652static inline void rb_time_split(u64 val, unsigned long *top, unsigned long *bottom)
 653{
 654	*top = (unsigned long)((val >> RB_TIME_SHIFT) & RB_TIME_VAL_MASK);
 655	*bottom = (unsigned long)(val & RB_TIME_VAL_MASK);
 656}
 657
 658static inline void rb_time_val_set(local_t *t, unsigned long val, unsigned long cnt)
 659{
 660	val = rb_time_val_cnt(val, cnt);
 661	local_set(t, val);
 662}
 663
 664static void rb_time_set(rb_time_t *t, u64 val)
 665{
 666	unsigned long cnt, top, bottom;
 667
 668	rb_time_split(val, &top, &bottom);
 669
 670	/* Writes always succeed with a valid number even if it gets interrupted. */
 671	do {
 672		cnt = local_inc_return(&t->cnt);
 673		rb_time_val_set(&t->top, top, cnt);
 674		rb_time_val_set(&t->bottom, bottom, cnt);
 675	} while (cnt != local_read(&t->cnt));
 676}
 677
 678static inline bool
 679rb_time_read_cmpxchg(local_t *l, unsigned long expect, unsigned long set)
 680{
 681	unsigned long ret;
 682
 683	ret = local_cmpxchg(l, expect, set);
 684	return ret == expect;
 685}
 686
 687static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
 688{
 689	unsigned long cnt, top, bottom;
 690	unsigned long cnt2, top2, bottom2;
 691	u64 val;
 692
 693	/* The cmpxchg always fails if it interrupted an update */
 694	if (!__rb_time_read(t, &val, &cnt2))
 695		return false;
 696
 697	if (val != expect)
 698		return false;
 699
 700	cnt = local_read(&t->cnt);
 701	if ((cnt & 3) != cnt2)
 702		return false;
 703
 704	cnt2 = cnt + 1;
 705
 706	rb_time_split(val, &top, &bottom);
 707	top = rb_time_val_cnt(top, cnt);
 708	bottom = rb_time_val_cnt(bottom, cnt);
 709
 710	rb_time_split(set, &top2, &bottom2);
 711	top2 = rb_time_val_cnt(top2, cnt2);
 712	bottom2 = rb_time_val_cnt(bottom2, cnt2);
 713
 714	if (!rb_time_read_cmpxchg(&t->cnt, cnt, cnt2))
 715		return false;
 716	if (!rb_time_read_cmpxchg(&t->top, top, top2))
 717		return false;
 718	if (!rb_time_read_cmpxchg(&t->bottom, bottom, bottom2))
 719		return false;
 720	return true;
 721}
 722
 723#else /* 64 bits */
 724
 725/* local64_t always succeeds */
 726
 727static inline bool rb_time_read(rb_time_t *t, u64 *ret)
 728{
 729	*ret = local64_read(&t->time);
 730	return true;
 731}
 732static void rb_time_set(rb_time_t *t, u64 val)
 733{
 734	local64_set(&t->time, val);
 735}
 736
 737static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
 738{
 739	u64 val;
 740	val = local64_cmpxchg(&t->time, expect, set);
 741	return val == expect;
 742}
 743#endif
 744
 745/*
 746 * Enable this to make sure that the event passed to
 747 * ring_buffer_event_time_stamp() is not committed and also
 748 * is on the buffer that was passed in.
 749 */
 750//#define RB_VERIFY_EVENT
 751#ifdef RB_VERIFY_EVENT
 752static struct list_head *rb_list_head(struct list_head *list);
 753static void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
 754			 void *event)
 755{
 756	struct buffer_page *page = cpu_buffer->commit_page;
 757	struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page);
 758	struct list_head *next;
 759	long commit, write;
 760	unsigned long addr = (unsigned long)event;
 761	bool done = false;
 762	int stop = 0;
 763
 764	/* Make sure the event exists and is not committed yet */
 765	do {
 766		if (page == tail_page || WARN_ON_ONCE(stop++ > 100))
 767			done = true;
 768		commit = local_read(&page->page->commit);
 769		write = local_read(&page->write);
 770		if (addr >= (unsigned long)&page->page->data[commit] &&
 771		    addr < (unsigned long)&page->page->data[write])
 772			return;
 773
 774		next = rb_list_head(page->list.next);
 775		page = list_entry(next, struct buffer_page, list);
 776	} while (!done);
 777	WARN_ON_ONCE(1);
 778}
 779#else
 780static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
 781			 void *event)
 782{
 783}
 784#endif
 785
 786
 787static inline u64 rb_time_stamp(struct trace_buffer *buffer);
 788
 789/**
 790 * ring_buffer_event_time_stamp - return the event's current time stamp
 791 * @buffer: The buffer that the event is on
 792 * @event: the event to get the time stamp of
 793 *
 794 * Note, this must be called after @event is reserved, and before it is
 795 * committed to the ring buffer. And must be called from the same
 796 * context where the event was reserved (normal, softirq, irq, etc).
 797 *
 798 * Returns the time stamp associated with the current event.
 799 * If the event has an extended time stamp, then that is used as
 800 * the time stamp to return.
 801 * In the highly unlikely case that the event was nested more than
 802 * the max nesting, then the write_stamp of the buffer is returned;
 803 * otherwise the current time is returned. Really, neither of the
 804 * last two cases should ever happen.
 805 */
 806u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
 807				 struct ring_buffer_event *event)
 808{
 809	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()];
 810	unsigned int nest;
 811	u64 ts;
 812
 813	/* If the event includes an absolute time, then just use that */
 814	if (event->type_len == RINGBUF_TYPE_TIME_STAMP)
 815		return rb_event_time_stamp(event);
 816
 817	nest = local_read(&cpu_buffer->committing);
 818	verify_event(cpu_buffer, event);
 819	if (WARN_ON_ONCE(!nest))
 820		goto fail;
 821
 822	/* Read the current saved nesting level time stamp */
 823	if (likely(--nest < MAX_NEST))
 824		return cpu_buffer->event_stamp[nest];
 825
 826	/* Shouldn't happen, warn if it does */
 827	WARN_ONCE(1, "nest (%d) greater than max", nest);
 828
 829 fail:
 830	/* Can only fail on 32 bit */
 831	if (!rb_time_read(&cpu_buffer->write_stamp, &ts))
 832		/* Screw it, just read the current time */
 833		ts = rb_time_stamp(cpu_buffer->buffer);
 834
 835	return ts;
 836}
 837
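/*
 * Illustrative sketch of the reserve/read/commit ordering the comment
 * above requires. The reserve/commit API is declared in
 * <linux/ring_buffer.h> (the commit signature has varied by version),
 * and struct my_entry is a placeholder payload type:
 *
 *	struct ring_buffer_event *event;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(struct my_entry));
 *	if (event) {
 *		u64 ts = ring_buffer_event_time_stamp(buffer, event);
 *
 *		// @ts is only valid between reserve and commit, read
 *		// from the same context that reserved the event
 *		ring_buffer_unlock_commit(buffer, event);
 *	}
 */
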
 838/**
 839 * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
 840 * @buffer: The ring_buffer to get the number of pages from
 841 * @cpu: The cpu of the ring_buffer to get the number of pages from
 842 *
 843 * Returns the number of pages used by a per_cpu buffer of the ring buffer.
 844 */
 845size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
 846{
 847	return buffer->buffers[cpu]->nr_pages;
 848}
 849
 850/**
 851 * ring_buffer_nr_pages_dirty - get the number of used pages in the ring buffer
 852 * @buffer: The ring_buffer to get the number of pages from
 853 * @cpu: The cpu of the ring_buffer to get the number of pages from
 854 *
 855 * Returns the number of pages that have content in the ring buffer.
 856 */
 857size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
 858{
 859	size_t read;
 860	size_t cnt;
 861
 862	read = local_read(&buffer->buffers[cpu]->pages_read);
 863	cnt = local_read(&buffer->buffers[cpu]->pages_touched);
 864	/* The reader can read an empty page, but not more than that */
 865	if (cnt < read) {
 866		WARN_ON_ONCE(read > cnt + 1);
 867		return 0;
 868	}
 869
 870	return cnt - read;
 871}
 872
 873/*
 874 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 875 *
 876 * Runs from irq_work context and wakes up any task that is blocked on
 877 * the ring buffer waiters queue.
 878 */
 879static void rb_wake_up_waiters(struct irq_work *work)
 880{
 881	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
 882
 883	wake_up_all(&rbwork->waiters);
 884	if (rbwork->wakeup_full) {
 885		rbwork->wakeup_full = false;
 886		wake_up_all(&rbwork->full_waiters);
 887	}
 888}
 889
 890/**
 891 * ring_buffer_wait - wait for input to the ring buffer
 892 * @buffer: buffer to wait on
 893 * @cpu: the cpu buffer to wait on
 894 * @full: wait until this percentage of pages are full, if @cpu != RING_BUFFER_ALL_CPUS
 895 *
 896 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 897 * as data is added to any of the @buffer's cpu buffers. Otherwise
 898 * it will wait for data to be added to a specific cpu buffer.
 899 */
 900int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
 901{
 902	struct ring_buffer_per_cpu *cpu_buffer;
 903	DEFINE_WAIT(wait);
 904	struct rb_irq_work *work;
 905	int ret = 0;
 906
 907	/*
 908	 * Depending on what the caller is waiting for, either any
 909	 * data in any cpu buffer, or a specific buffer, put the
 910	 * caller on the appropriate wait queue.
 911	 */
 912	if (cpu == RING_BUFFER_ALL_CPUS) {
 913		work = &buffer->irq_work;
 914		/* Full only makes sense on per cpu reads */
 915		full = 0;
 916	} else {
 917		if (!cpumask_test_cpu(cpu, buffer->cpumask))
 918			return -ENODEV;
 919		cpu_buffer = buffer->buffers[cpu];
 920		work = &cpu_buffer->irq_work;
 921	}
 922
 923
 924	while (true) {
 925		if (full)
 926			prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
 927		else
 928			prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
 929
 930		/*
 931		 * The events can happen in critical sections where
 932		 * checking a work queue can cause deadlocks.
 933		 * After adding a task to the queue, this flag is set
 934		 * only to notify events to try to wake up the queue
 935		 * using irq_work.
 936		 *
 937		 * We don't clear it even if the buffer is no longer
 938		 * empty. The flag only causes the next event to run
 939		 * irq_work to do the work queue wake up. The worst
 940		 * that can happen if we race with !trace_empty() is that
 941		 * an event will cause an irq_work to try to wake up
 942		 * an empty queue.
 943		 *
 944		 * There's no reason to protect this flag either, as
 945		 * the work queue and irq_work logic will do the necessary
 946		 * synchronization for the wake ups. The only thing
 947		 * that is necessary is that the wake up happens after
 948		 * a task has been queued. Spurious wake ups are OK.
 949		 */
 950		if (full)
 951			work->full_waiters_pending = true;
 952		else
 953			work->waiters_pending = true;
 954
 955		if (signal_pending(current)) {
 956			ret = -EINTR;
 957			break;
 958		}
 959
 960		if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
 961			break;
 962
 963		if (cpu != RING_BUFFER_ALL_CPUS &&
 964		    !ring_buffer_empty_cpu(buffer, cpu)) {
 965			unsigned long flags;
 966			bool pagebusy;
 967			size_t nr_pages;
 968			size_t dirty;
 969
 970			if (!full)
 971				break;
 972
 973			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 974			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
 975			nr_pages = cpu_buffer->nr_pages;
 976			dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
 977			if (!cpu_buffer->shortest_full ||
 978			    cpu_buffer->shortest_full > full)
 979				cpu_buffer->shortest_full = full;
 980			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 981			if (!pagebusy &&
 982			    (!nr_pages || (dirty * 100) > full * nr_pages))
 983				break;
 984		}
 985
 986		schedule();
 987	}
 988
 989	if (full)
 990		finish_wait(&work->full_waiters, &wait);
 991	else
 992		finish_wait(&work->waiters, &wait);
 993
 994	return ret;
 995}
 996
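/*
 * Illustrative sketch: a reader blocking until cpu 1's buffer is at
 * least half full (@full is a percentage of dirty pages):
 *
 *	int ret = ring_buffer_wait(buffer, 1, 50);
 *
 *	if (ret == -EINTR)
 *		return ret;	// interrupted by a signal
 */
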
 997/**
 998 * ring_buffer_poll_wait - poll on buffer input
 999 * @buffer: buffer to wait on
1000 * @cpu: the cpu buffer to wait on
1001 * @filp: the file descriptor
1002 * @poll_table: The poll descriptor
1003 *
1004 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
1005 * as data is added to any of the @buffer's cpu buffers. Otherwise
1006 * it will wait for data to be added to a specific cpu buffer.
1007 *
1008 * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
1009 * zero otherwise.
1010 */
1011__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
1012			  struct file *filp, poll_table *poll_table)
1013{
1014	struct ring_buffer_per_cpu *cpu_buffer;
1015	struct rb_irq_work *work;
1016
1017	if (cpu == RING_BUFFER_ALL_CPUS)
1018		work = &buffer->irq_work;
1019	else {
1020		if (!cpumask_test_cpu(cpu, buffer->cpumask))
1021			return -EINVAL;
1022
1023		cpu_buffer = buffer->buffers[cpu];
1024		work = &cpu_buffer->irq_work;
1025	}
1026
1027	poll_wait(filp, &work->waiters, poll_table);
1028	work->waiters_pending = true;
1029	/*
1030	 * There's a tight race between setting the waiters_pending and
1031	 * checking if the ring buffer is empty.  Once the waiters_pending bit
1032	 * is set, the next event will wake the task up, but we can get stuck
1033	 * if there's only a single event in the buffer.
1034	 *
1035	 * FIXME: Ideally, we need a memory barrier on the writer side as well,
1036	 * but adding a memory barrier to all events will cause too much of a
1037	 * performance hit in the fast path.  We only need a memory barrier when
1038	 * the buffer goes from empty to having content.  But as this race is
1039	 * extremely small, and it's not a problem if another event comes in, we
1040	 * will fix it later.
1041	 */
1042	smp_mb();
1043
1044	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
1045	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
1046		return EPOLLIN | EPOLLRDNORM;
1047	return 0;
1048}
1049
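/*
 * Illustrative sketch: wiring the above into a file_operations ->poll
 * handler, where my_buffer and my_cpu are placeholders for driver
 * state looked up from @filp:
 *
 *	static __poll_t my_trace_poll(struct file *filp, poll_table *pt)
 *	{
 *		return ring_buffer_poll_wait(my_buffer, my_cpu, filp, pt);
 *	}
 */
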
1050/* buffer may be either ring_buffer or ring_buffer_per_cpu */
1051#define RB_WARN_ON(b, cond)						\
1052	({								\
1053		int _____ret = unlikely(cond);				\
1054		if (_____ret) {						\
1055			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
1056				struct ring_buffer_per_cpu *__b =	\
1057					(void *)b;			\
1058				atomic_inc(&__b->buffer->record_disabled); \
1059			} else						\
1060				atomic_inc(&b->record_disabled);	\
1061			WARN_ON(1);					\
1062		}							\
1063		_____ret;						\
1064	})
1065
1066/* Up this if you want to test the TIME_EXTENTS and normalization */
1067#define DEBUG_SHIFT 0
1068
1069static inline u64 rb_time_stamp(struct trace_buffer *buffer)
1070{
1071	u64 ts;
1072
1073	/* Skip retpolines :-( */
1074	if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local))
1075		ts = trace_clock_local();
1076	else
1077		ts = buffer->clock();
1078
1079	/* shift to debug/test normalization and TIME_EXTENTS */
1080	return ts << DEBUG_SHIFT;
1081}
1082
1083u64 ring_buffer_time_stamp(struct trace_buffer *buffer)
1084{
1085	u64 time;
1086
1087	preempt_disable_notrace();
1088	time = rb_time_stamp(buffer);
1089	preempt_enable_notrace();
1090
1091	return time;
1092}
1093EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
1094
1095void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
1096				      int cpu, u64 *ts)
1097{
1098	/* Just stupid testing the normalize function and deltas */
1099	*ts >>= DEBUG_SHIFT;
1100}
1101EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
1102
1103/*
1104 * Making the ring buffer lockless makes things tricky.
1105 * Writes only happen on the CPU that they are on, and they only
1106 * need to worry about interrupts. Reads, however, can happen on
1107 * any CPU.
1108 *
1109 * The reader page is always off the ring buffer, but when the
1110 * reader finishes with a page, it needs to swap its page with
1111 * a new one from the buffer. The reader needs to take from
1112 * the head (writes go to the tail). But if a writer is in overwrite
1113 * mode and wraps, it must push the head page forward.
1114 *
1115 * Here lies the problem.
1116 *
1117 * The reader must be careful to replace only the head page, and
1118 * not another one. As described at the top of the file in the
1119 * ASCII art, the reader sets its old page to point to the next
1120 * page after head. It then sets the page after head to point to
1121 * the old reader page. But if the writer moves the head page
1122 * during this operation, the reader could end up with the tail.
1123 *
1124 * We use cmpxchg to help prevent this race. We also do something
1125 * special with the page before head. We set the LSB to 1.
1126 *
1127 * When the writer must push the page forward, it will clear the
1128 * bit that points to the head page, move the head, and then set
1129 * the bit that points to the new head page.
1130 *
1131 * We also don't want an interrupt coming in and moving the head
1132 * page on another writer. So we use the second LSB to catch
1133 * that too. Thus:
1134 *
1135 * head->list->prev->next        bit 1          bit 0
1136 *                              -------        -------
1137 * Normal page                     0              0
1138 * Points to head page             0              1
1139 * New head page                   1              0
1140 *
1141 * Note we can not trust the prev pointer of the head page, because:
1142 *
1143 * +----+       +-----+        +-----+
1144 * |    |------>|  T  |---X--->|  N  |
1145 * |    |<------|     |        |     |
1146 * +----+       +-----+        +-----+
1147 *   ^                           ^ |
1148 *   |          +-----+          | |
1149 *   +----------|  R  |----------+ |
1150 *              |     |<-----------+
1151 *              +-----+
1152 *
1153 * Key:  ---X-->  HEAD flag set in pointer
1154 *         T      Tail page
1155 *         R      Reader page
1156 *         N      Next page
1157 *
1158 * (see __rb_reserve_next() to see where this happens)
1159 *
1160 *  What the above shows is that the reader just swapped out
1161 *  the reader page with a page in the buffer, but before it
1162 *  could make the new header point back to the new page added
1163 *  it was preempted by a writer. The writer moved forward onto
1164 *  the new page added by the reader and is about to move forward
1165 *  again.
1166 *
1167 *  You can see, it is legitimate for the previous pointer of
1168 *  the head (or any page) not to point back to itself. But only
1169 *  temporarily.
1170 */
1171
1172#define RB_PAGE_NORMAL		0UL
1173#define RB_PAGE_HEAD		1UL
1174#define RB_PAGE_UPDATE		2UL
1175
1176
1177#define RB_FLAG_MASK		3UL
1178
1179/* PAGE_MOVED is not part of the mask */
1180#define RB_PAGE_MOVED		4UL
1181
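/*
 * Concrete example with made-up addresses: if the head page's
 * list_head sits at 0xffff888010001000 (cache-line aligned, so the
 * low bits are free), the previous page's ->next holds
 * 0xffff888010001001 while it is the head (RB_PAGE_HEAD) and
 * 0xffff888010001002 while a writer is moving it (RB_PAGE_UPDATE).
 * rb_list_head() below masks the low two bits off before the pointer
 * is ever dereferenced.
 */
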
1182/*
1183 * rb_list_head - remove any bit
1184 */
1185static struct list_head *rb_list_head(struct list_head *list)
1186{
1187	unsigned long val = (unsigned long)list;
1188
1189	return (struct list_head *)(val & ~RB_FLAG_MASK);
1190}
1191
1192/*
1193 * rb_is_head_page - test if the given page is the head page
1194 *
1195 * Because the reader may move the head_page pointer, we can
1196 * not trust what the head page is (it may be pointing to
1197 * the reader page). But if the next page is a header page,
1198 * its flags will be non zero.
1199 */
1200static inline int
1201rb_is_head_page(struct buffer_page *page, struct list_head *list)
1202{
1203	unsigned long val;
1204
1205	val = (unsigned long)list->next;
1206
1207	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
1208		return RB_PAGE_MOVED;
1209
1210	return val & RB_FLAG_MASK;
1211}
1212
1213/*
1214 * rb_is_reader_page
1215 *
1216 * The unique thing about the reader page is that, if the
1217 * writer is ever on it, the previous pointer never points
1218 * back to the reader page.
1219 */
1220static bool rb_is_reader_page(struct buffer_page *page)
1221{
1222	struct list_head *list = page->list.prev;
1223
1224	return rb_list_head(list->next) != &page->list;
1225}
1226
1227/*
1228 * rb_set_list_to_head - set a list_head to be pointing to head.
1229 */
1230static void rb_set_list_to_head(struct list_head *list)
1231{
1232	unsigned long *ptr;
1233
1234	ptr = (unsigned long *)&list->next;
1235	*ptr |= RB_PAGE_HEAD;
1236	*ptr &= ~RB_PAGE_UPDATE;
1237}
1238
1239/*
1240 * rb_head_page_activate - sets up head page
1241 */
1242static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
1243{
1244	struct buffer_page *head;
1245
1246	head = cpu_buffer->head_page;
1247	if (!head)
1248		return;
1249
1250	/*
1251	 * Set the previous list pointer to have the HEAD flag.
1252	 */
1253	rb_set_list_to_head(head->list.prev);
1254}
1255
1256static void rb_list_head_clear(struct list_head *list)
1257{
1258	unsigned long *ptr = (unsigned long *)&list->next;
1259
1260	*ptr &= ~RB_FLAG_MASK;
1261}
1262
1263/*
1264 * rb_head_page_deactivate - clears head page ptr (for free list)
1265 */
1266static void
1267rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
1268{
1269	struct list_head *hd;
1270
1271	/* Go through the whole list and clear any pointers found. */
1272	rb_list_head_clear(cpu_buffer->pages);
1273
1274	list_for_each(hd, cpu_buffer->pages)
1275		rb_list_head_clear(hd);
1276}
1277
1278static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
1279			    struct buffer_page *head,
1280			    struct buffer_page *prev,
1281			    int old_flag, int new_flag)
1282{
1283	struct list_head *list;
1284	unsigned long val = (unsigned long)&head->list;
1285	unsigned long ret;
1286
1287	list = &prev->list;
1288
1289	val &= ~RB_FLAG_MASK;
1290
1291	ret = cmpxchg((unsigned long *)&list->next,
1292		      val | old_flag, val | new_flag);
1293
1294	/* check if the reader took the page */
1295	if ((ret & ~RB_FLAG_MASK) != val)
1296		return RB_PAGE_MOVED;
1297
1298	return ret & RB_FLAG_MASK;
1299}
1300
1301static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
1302				   struct buffer_page *head,
1303				   struct buffer_page *prev,
1304				   int old_flag)
1305{
1306	return rb_head_page_set(cpu_buffer, head, prev,
1307				old_flag, RB_PAGE_UPDATE);
1308}
1309
1310static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
1311				 struct buffer_page *head,
1312				 struct buffer_page *prev,
1313				 int old_flag)
1314{
1315	return rb_head_page_set(cpu_buffer, head, prev,
1316				old_flag, RB_PAGE_HEAD);
1317}
1318
1319static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
1320				   struct buffer_page *head,
1321				   struct buffer_page *prev,
1322				   int old_flag)
1323{
1324	return rb_head_page_set(cpu_buffer, head, prev,
1325				old_flag, RB_PAGE_NORMAL);
1326}
1327
1328static inline void rb_inc_page(struct buffer_page **bpage)
1329{
1330	struct list_head *p = rb_list_head((*bpage)->list.next);
1331
1332	*bpage = list_entry(p, struct buffer_page, list);
1333}
1334
1335static struct buffer_page *
1336rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
1337{
1338	struct buffer_page *head;
1339	struct buffer_page *page;
1340	struct list_head *list;
1341	int i;
1342
1343	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
1344		return NULL;
1345
1346	/* sanity check */
1347	list = cpu_buffer->pages;
1348	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
1349		return NULL;
1350
1351	page = head = cpu_buffer->head_page;
1352	/*
1353	 * It is possible that the writer moves the header behind
1354	 * where we started, and we miss in one loop.
1355	 * A second loop should grab the header, but we'll do
1356	 * three loops just because I'm paranoid.
1357	 */
1358	for (i = 0; i < 3; i++) {
1359		do {
1360			if (rb_is_head_page(page, page->list.prev)) {
1361				cpu_buffer->head_page = page;
1362				return page;
1363			}
1364			rb_inc_page(&page);
1365		} while (page != head);
1366	}
1367
1368	RB_WARN_ON(cpu_buffer, 1);
1369
1370	return NULL;
1371}
1372
1373static int rb_head_page_replace(struct buffer_page *old,
1374				struct buffer_page *new)
1375{
1376	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
1377	unsigned long val;
1378	unsigned long ret;
1379
1380	val = *ptr & ~RB_FLAG_MASK;
1381	val |= RB_PAGE_HEAD;
1382
1383	ret = cmpxchg(ptr, val, (unsigned long)&new->list);
1384
1385	return ret == val;
1386}
1387
1388/*
1389 * rb_tail_page_update - move the tail page forward
1390 */
1391static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
1392			       struct buffer_page *tail_page,
1393			       struct buffer_page *next_page)
1394{
1395	unsigned long old_entries;
1396	unsigned long old_write;
1397
1398	/*
1399	 * The tail page now needs to be moved forward.
1400	 *
1401	 * We need to reset the tail page, but without messing
1402	 * with possible erasing of data brought in by interrupts
1403	 * that have moved the tail page and are currently on it.
1404	 *
1405	 * We add a counter to the write field to denote this.
1406	 */
1407	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
1408	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
1409
1410	local_inc(&cpu_buffer->pages_touched);
1411	/*
1412	 * Just make sure we have seen our old_write and synchronize
1413	 * with any interrupts that come in.
1414	 */
1415	barrier();
1416
1417	/*
1418	 * If the tail page is still the same as what we think
1419	 * it is, then it is up to us to update the tail
1420	 * pointer.
1421	 */
1422	if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
1423		/* Zero the write counter */
1424		unsigned long val = old_write & ~RB_WRITE_MASK;
1425		unsigned long eval = old_entries & ~RB_WRITE_MASK;
1426
1427		/*
1428		 * This will only succeed if an interrupt did
1429		 * not come in and change it. In which case, we
1430		 * do not want to modify it.
1431		 *
1432		 * We add (void) to let the compiler know that we do not care
1433		 * about the return value of these functions. We use the
1434		 * cmpxchg to only update if an interrupt did not already
1435		 * do it for us. If the cmpxchg fails, we don't care.
1436		 */
1437		(void)local_cmpxchg(&next_page->write, old_write, val);
1438		(void)local_cmpxchg(&next_page->entries, old_entries, eval);
1439
1440		/*
1441		 * No need to worry about races with clearing out the commit:
1442		 * it can only increment when a commit takes place. But that
1443		 * only happens in the outermost nested commit.
1444		 */
1445		local_set(&next_page->page->commit, 0);
1446
1447		/* Again, either we update tail_page or an interrupt does */
1448		(void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
1449	}
1450}
1451
1452static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
1453			  struct buffer_page *bpage)
1454{
1455	unsigned long val = (unsigned long)bpage;
1456
1457	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
1458		return 1;
1459
1460	return 0;
1461}
1462
1463/**
1464 * rb_check_list - make sure a pointer to a list has the last bits zero
1465 */
1466static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
1467			 struct list_head *list)
1468{
1469	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
1470		return 1;
1471	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
1472		return 1;
1473	return 0;
1474}
1475
1476/**
1477 * rb_check_pages - integrity check of buffer pages
1478 * @cpu_buffer: CPU buffer with pages to test
1479 *
1480 * As a safety measure we check to make sure the data pages have not
1481 * been corrupted.
1482 */
1483static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
1484{
1485	struct list_head *head = cpu_buffer->pages;
1486	struct buffer_page *bpage, *tmp;
1487
1488	/* Reset the head page if it exists */
1489	if (cpu_buffer->head_page)
1490		rb_set_head_page(cpu_buffer);
1491
1492	rb_head_page_deactivate(cpu_buffer);
1493
1494	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
1495		return -1;
1496	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
1497		return -1;
1498
1499	if (rb_check_list(cpu_buffer, head))
1500		return -1;
1501
1502	list_for_each_entry_safe(bpage, tmp, head, list) {
1503		if (RB_WARN_ON(cpu_buffer,
1504			       bpage->list.next->prev != &bpage->list))
1505			return -1;
1506		if (RB_WARN_ON(cpu_buffer,
1507			       bpage->list.prev->next != &bpage->list))
1508			return -1;
1509		if (rb_check_list(cpu_buffer, &bpage->list))
1510			return -1;
1511	}
1512
1513	rb_head_page_activate(cpu_buffer);
1514
1515	return 0;
1516}
1517
1518static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1519		long nr_pages, struct list_head *pages)
1520{
1521	struct buffer_page *bpage, *tmp;
1522	bool user_thread = current->mm != NULL;
1523	gfp_t mflags;
1524	long i;
1525
1526	/*
1527	 * First check that enough memory is available.
1528	 * Note, si_mem_available() only gives us a rough estimate of available
1529	 * memory. It may not be accurate. But we don't care, we just want
1530	 * to prevent doing any allocation when it is obvious that it is
1531	 * not going to succeed.
1532	 */
1533	i = si_mem_available();
1534	if (i < nr_pages)
1535		return -ENOMEM;
1536
1537	/*
1538	 * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
1539	 * gracefully without invoking oom-killer and the system is not
1540	 * destabilized.
1541	 */
1542	mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL;
1543
1544	/*
1545	 * A user thread may allocate too much even when si_mem_available()
1546	 * reported that there was enough memory, since that is only an
1547	 * estimate. Make sure the OOM killer kills this thread. This can happen
1548	 * even with RETRY_MAYFAIL because another task may be doing
1549	 * an allocation after this task has taken all memory.
1550	 * This is the task the OOM killer needs to take out during this
1551	 * loop, even if it was triggered by an allocation somewhere else.
1552	 */
1553	if (user_thread)
1554		set_current_oom_origin();
1555	for (i = 0; i < nr_pages; i++) {
1556		struct page *page;
1557
1558		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1559				    mflags, cpu_to_node(cpu_buffer->cpu));
1560		if (!bpage)
1561			goto free_pages;
1562
1563		rb_check_bpage(cpu_buffer, bpage);
1564
1565		list_add(&bpage->list, pages);
1566
1567		page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags, 0);
1568		if (!page)
1569			goto free_pages;
1570		bpage->page = page_address(page);
1571		rb_init_page(bpage->page);
1572
1573		if (user_thread && fatal_signal_pending(current))
1574			goto free_pages;
1575	}
1576	if (user_thread)
1577		clear_current_oom_origin();
1578
1579	return 0;
1580
1581free_pages:
1582	list_for_each_entry_safe(bpage, tmp, pages, list) {
1583		list_del_init(&bpage->list);
1584		free_buffer_page(bpage);
1585	}
1586	if (user_thread)
1587		clear_current_oom_origin();
1588
1589	return -ENOMEM;
1590}
1591
1592static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1593			     unsigned long nr_pages)
1594{
1595	LIST_HEAD(pages);
1596
1597	WARN_ON(!nr_pages);
1598
1599	if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages))
1600		return -ENOMEM;
1601
1602	/*
1603	 * The ring buffer page list is a circular list that does not
1604	 * start and end with a list head. All page list items point to
1605	 * other pages.
1606	 */
1607	cpu_buffer->pages = pages.next;
1608	list_del(&pages);
1609
1610	cpu_buffer->nr_pages = nr_pages;
1611
1612	rb_check_pages(cpu_buffer);
1613
1614	return 0;
1615}
1616
1617static struct ring_buffer_per_cpu *
1618rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
1619{
1620	struct ring_buffer_per_cpu *cpu_buffer;
1621	struct buffer_page *bpage;
1622	struct page *page;
1623	int ret;
1624
1625	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1626				  GFP_KERNEL, cpu_to_node(cpu));
1627	if (!cpu_buffer)
1628		return NULL;
1629
1630	cpu_buffer->cpu = cpu;
1631	cpu_buffer->buffer = buffer;
1632	raw_spin_lock_init(&cpu_buffer->reader_lock);
1633	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1634	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1635	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
1636	init_completion(&cpu_buffer->update_done);
1637	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
1638	init_waitqueue_head(&cpu_buffer->irq_work.waiters);
1639	init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);
1640
1641	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1642			    GFP_KERNEL, cpu_to_node(cpu));
1643	if (!bpage)
1644		goto fail_free_buffer;
1645
1646	rb_check_bpage(cpu_buffer, bpage);
1647
1648	cpu_buffer->reader_page = bpage;
1649	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
1650	if (!page)
1651		goto fail_free_reader;
1652	bpage->page = page_address(page);
1653	rb_init_page(bpage->page);
1654
1655	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1656	INIT_LIST_HEAD(&cpu_buffer->new_pages);
1657
1658	ret = rb_allocate_pages(cpu_buffer, nr_pages);
1659	if (ret < 0)
1660		goto fail_free_reader;
1661
1662	cpu_buffer->head_page
1663		= list_entry(cpu_buffer->pages, struct buffer_page, list);
1664	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1665
1666	rb_head_page_activate(cpu_buffer);
1667
1668	return cpu_buffer;
1669
1670 fail_free_reader:
1671	free_buffer_page(cpu_buffer->reader_page);
1672
1673 fail_free_buffer:
1674	kfree(cpu_buffer);
1675	return NULL;
1676}
1677
1678static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1679{
1680	struct list_head *head = cpu_buffer->pages;
1681	struct buffer_page *bpage, *tmp;
1682
1683	free_buffer_page(cpu_buffer->reader_page);
1684
1685	rb_head_page_deactivate(cpu_buffer);
1686
1687	if (head) {
1688		list_for_each_entry_safe(bpage, tmp, head, list) {
1689			list_del_init(&bpage->list);
1690			free_buffer_page(bpage);
1691		}
1692		bpage = list_entry(head, struct buffer_page, list);
1693		free_buffer_page(bpage);
1694	}
1695
1696	kfree(cpu_buffer);
1697}
1698
1699/**
1700 * __ring_buffer_alloc - allocate a new ring_buffer
1701 * @size: the size in bytes per cpu that is needed.
1702 * @flags: attributes to set for the ring buffer.
1703 * @key: ring buffer reader_lock_key.
1704 *
1705 * Currently the only flag that is available is the RB_FL_OVERWRITE
1706 * flag. This flag means that the buffer will overwrite old data
1707 * when the buffer wraps. If this flag is not set, the buffer will
1708 * drop data when the tail hits the head.
1709 */
1710struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1711					struct lock_class_key *key)
1712{
1713	struct trace_buffer *buffer;
1714	long nr_pages;
1715	int bsize;
1716	int cpu;
1717	int ret;
1718
1719	/* keep it in its own cache line */
1720	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1721			 GFP_KERNEL);
1722	if (!buffer)
1723		return NULL;
1724
1725	if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1726		goto fail_free_buffer;
1727
1728	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1729	buffer->flags = flags;
1730	buffer->clock = trace_clock_local;
1731	buffer->reader_lock_key = key;
1732
1733	init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
1734	init_waitqueue_head(&buffer->irq_work.waiters);
1735
1736	/* need at least two pages */
1737	if (nr_pages < 2)
1738		nr_pages = 2;
1739
1740	buffer->cpus = nr_cpu_ids;
1741
1742	bsize = sizeof(void *) * nr_cpu_ids;
1743	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1744				  GFP_KERNEL);
1745	if (!buffer->buffers)
1746		goto fail_free_cpumask;
1747
1748	cpu = raw_smp_processor_id();
1749	cpumask_set_cpu(cpu, buffer->cpumask);
1750	buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
1751	if (!buffer->buffers[cpu])
1752		goto fail_free_buffers;
1753
1754	ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
1755	if (ret < 0)
1756		goto fail_free_buffers;
1757
1758	mutex_init(&buffer->mutex);
1759
1760	return buffer;
1761
1762 fail_free_buffers:
1763	for_each_buffer_cpu(buffer, cpu) {
1764		if (buffer->buffers[cpu])
1765			rb_free_cpu_buffer(buffer->buffers[cpu]);
1766	}
1767	kfree(buffer->buffers);
1768
1769 fail_free_cpumask:
1770	free_cpumask_var(buffer->cpumask);
1771
1772 fail_free_buffer:
1773	kfree(buffer);
1774	return NULL;
1775}
1776EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
1777
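/*
 * Illustrative sketch: typical buffer lifetime. ring_buffer_alloc() is
 * the wrapper macro from <linux/ring_buffer.h> that supplies the
 * lockdep key which __ring_buffer_alloc() takes explicitly:
 *
 *	struct trace_buffer *buf;
 *
 *	buf = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);	// 1 MB per cpu
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(buf);
 */
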
1778/**
1779 * ring_buffer_free - free a ring buffer.
1780 * @buffer: the buffer to free.
1781 */
1782void
1783ring_buffer_free(struct trace_buffer *buffer)
1784{
1785	int cpu;
1786
1787	cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
1788
1789	for_each_buffer_cpu(buffer, cpu)
1790		rb_free_cpu_buffer(buffer->buffers[cpu]);
1791
1792	kfree(buffer->buffers);
1793	free_cpumask_var(buffer->cpumask);
1794
1795	kfree(buffer);
1796}
1797EXPORT_SYMBOL_GPL(ring_buffer_free);
1798
1799void ring_buffer_set_clock(struct trace_buffer *buffer,
1800			   u64 (*clock)(void))
1801{
1802	buffer->clock = clock;
1803}
1804
1805void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs)
1806{
1807	buffer->time_stamp_abs = abs;
1808}
1809
1810bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer)
1811{
1812	return buffer->time_stamp_abs;
1813}
1814
1815static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1816
1817static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1818{
1819	return local_read(&bpage->entries) & RB_WRITE_MASK;
1820}
1821
1822static inline unsigned long rb_page_write(struct buffer_page *bpage)
1823{
1824	return local_read(&bpage->write) & RB_WRITE_MASK;
1825}
1826
1827static int
1828rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
1829{
1830	struct list_head *tail_page, *to_remove, *next_page;
1831	struct buffer_page *to_remove_page, *tmp_iter_page;
1832	struct buffer_page *last_page, *first_page;
1833	unsigned long nr_removed;
1834	unsigned long head_bit;
1835	int page_entries;
1836
1837	head_bit = 0;
1838
1839	raw_spin_lock_irq(&cpu_buffer->reader_lock);
1840	atomic_inc(&cpu_buffer->record_disabled);
1841	/*
1842	 * We don't race with the readers since we have acquired the reader
1843	 * lock. We also don't race with writers after disabling recording.
1844	 * This makes it easy to figure out the first and the last page to be
1845	 * removed from the list. We unlink all the pages in between including
1846	 * the first and last pages. This is done in a busy loop so that we
1847	 * lose the least number of traces.
1848	 * The pages are freed after we restart recording and unlock readers.
1849	 */
1850	tail_page = &cpu_buffer->tail_page->list;
1851
1852	/*
1853	 * The tail page might be on the reader page; in that case, remove
1854	 * the next page from the ring buffer instead.
1855	 */
1856	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1857		tail_page = rb_list_head(tail_page->next);
1858	to_remove = tail_page;
1859
1860	/* start of pages to remove */
1861	first_page = list_entry(rb_list_head(to_remove->next),
1862				struct buffer_page, list);
1863
1864	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
1865		to_remove = rb_list_head(to_remove)->next;
1866		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
1867	}
1868
1869	next_page = rb_list_head(to_remove)->next;
1870
1871	/*
1872	 * Now we remove all pages between tail_page and next_page.
1873	 * Make sure that we have head_bit value preserved for the
1874	 * next page
1875	 */
1876	tail_page->next = (struct list_head *)((unsigned long)next_page |
1877						head_bit);
1878	next_page = rb_list_head(next_page);
1879	next_page->prev = tail_page;
1880
1881	/* make sure pages points to a valid page in the ring buffer */
1882	cpu_buffer->pages = next_page;
1883
1884	/* update head page */
1885	if (head_bit)
1886		cpu_buffer->head_page = list_entry(next_page,
1887						struct buffer_page, list);
1888
1889	/*
1890	 * change read pointer to make sure any read iterators reset
1891	 * themselves
1892	 */
1893	cpu_buffer->read = 0;
1894
1895	/* pages are removed, resume tracing and then free the pages */
1896	atomic_dec(&cpu_buffer->record_disabled);
1897	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1898
1899	RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
1900
1901	/* last buffer page to remove */
1902	last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
1903				list);
1904	tmp_iter_page = first_page;
1905
1906	do {
1907		cond_resched();
1908
1909		to_remove_page = tmp_iter_page;
1910		rb_inc_page(&tmp_iter_page);
1911
1912		/* update the counters */
1913		page_entries = rb_page_entries(to_remove_page);
1914		if (page_entries) {
1915			/*
1916			 * If something was added to this page, it was full
1917			 * since it is not the tail page. So we deduct the
1918			 * bytes consumed in ring buffer from here.
1919			 * Increment overrun to account for the lost events.
1920			 */
1921			local_add(page_entries, &cpu_buffer->overrun);
1922			local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1923		}
1924
1925		/*
1926		 * We have already removed references to this list item, just
1927		 * free up the buffer_page and its page
1928		 */
1929		free_buffer_page(to_remove_page);
1930		nr_removed--;
1931
1932	} while (to_remove_page != last_page);
1933
1934	RB_WARN_ON(cpu_buffer, nr_removed);
1935
1936	return nr_removed == 0;
1937}
1938
1939static int
1940rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
1941{
1942	struct list_head *pages = &cpu_buffer->new_pages;
1943	int retries, success;
1944
1945	raw_spin_lock_irq(&cpu_buffer->reader_lock);
1946	/*
1947	 * We are holding the reader lock, so the reader page won't be swapped
1948	 * in the ring buffer. Now we are racing with the writer trying to
1949	 * move head page and the tail page.
1950	 * We are going to adapt the reader page update process where:
1951	 * 1. We first splice the start and end of list of new pages between
1952	 *    the head page and its previous page.
1953	 * 2. We cmpxchg the prev_page->next to point from head page to the
1954	 *    start of new pages list.
1955	 * 3. Finally, we update the head->prev to the end of new list.
1956	 *
1957	 * We will try this process 10 times, to make sure that we don't keep
1958	 * spinning.
1959	 */
1960	retries = 10;
1961	success = 0;
1962	while (retries--) {
1963		struct list_head *head_page, *prev_page, *r;
1964		struct list_head *last_page, *first_page;
1965		struct list_head *head_page_with_bit;
1966		struct buffer_page *hpage = rb_set_head_page(cpu_buffer);
1967
1968		if (!hpage)
1969			break;
1970		head_page = &hpage->list;
1971		prev_page = head_page->prev;
1972		first_page = pages->next;
1973		last_page  = pages->prev;
1974
1975		head_page_with_bit = (struct list_head *)
1976				     ((unsigned long)head_page | RB_PAGE_HEAD);
1977
1978		last_page->next = head_page_with_bit;
1979		first_page->prev = prev_page;
1980
1981		r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
1982
1983		if (r == head_page_with_bit) {
1984			/*
1985			 * yay, we replaced the page pointer to our new list,
1986			 * now, we just have to update to head page's prev
1987			 * pointer to point to end of list
1988			 */
1989			head_page->prev = last_page;
1990			success = 1;
1991			break;
1992		}
1993	}
1994
1995	if (success)
1996		INIT_LIST_HEAD(pages);
1997	/*
1998	 * If we weren't successful in adding the new pages, warn and stop
1999	 * tracing.
2000	 */
2001	RB_WARN_ON(cpu_buffer, !success);
2002	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
2003
2004	/* free pages if they weren't inserted */
2005	if (!success) {
2006		struct buffer_page *bpage, *tmp;
2007		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
2008					 list) {
2009			list_del_init(&bpage->list);
2010			free_buffer_page(bpage);
2011		}
2012	}
2013	return success;
2014}
2015
2016static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
2017{
2018	int success;
2019
2020	if (cpu_buffer->nr_pages_to_update > 0)
2021		success = rb_insert_pages(cpu_buffer);
2022	else
2023		success = rb_remove_pages(cpu_buffer,
2024					-cpu_buffer->nr_pages_to_update);
2025
2026	if (success)
2027		cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
2028}
2029
2030static void update_pages_handler(struct work_struct *work)
2031{
2032	struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
2033			struct ring_buffer_per_cpu, update_pages_work);
2034	rb_update_pages(cpu_buffer);
2035	complete(&cpu_buffer->update_done);
2036}
2037
2038/**
2039 * ring_buffer_resize - resize the ring buffer
2040 * @buffer: the buffer to resize.
2041 * @size: the new size.
2042 * @cpu_id: the cpu buffer to resize
2043 *
2044 * Minimum size is 2 * BUF_PAGE_SIZE.
2045 *
2046 * Returns 0 on success and < 0 on failure.
2047 */
2048int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
2049			int cpu_id)
2050{
2051	struct ring_buffer_per_cpu *cpu_buffer;
2052	unsigned long nr_pages;
2053	int cpu, err;
2054
2055	/*
2056	 * Always succeed at resizing a non-existent buffer:
2057	 */
2058	if (!buffer)
2059		return 0;
2060
2061	/* Make sure the requested buffer exists */
2062	if (cpu_id != RING_BUFFER_ALL_CPUS &&
2063	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
2064		return 0;
2065
2066	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
2067
2068	/* we need a minimum of two pages */
2069	if (nr_pages < 2)
2070		nr_pages = 2;
2071
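	/*
	 * Worked example (illustrative numbers): with 4 KiB pages,
	 * BUF_PAGE_SIZE is PAGE_SIZE minus the small page header, so a
	 * bit under 4096. A requested size of 10000 bytes then yields
	 * DIV_ROUND_UP(10000, ~4080) = 3 pages, while a request of 100
	 * bytes yields 1 page and is raised to the 2-page minimum.
	 */
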
2072	/* prevent another thread from changing buffer sizes */
2073	mutex_lock(&buffer->mutex);
2074
2075
2076	if (cpu_id == RING_BUFFER_ALL_CPUS) {
2077		/*
2078		 * Don't succeed if resizing is disabled, as a reader might be
2079		 * manipulating the ring buffer and is expecting a sane state while
2080		 * this is true.
2081		 */
2082		for_each_buffer_cpu(buffer, cpu) {
2083			cpu_buffer = buffer->buffers[cpu];
2084			if (atomic_read(&cpu_buffer->resize_disabled)) {
2085				err = -EBUSY;
2086				goto out_err_unlock;
2087			}
2088		}
2089
2090		/* calculate the pages to update */
2091		for_each_buffer_cpu(buffer, cpu) {
2092			cpu_buffer = buffer->buffers[cpu];
2093
2094			cpu_buffer->nr_pages_to_update = nr_pages -
2095							cpu_buffer->nr_pages;
2096			/*
2097			 * nothing more to do for removing pages or no update
2098			 */
2099			if (cpu_buffer->nr_pages_to_update <= 0)
2100				continue;
2101			/*
2102			 * to add pages, make sure all new pages can be
2103			 * allocated without receiving ENOMEM
2104			 */
2105			INIT_LIST_HEAD(&cpu_buffer->new_pages);
2106			if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
2107						&cpu_buffer->new_pages)) {
2108				/* not enough memory for new pages */
2109				err = -ENOMEM;
2110				goto out_err;
2111			}
2112		}
2113
2114		get_online_cpus();
2115		/*
2116		 * Fire off all the required work handlers
2117		 * We can't schedule on offline CPUs, but it's not necessary
2118		 * since we can change their buffer sizes without any race.
2119		 */
2120		for_each_buffer_cpu(buffer, cpu) {
2121			cpu_buffer = buffer->buffers[cpu];
2122			if (!cpu_buffer->nr_pages_to_update)
2123				continue;
2124
2125			/* Can't run something on an offline CPU. */
2126			if (!cpu_online(cpu)) {
2127				rb_update_pages(cpu_buffer);
2128				cpu_buffer->nr_pages_to_update = 0;
2129			} else {
2130				schedule_work_on(cpu,
2131						&cpu_buffer->update_pages_work);
2132			}
2133		}
2134
2135		/* wait for all the updates to complete */
2136		for_each_buffer_cpu(buffer, cpu) {
2137			cpu_buffer = buffer->buffers[cpu];
2138			if (!cpu_buffer->nr_pages_to_update)
2139				continue;
2140
2141			if (cpu_online(cpu))
2142				wait_for_completion(&cpu_buffer->update_done);
2143			cpu_buffer->nr_pages_to_update = 0;
2144		}
2145
2146		put_online_cpus();
2147	} else {
2148		cpu_buffer = buffer->buffers[cpu_id];
2149
2150		if (nr_pages == cpu_buffer->nr_pages)
2151			goto out;
2152
2153		/*
2154		 * Don't succeed if resizing is disabled, as a reader might be
2155		 * manipulating the ring buffer and is expecting a sane state while
2156		 * this is true.
2157		 */
2158		if (atomic_read(&cpu_buffer->resize_disabled)) {
2159			err = -EBUSY;
2160			goto out_err_unlock;
2161		}
2162
2163		cpu_buffer->nr_pages_to_update = nr_pages -
2164						cpu_buffer->nr_pages;
2165
2166		INIT_LIST_HEAD(&cpu_buffer->new_pages);
2167		if (cpu_buffer->nr_pages_to_update > 0 &&
2168			__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
2169					    &cpu_buffer->new_pages)) {
2170			err = -ENOMEM;
2171			goto out_err;
2172		}
2173
2174		get_online_cpus();
2175
2176		/* Can't run something on an offline CPU. */
2177		if (!cpu_online(cpu_id))
2178			rb_update_pages(cpu_buffer);
2179		else {
2180			schedule_work_on(cpu_id,
2181					 &cpu_buffer->update_pages_work);
2182			wait_for_completion(&cpu_buffer->update_done);
2183		}
2184
2185		cpu_buffer->nr_pages_to_update = 0;
2186		put_online_cpus();
2187	}
2188
2189 out:
2190	/*
2191	 * The ring buffer resize can happen with the ring buffer
2192	 * enabled, so that the update disturbs the tracing as little
2193	 * as possible. But if the buffer is disabled, we do not need
2194	 * to worry about that, and we can take the time to verify
2195	 * that the buffer is not corrupt.
2196	 */
2197	if (atomic_read(&buffer->record_disabled)) {
2198		atomic_inc(&buffer->record_disabled);
2199		/*
2200		 * Even though the buffer was disabled, we must make sure
2201		 * that it is truly disabled before calling rb_check_pages.
2202		 * There could have been a race between checking
2203		 * record_disable and incrementing it.
2204		 */
2205		synchronize_rcu();
2206		for_each_buffer_cpu(buffer, cpu) {
2207			cpu_buffer = buffer->buffers[cpu];
2208			rb_check_pages(cpu_buffer);
2209		}
2210		atomic_dec(&buffer->record_disabled);
2211	}
2212
2213	mutex_unlock(&buffer->mutex);
2214	return 0;
2215
2216 out_err:
2217	for_each_buffer_cpu(buffer, cpu) {
2218		struct buffer_page *bpage, *tmp;
2219
2220		cpu_buffer = buffer->buffers[cpu];
2221		cpu_buffer->nr_pages_to_update = 0;
2222
2223		if (list_empty(&cpu_buffer->new_pages))
2224			continue;
2225
2226		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
2227					list) {
2228			list_del_init(&bpage->list);
2229			free_buffer_page(bpage);
2230		}
2231	}
2232 out_err_unlock:
2233	mutex_unlock(&buffer->mutex);
2234	return err;
2235}
2236EXPORT_SYMBOL_GPL(ring_buffer_resize);
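
/*
 * Usage sketch (hypothetical caller, error handling abbreviated):
 * grow every per-CPU buffer to roughly 64 KiB, then shrink only
 * CPU 1 back down. The sizes are arbitrary examples.
 */
static int example_resize(struct trace_buffer *buffer)
{
	int err;

	err = ring_buffer_resize(buffer, 64 * 1024, RING_BUFFER_ALL_CPUS);
	if (err)
		return err;

	/* per-CPU resize: only CPU 1's page list is touched */
	return ring_buffer_resize(buffer, 32 * 1024, 1);
}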
2237
2238void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val)
2239{
2240	mutex_lock(&buffer->mutex);
2241	if (val)
2242		buffer->flags |= RB_FL_OVERWRITE;
2243	else
2244		buffer->flags &= ~RB_FL_OVERWRITE;
2245	mutex_unlock(&buffer->mutex);
2246}
2247EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
2248
2249static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
2250{
2251	return bpage->page->data + index;
2252}
2253
2254static __always_inline struct ring_buffer_event *
2255rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
2256{
2257	return __rb_page_index(cpu_buffer->reader_page,
2258			       cpu_buffer->reader_page->read);
2259}
2260
2261static __always_inline unsigned rb_page_commit(struct buffer_page *bpage)
2262{
2263	return local_read(&bpage->page->commit);
2264}
2265
2266static struct ring_buffer_event *
2267rb_iter_head_event(struct ring_buffer_iter *iter)
2268{
2269	struct ring_buffer_event *event;
2270	struct buffer_page *iter_head_page = iter->head_page;
2271	unsigned long commit;
2272	unsigned length;
2273
2274	if (iter->head != iter->next_event)
2275		return iter->event;
2276
2277	/*
2278	 * When the writer goes across pages, it issues a cmpxchg which
2279	 * is a mb(), which will synchronize with the rmb here.
2280	 * (see rb_tail_page_update() and __rb_reserve_next())
2281	 */
2282	commit = rb_page_commit(iter_head_page);
2283	smp_rmb();
2284	event = __rb_page_index(iter_head_page, iter->head);
2285	length = rb_event_length(event);
2286
2287	/*
2288	 * READ_ONCE() doesn't work on functions and we don't want the
2289	 * compiler doing any crazy optimizations with length.
2290	 */
2291	barrier();
2292
2293	if ((iter->head + length) > commit || length > BUF_MAX_DATA_SIZE)
2294		/* Writer corrupted the read? */
2295		goto reset;
2296
2297	memcpy(iter->event, event, length);
2298	/*
2299	 * If the page stamp is still the same after this rmb() then the
2300	 * event was safely copied without the writer entering the page.
2301	 */
2302	smp_rmb();
2303
2304	/* Make sure the page didn't change since we read this */
2305	if (iter->page_stamp != iter_head_page->page->time_stamp ||
2306	    commit > rb_page_commit(iter_head_page))
2307		goto reset;
2308
2309	iter->next_event = iter->head + length;
2310	return iter->event;
2311 reset:
2312	/* Reset to the beginning */
2313	iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
2314	iter->head = 0;
2315	iter->next_event = 0;
2316	iter->missed_events = 1;
2317	return NULL;
2318}
2319
2320/* Size is determined by what has been committed */
2321static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
2322{
2323	return rb_page_commit(bpage);
2324}
2325
2326static __always_inline unsigned
2327rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
2328{
2329	return rb_page_commit(cpu_buffer->commit_page);
2330}
2331
2332static __always_inline unsigned
2333rb_event_index(struct ring_buffer_event *event)
2334{
2335	unsigned long addr = (unsigned long)event;
2336
2337	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
2338}
2339
2340static void rb_inc_iter(struct ring_buffer_iter *iter)
2341{
2342	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2343
2344	/*
2345	 * The iterator could be on the reader page (it starts there).
2346	 * But the head could have moved, since the reader was
2347	 * found. Check for this case and assign the iterator
2348	 * to the head page instead of next.
2349	 */
2350	if (iter->head_page == cpu_buffer->reader_page)
2351		iter->head_page = rb_set_head_page(cpu_buffer);
2352	else
2353		rb_inc_page(&iter->head_page);
2354
2355	iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
2356	iter->head = 0;
2357	iter->next_event = 0;
2358}
2359
2360/*
2361 * rb_handle_head_page - writer hit the head page
2362 *
2363 * Returns: +1 to retry page
2364 *           0 to continue
2365 *          -1 on error
2366 */
2367static int
2368rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
2369		    struct buffer_page *tail_page,
2370		    struct buffer_page *next_page)
2371{
2372	struct buffer_page *new_head;
2373	int entries;
2374	int type;
2375	int ret;
2376
2377	entries = rb_page_entries(next_page);
2378
2379	/*
2380	 * The hard part is here. We need to move the head
2381	 * forward, and protect against both readers on
2382	 * other CPUs and writers coming in via interrupts.
2383	 */
2384	type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
2385				       RB_PAGE_HEAD);
2386
2387	/*
2388	 * type can be one of four:
2389	 *  NORMAL - an interrupt already moved it for us
2390	 *  HEAD   - we are the first to get here.
2391	 *  UPDATE - we are the interrupt interrupting
2392	 *           a current move.
2393	 *  MOVED  - a reader on another CPU moved the next
2394	 *           pointer to its reader page. Give up
2395	 *           and try again.
2396	 */
2397
2398	switch (type) {
2399	case RB_PAGE_HEAD:
2400		/*
2401		 * We changed the head to UPDATE, thus
2402		 * it is our responsibility to update
2403		 * the counters.
2404		 */
2405		local_add(entries, &cpu_buffer->overrun);
2406		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
2407
2408		/*
2409		 * The entries will be zeroed out when we move the
2410		 * tail page.
2411		 */
2412
2413		/* still more to do */
2414		break;
2415
2416	case RB_PAGE_UPDATE:
2417		/*
 2418		 * This is an interrupt that interrupted the
 2419		 * previous update. Still more to do.
2420		 */
2421		break;
2422	case RB_PAGE_NORMAL:
2423		/*
2424		 * An interrupt came in before the update
2425		 * and processed this for us.
2426		 * Nothing left to do.
2427		 */
2428		return 1;
2429	case RB_PAGE_MOVED:
2430		/*
2431		 * The reader is on another CPU and just did
2432		 * a swap with our next_page.
2433		 * Try again.
2434		 */
2435		return 1;
2436	default:
2437		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
2438		return -1;
2439	}
2440
2441	/*
2442	 * Now that we are here, the old head pointer is
2443	 * set to UPDATE. This will keep the reader from
2444	 * swapping the head page with the reader page.
2445	 * The reader (on another CPU) will spin till
2446	 * we are finished.
2447	 *
2448	 * We just need to protect against interrupts
2449	 * doing the job. We will set the next pointer
2450	 * to HEAD. After that, we set the old pointer
2451	 * to NORMAL, but only if it was HEAD before.
 2452		 * Otherwise we are an interrupt, and only
 2453		 * want the outermost commit to reset it.
2454	 */
2455	new_head = next_page;
2456	rb_inc_page(&new_head);
2457
2458	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
2459				    RB_PAGE_NORMAL);
2460
2461	/*
2462	 * Valid returns are:
2463	 *  HEAD   - an interrupt came in and already set it.
2464	 *  NORMAL - One of two things:
2465	 *            1) We really set it.
2466	 *            2) A bunch of interrupts came in and moved
2467	 *               the page forward again.
2468	 */
2469	switch (ret) {
2470	case RB_PAGE_HEAD:
2471	case RB_PAGE_NORMAL:
2472		/* OK */
2473		break;
2474	default:
2475		RB_WARN_ON(cpu_buffer, 1);
2476		return -1;
2477	}
2478
2479	/*
2480	 * It is possible that an interrupt came in,
2481	 * set the head up, then more interrupts came in
2482	 * and moved it again. When we get back here,
2483	 * the page would have been set to NORMAL but we
2484	 * just set it back to HEAD.
2485	 *
2486	 * How do you detect this? Well, if that happened
2487	 * the tail page would have moved.
2488	 */
2489	if (ret == RB_PAGE_NORMAL) {
2490		struct buffer_page *buffer_tail_page;
2491
2492		buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
2493		/*
 2494		 * If the tail had moved past next, then we need
2495		 * to reset the pointer.
2496		 */
2497		if (buffer_tail_page != tail_page &&
2498		    buffer_tail_page != next_page)
2499			rb_head_page_set_normal(cpu_buffer, new_head,
2500						next_page,
2501						RB_PAGE_HEAD);
2502	}
2503
2504	/*
2505	 * If this was the outer most commit (the one that
2506	 * changed the original pointer from HEAD to UPDATE),
2507	 * then it is up to us to reset it to NORMAL.
2508	 */
2509	if (type == RB_PAGE_HEAD) {
2510		ret = rb_head_page_set_normal(cpu_buffer, next_page,
2511					      tail_page,
2512					      RB_PAGE_UPDATE);
2513		if (RB_WARN_ON(cpu_buffer,
2514			       ret != RB_PAGE_UPDATE))
2515			return -1;
2516	}
2517
2518	return 0;
2519}
2520
2521static inline void
2522rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2523	      unsigned long tail, struct rb_event_info *info)
2524{
2525	struct buffer_page *tail_page = info->tail_page;
2526	struct ring_buffer_event *event;
2527	unsigned long length = info->length;
2528
2529	/*
2530	 * Only the event that crossed the page boundary
2531	 * must fill the old tail_page with padding.
2532	 */
2533	if (tail >= BUF_PAGE_SIZE) {
2534		/*
2535		 * If the page was filled, then we still need
2536		 * to update the real_end. Reset it to zero
2537		 * and the reader will ignore it.
2538		 */
2539		if (tail == BUF_PAGE_SIZE)
2540			tail_page->real_end = 0;
2541
2542		local_sub(length, &tail_page->write);
2543		return;
2544	}
2545
2546	event = __rb_page_index(tail_page, tail);
2547
2548	/* account for padding bytes */
2549	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2550
2551	/*
2552	 * Save the original length to the meta data.
2553	 * This will be used by the reader to add lost event
2554	 * counter.
2555	 */
2556	tail_page->real_end = tail;
2557
2558	/*
2559	 * If this event is bigger than the minimum size, then
2560	 * we need to be careful that we don't subtract the
2561	 * write counter enough to allow another writer to slip
2562	 * in on this page.
2563	 * We put in a discarded commit instead, to make sure
2564	 * that this space is not used again.
2565	 *
2566	 * If we are less than the minimum size, we don't need to
2567	 * worry about it.
2568	 */
2569	if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
2570		/* No room for any events */
2571
2572		/* Mark the rest of the page with padding */
2573		rb_event_set_padding(event);
2574
2575		/* Set the write back to the previous setting */
2576		local_sub(length, &tail_page->write);
2577		return;
2578	}
2579
2580	/* Put in a discarded event */
2581	event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
2582	event->type_len = RINGBUF_TYPE_PADDING;
2583	/* time delta must be non zero */
2584	event->time_delta = 1;
2585
2586	/* Set write to end of buffer */
2587	length = (tail + length) - BUF_PAGE_SIZE;
2588	local_sub(length, &tail_page->write);
2589}
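
/*
 * Worked example (illustrative BUF_PAGE_SIZE of 4080): an event of
 * length 64 reserved at tail = 4048 crosses the page end. The 32
 * leftover bytes become a PADDING event whose array[0] holds
 * (4080 - 4048) - RB_EVNT_HDR_SIZE = 28 payload bytes, and write is
 * pulled back by (4048 + 64) - 4080 = 32 so the next reservation
 * starts cleanly on the following page.
 */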
2590
2591static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
2592
2593/*
2594 * This is the slow path, force gcc not to inline it.
2595 */
2596static noinline struct ring_buffer_event *
2597rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2598	     unsigned long tail, struct rb_event_info *info)
2599{
2600	struct buffer_page *tail_page = info->tail_page;
2601	struct buffer_page *commit_page = cpu_buffer->commit_page;
2602	struct trace_buffer *buffer = cpu_buffer->buffer;
2603	struct buffer_page *next_page;
2604	int ret;
2605
2606	next_page = tail_page;
2607
2608	rb_inc_page(&next_page);
2609
2610	/*
2611	 * If for some reason, we had an interrupt storm that made
2612	 * it all the way around the buffer, bail, and warn
2613	 * about it.
2614	 */
2615	if (unlikely(next_page == commit_page)) {
2616		local_inc(&cpu_buffer->commit_overrun);
2617		goto out_reset;
2618	}
2619
2620	/*
2621	 * This is where the fun begins!
2622	 *
2623	 * We are fighting against races between a reader that
2624	 * could be on another CPU trying to swap its reader
2625	 * page with the buffer head.
2626	 *
2627	 * We are also fighting against interrupts coming in and
2628	 * moving the head or tail on us as well.
2629	 *
2630	 * If the next page is the head page then we have filled
2631	 * the buffer, unless the commit page is still on the
2632	 * reader page.
2633	 */
2634	if (rb_is_head_page(next_page, &tail_page->list)) {
2635
2636		/*
2637		 * If the commit is not on the reader page, then
2638		 * move the header page.
2639		 */
2640		if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2641			/*
2642			 * If we are not in overwrite mode,
2643			 * this is easy, just stop here.
2644			 */
2645			if (!(buffer->flags & RB_FL_OVERWRITE)) {
2646				local_inc(&cpu_buffer->dropped_events);
2647				goto out_reset;
2648			}
2649
2650			ret = rb_handle_head_page(cpu_buffer,
2651						  tail_page,
2652						  next_page);
2653			if (ret < 0)
2654				goto out_reset;
2655			if (ret)
2656				goto out_again;
2657		} else {
2658			/*
2659			 * We need to be careful here too. The
2660			 * commit page could still be on the reader
2661			 * page. We could have a small buffer, and
2662			 * have filled up the buffer with events
2663			 * from interrupts and such, and wrapped.
2664			 *
2665			 * Note, if the tail page is also on the
2666			 * reader_page, we let it move out.
2667			 */
2668			if (unlikely((cpu_buffer->commit_page !=
2669				      cpu_buffer->tail_page) &&
2670				     (cpu_buffer->commit_page ==
2671				      cpu_buffer->reader_page))) {
2672				local_inc(&cpu_buffer->commit_overrun);
2673				goto out_reset;
2674			}
2675		}
2676	}
2677
2678	rb_tail_page_update(cpu_buffer, tail_page, next_page);
2679
2680 out_again:
2681
2682	rb_reset_tail(cpu_buffer, tail, info);
2683
2684	/* Commit what we have for now. */
2685	rb_end_commit(cpu_buffer);
2686	/* rb_end_commit() decs committing */
2687	local_inc(&cpu_buffer->committing);
2688
2689	/* fail and let the caller try again */
2690	return ERR_PTR(-EAGAIN);
2691
2692 out_reset:
2693	/* reset write */
2694	rb_reset_tail(cpu_buffer, tail, info);
2695
2696	return NULL;
2697}
2698
2699/* Slow path */
2700static struct ring_buffer_event *
2701rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs)
2702{
2703	if (abs)
2704		event->type_len = RINGBUF_TYPE_TIME_STAMP;
2705	else
2706		event->type_len = RINGBUF_TYPE_TIME_EXTEND;
2707
2708	/* Not the first event on the page, or not delta? */
2709	if (abs || rb_event_index(event)) {
2710		event->time_delta = delta & TS_MASK;
2711		event->array[0] = delta >> TS_SHIFT;
2712	} else {
2713		/* nope, just zero it */
2714		event->time_delta = 0;
2715		event->array[0] = 0;
2716	}
2717
2718	return skip_time_extend(event);
2719}
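
/*
 * Worked example of the split above: with TS_SHIFT = 27 (matching the
 * 27-bit time_delta field in the compressed header), a delta of
 * 0x123456789 is stored as time_delta = 0x3456789 (the low 27 bits)
 * and array[0] = 0x24 (the remaining high bits), since
 * (0x24 << 27) + 0x3456789 == 0x123456789.
 */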
2720
2721#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2722static inline bool sched_clock_stable(void)
2723{
2724	return true;
2725}
2726#endif
2727
2728static void
2729rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2730		   struct rb_event_info *info)
2731{
2732	u64 write_stamp;
2733
2734	WARN_ONCE(1, "Delta way too big! %llu ts=%llu before=%llu after=%llu write stamp=%llu\n%s",
2735		  (unsigned long long)info->delta,
2736		  (unsigned long long)info->ts,
2737		  (unsigned long long)info->before,
2738		  (unsigned long long)info->after,
2739		  (unsigned long long)(rb_time_read(&cpu_buffer->write_stamp, &write_stamp) ? write_stamp : 0),
2740		  sched_clock_stable() ? "" :
2741		  "If you just came from a suspend/resume,\n"
2742		  "please switch to the trace global clock:\n"
2743		  "  echo global > /sys/kernel/debug/tracing/trace_clock\n"
2744		  "or add trace_clock=global to the kernel command line\n");
2745}
2746
2747static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2748				      struct ring_buffer_event **event,
2749				      struct rb_event_info *info,
2750				      u64 *delta,
2751				      unsigned int *length)
2752{
2753	bool abs = info->add_timestamp &
2754		(RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE);
2755
2756	if (unlikely(info->delta > (1ULL << 59))) {
2757		/* did the clock go backwards */
2758		if (info->before == info->after && info->before > info->ts) {
2759			/* not interrupted */
2760			static int once;
2761
2762			/*
2763			 * This is possible with a recalibrating of the TSC.
2764			 * Do not produce a call stack, but just report it.
2765			 */
2766			if (!once) {
2767				once++;
2768				pr_warn("Ring buffer clock went backwards: %llu -> %llu\n",
2769					info->before, info->ts);
2770			}
2771		} else
2772			rb_check_timestamp(cpu_buffer, info);
2773		if (!abs)
2774			info->delta = 0;
2775	}
2776	*event = rb_add_time_stamp(*event, info->delta, abs);
2777	*length -= RB_LEN_TIME_EXTEND;
2778	*delta = 0;
2779}
2780
2781/**
2782 * rb_update_event - update event type and data
2783 * @cpu_buffer: The per cpu buffer of the @event
2784 * @event: the event to update
2785 * @info: The info to update the @event with (contains length and delta)
2786 *
2787 * Update the type and data fields of the @event. The length
2788 * is the actual size that is written to the ring buffer,
2789 * and with this, we can determine what to place into the
2790 * data field.
2791 */
2792static void
2793rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
2794		struct ring_buffer_event *event,
2795		struct rb_event_info *info)
2796{
2797	unsigned length = info->length;
2798	u64 delta = info->delta;
2799	unsigned int nest = local_read(&cpu_buffer->committing) - 1;
2800
2801	if (!WARN_ON_ONCE(nest >= MAX_NEST))
2802		cpu_buffer->event_stamp[nest] = info->ts;
2803
2804	/*
2805	 * If we need to add a timestamp, then we
2806	 * add it to the start of the reserved space.
2807	 */
2808	if (unlikely(info->add_timestamp))
2809		rb_add_timestamp(cpu_buffer, &event, info, &delta, &length);
2810
2811	event->time_delta = delta;
2812	length -= RB_EVNT_HDR_SIZE;
2813	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
2814		event->type_len = 0;
2815		event->array[0] = length;
2816	} else
2817		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
2818}
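
/*
 * Example of the encoding above, on an arch without forced 8-byte
 * alignment (RB_ALIGNMENT = 4): a 12-byte payload fits the small-data
 * scheme, so type_len = DIV_ROUND_UP(12, RB_ALIGNMENT) = 3 and no
 * array word is spent on the size. Payloads above RB_MAX_SMALL_DATA
 * instead get type_len = 0 with the size carried in array[0].
 */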
2819
2820static unsigned rb_calculate_event_length(unsigned length)
2821{
2822	struct ring_buffer_event event; /* Used only for sizeof array */
2823
 2824	/* zero length can cause confusion */
2825	if (!length)
2826		length++;
2827
2828	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
2829		length += sizeof(event.array[0]);
2830
2831	length += RB_EVNT_HDR_SIZE;
2832	length = ALIGN(length, RB_ARCH_ALIGNMENT);
2833
2834	/*
2835	 * In case the time delta is larger than the 27 bits for it
2836	 * in the header, we need to add a timestamp. If another
2837	 * event comes in when trying to discard this one to increase
2838	 * the length, then the timestamp will be added in the allocated
2839	 * space of this event. If length is bigger than the size needed
2840	 * for the TIME_EXTEND, then padding has to be used. The events
2841	 * length must be either RB_LEN_TIME_EXTEND, or greater than or equal
2842	 * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding.
2843	 * As length is a multiple of 4, we only need to worry if it
2844	 * is 12 (RB_LEN_TIME_EXTEND + 4).
2845	 */
2846	if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
2847		length += RB_ALIGNMENT;
2848
2849	return length;
2850}
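
/*
 * Worked example (arch with RB_ARCH_ALIGNMENT = 4): a 3-byte request
 * becomes 3 + RB_EVNT_HDR_SIZE (4) = 7, aligned up to 8. A 5-byte
 * request becomes 9, aligned to 12 = RB_LEN_TIME_EXTEND + 4, and is
 * therefore bumped to 16 so a later discard can still fit a
 * TIME_EXTEND plus minimum padding.
 */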
2851
2852static u64 rb_time_delta(struct ring_buffer_event *event)
2853{
2854	switch (event->type_len) {
2855	case RINGBUF_TYPE_PADDING:
2856		return 0;
2857
2858	case RINGBUF_TYPE_TIME_EXTEND:
2859		return rb_event_time_stamp(event);
2860
2861	case RINGBUF_TYPE_TIME_STAMP:
2862		return 0;
2863
2864	case RINGBUF_TYPE_DATA:
2865		return event->time_delta;
2866	default:
2867		return 0;
2868	}
2869}
2870
2871static inline int
2872rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2873		  struct ring_buffer_event *event)
2874{
2875	unsigned long new_index, old_index;
2876	struct buffer_page *bpage;
2877	unsigned long index;
2878	unsigned long addr;
2879	u64 write_stamp;
2880	u64 delta;
2881
2882	new_index = rb_event_index(event);
2883	old_index = new_index + rb_event_ts_length(event);
2884	addr = (unsigned long)event;
2885	addr &= PAGE_MASK;
2886
2887	bpage = READ_ONCE(cpu_buffer->tail_page);
2888
2889	delta = rb_time_delta(event);
2890
2891	if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp))
2892		return 0;
2893
2894	/* Make sure the write stamp is read before testing the location */
2895	barrier();
2896
2897	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2898		unsigned long write_mask =
2899			local_read(&bpage->write) & ~RB_WRITE_MASK;
2900		unsigned long event_length = rb_event_length(event);
2901
2902		/* Something came in, can't discard */
2903		if (!rb_time_cmpxchg(&cpu_buffer->write_stamp,
2904				       write_stamp, write_stamp - delta))
2905			return 0;
2906
2907		/*
2908		 * It's possible that the event time delta is zero
2909		 * (has the same time stamp as the previous event)
2910		 * in which case write_stamp and before_stamp could
2911		 * be the same. In such a case, force before_stamp
 2912		 * to be different from write_stamp. It doesn't
 2913		 * matter what it is, as long as it's different.
2914		 */
2915		if (!delta)
2916			rb_time_set(&cpu_buffer->before_stamp, 0);
2917
2918		/*
2919		 * If an event were to come in now, it would see that the
2920		 * write_stamp and the before_stamp are different, and assume
2921		 * that this event just added itself before updating
2922		 * the write stamp. The interrupting event will fix the
2923		 * write stamp for us, and use the before stamp as its delta.
2924		 */
2925
2926		/*
2927		 * This is on the tail page. It is possible that
2928		 * a write could come in and move the tail page
2929		 * and write to the next page. That is fine
2930		 * because we just shorten what is on this page.
2931		 */
2932		old_index += write_mask;
2933		new_index += write_mask;
2934		index = local_cmpxchg(&bpage->write, old_index, new_index);
2935		if (index == old_index) {
2936			/* update counters */
2937			local_sub(event_length, &cpu_buffer->entries_bytes);
2938			return 1;
2939		}
2940	}
2941
2942	/* could not discard */
2943	return 0;
2944}
2945
2946static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2947{
2948	local_inc(&cpu_buffer->committing);
2949	local_inc(&cpu_buffer->commits);
2950}
2951
2952static __always_inline void
2953rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
2954{
2955	unsigned long max_count;
2956
2957	/*
2958	 * We only race with interrupts and NMIs on this CPU.
2959	 * If we own the commit event, then we can commit
2960	 * all others that interrupted us, since the interruptions
2961	 * are in stack format (they finish before they come
2962	 * back to us). This allows us to do a simple loop to
2963	 * assign the commit to the tail.
2964	 */
2965 again:
2966	max_count = cpu_buffer->nr_pages * 100;
2967
2968	while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
2969		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
2970			return;
2971		if (RB_WARN_ON(cpu_buffer,
2972			       rb_is_reader_page(cpu_buffer->tail_page)))
2973			return;
2974		local_set(&cpu_buffer->commit_page->page->commit,
2975			  rb_page_write(cpu_buffer->commit_page));
2976		rb_inc_page(&cpu_buffer->commit_page);
2977		/* add barrier to keep gcc from optimizing too much */
2978		barrier();
2979	}
2980	while (rb_commit_index(cpu_buffer) !=
2981	       rb_page_write(cpu_buffer->commit_page)) {
2982
2983		local_set(&cpu_buffer->commit_page->page->commit,
2984			  rb_page_write(cpu_buffer->commit_page));
2985		RB_WARN_ON(cpu_buffer,
2986			   local_read(&cpu_buffer->commit_page->page->commit) &
2987			   ~RB_WRITE_MASK);
2988		barrier();
2989	}
2990
2991	/* again, keep gcc from optimizing */
2992	barrier();
2993
2994	/*
2995	 * If an interrupt came in just after the first while loop
2996	 * and pushed the tail page forward, we will be left with
2997	 * a dangling commit that will never go forward.
2998	 */
2999	if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
3000		goto again;
3001}
3002
3003static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
3004{
3005	unsigned long commits;
3006
3007	if (RB_WARN_ON(cpu_buffer,
3008		       !local_read(&cpu_buffer->committing)))
3009		return;
3010
3011 again:
3012	commits = local_read(&cpu_buffer->commits);
3013	/* synchronize with interrupts */
3014	barrier();
3015	if (local_read(&cpu_buffer->committing) == 1)
3016		rb_set_commit_to_write(cpu_buffer);
3017
3018	local_dec(&cpu_buffer->committing);
3019
3020	/* synchronize with interrupts */
3021	barrier();
3022
3023	/*
3024	 * Need to account for interrupts coming in between the
3025	 * updating of the commit page and the clearing of the
3026	 * committing counter.
3027	 */
3028	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
3029	    !local_read(&cpu_buffer->committing)) {
3030		local_inc(&cpu_buffer->committing);
3031		goto again;
3032	}
3033}
3034
3035static inline void rb_event_discard(struct ring_buffer_event *event)
3036{
3037	if (extended_time(event))
3038		event = skip_time_extend(event);
3039
3040	/* array[0] holds the actual length for the discarded event */
3041	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
3042	event->type_len = RINGBUF_TYPE_PADDING;
3043	/* time delta must be non zero */
3044	if (!event->time_delta)
3045		event->time_delta = 1;
3046}
3047
3048static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
3049		      struct ring_buffer_event *event)
3050{
3051	local_inc(&cpu_buffer->entries);
3052	rb_end_commit(cpu_buffer);
3053}
3054
3055static __always_inline void
3056rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
3057{
3058	size_t nr_pages;
3059	size_t dirty;
3060	size_t full;
3061
3062	if (buffer->irq_work.waiters_pending) {
3063		buffer->irq_work.waiters_pending = false;
 3064		/* irq_work_queue() supplies its own memory barriers */
3065		irq_work_queue(&buffer->irq_work.work);
3066	}
3067
3068	if (cpu_buffer->irq_work.waiters_pending) {
3069		cpu_buffer->irq_work.waiters_pending = false;
 3070		/* irq_work_queue() supplies its own memory barriers */
3071		irq_work_queue(&cpu_buffer->irq_work.work);
3072	}
3073
3074	if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched))
3075		return;
3076
3077	if (cpu_buffer->reader_page == cpu_buffer->commit_page)
3078		return;
3079
3080	if (!cpu_buffer->irq_work.full_waiters_pending)
3081		return;
3082
3083	cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
3084
3085	full = cpu_buffer->shortest_full;
3086	nr_pages = cpu_buffer->nr_pages;
3087	dirty = ring_buffer_nr_dirty_pages(buffer, cpu_buffer->cpu);
3088	if (full && nr_pages && (dirty * 100) <= full * nr_pages)
3089		return;
3090
3091	cpu_buffer->irq_work.wakeup_full = true;
3092	cpu_buffer->irq_work.full_waiters_pending = false;
 3093	/* irq_work_queue() supplies its own memory barriers */
3094	irq_work_queue(&cpu_buffer->irq_work.work);
3095}
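
/*
 * Example of the threshold check above: with nr_pages = 100 and
 * shortest_full = 25 (a waiter asked to be woken at 25% full), the
 * early return fires while dirty * 100 <= 25 * 100, so the irq_work
 * wakeup is only queued once more than 25 pages hold unread data.
 */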
3096
3097#ifdef CONFIG_RING_BUFFER_RECORD_RECURSION
3098# define do_ring_buffer_record_recursion()	\
3099	do_ftrace_record_recursion(_THIS_IP_, _RET_IP_)
3100#else
3101# define do_ring_buffer_record_recursion() do { } while (0)
3102#endif
3103
3104/*
3105 * The lock and unlock are done within a preempt disable section.
3106 * The current_context per_cpu variable can only be modified
3107 * by the current task between lock and unlock. But it can
3108 * be modified more than once via an interrupt. To pass this
3109 * information from the lock to the unlock without having to
3110 * access the 'in_interrupt()' functions again (which do show
 3111 * a bit of overhead in something as critical as function tracing),
3112 * we use a bitmask trick.
3113 *
3114 *  bit 1 =  NMI context
3115 *  bit 2 =  IRQ context
3116 *  bit 3 =  SoftIRQ context
3117 *  bit 4 =  normal context.
3118 *
3119 * This works because this is the order of contexts that can
3120 * preempt other contexts. A SoftIRQ never preempts an IRQ
3121 * context.
3122 *
3123 * When the context is determined, the corresponding bit is
3124 * checked and set (if it was set, then a recursion of that context
3125 * happened).
3126 *
3127 * On unlock, we need to clear this bit. To do so, just subtract
3128 * 1 from the current_context and AND it to itself.
3129 *
3130 * (binary)
3131 *  101 - 1 = 100
3132 *  101 & 100 = 100 (clearing bit zero)
3133 *
3134 *  1010 - 1 = 1001
3135 *  1010 & 1001 = 1000 (clearing bit 1)
3136 *
3137 * The least significant bit can be cleared this way, and it
3138 * just so happens that it is the same bit corresponding to
3139 * the current context.
3140 *
3141 * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit
3142 * is set when a recursion is detected at the current context, and if
3143 * the TRANSITION bit is already set, it will fail the recursion.
3144 * This is needed because there's a lag between the changing of
3145 * interrupt context and updating the preempt count. In this case,
3146 * a false positive will be found. To handle this, one extra recursion
3147 * is allowed, and this is done by the TRANSITION bit. If the TRANSITION
3148 * bit is already set, then it is considered a recursion and the function
3149 * ends. Otherwise, the TRANSITION bit is set, and that bit is returned.
3150 *
3151 * On the trace_recursive_unlock(), the TRANSITION bit will be the first
3152 * to be cleared. Even if it wasn't the context that set it. That is,
3153 * if an interrupt comes in while NORMAL bit is set and the ring buffer
3154 * is called before preempt_count() is updated, since the check will
3155 * be on the NORMAL bit, the TRANSITION bit will then be set. If an
3156 * NMI then comes in, it will set the NMI bit, but when the NMI code
3157 * does the trace_recursive_unlock() it will clear the TRANSITION bit
3158 * and leave the NMI bit set. But this is fine, because the interrupt
3159 * code that set the TRANSITION bit will then clear the NMI bit when it
3160 * calls trace_recursive_unlock(). If another NMI comes in, it will
3161 * set the TRANSITION bit and continue.
3162 *
 3163 * Note: The TRANSITION bit only handles a single transition between contexts.
3164 */
3165
3166static __always_inline int
3167trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
3168{
3169	unsigned int val = cpu_buffer->current_context;
3170	unsigned long pc = preempt_count();
3171	int bit;
3172
3173	if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
3174		bit = RB_CTX_NORMAL;
3175	else
3176		bit = pc & NMI_MASK ? RB_CTX_NMI :
3177			pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
3178
3179	if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) {
3180		/*
3181		 * It is possible that this was called by transitioning
3182		 * between interrupt context, and preempt_count() has not
3183		 * been updated yet. In this case, use the TRANSITION bit.
3184		 */
3185		bit = RB_CTX_TRANSITION;
3186		if (val & (1 << (bit + cpu_buffer->nest))) {
3187			do_ring_buffer_record_recursion();
3188			return 1;
3189		}
3190	}
3191
3192	val |= (1 << (bit + cpu_buffer->nest));
3193	cpu_buffer->current_context = val;
3194
3195	return 0;
3196}
3197
3198static __always_inline void
3199trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
3200{
3201	cpu_buffer->current_context &=
3202		cpu_buffer->current_context - (1 << cpu_buffer->nest);
3203}
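
/*
 * Illustrative userspace check (not kernel code) of the val & (val - 1)
 * trick described above: it clears exactly the least significant set
 * bit, which is always the bit of the context being unlocked.
 */
#include <assert.h>

int main(void)
{
	unsigned int val;

	val = 0x5;		/* bits 0 and 2 set */
	val &= val - 1;		/* clears bit 0 */
	assert(val == 0x4);

	val = 0xa;		/* bits 1 and 3 set */
	val &= val - 1;		/* clears bit 1 */
	assert(val == 0x8);
	return 0;
}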
3204
3205/* The recursive locking above uses 5 bits */
3206#define NESTED_BITS 5
3207
3208/**
3209 * ring_buffer_nest_start - Allow to trace while nested
3210 * @buffer: The ring buffer to modify
3211 *
3212 * The ring buffer has a safety mechanism to prevent recursion.
3213 * But there may be a case where a trace needs to be done while
3214 * tracing something else. In this case, calling this function
3215 * will allow this function to nest within a currently active
3216 * ring_buffer_lock_reserve().
3217 *
3218 * Call this function before calling another ring_buffer_lock_reserve() and
3219 * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
3220 */
3221void ring_buffer_nest_start(struct trace_buffer *buffer)
3222{
3223	struct ring_buffer_per_cpu *cpu_buffer;
3224	int cpu;
3225
3226	/* Enabled by ring_buffer_nest_end() */
3227	preempt_disable_notrace();
3228	cpu = raw_smp_processor_id();
3229	cpu_buffer = buffer->buffers[cpu];
3230	/* This is the shift value for the above recursive locking */
3231	cpu_buffer->nest += NESTED_BITS;
3232}
3233
3234/**
3235 * ring_buffer_nest_end - Allow to trace while nested
3236 * @buffer: The ring buffer to modify
3237 *
3238 * Must be called after ring_buffer_nest_start() and after the
3239 * ring_buffer_unlock_commit().
3240 */
3241void ring_buffer_nest_end(struct trace_buffer *buffer)
3242{
3243	struct ring_buffer_per_cpu *cpu_buffer;
3244	int cpu;
3245
3246	/* disabled by ring_buffer_nest_start() */
3247	cpu = raw_smp_processor_id();
3248	cpu_buffer = buffer->buffers[cpu];
3249	/* This is the shift value for the above recursive locking */
3250	cpu_buffer->nest -= NESTED_BITS;
3251	preempt_enable_notrace();
3252}
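
/*
 * Usage sketch (hypothetical caller): emitting a nested event while an
 * outer ring_buffer_lock_reserve() is still outstanding. Without the
 * nest_start()/nest_end() bracket, the inner reserve would be rejected
 * as recursion.
 */
static void example_nested_write(struct trace_buffer *buffer,
				 void *data, unsigned long len)
{
	struct ring_buffer_event *event;

	ring_buffer_nest_start(buffer);
	event = ring_buffer_lock_reserve(buffer, len);
	if (event) {
		memcpy(ring_buffer_event_data(event), data, len);
		ring_buffer_unlock_commit(buffer, event);
	}
	ring_buffer_nest_end(buffer);
}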
3253
3254/**
 3255 * ring_buffer_unlock_commit - commit a reserved event
3256 * @buffer: The buffer to commit to
3257 * @event: The event pointer to commit.
3258 *
3259 * This commits the data to the ring buffer, and releases any locks held.
3260 *
3261 * Must be paired with ring_buffer_lock_reserve.
3262 */
3263int ring_buffer_unlock_commit(struct trace_buffer *buffer,
3264			      struct ring_buffer_event *event)
3265{
3266	struct ring_buffer_per_cpu *cpu_buffer;
3267	int cpu = raw_smp_processor_id();
3268
3269	cpu_buffer = buffer->buffers[cpu];
3270
3271	rb_commit(cpu_buffer, event);
3272
3273	rb_wakeups(buffer, cpu_buffer);
3274
3275	trace_recursive_unlock(cpu_buffer);
3276
3277	preempt_enable_notrace();
3278
3279	return 0;
3280}
3281EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
3282
3283/* Special value to validate all deltas on a page. */
3284#define CHECK_FULL_PAGE		1L
3285
3286#ifdef CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS
3287static void dump_buffer_page(struct buffer_data_page *bpage,
3288			     struct rb_event_info *info,
3289			     unsigned long tail)
3290{
3291	struct ring_buffer_event *event;
3292	u64 ts, delta;
3293	int e;
3294
3295	ts = bpage->time_stamp;
3296	pr_warn("  [%lld] PAGE TIME STAMP\n", ts);
3297
3298	for (e = 0; e < tail; e += rb_event_length(event)) {
3299
3300		event = (struct ring_buffer_event *)(bpage->data + e);
3301
3302		switch (event->type_len) {
3303
3304		case RINGBUF_TYPE_TIME_EXTEND:
3305			delta = rb_event_time_stamp(event);
3306			ts += delta;
3307			pr_warn("  [%lld] delta:%lld TIME EXTEND\n", ts, delta);
3308			break;
3309
3310		case RINGBUF_TYPE_TIME_STAMP:
3311			delta = rb_event_time_stamp(event);
3312			ts = delta;
3313			pr_warn("  [%lld] absolute:%lld TIME STAMP\n", ts, delta);
3314			break;
3315
3316		case RINGBUF_TYPE_PADDING:
3317			ts += event->time_delta;
3318			pr_warn("  [%lld] delta:%d PADDING\n", ts, event->time_delta);
3319			break;
3320
3321		case RINGBUF_TYPE_DATA:
3322			ts += event->time_delta;
3323			pr_warn("  [%lld] delta:%d\n", ts, event->time_delta);
3324			break;
3325
3326		default:
3327			break;
3328		}
3329	}
3330}
3331
3332static DEFINE_PER_CPU(atomic_t, checking);
3333static atomic_t ts_dump;
3334
3335/*
3336 * Check if the current event time stamp matches the deltas on
3337 * the buffer page.
3338 */
3339static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
3340			 struct rb_event_info *info,
3341			 unsigned long tail)
3342{
3343	struct ring_buffer_event *event;
3344	struct buffer_data_page *bpage;
3345	u64 ts, delta;
3346	bool full = false;
3347	int e;
3348
3349	bpage = info->tail_page->page;
3350
3351	if (tail == CHECK_FULL_PAGE) {
3352		full = true;
3353		tail = local_read(&bpage->commit);
3354	} else if (info->add_timestamp &
3355		   (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)) {
3356		/* Ignore events with absolute time stamps */
3357		return;
3358	}
3359
3360	/*
3361	 * Do not check the first event (skip possible extends too).
3362	 * Also do not check if previous events have not been committed.
3363	 */
3364	if (tail <= 8 || tail > local_read(&bpage->commit))
3365		return;
3366
3367	/*
 3368	 * If this interrupted another event, skip the check; only the outermost event is validated.
3369	 */
3370	if (atomic_inc_return(this_cpu_ptr(&checking)) != 1)
3371		goto out;
3372
3373	ts = bpage->time_stamp;
3374
3375	for (e = 0; e < tail; e += rb_event_length(event)) {
3376
3377		event = (struct ring_buffer_event *)(bpage->data + e);
3378
3379		switch (event->type_len) {
3380
3381		case RINGBUF_TYPE_TIME_EXTEND:
3382			delta = rb_event_time_stamp(event);
3383			ts += delta;
3384			break;
3385
3386		case RINGBUF_TYPE_TIME_STAMP:
3387			delta = rb_event_time_stamp(event);
3388			ts = delta;
3389			break;
3390
3391		case RINGBUF_TYPE_PADDING:
3392			if (event->time_delta == 1)
3393				break;
3394			fallthrough;
3395		case RINGBUF_TYPE_DATA:
3396			ts += event->time_delta;
3397			break;
3398
3399		default:
3400			RB_WARN_ON(cpu_buffer, 1);
3401		}
3402	}
3403	if ((full && ts > info->ts) ||
3404	    (!full && ts + info->delta != info->ts)) {
3405		/* If another report is happening, ignore this one */
3406		if (atomic_inc_return(&ts_dump) != 1) {
3407			atomic_dec(&ts_dump);
3408			goto out;
3409		}
3410		atomic_inc(&cpu_buffer->record_disabled);
 3411		/* There are some cases during boot up where this can happen */
3412		WARN_ON_ONCE(system_state != SYSTEM_BOOTING);
 3413		pr_warn("[CPU: %d] TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%s\n",
3414			cpu_buffer->cpu,
3415			ts + info->delta, info->ts, info->delta,
3416			info->before, info->after,
3417			full ? " (full)" : "");
3418		dump_buffer_page(bpage, info, tail);
3419		atomic_dec(&ts_dump);
3420		/* Do not re-enable checking */
3421		return;
3422	}
3423out:
3424	atomic_dec(this_cpu_ptr(&checking));
3425}
3426#else
3427static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
3428			 struct rb_event_info *info,
3429			 unsigned long tail)
3430{
3431}
3432#endif /* CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS */
3433
3434static struct ring_buffer_event *
3435__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
3436		  struct rb_event_info *info)
3437{
3438	struct ring_buffer_event *event;
3439	struct buffer_page *tail_page;
3440	unsigned long tail, write, w;
3441	bool a_ok;
3442	bool b_ok;
3443
3444	/* Don't let the compiler play games with cpu_buffer->tail_page */
3445	tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
3446
3447 /*A*/	w = local_read(&tail_page->write) & RB_WRITE_MASK;
3448	barrier();
3449	b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
3450	a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
3451	barrier();
3452	info->ts = rb_time_stamp(cpu_buffer->buffer);
3453
3454	if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) {
3455		info->delta = info->ts;
3456	} else {
3457		/*
3458		 * If interrupting an event time update, we may need an
3459		 * absolute timestamp.
3460		 * Don't bother if this is the start of a new page (w == 0).
3461		 */
3462		if (unlikely(!a_ok || !b_ok || (info->before != info->after && w))) {
3463			info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND;
3464			info->length += RB_LEN_TIME_EXTEND;
3465		} else {
3466			info->delta = info->ts - info->after;
3467			if (unlikely(test_time_stamp(info->delta))) {
3468				info->add_timestamp |= RB_ADD_STAMP_EXTEND;
3469				info->length += RB_LEN_TIME_EXTEND;
3470			}
3471		}
3472	}
3473
3474 /*B*/	rb_time_set(&cpu_buffer->before_stamp, info->ts);
3475
3476 /*C*/	write = local_add_return(info->length, &tail_page->write);
3477
3478	/* set write to only the index of the write */
3479	write &= RB_WRITE_MASK;
3480
3481	tail = write - info->length;
3482
 3483	/* See if we shot past the end of this buffer page */
3484	if (unlikely(write > BUF_PAGE_SIZE)) {
 3485		/* before and after may now differ, fix it up */
3486		b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
3487		a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
3488		if (a_ok && b_ok && info->before != info->after)
3489			(void)rb_time_cmpxchg(&cpu_buffer->before_stamp,
3490					      info->before, info->after);
3491		if (a_ok && b_ok)
3492			check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
3493		return rb_move_tail(cpu_buffer, tail, info);
3494	}
3495
3496	if (likely(tail == w)) {
3497		u64 save_before;
3498		bool s_ok;
3499
3500		/* Nothing interrupted us between A and C */
3501 /*D*/		rb_time_set(&cpu_buffer->write_stamp, info->ts);
3502		barrier();
3503 /*E*/		s_ok = rb_time_read(&cpu_buffer->before_stamp, &save_before);
3504		RB_WARN_ON(cpu_buffer, !s_ok);
3505		if (likely(!(info->add_timestamp &
3506			     (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
3507			/* This did not interrupt any time update */
3508			info->delta = info->ts - info->after;
3509		else
3510			/* Just use full timestamp for interrupting event */
3511			info->delta = info->ts;
3512		barrier();
3513		check_buffer(cpu_buffer, info, tail);
3514		if (unlikely(info->ts != save_before)) {
3515			/* SLOW PATH - Interrupted between C and E */
3516
3517			a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
3518			RB_WARN_ON(cpu_buffer, !a_ok);
3519
3520			/* Write stamp must only go forward */
3521			if (save_before > info->after) {
3522				/*
3523				 * We do not care about the result, only that
3524				 * it gets updated atomically.
3525				 */
3526				(void)rb_time_cmpxchg(&cpu_buffer->write_stamp,
3527						      info->after, save_before);
3528			}
3529		}
3530	} else {
3531		u64 ts;
3532		/* SLOW PATH - Interrupted between A and C */
3533		a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
3534		/* Was interrupted before here, write_stamp must be valid */
3535		RB_WARN_ON(cpu_buffer, !a_ok);
3536		ts = rb_time_stamp(cpu_buffer->buffer);
3537		barrier();
3538 /*E*/		if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
3539		    info->after < ts &&
3540		    rb_time_cmpxchg(&cpu_buffer->write_stamp,
3541				    info->after, ts)) {
3542			/* Nothing came after this event between C and E */
3543			info->delta = ts - info->after;
3544		} else {
3545			/*
3546			 * Interrupted between C and E:
3547			 * Lost the previous events time stamp. Just set the
3548			 * delta to zero, and this will be the same time as
3549			 * the event this event interrupted. And the events that
3550			 * came after this will still be correct (as they would
 3551			 * have built their delta on the previous event).
3552			 */
3553			info->delta = 0;
3554		}
3555		info->ts = ts;
3556		info->add_timestamp &= ~RB_ADD_STAMP_FORCE;
3557	}
3558
3559	/*
3560	 * If this is the first commit on the page, then it has the same
3561	 * timestamp as the page itself.
3562	 */
3563	if (unlikely(!tail && !(info->add_timestamp &
3564				(RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
3565		info->delta = 0;
3566
3567	/* We reserved something on the buffer */
3568
3569	event = __rb_page_index(tail_page, tail);
3570	rb_update_event(cpu_buffer, event, info);
3571
3572	local_inc(&tail_page->entries);
3573
3574	/*
3575	 * If this is the first commit on the page, then update
3576	 * its timestamp.
3577	 */
3578	if (unlikely(!tail))
3579		tail_page->page->time_stamp = info->ts;
3580
3581	/* account for these added bytes */
3582	local_add(info->length, &cpu_buffer->entries_bytes);
3583
3584	return event;
3585}
3586
3587static __always_inline struct ring_buffer_event *
3588rb_reserve_next_event(struct trace_buffer *buffer,
3589		      struct ring_buffer_per_cpu *cpu_buffer,
3590		      unsigned long length)
3591{
3592	struct ring_buffer_event *event;
3593	struct rb_event_info info;
3594	int nr_loops = 0;
3595	int add_ts_default;
3596
3597	rb_start_commit(cpu_buffer);
3598	/* The commit page can not change after this */
3599
3600#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3601	/*
3602	 * Due to the ability to swap a cpu buffer from a buffer
3603	 * it is possible it was swapped before we committed.
3604	 * (committing stops a swap). We check for it here and
3605	 * if it happened, we have to fail the write.
3606	 */
3607	barrier();
3608	if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) {
3609		local_dec(&cpu_buffer->committing);
3610		local_dec(&cpu_buffer->commits);
3611		return NULL;
3612	}
3613#endif
3614
3615	info.length = rb_calculate_event_length(length);
3616
3617	if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) {
3618		add_ts_default = RB_ADD_STAMP_ABSOLUTE;
3619		info.length += RB_LEN_TIME_EXTEND;
3620	} else {
3621		add_ts_default = RB_ADD_STAMP_NONE;
3622	}
3623
3624 again:
3625	info.add_timestamp = add_ts_default;
3626	info.delta = 0;
3627
3628	/*
3629	 * We allow for interrupts to reenter here and do a trace.
3630	 * If one does, it will cause this original code to loop
3631	 * back here. Even with heavy interrupts happening, this
3632	 * should only happen a few times in a row. If this happens
3633	 * 1000 times in a row, there must be either an interrupt
3634	 * storm or we have something buggy.
3635	 * Bail!
3636	 */
3637	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
3638		goto out_fail;
3639
3640	event = __rb_reserve_next(cpu_buffer, &info);
3641
3642	if (unlikely(PTR_ERR(event) == -EAGAIN)) {
3643		if (info.add_timestamp & (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND))
3644			info.length -= RB_LEN_TIME_EXTEND;
3645		goto again;
3646	}
3647
3648	if (likely(event))
3649		return event;
3650 out_fail:
3651	rb_end_commit(cpu_buffer);
3652	return NULL;
3653}
3654
3655/**
3656 * ring_buffer_lock_reserve - reserve a part of the buffer
3657 * @buffer: the ring buffer to reserve from
3658 * @length: the length of the data to reserve (excluding event header)
3659 *
3660 * Returns a reserved event on the ring buffer to copy directly to.
3661 * The user of this interface will need to get the body to write into
3662 * and can use the ring_buffer_event_data() interface.
3663 *
3664 * The length is the length of the data needed, not the event length
3665 * which also includes the event header.
3666 *
3667 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
3668 * If NULL is returned, then nothing has been allocated or locked.
3669 */
3670struct ring_buffer_event *
3671ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length)
3672{
3673	struct ring_buffer_per_cpu *cpu_buffer;
3674	struct ring_buffer_event *event;
3675	int cpu;
3676
3677	/* If we are tracing schedule, we don't want to recurse */
3678	preempt_disable_notrace();
3679
3680	if (unlikely(atomic_read(&buffer->record_disabled)))
3681		goto out;
3682
3683	cpu = raw_smp_processor_id();
3684
3685	if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
3686		goto out;
3687
3688	cpu_buffer = buffer->buffers[cpu];
3689
3690	if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
3691		goto out;
3692
3693	if (unlikely(length > BUF_MAX_DATA_SIZE))
3694		goto out;
3695
3696	if (unlikely(trace_recursive_lock(cpu_buffer)))
3697		goto out;
3698
3699	event = rb_reserve_next_event(buffer, cpu_buffer, length);
3700	if (!event)
3701		goto out_unlock;
3702
3703	return event;
3704
3705 out_unlock:
3706	trace_recursive_unlock(cpu_buffer);
3707 out:
3708	preempt_enable_notrace();
3709	return NULL;
3710}
3711EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
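
/*
 * Usage sketch (hypothetical caller): the reserve/fill/commit pattern
 * described above, with ring_buffer_discard_commit() (defined below)
 * used to drop an event after reserving it. struct example_entry is
 * illustrative only.
 */
struct example_entry {
	unsigned long ip;
};

static void example_trace_ip(struct trace_buffer *buffer, unsigned long ip)
{
	struct ring_buffer_event *event;
	struct example_entry *entry;

	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (!event)
		return;		/* disabled, full (non-overwrite), or recursing */

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	if (!ip)		/* contrived filter: drop empty samples */
		ring_buffer_discard_commit(buffer, event);
	else
		ring_buffer_unlock_commit(buffer, event);
}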
3712
3713/*
 3714 * Decrement the entries counter of the page that an event is on.
3715 * The event does not even need to exist, only the pointer
3716 * to the page it is on. This may only be called before the commit
3717 * takes place.
3718 */
3719static inline void
3720rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
3721		   struct ring_buffer_event *event)
3722{
3723	unsigned long addr = (unsigned long)event;
3724	struct buffer_page *bpage = cpu_buffer->commit_page;
3725	struct buffer_page *start;
3726
3727	addr &= PAGE_MASK;
3728
3729	/* Do the likely case first */
3730	if (likely(bpage->page == (void *)addr)) {
3731		local_dec(&bpage->entries);
3732		return;
3733	}
3734
3735	/*
3736	 * Because the commit page may be on the reader page we
3737	 * start with the next page and check the end loop there.
3738	 */
3739	rb_inc_page(&bpage);
3740	start = bpage;
3741	do {
3742		if (bpage->page == (void *)addr) {
3743			local_dec(&bpage->entries);
3744			return;
3745		}
3746		rb_inc_page(&bpage);
3747	} while (bpage != start);
3748
3749	/* commit not part of this buffer?? */
3750	RB_WARN_ON(cpu_buffer, 1);
3751}
3752
3753/**
3754 * ring_buffer_discard_commit - discard an event that has not been committed
3755 * @buffer: the ring buffer
3756 * @event: non committed event to discard
3757 *
3758 * Sometimes an event that is in the ring buffer needs to be ignored.
3759 * This function lets the user discard an event in the ring buffer
3760 * and then that event will not be read later.
3761 *
3762 * This function only works if it is called before the item has been
3763 * committed. It will try to free the event from the ring buffer
3764 * if another event has not been added behind it.
3765 *
3766 * If another event has been added behind it, it will set the event
3767 * up as discarded, and perform the commit.
3768 *
3769 * If this function is called, do not call ring_buffer_unlock_commit on
3770 * the event.
3771 */
3772void ring_buffer_discard_commit(struct trace_buffer *buffer,
3773				struct ring_buffer_event *event)
3774{
3775	struct ring_buffer_per_cpu *cpu_buffer;
3776	int cpu;
3777
3778	/* The event is discarded regardless */
3779	rb_event_discard(event);
3780
3781	cpu = smp_processor_id();
3782	cpu_buffer = buffer->buffers[cpu];
3783
3784	/*
3785	 * This must only be called if the event has not been
3786	 * committed yet. Thus we can assume that preemption
3787	 * is still disabled.
3788	 */
3789	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
3790
3791	rb_decrement_entry(cpu_buffer, event);
3792	if (rb_try_to_discard(cpu_buffer, event))
3793		goto out;
3794
3795 out:
3796	rb_end_commit(cpu_buffer);
3797
3798	trace_recursive_unlock(cpu_buffer);
3799
3800	preempt_enable_notrace();
3801
3802}
3803EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
3804
3805/**
3806 * ring_buffer_write - write data to the buffer without reserving
3807 * @buffer: The ring buffer to write to.
3808 * @length: The length of the data being written (excluding the event header)
3809 * @data: The data to write to the buffer.
3810 *
3811 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
3812 * one function. If you already have the data to write to the buffer, it
3813 * may be easier to simply call this function.
3814 *
3815 * Note, like ring_buffer_lock_reserve, the length is the length of the data
3816 * and not the length of the event which would hold the header.
3817 */
3818int ring_buffer_write(struct trace_buffer *buffer,
3819		      unsigned long length,
3820		      void *data)
3821{
3822	struct ring_buffer_per_cpu *cpu_buffer;
3823	struct ring_buffer_event *event;
3824	void *body;
3825	int ret = -EBUSY;
3826	int cpu;
3827
3828	preempt_disable_notrace();
3829
3830	if (atomic_read(&buffer->record_disabled))
3831		goto out;
3832
3833	cpu = raw_smp_processor_id();
3834
3835	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3836		goto out;
3837
3838	cpu_buffer = buffer->buffers[cpu];
3839
3840	if (atomic_read(&cpu_buffer->record_disabled))
3841		goto out;
3842
3843	if (length > BUF_MAX_DATA_SIZE)
3844		goto out;
3845
3846	if (unlikely(trace_recursive_lock(cpu_buffer)))
3847		goto out;
3848
3849	event = rb_reserve_next_event(buffer, cpu_buffer, length);
3850	if (!event)
3851		goto out_unlock;
3852
3853	body = rb_event_data(event);
3854
3855	memcpy(body, data, length);
3856
3857	rb_commit(cpu_buffer, event);
3858
3859	rb_wakeups(buffer, cpu_buffer);
3860
3861	ret = 0;
3862
3863 out_unlock:
3864	trace_recursive_unlock(cpu_buffer);
3865
3866 out:
3867	preempt_enable_notrace();
3868
3869	return ret;
3870}
3871EXPORT_SYMBOL_GPL(ring_buffer_write);
3872
3873static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
3874{
3875	struct buffer_page *reader = cpu_buffer->reader_page;
3876	struct buffer_page *head = rb_set_head_page(cpu_buffer);
3877	struct buffer_page *commit = cpu_buffer->commit_page;
3878
3879	/* In case of error, head will be NULL */
3880	if (unlikely(!head))
3881		return true;
3882
3883	/* Reader should exhaust content in reader page */
3884	if (reader->read != rb_page_commit(reader))
3885		return false;
3886
3887	/*
3888	 * If writers are committing on the reader page, knowing all
3889	 * committed content has been read, the ring buffer is empty.
3890	 */
3891	if (commit == reader)
3892		return true;
3893
3894	/*
3895	 * If writers are committing on a page other than reader page
3896	 * and head page, there should always be content to read.
3897	 */
3898	if (commit != head)
3899		return false;
3900
3901	/*
3902	 * Writers are committing on the head page; we just need to
3903	 * check whether any data has been committed. The reader will
3904	 * swap the reader page with the head page when it needs to read.
3905	 */
3906	return rb_page_commit(commit) == 0;
3907}
3908
3909/**
3910 * ring_buffer_record_disable - stop all writes into the buffer
3911 * @buffer: The ring buffer to stop writes to.
3912 *
3913 * This prevents all writes to the buffer. Any attempt to write
3914 * to the buffer after this will fail and return NULL.
3915 *
3916 * The caller should call synchronize_rcu() after this.
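 *
 * For example (a sketch of the expected pairing):
 *	ring_buffer_record_disable(buffer);
 *	synchronize_rcu();
 *	... examine or reset the buffer here ...
 *	ring_buffer_record_enable(buffer);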
3917 */
3918void ring_buffer_record_disable(struct trace_buffer *buffer)
3919{
3920	atomic_inc(&buffer->record_disabled);
3921}
3922EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
3923
3924/**
3925 * ring_buffer_record_enable - enable writes to the buffer
3926 * @buffer: The ring buffer to enable writes
3927 *
3928 * Note, multiple disables will need the same number of enables
3929 * to truly enable the writing (much like preempt_disable).
3930 */
3931void ring_buffer_record_enable(struct trace_buffer *buffer)
3932{
3933	atomic_dec(&buffer->record_disabled);
3934}
3935EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
3936
3937/**
3938 * ring_buffer_record_off - stop all writes into the buffer
3939 * @buffer: The ring buffer to stop writes to.
3940 *
3941 * This prevents all writes to the buffer. Any attempt to write
3942 * to the buffer after this will fail and return NULL.
3943 *
3944 * This is different from ring_buffer_record_disable(), as
3945 * it works like an on/off switch, whereas the disable() version
3946 * must be paired with an enable().
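 *
 * For example, unlike the counting disable()/enable() pair, repeated
 * calls do not nest (a sketch):
 *	ring_buffer_record_off(buffer);
 *	ring_buffer_record_off(buffer);
 *	ring_buffer_record_on(buffer);
 * and after the single _on() call above, writes are enabled again.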
3947 */
3948void ring_buffer_record_off(struct trace_buffer *buffer)
3949{
3950	unsigned int rd;
3951	unsigned int new_rd;
3952
3953	do {
3954		rd = atomic_read(&buffer->record_disabled);
3955		new_rd = rd | RB_BUFFER_OFF;
3956	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3957}
3958EXPORT_SYMBOL_GPL(ring_buffer_record_off);
3959
3960/**
3961 * ring_buffer_record_on - restart writes into the buffer
3962 * @buffer: The ring buffer to start writes to.
3963 *
3964 * This enables all writes to the buffer that was disabled by
3965 * ring_buffer_record_off().
3966 *
3967 * This is different from ring_buffer_record_enable(), as
3968 * it works like an on/off switch, whereas the enable() version
3969 * must be paired with a disable().
3970 */
3971void ring_buffer_record_on(struct trace_buffer *buffer)
3972{
3973	unsigned int rd;
3974	unsigned int new_rd;
3975
3976	do {
3977		rd = atomic_read(&buffer->record_disabled);
3978		new_rd = rd & ~RB_BUFFER_OFF;
3979	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3980}
3981EXPORT_SYMBOL_GPL(ring_buffer_record_on);
3982
3983/**
3984 * ring_buffer_record_is_on - return true if the ring buffer can write
3985 * @buffer: The ring buffer to see if write is enabled
3986 *
3987 * Returns true if the ring buffer is in a state that it accepts writes.
3988 */
3989bool ring_buffer_record_is_on(struct trace_buffer *buffer)
3990{
3991	return !atomic_read(&buffer->record_disabled);
3992}
3993
3994/**
3995 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
3996 * @buffer: The ring buffer to see if write is set enabled
3997 *
3998 * Returns true if the ring buffer is set writable by ring_buffer_record_on().
3999 * Note that this does NOT mean it is in a writable state.
4000 *
4001 * It may return true when the ring buffer has been disabled by
4002 * ring_buffer_record_disable(), as that is a temporary disabling of
4003 * the ring buffer.
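 *
 * For example, after ring_buffer_record_disable(buffer),
 * ring_buffer_record_is_on() returns false while
 * ring_buffer_record_is_set_on() still returns true.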
4004 */
4005bool ring_buffer_record_is_set_on(struct trace_buffer *buffer)
4006{
4007	return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
4008}
4009
4010/**
4011 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
4012 * @buffer: The ring buffer to stop writes to.
4013 * @cpu: The CPU buffer to stop
4014 *
4015 * This prevents all writes to the buffer. Any attempt to write
4016 * to the buffer after this will fail and return NULL.
4017 *
4018 * The caller should call synchronize_rcu() after this.
4019 */
4020void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu)
4021{
4022	struct ring_buffer_per_cpu *cpu_buffer;
4023
4024	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4025		return;
4026
4027	cpu_buffer = buffer->buffers[cpu];
4028	atomic_inc(&cpu_buffer->record_disabled);
4029}
4030EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
4031
4032/**
4033 * ring_buffer_record_enable_cpu - enable writes to the buffer
4034 * @buffer: The ring buffer to enable writes
4035 * @cpu: The CPU to enable.
4036 *
4037 * Note, multiple disables will need the same number of enables
4038 * to truly enable the writing (much like preempt_disable).
4039 */
4040void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu)
4041{
4042	struct ring_buffer_per_cpu *cpu_buffer;
4043
4044	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4045		return;
4046
4047	cpu_buffer = buffer->buffers[cpu];
4048	atomic_dec(&cpu_buffer->record_disabled);
4049}
4050EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
4051
4052/*
4053 * The total entries in the ring buffer is the running counter
4054 * of entries entered into the ring buffer, minus the sum of
4055 * the entries read from the ring buffer and the number of
4056 * entries that were overwritten.
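 *
 * For example, if 100 events were written, 10 were overwritten and
 * 30 have been read, 100 - (10 + 30) = 60 entries remain.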
4057 */
4058static inline unsigned long
4059rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
4060{
4061	return local_read(&cpu_buffer->entries) -
4062		(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
4063}
4064
4065/**
4066 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
4067 * @buffer: The ring buffer
4068 * @cpu: The per CPU buffer to read from.
4069 */
4070u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
4071{
4072	unsigned long flags;
4073	struct ring_buffer_per_cpu *cpu_buffer;
4074	struct buffer_page *bpage;
4075	u64 ret = 0;
4076
4077	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4078		return 0;
4079
4080	cpu_buffer = buffer->buffers[cpu];
4081	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4082	/*
4083	 * If the tail is on the reader_page, the oldest time stamp is
4084	 * on the reader page.
4085	 */
4086	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
4087		bpage = cpu_buffer->reader_page;
4088	else
4089		bpage = rb_set_head_page(cpu_buffer);
4090	if (bpage)
4091		ret = bpage->page->time_stamp;
4092	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4093
4094	return ret;
4095}
4096EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
4097
4098/**
4099 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
4100 * @buffer: The ring buffer
4101 * @cpu: The per CPU buffer to read from.
4102 */
4103unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu)
4104{
4105	struct ring_buffer_per_cpu *cpu_buffer;
4106	unsigned long ret;
4107
4108	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4109		return 0;
4110
4111	cpu_buffer = buffer->buffers[cpu];
4112	ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
4113
4114	return ret;
4115}
4116EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
4117
4118/**
4119 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
4120 * @buffer: The ring buffer
4121 * @cpu: The per CPU buffer to get the entries from.
4122 */
4123unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu)
4124{
4125	struct ring_buffer_per_cpu *cpu_buffer;
4126
4127	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4128		return 0;
4129
4130	cpu_buffer = buffer->buffers[cpu];
4131
4132	return rb_num_of_entries(cpu_buffer);
4133}
4134EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
4135
4136/**
4137 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
4138 * buffer wrapping around (only if RB_FL_OVERWRITE is on).
4139 * @buffer: The ring buffer
4140 * @cpu: The per CPU buffer to get the number of overruns from
4141 */
4142unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu)
4143{
4144	struct ring_buffer_per_cpu *cpu_buffer;
4145	unsigned long ret;
4146
4147	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4148		return 0;
4149
4150	cpu_buffer = buffer->buffers[cpu];
4151	ret = local_read(&cpu_buffer->overrun);
4152
4153	return ret;
4154}
4155EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
4156
4157/**
4158 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
4159 * commits failing due to the buffer wrapping around while there are uncommitted
4160 * events, such as during an interrupt storm.
4161 * @buffer: The ring buffer
4162 * @cpu: The per CPU buffer to get the number of overruns from
4163 */
4164unsigned long
4165ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu)
4166{
4167	struct ring_buffer_per_cpu *cpu_buffer;
4168	unsigned long ret;
4169
4170	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4171		return 0;
4172
4173	cpu_buffer = buffer->buffers[cpu];
4174	ret = local_read(&cpu_buffer->commit_overrun);
4175
4176	return ret;
4177}
4178EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
4179
4180/**
4181 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
4182 * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
4183 * @buffer: The ring buffer
4184 * @cpu: The per CPU buffer to get the number of overruns from
4185 */
4186unsigned long
4187ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu)
4188{
4189	struct ring_buffer_per_cpu *cpu_buffer;
4190	unsigned long ret;
4191
4192	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4193		return 0;
4194
4195	cpu_buffer = buffer->buffers[cpu];
4196	ret = local_read(&cpu_buffer->dropped_events);
4197
4198	return ret;
4199}
4200EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
4201
4202/**
4203 * ring_buffer_read_events_cpu - get the number of events successfully read
4204 * @buffer: The ring buffer
4205 * @cpu: The per CPU buffer to get the number of events read
4206 */
4207unsigned long
4208ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu)
4209{
4210	struct ring_buffer_per_cpu *cpu_buffer;
4211
4212	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4213		return 0;
4214
4215	cpu_buffer = buffer->buffers[cpu];
4216	return cpu_buffer->read;
4217}
4218EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
4219
4220/**
4221 * ring_buffer_entries - get the number of entries in a buffer
4222 * @buffer: The ring buffer
4223 *
4224 * Returns the total number of entries in the ring buffer
4225 * (all CPU entries)
4226 */
4227unsigned long ring_buffer_entries(struct trace_buffer *buffer)
4228{
4229	struct ring_buffer_per_cpu *cpu_buffer;
4230	unsigned long entries = 0;
4231	int cpu;
4232
4233	/* if you care about this being correct, lock the buffer */
4234	for_each_buffer_cpu(buffer, cpu) {
4235		cpu_buffer = buffer->buffers[cpu];
4236		entries += rb_num_of_entries(cpu_buffer);
4237	}
4238
4239	return entries;
4240}
4241EXPORT_SYMBOL_GPL(ring_buffer_entries);
4242
4243/**
4244 * ring_buffer_overruns - get the number of overruns in buffer
4245 * @buffer: The ring buffer
4246 *
4247 * Returns the total number of overruns in the ring buffer
4248 * (all CPU entries)
4249 */
4250unsigned long ring_buffer_overruns(struct trace_buffer *buffer)
4251{
4252	struct ring_buffer_per_cpu *cpu_buffer;
4253	unsigned long overruns = 0;
4254	int cpu;
4255
4256	/* if you care about this being correct, lock the buffer */
4257	for_each_buffer_cpu(buffer, cpu) {
4258		cpu_buffer = buffer->buffers[cpu];
4259		overruns += local_read(&cpu_buffer->overrun);
4260	}
4261
4262	return overruns;
4263}
4264EXPORT_SYMBOL_GPL(ring_buffer_overruns);
4265
4266static void rb_iter_reset(struct ring_buffer_iter *iter)
4267{
4268	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4269
4270	/* Iterator usage is expected to have record disabled */
4271	iter->head_page = cpu_buffer->reader_page;
4272	iter->head = cpu_buffer->reader_page->read;
4273	iter->next_event = iter->head;
4274
4275	iter->cache_reader_page = iter->head_page;
4276	iter->cache_read = cpu_buffer->read;
4277
4278	if (iter->head) {
4279		iter->read_stamp = cpu_buffer->read_stamp;
4280		iter->page_stamp = cpu_buffer->reader_page->page->time_stamp;
4281	} else {
4282		iter->read_stamp = iter->head_page->page->time_stamp;
4283		iter->page_stamp = iter->read_stamp;
4284	}
4285}
4286
4287/**
4288 * ring_buffer_iter_reset - reset an iterator
4289 * @iter: The iterator to reset
4290 *
4291 * Resets the iterator, so that it will start from the beginning
4292 * again.
4293 */
4294void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
4295{
4296	struct ring_buffer_per_cpu *cpu_buffer;
4297	unsigned long flags;
4298
4299	if (!iter)
4300		return;
4301
4302	cpu_buffer = iter->cpu_buffer;
4303
4304	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4305	rb_iter_reset(iter);
4306	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4307}
4308EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
4309
4310/**
4311 * ring_buffer_iter_empty - check if an iterator has no more to read
4312 * @iter: The iterator to check
4313 */
4314int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
4315{
4316	struct ring_buffer_per_cpu *cpu_buffer;
4317	struct buffer_page *reader;
4318	struct buffer_page *head_page;
4319	struct buffer_page *commit_page;
4320	struct buffer_page *curr_commit_page;
4321	unsigned commit;
4322	u64 curr_commit_ts;
4323	u64 commit_ts;
4324
4325	cpu_buffer = iter->cpu_buffer;
4326	reader = cpu_buffer->reader_page;
4327	head_page = cpu_buffer->head_page;
4328	commit_page = cpu_buffer->commit_page;
4329	commit_ts = commit_page->page->time_stamp;
4330
4331	/*
4332	 * When the writer goes across pages, it issues a cmpxchg which
4333	 * is a mb(), which will synchronize with the rmb here.
4334	 * (see rb_tail_page_update())
4335	 */
4336	smp_rmb();
4337	commit = rb_page_commit(commit_page);
4338	/* We want to make sure that the commit page doesn't change */
4339	smp_rmb();
4340
4341	/* Make sure commit page didn't change */
4342	curr_commit_page = READ_ONCE(cpu_buffer->commit_page);
4343	curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp);
4344
4345	/* If the commit page changed, then there's more data */
4346	if (curr_commit_page != commit_page ||
4347	    curr_commit_ts != commit_ts)
4348		return 0;
4349
4350	/* Still racy, as it may return a false positive, but that's OK */
4351	return ((iter->head_page == commit_page && iter->head >= commit) ||
4352		(iter->head_page == reader && commit_page == head_page &&
4353		 head_page->read == commit &&
4354		 iter->head == rb_page_commit(cpu_buffer->reader_page)));
4355}
4356EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
4357
4358static void
4359rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
4360		     struct ring_buffer_event *event)
4361{
4362	u64 delta;
4363
4364	switch (event->type_len) {
4365	case RINGBUF_TYPE_PADDING:
4366		return;
4367
4368	case RINGBUF_TYPE_TIME_EXTEND:
4369		delta = rb_event_time_stamp(event);
4370		cpu_buffer->read_stamp += delta;
4371		return;
4372
4373	case RINGBUF_TYPE_TIME_STAMP:
4374		delta = rb_event_time_stamp(event);
4375		cpu_buffer->read_stamp = delta;
4376		return;
4377
4378	case RINGBUF_TYPE_DATA:
4379		cpu_buffer->read_stamp += event->time_delta;
4380		return;
4381
4382	default:
4383		RB_WARN_ON(cpu_buffer, 1);
4384	}
4385	return;
4386}
4387
4388static void
4389rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
4390			  struct ring_buffer_event *event)
4391{
4392	u64 delta;
4393
4394	switch (event->type_len) {
4395	case RINGBUF_TYPE_PADDING:
4396		return;
4397
4398	case RINGBUF_TYPE_TIME_EXTEND:
4399		delta = rb_event_time_stamp(event);
4400		iter->read_stamp += delta;
4401		return;
4402
4403	case RINGBUF_TYPE_TIME_STAMP:
4404		delta = rb_event_time_stamp(event);
4405		iter->read_stamp = delta;
4406		return;
4407
4408	case RINGBUF_TYPE_DATA:
4409		iter->read_stamp += event->time_delta;
4410		return;
4411
4412	default:
4413		RB_WARN_ON(iter->cpu_buffer, 1);
4414	}
4415	return;
4416}
4417
4418static struct buffer_page *
4419rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
4420{
4421	struct buffer_page *reader = NULL;
4422	unsigned long overwrite;
4423	unsigned long flags;
4424	int nr_loops = 0;
4425	int ret;
4426
4427	local_irq_save(flags);
4428	arch_spin_lock(&cpu_buffer->lock);
4429
4430 again:
4431	/*
4432	 * This should normally only loop twice. But because the
4433	 * start of the reader inserts an empty page, it causes
4434	 * a case where we will loop three times. There should be no
4435	 * reason to loop four times (that I know of).
4436	 */
4437	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
4438		reader = NULL;
4439		goto out;
4440	}
4441
4442	reader = cpu_buffer->reader_page;
4443
4444	/* If there's more to read, return this page */
4445	if (cpu_buffer->reader_page->read < rb_page_size(reader))
4446		goto out;
4447
4448	/* Never should we have an index greater than the size */
4449	if (RB_WARN_ON(cpu_buffer,
4450		       cpu_buffer->reader_page->read > rb_page_size(reader)))
4451		goto out;
4452
4453	/* check if we caught up to the tail */
4454	reader = NULL;
4455	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
4456		goto out;
4457
4458	/* Don't bother swapping if the ring buffer is empty */
4459	if (rb_num_of_entries(cpu_buffer) == 0)
4460		goto out;
4461
4462	/*
4463	 * Reset the reader page to size zero.
4464	 */
4465	local_set(&cpu_buffer->reader_page->write, 0);
4466	local_set(&cpu_buffer->reader_page->entries, 0);
4467	local_set(&cpu_buffer->reader_page->page->commit, 0);
4468	cpu_buffer->reader_page->real_end = 0;
4469
4470 spin:
4471	/*
4472	 * Splice the empty reader page into the list around the head.
4473	 */
4474	reader = rb_set_head_page(cpu_buffer);
4475	if (!reader)
4476		goto out;
4477	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
4478	cpu_buffer->reader_page->list.prev = reader->list.prev;
4479
4480	/*
4481	 * cpu_buffer->pages just needs to point to the buffer, it
4482	 *  has no specific buffer page to point to. Let's move it out
4483	 *  of our way so we don't accidentally swap it.
4484	 */
4485	cpu_buffer->pages = reader->list.prev;
4486
4487	/* The reader page will be pointing to the new head */
4488	rb_set_list_to_head(&cpu_buffer->reader_page->list);
4489
4490	/*
4491	 * We want to make sure we read the overruns after we set up our
4492	 * pointers to the next object. The writer side does a
4493	 * cmpxchg to cross pages which acts as the mb on the writer
4494	 * side. Note, the reader will constantly fail the swap
4495	 * while the writer is updating the pointers, so this
4496	 * guarantees that the overwrite recorded here is the one we
4497	 * want to compare with the last_overrun.
4498	 */
4499	smp_mb();
4500	overwrite = local_read(&(cpu_buffer->overrun));
4501
4502	/*
4503	 * Here's the tricky part.
4504	 *
4505	 * We need to move the pointer past the header page.
4506	 * But we can only do that if a writer is not currently
4507	 * moving it. The page before the header page has the
4508	 * flag bit '1' set if it is pointing to the page we want,
4509	 * but if the writer is in the process of moving it
4510	 * then it will be '2', or '0' if it has already been moved.
4511	 */
4512
4513	ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
4514
4515	/*
4516	 * If we did not convert it, then we must try again.
4517	 */
4518	if (!ret)
4519		goto spin;
4520
4521	/*
4522	 * Yay! We succeeded in replacing the page.
4523	 *
4524	 * Now make the new head point back to the reader page.
4525	 */
4526	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
4527	rb_inc_page(&cpu_buffer->head_page);
4528
4529	local_inc(&cpu_buffer->pages_read);
4530
4531	/* Finally update the reader page to the new head */
4532	cpu_buffer->reader_page = reader;
4533	cpu_buffer->reader_page->read = 0;
4534
4535	if (overwrite != cpu_buffer->last_overrun) {
4536		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
4537		cpu_buffer->last_overrun = overwrite;
4538	}
4539
4540	goto again;
4541
4542 out:
4543	/* Update the read_stamp on the first event */
4544	if (reader && reader->read == 0)
4545		cpu_buffer->read_stamp = reader->page->time_stamp;
4546
4547	arch_spin_unlock(&cpu_buffer->lock);
4548	local_irq_restore(flags);
4549
4550	return reader;
4551}
4552
4553static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
4554{
4555	struct ring_buffer_event *event;
4556	struct buffer_page *reader;
4557	unsigned length;
4558
4559	reader = rb_get_reader_page(cpu_buffer);
4560
4561	/* This function should not be called when buffer is empty */
4562	if (RB_WARN_ON(cpu_buffer, !reader))
4563		return;
4564
4565	event = rb_reader_event(cpu_buffer);
4566
4567	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
4568		cpu_buffer->read++;
4569
4570	rb_update_read_stamp(cpu_buffer, event);
4571
4572	length = rb_event_length(event);
4573	cpu_buffer->reader_page->read += length;
4574}
4575
4576static void rb_advance_iter(struct ring_buffer_iter *iter)
4577{
4578	struct ring_buffer_per_cpu *cpu_buffer;
4579
4580	cpu_buffer = iter->cpu_buffer;
4581
4582	/* If head == next_event then we need to jump to the next event */
4583	if (iter->head == iter->next_event) {
4584		/* If the event gets overwritten again, there's nothing to do */
4585		if (rb_iter_head_event(iter) == NULL)
4586			return;
4587	}
4588
4589	iter->head = iter->next_event;
4590
4591	/*
4592	 * Check if we are at the end of the buffer.
4593	 */
4594	if (iter->next_event >= rb_page_size(iter->head_page)) {
4595		/* discarded commits can make the page empty */
4596		if (iter->head_page == cpu_buffer->commit_page)
4597			return;
4598		rb_inc_iter(iter);
4599		return;
4600	}
4601
4602	rb_update_iter_read_stamp(iter, iter->event);
4603}
4604
4605static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
4606{
4607	return cpu_buffer->lost_events;
4608}
4609
4610static struct ring_buffer_event *
4611rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
4612	       unsigned long *lost_events)
4613{
4614	struct ring_buffer_event *event;
4615	struct buffer_page *reader;
4616	int nr_loops = 0;
4617
4618	if (ts)
4619		*ts = 0;
4620 again:
4621	/*
4622	 * We repeat when a time extend is encountered.
4623	 * Since the time extend is always attached to a data event,
4624	 * we should never loop more than once.
4625	 * (We never hit the following condition more than twice).
4626	 */
4627	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
4628		return NULL;
4629
4630	reader = rb_get_reader_page(cpu_buffer);
4631	if (!reader)
4632		return NULL;
4633
4634	event = rb_reader_event(cpu_buffer);
4635
4636	switch (event->type_len) {
4637	case RINGBUF_TYPE_PADDING:
4638		if (rb_null_event(event))
4639			RB_WARN_ON(cpu_buffer, 1);
4640		/*
4641		 * Because the writer could be discarding every
4642		 * event it creates (which would probably be bad)
4643		 * if we were to go back to "again" then we may never
4644		 * catch up, and will trigger the warn on, or lock
4645		 * the box. Return the padding, and we will release
4646		 * the current locks, and try again.
4647		 */
4648		return event;
4649
4650	case RINGBUF_TYPE_TIME_EXTEND:
4651		/* Internal data, OK to advance */
4652		rb_advance_reader(cpu_buffer);
4653		goto again;
4654
4655	case RINGBUF_TYPE_TIME_STAMP:
4656		if (ts) {
4657			*ts = rb_event_time_stamp(event);
4658			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4659							 cpu_buffer->cpu, ts);
4660		}
4661		/* Internal data, OK to advance */
4662		rb_advance_reader(cpu_buffer);
4663		goto again;
4664
4665	case RINGBUF_TYPE_DATA:
4666		if (ts && !(*ts)) {
4667			*ts = cpu_buffer->read_stamp + event->time_delta;
4668			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4669							 cpu_buffer->cpu, ts);
4670		}
4671		if (lost_events)
4672			*lost_events = rb_lost_events(cpu_buffer);
4673		return event;
4674
4675	default:
4676		RB_WARN_ON(cpu_buffer, 1);
4677	}
4678
4679	return NULL;
4680}
4681EXPORT_SYMBOL_GPL(ring_buffer_peek);
4682
4683static struct ring_buffer_event *
4684rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
4685{
4686	struct trace_buffer *buffer;
4687	struct ring_buffer_per_cpu *cpu_buffer;
4688	struct ring_buffer_event *event;
4689	int nr_loops = 0;
4690
4691	if (ts)
4692		*ts = 0;
4693
4694	cpu_buffer = iter->cpu_buffer;
4695	buffer = cpu_buffer->buffer;
4696
4697	/*
4698	 * Check if someone performed a consuming read to
4699	 * the buffer. A consuming read invalidates the iterator
4700	 * and we need to reset the iterator in this case.
4701	 */
4702	if (unlikely(iter->cache_read != cpu_buffer->read ||
4703		     iter->cache_reader_page != cpu_buffer->reader_page))
4704		rb_iter_reset(iter);
4705
4706 again:
4707	if (ring_buffer_iter_empty(iter))
4708		return NULL;
4709
4710	/*
4711	 * As the writer can mess with what the iterator is trying
4712	 * to read, just give up if we fail to get an event after
4713	 * three tries. The iterator is not as reliable when reading
4714	 * the ring buffer with an active write as the consumer is.
4715	 * Do not warn if three failures are reached.
4716	 */
4717	if (++nr_loops > 3)
4718		return NULL;
4719
4720	if (rb_per_cpu_empty(cpu_buffer))
4721		return NULL;
4722
4723	if (iter->head >= rb_page_size(iter->head_page)) {
4724		rb_inc_iter(iter);
4725		goto again;
4726	}
4727
4728	event = rb_iter_head_event(iter);
4729	if (!event)
4730		goto again;
4731
4732	switch (event->type_len) {
4733	case RINGBUF_TYPE_PADDING:
4734		if (rb_null_event(event)) {
4735			rb_inc_iter(iter);
4736			goto again;
4737		}
4738		rb_advance_iter(iter);
4739		return event;
4740
4741	case RINGBUF_TYPE_TIME_EXTEND:
4742		/* Internal data, OK to advance */
4743		rb_advance_iter(iter);
4744		goto again;
4745
4746	case RINGBUF_TYPE_TIME_STAMP:
4747		if (ts) {
4748			*ts = rb_event_time_stamp(event);
4749			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4750							 cpu_buffer->cpu, ts);
4751		}
4752		/* Internal data, OK to advance */
4753		rb_advance_iter(iter);
4754		goto again;
4755
4756	case RINGBUF_TYPE_DATA:
4757		if (ts && !(*ts)) {
4758			*ts = iter->read_stamp + event->time_delta;
4759			ring_buffer_normalize_time_stamp(buffer,
4760							 cpu_buffer->cpu, ts);
4761		}
4762		return event;
4763
4764	default:
4765		RB_WARN_ON(cpu_buffer, 1);
4766	}
4767
4768	return NULL;
4769}
4770EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
4771
4772static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
4773{
4774	if (likely(!in_nmi())) {
4775		raw_spin_lock(&cpu_buffer->reader_lock);
4776		return true;
4777	}
4778
4779	/*
4780	 * If an NMI die dump is printing the content of the ring buffer,
4781	 * a trylock must be used to prevent a deadlock if the NMI
4782	 * preempted a task that holds the ring buffer locks. If
4783	 * we get the lock then all is fine; if not, then continue
4784	 * to do the read, but this can corrupt the ring buffer,
4785	 * so it must be permanently disabled from future writes.
4786	 * Reading from an NMI is a one-shot deal.
4787	 */
4788	if (raw_spin_trylock(&cpu_buffer->reader_lock))
4789		return true;
4790
4791	/* Continue without locking, but disable the ring buffer */
4792	atomic_inc(&cpu_buffer->record_disabled);
4793	return false;
4794}
4795
4796static inline void
4797rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
4798{
4799	if (likely(locked))
4800		raw_spin_unlock(&cpu_buffer->reader_lock);
4801	return;
4802}
4803
4804/**
4805 * ring_buffer_peek - peek at the next event to be read
4806 * @buffer: The ring buffer to read
4807 * @cpu: The cpu to peek at
4808 * @ts: The timestamp counter of this event.
4809 * @lost_events: a variable to store if events were lost (may be NULL)
4810 *
4811 * This will return the event that will be read next, but does
4812 * not consume the data.
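 *
 * For example (a sketch):
 *	u64 ts;
 *	struct ring_buffer_event *event;
 *
 *	event = ring_buffer_peek(buffer, cpu, &ts, NULL);
 *	if (event)
 *		pr_info("next event has time stamp %llu\n", ts);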
4813 */
4814struct ring_buffer_event *
4815ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
4816		 unsigned long *lost_events)
4817{
4818	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4819	struct ring_buffer_event *event;
4820	unsigned long flags;
4821	bool dolock;
4822
4823	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4824		return NULL;
4825
4826 again:
4827	local_irq_save(flags);
4828	dolock = rb_reader_lock(cpu_buffer);
4829	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
4830	if (event && event->type_len == RINGBUF_TYPE_PADDING)
4831		rb_advance_reader(cpu_buffer);
4832	rb_reader_unlock(cpu_buffer, dolock);
4833	local_irq_restore(flags);
4834
4835	if (event && event->type_len == RINGBUF_TYPE_PADDING)
4836		goto again;
4837
4838	return event;
4839}
4840
4841/**
 * ring_buffer_iter_dropped - report if there are dropped events
4842 * @iter: The ring buffer iterator
4843 *
4844 * Returns true if there were dropped events since the last peek.
4845 */
4846bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter)
4847{
4848	bool ret = iter->missed_events != 0;
4849
4850	iter->missed_events = 0;
4851	return ret;
4852}
4853EXPORT_SYMBOL_GPL(ring_buffer_iter_dropped);
4854
4855/**
4856 * ring_buffer_iter_peek - peek at the next event to be read
4857 * @iter: The ring buffer iterator
4858 * @ts: The timestamp counter of this event.
4859 *
4860 * This will return the event that will be read next, but does
4861 * not increment the iterator.
4862 */
4863struct ring_buffer_event *
4864ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
4865{
4866	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4867	struct ring_buffer_event *event;
4868	unsigned long flags;
4869
4870 again:
4871	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4872	event = rb_iter_peek(iter, ts);
4873	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4874
4875	if (event && event->type_len == RINGBUF_TYPE_PADDING)
4876		goto again;
4877
4878	return event;
4879}
4880
4881/**
4882 * ring_buffer_consume - return an event and consume it
4883 * @buffer: The ring buffer to get the next event from
4884 * @cpu: the cpu to read the buffer from
4885 * @ts: a variable to store the timestamp (may be NULL)
4886 * @lost_events: a variable to store if events were lost (may be NULL)
4887 *
4888 * Returns the next event in the ring buffer, and that event is consumed.
4889 * Meaning, that sequential reads will keep returning a different event,
4890 * and eventually empty the ring buffer if the producer is slower.
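 *
 * For example, draining one CPU buffer (a sketch; process_event()
 * is a hypothetical consumer):
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, NULL)))
 *		process_event(event, ts);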
4891 */
4892struct ring_buffer_event *
4893ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
4894		    unsigned long *lost_events)
4895{
4896	struct ring_buffer_per_cpu *cpu_buffer;
4897	struct ring_buffer_event *event = NULL;
4898	unsigned long flags;
4899	bool dolock;
4900
4901 again:
4902	/* might be called in atomic */
4903	preempt_disable();
4904
4905	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4906		goto out;
4907
4908	cpu_buffer = buffer->buffers[cpu];
4909	local_irq_save(flags);
4910	dolock = rb_reader_lock(cpu_buffer);
4911
4912	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
4913	if (event) {
4914		cpu_buffer->lost_events = 0;
4915		rb_advance_reader(cpu_buffer);
4916	}
4917
4918	rb_reader_unlock(cpu_buffer, dolock);
4919	local_irq_restore(flags);
4920
4921 out:
4922	preempt_enable();
4923
4924	if (event && event->type_len == RINGBUF_TYPE_PADDING)
4925		goto again;
4926
4927	return event;
4928}
4929EXPORT_SYMBOL_GPL(ring_buffer_consume);
4930
4931/**
4932 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
4933 * @buffer: The ring buffer to read from
4934 * @cpu: The cpu buffer to iterate over
4935 * @flags: gfp flags to use for memory allocation
4936 *
4937 * This performs the initial preparations necessary to iterate
4938 * through the buffer.  Memory is allocated, buffer recording
4939 * is disabled, and the iterator pointer is returned to the caller.
4940 *
4941 * Disabling buffer recording prevents the reading from being
4942 * corrupted. This is not a consuming read, so a producer is not
4943 * expected.
4944 *
4945 * After a sequence of ring_buffer_read_prepare calls, the user is
4946 * expected to make at least one call to ring_buffer_read_prepare_sync.
4947 * Afterwards, ring_buffer_read_start is invoked to get things going
4948 * for real.
4949 *
4950 * This overall must be paired with ring_buffer_read_finish.
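 *
 * For example, a full non-consuming iteration (a sketch):
 *	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	while ((event = ring_buffer_iter_peek(iter, &ts)))
 *		ring_buffer_iter_advance(iter);
 *	ring_buffer_read_finish(iter);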
4951 */
4952struct ring_buffer_iter *
4953ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
4954{
4955	struct ring_buffer_per_cpu *cpu_buffer;
4956	struct ring_buffer_iter *iter;
4957
4958	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4959		return NULL;
4960
4961	iter = kzalloc(sizeof(*iter), flags);
4962	if (!iter)
4963		return NULL;
4964
4965	iter->event = kmalloc(BUF_MAX_DATA_SIZE, flags);
4966	if (!iter->event) {
4967		kfree(iter);
4968		return NULL;
4969	}
4970
4971	cpu_buffer = buffer->buffers[cpu];
4972
4973	iter->cpu_buffer = cpu_buffer;
4974
4975	atomic_inc(&cpu_buffer->resize_disabled);
4976
4977	return iter;
4978}
4979EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
4980
4981/**
4982 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
4983 *
4984 * All previously invoked ring_buffer_read_prepare calls to prepare
4985 * iterators will be synchronized.  Afterwards, ring_buffer_read_start
4986 * calls on those iterators are allowed.
4987 */
4988void
4989ring_buffer_read_prepare_sync(void)
4990{
4991	synchronize_rcu();
4992}
4993EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
4994
4995/**
4996 * ring_buffer_read_start - start a non consuming read of the buffer
4997 * @iter: The iterator returned by ring_buffer_read_prepare
4998 *
4999 * This finalizes the startup of an iteration through the buffer.
5000 * The iterator comes from a call to ring_buffer_read_prepare and
5001 * an intervening ring_buffer_read_prepare_sync must have been
5002 * performed.
5003 *
5004 * Must be paired with ring_buffer_read_finish.
5005 */
5006void
5007ring_buffer_read_start(struct ring_buffer_iter *iter)
5008{
5009	struct ring_buffer_per_cpu *cpu_buffer;
5010	unsigned long flags;
5011
5012	if (!iter)
5013		return;
5014
5015	cpu_buffer = iter->cpu_buffer;
5016
5017	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5018	arch_spin_lock(&cpu_buffer->lock);
5019	rb_iter_reset(iter);
5020	arch_spin_unlock(&cpu_buffer->lock);
5021	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5022}
5023EXPORT_SYMBOL_GPL(ring_buffer_read_start);
5024
5025/**
5026 * ring_buffer_read_finish - finish reading the iterator of the buffer
5027 * @iter: The iterator retrieved by ring_buffer_read_prepare
5028 *
5029 * This re-enables the recording to the buffer, and frees the
5030 * iterator.
5031 */
5032void
5033ring_buffer_read_finish(struct ring_buffer_iter *iter)
5034{
5035	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5036	unsigned long flags;
5037
5038	/*
5039	 * Ring buffer is disabled from recording, here's a good place
5040	 * to check the integrity of the ring buffer.
5041	 * Must prevent readers from trying to read, as the check
5042	 * clears the HEAD page and readers require it.
5043	 */
5044	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5045	rb_check_pages(cpu_buffer);
5046	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5047
5048	atomic_dec(&cpu_buffer->resize_disabled);
5049	kfree(iter->event);
5050	kfree(iter);
5051}
5052EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
5053
5054/**
5055 * ring_buffer_iter_advance - advance the iterator to the next location
5056 * @iter: The ring buffer iterator
5057 *
5058 * Move the location of the iterator such that the next read will
5059 * be the next location of the iterator.
5060 */
5061void ring_buffer_iter_advance(struct ring_buffer_iter *iter)
5062{
5063	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5064	unsigned long flags;
5065
5066	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5067
5068	rb_advance_iter(iter);
5069
5070	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5071}
5072EXPORT_SYMBOL_GPL(ring_buffer_iter_advance);
5073
5074/**
5075 * ring_buffer_size - return the size of the ring buffer (in bytes)
5076 * @buffer: The ring buffer.
5077 * @cpu: The CPU to get ring buffer size from.
5078 */
5079unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
5080{
5081	/*
5082	 * Earlier, this method returned
5083	 *	BUF_PAGE_SIZE * buffer->nr_pages
5084	 * Since the nr_pages field is now removed, we have converted this to
5085	 * return the per cpu buffer value.
5086	 */
5087	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5088		return 0;
5089
5090	return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
5091}
5092EXPORT_SYMBOL_GPL(ring_buffer_size);
5093
5094static void
5095rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
5096{
5097	rb_head_page_deactivate(cpu_buffer);
5098
5099	cpu_buffer->head_page
5100		= list_entry(cpu_buffer->pages, struct buffer_page, list);
5101	local_set(&cpu_buffer->head_page->write, 0);
5102	local_set(&cpu_buffer->head_page->entries, 0);
5103	local_set(&cpu_buffer->head_page->page->commit, 0);
5104
5105	cpu_buffer->head_page->read = 0;
5106
5107	cpu_buffer->tail_page = cpu_buffer->head_page;
5108	cpu_buffer->commit_page = cpu_buffer->head_page;
5109
5110	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
5111	INIT_LIST_HEAD(&cpu_buffer->new_pages);
5112	local_set(&cpu_buffer->reader_page->write, 0);
5113	local_set(&cpu_buffer->reader_page->entries, 0);
5114	local_set(&cpu_buffer->reader_page->page->commit, 0);
5115	cpu_buffer->reader_page->read = 0;
5116
5117	local_set(&cpu_buffer->entries_bytes, 0);
5118	local_set(&cpu_buffer->overrun, 0);
5119	local_set(&cpu_buffer->commit_overrun, 0);
5120	local_set(&cpu_buffer->dropped_events, 0);
5121	local_set(&cpu_buffer->entries, 0);
5122	local_set(&cpu_buffer->committing, 0);
5123	local_set(&cpu_buffer->commits, 0);
5124	local_set(&cpu_buffer->pages_touched, 0);
5125	local_set(&cpu_buffer->pages_read, 0);
5126	cpu_buffer->last_pages_touch = 0;
5127	cpu_buffer->shortest_full = 0;
5128	cpu_buffer->read = 0;
5129	cpu_buffer->read_bytes = 0;
5130
5131	rb_time_set(&cpu_buffer->write_stamp, 0);
5132	rb_time_set(&cpu_buffer->before_stamp, 0);
5133
5134	memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp));
5135
5136	cpu_buffer->lost_events = 0;
5137	cpu_buffer->last_overrun = 0;
5138
5139	rb_head_page_activate(cpu_buffer);
5140}
5141
5142/* Must have disabled the cpu buffer then done a synchronize_rcu */
5143static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
5144{
5145	unsigned long flags;
5146
5147	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5148
5149	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
5150		goto out;
5151
5152	arch_spin_lock(&cpu_buffer->lock);
5153
5154	rb_reset_cpu(cpu_buffer);
5155
5156	arch_spin_unlock(&cpu_buffer->lock);
5157
5158 out:
5159	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5160}
5161
5162/**
5163 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
5164 * @buffer: The ring buffer to reset a per cpu buffer of
5165 * @cpu: The CPU buffer to be reset
5166 */
5167void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
5168{
5169	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5170
5171	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5172		return;
5173
5174	/* prevent another thread from changing buffer sizes */
5175	mutex_lock(&buffer->mutex);
5176
5177	atomic_inc(&cpu_buffer->resize_disabled);
5178	atomic_inc(&cpu_buffer->record_disabled);
5179
5180	/* Make sure all commits have finished */
5181	synchronize_rcu();
5182
5183	reset_disabled_cpu_buffer(cpu_buffer);
5184
5185	atomic_dec(&cpu_buffer->record_disabled);
5186	atomic_dec(&cpu_buffer->resize_disabled);
5187
5188	mutex_unlock(&buffer->mutex);
5189}
5190EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
5191
5192/**
5193 * ring_buffer_reset_online_cpus - reset all online per-CPU buffers of a ring buffer
5194 * @buffer: The ring buffer whose online per-CPU buffers are to be reset
5195 */
5197void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
5198{
5199	struct ring_buffer_per_cpu *cpu_buffer;
5200	int cpu;
5201
5202	/* prevent another thread from changing buffer sizes */
5203	mutex_lock(&buffer->mutex);
5204
5205	for_each_online_buffer_cpu(buffer, cpu) {
5206		cpu_buffer = buffer->buffers[cpu];
5207
5208		atomic_inc(&cpu_buffer->resize_disabled);
5209		atomic_inc(&cpu_buffer->record_disabled);
5210	}
5211
5212	/* Make sure all commits have finished */
5213	synchronize_rcu();
5214
5215	for_each_online_buffer_cpu(buffer, cpu) {
5216		cpu_buffer = buffer->buffers[cpu];
5217
5218		reset_disabled_cpu_buffer(cpu_buffer);
5219
5220		atomic_dec(&cpu_buffer->record_disabled);
5221		atomic_dec(&cpu_buffer->resize_disabled);
5222	}
5223
5224	mutex_unlock(&buffer->mutex);
5225}
5226
5227/**
5228 * ring_buffer_reset - reset a ring buffer
5229 * @buffer: The ring buffer to reset all cpu buffers
5230 */
5231void ring_buffer_reset(struct trace_buffer *buffer)
5232{
5233	struct ring_buffer_per_cpu *cpu_buffer;
5234	int cpu;
5235
5236	for_each_buffer_cpu(buffer, cpu) {
5237		cpu_buffer = buffer->buffers[cpu];
5238
5239		atomic_inc(&cpu_buffer->resize_disabled);
5240		atomic_inc(&cpu_buffer->record_disabled);
5241	}
5242
5243	/* Make sure all commits have finished */
5244	synchronize_rcu();
5245
5246	for_each_buffer_cpu(buffer, cpu) {
5247		cpu_buffer = buffer->buffers[cpu];
5248
5249		reset_disabled_cpu_buffer(cpu_buffer);
5250
5251		atomic_dec(&cpu_buffer->record_disabled);
5252		atomic_dec(&cpu_buffer->resize_disabled);
5253	}
5254}
5255EXPORT_SYMBOL_GPL(ring_buffer_reset);
5256
5257/**
5258 * ring_buffer_empty - is the ring buffer empty?
5259 * @buffer: The ring buffer to test
5260 */
5261bool ring_buffer_empty(struct trace_buffer *buffer)
5262{
5263	struct ring_buffer_per_cpu *cpu_buffer;
5264	unsigned long flags;
5265	bool dolock;
5266	int cpu;
5267	int ret;
5268
5269	/* yes this is racy, but if you don't like the race, lock the buffer */
5270	for_each_buffer_cpu(buffer, cpu) {
5271		cpu_buffer = buffer->buffers[cpu];
5272		local_irq_save(flags);
5273		dolock = rb_reader_lock(cpu_buffer);
5274		ret = rb_per_cpu_empty(cpu_buffer);
5275		rb_reader_unlock(cpu_buffer, dolock);
5276		local_irq_restore(flags);
5277
5278		if (!ret)
5279			return false;
5280	}
5281
5282	return true;
5283}
5284EXPORT_SYMBOL_GPL(ring_buffer_empty);
5285
5286/**
5287 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
5288 * @buffer: The ring buffer
5289 * @cpu: The CPU buffer to test
5290 */
5291bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
5292{
5293	struct ring_buffer_per_cpu *cpu_buffer;
5294	unsigned long flags;
5295	bool dolock;
5296	int ret;
5297
5298	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5299		return true;
5300
5301	cpu_buffer = buffer->buffers[cpu];
5302	local_irq_save(flags);
5303	dolock = rb_reader_lock(cpu_buffer);
5304	ret = rb_per_cpu_empty(cpu_buffer);
5305	rb_reader_unlock(cpu_buffer, dolock);
5306	local_irq_restore(flags);
5307
5308	return ret;
5309}
5310EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
5311
5312#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
5313/**
5314 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
5315 * @buffer_a: One buffer to swap with
5316 * @buffer_b: The other buffer to swap with
5317 * @cpu: the CPU of the buffers to swap
5318 *
5319 * This function is useful for tracers that want to take a "snapshot"
5320 * of a CPU buffer and have another backup buffer lying around.
5321 * It is expected that the tracer handles the cpu buffer not being
5322 * used at the moment.
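 *
 * For example (a sketch; read_snapshot() is a hypothetical reader,
 * and both buffers must have the same number of pages):
 *	if (ring_buffer_swap_cpu(live_buffer, spare_buffer, cpu) == 0)
 *		read_snapshot(spare_buffer, cpu);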
5323 */
5324int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
5325			 struct trace_buffer *buffer_b, int cpu)
5326{
5327	struct ring_buffer_per_cpu *cpu_buffer_a;
5328	struct ring_buffer_per_cpu *cpu_buffer_b;
5329	int ret = -EINVAL;
5330
5331	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
5332	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
5333		goto out;
5334
5335	cpu_buffer_a = buffer_a->buffers[cpu];
5336	cpu_buffer_b = buffer_b->buffers[cpu];
5337
5338	/* At least make sure the two buffers are somewhat the same */
5339	if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
5340		goto out;
5341
5342	ret = -EAGAIN;
5343
5344	if (atomic_read(&buffer_a->record_disabled))
5345		goto out;
5346
5347	if (atomic_read(&buffer_b->record_disabled))
5348		goto out;
5349
5350	if (atomic_read(&cpu_buffer_a->record_disabled))
5351		goto out;
5352
5353	if (atomic_read(&cpu_buffer_b->record_disabled))
5354		goto out;
5355
5356	/*
5357	 * We can't do a synchronize_rcu here because this
5358	 * function can be called in atomic context.
5359	 * Normally this will be called from the same CPU as cpu.
5360	 * If not, it's up to the caller to protect this.
5361	 */
5362	atomic_inc(&cpu_buffer_a->record_disabled);
5363	atomic_inc(&cpu_buffer_b->record_disabled);
5364
5365	ret = -EBUSY;
5366	if (local_read(&cpu_buffer_a->committing))
5367		goto out_dec;
5368	if (local_read(&cpu_buffer_b->committing))
5369		goto out_dec;
5370
5371	buffer_a->buffers[cpu] = cpu_buffer_b;
5372	buffer_b->buffers[cpu] = cpu_buffer_a;
5373
5374	cpu_buffer_b->buffer = buffer_a;
5375	cpu_buffer_a->buffer = buffer_b;
5376
5377	ret = 0;
5378
5379out_dec:
5380	atomic_dec(&cpu_buffer_a->record_disabled);
5381	atomic_dec(&cpu_buffer_b->record_disabled);
5382out:
5383	return ret;
5384}
5385EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
5386#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
5387
5388/**
5389 * ring_buffer_alloc_read_page - allocate a page to read from buffer
5390 * @buffer: the buffer to allocate for.
5391 * @cpu: the cpu buffer to allocate for.
5392 *
5393 * This function is used in conjunction with ring_buffer_read_page.
5394 * When reading a full page from the ring buffer, these functions
5395 * can be used to speed up the process. The calling function should
5396 * allocate a few pages first with this function. Then when it
5397 * needs to get pages from the ring buffer, it passes the result
5398 * of this function into ring_buffer_read_page, which will swap
5399 * the page that was allocated, with the read page of the buffer.
5400 *
5401 * Returns:
5402 *  The page allocated, or ERR_PTR
5403 */
5404void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
5405{
5406	struct ring_buffer_per_cpu *cpu_buffer;
5407	struct buffer_data_page *bpage = NULL;
5408	unsigned long flags;
5409	struct page *page;
5410
5411	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5412		return ERR_PTR(-ENODEV);
5413
5414	cpu_buffer = buffer->buffers[cpu];
5415	local_irq_save(flags);
5416	arch_spin_lock(&cpu_buffer->lock);
5417
5418	if (cpu_buffer->free_page) {
5419		bpage = cpu_buffer->free_page;
5420		cpu_buffer->free_page = NULL;
5421	}
5422
5423	arch_spin_unlock(&cpu_buffer->lock);
5424	local_irq_restore(flags);
5425
5426	if (bpage)
5427		goto out;
5428
5429	page = alloc_pages_node(cpu_to_node(cpu),
5430				GFP_KERNEL | __GFP_NORETRY, 0);
5431	if (!page)
5432		return ERR_PTR(-ENOMEM);
5433
5434	bpage = page_address(page);
5435
5436 out:
5437	rb_init_page(bpage);
5438
5439	return bpage;
5440}
5441EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
5442
5443/**
5444 * ring_buffer_free_read_page - free an allocated read page
5445 * @buffer: the buffer the page was allocated for
5446 * @cpu: the cpu buffer the page came from
5447 * @data: the page to free
5448 *
5449 * Free a page allocated from ring_buffer_alloc_read_page.
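 *
 * For example, the full alloc/read/free cycle (a sketch):
 *	void *rpage = ring_buffer_alloc_read_page(buffer, cpu);
 *
 *	if (!IS_ERR(rpage)) {
 *		ring_buffer_read_page(buffer, &rpage, PAGE_SIZE, cpu, 0);
 *		ring_buffer_free_read_page(buffer, cpu, rpage);
 *	}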
5450 */
5451void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data)
5452{
5453	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5454	struct buffer_data_page *bpage = data;
5455	struct page *page = virt_to_page(bpage);
5456	unsigned long flags;
5457
5458	/* If the page is still in use someplace else, we can't reuse it */
5459	if (page_ref_count(page) > 1)
5460		goto out;
5461
5462	local_irq_save(flags);
5463	arch_spin_lock(&cpu_buffer->lock);
5464
5465	if (!cpu_buffer->free_page) {
5466		cpu_buffer->free_page = bpage;
5467		bpage = NULL;
5468	}
5469
5470	arch_spin_unlock(&cpu_buffer->lock);
5471	local_irq_restore(flags);
5472
5473 out:
5474	free_page((unsigned long)bpage);
5475}
5476EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
5477
5478/**
5479 * ring_buffer_read_page - extract a page from the ring buffer
5480 * @buffer: buffer to extract from
5481 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
5482 * @len: amount to extract
5483 * @cpu: the cpu of the buffer to extract
5484 * @full: should the extraction only happen when the page is full.
5485 *
5486 * This function will pull out a page from the ring buffer and consume it.
5487 * @data_page must be the address of the variable that was returned
5488 * from ring_buffer_alloc_read_page. This is because the page might be used
5489 * to swap with a page in the ring buffer.
5490 *
5491 * for example:
5492 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
5493 *	if (IS_ERR(rpage))
5494 *		return PTR_ERR(rpage);
5495 *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
5496 *	if (ret >= 0)
5497 *		process_page(rpage, ret);
5498 *
5499 * When @full is set, the function will not succeed unless
5500 * the writer is off the reader page.
5501 *
5502 * Note: it is up to the calling functions to handle sleeps and wakeups.
5503 *  The ring buffer can be used anywhere in the kernel and can not
5504 *  blindly call wake_up. The layer that uses the ring buffer must be
5505 *  responsible for that.
5506 *
5507 * Returns:
5508 *  >=0 if data has been transferred, returns the offset of consumed data.
5509 *  <0 if no data has been transferred.
5510 */
5511int ring_buffer_read_page(struct trace_buffer *buffer,
5512			  void **data_page, size_t len, int cpu, int full)
5513{
5514	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5515	struct ring_buffer_event *event;
5516	struct buffer_data_page *bpage;
5517	struct buffer_page *reader;
5518	unsigned long missed_events;
5519	unsigned long flags;
5520	unsigned int commit;
5521	unsigned int read;
5522	u64 save_timestamp;
5523	int ret = -1;
5524
5525	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5526		goto out;
5527
5528	/*
5529	 * If len is not big enough to hold the page header, then
5530	 * we can not copy anything.
5531	 */
5532	if (len <= BUF_PAGE_HDR_SIZE)
5533		goto out;
5534
5535	len -= BUF_PAGE_HDR_SIZE;
5536
5537	if (!data_page)
5538		goto out;
5539
5540	bpage = *data_page;
5541	if (!bpage)
5542		goto out;
5543
5544	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5545
5546	reader = rb_get_reader_page(cpu_buffer);
5547	if (!reader)
5548		goto out_unlock;
5549
5550	event = rb_reader_event(cpu_buffer);
5551
5552	read = reader->read;
5553	commit = rb_page_commit(reader);
5554
5555	/* Check if any events were dropped */
5556	missed_events = cpu_buffer->lost_events;
5557
5558	/*
5559	 * If this page has been partially read or
5560	 * if len is not big enough to read the rest of the page or
5561	 * a writer is still on the page, then
5562	 * we must copy the data from the page to the buffer.
5563	 * Otherwise, we can simply swap the page with the one passed in.
5564	 */
5565	if (read || (len < (commit - read)) ||
5566	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
5567		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
5568		unsigned int rpos = read;
5569		unsigned int pos = 0;
5570		unsigned int size;
5571
5572		if (full)
5573			goto out_unlock;
5574
5575		if (len > (commit - read))
5576			len = (commit - read);
5577
5578		/* Always keep the time extend and data together */
5579		size = rb_event_ts_length(event);
5580
5581		if (len < size)
5582			goto out_unlock;
5583
5584		/* save the current timestamp, since the user will need it */
5585		save_timestamp = cpu_buffer->read_stamp;
5586
5587		/* Need to copy one event at a time */
5588		do {
5589			/* We need the size of one event, because
5590			 * rb_advance_reader only advances by one event,
5591			 * whereas rb_event_ts_length may include the size of
5592			 * one or two events.
5593			 * We have already ensured there's enough space if this
5594			 * is a time extend. */
5595			size = rb_event_length(event);
5596			memcpy(bpage->data + pos, rpage->data + rpos, size);
5597
5598			len -= size;
5599
5600			rb_advance_reader(cpu_buffer);
5601			rpos = reader->read;
5602			pos += size;
5603
5604			if (rpos >= commit)
5605				break;
5606
5607			event = rb_reader_event(cpu_buffer);
5608			/* Always keep the time extend and data together */
5609			size = rb_event_ts_length(event);
5610		} while (len >= size);
5611
5612		/* update bpage */
5613		local_set(&bpage->commit, pos);
5614		bpage->time_stamp = save_timestamp;
5615
5616		/* we copied everything to the beginning */
5617		read = 0;
5618	} else {
5619		/* update the entry counter */
5620		cpu_buffer->read += rb_page_entries(reader);
5621		cpu_buffer->read_bytes += BUF_PAGE_SIZE;
5622
5623		/* swap the pages */
5624		rb_init_page(bpage);
5625		bpage = reader->page;
5626		reader->page = *data_page;
5627		local_set(&reader->write, 0);
5628		local_set(&reader->entries, 0);
5629		reader->read = 0;
5630		*data_page = bpage;
5631
5632		/*
5633		 * Use the real_end for the data size,
5634		 * This gives us a chance to store the lost events
5635		 * on the page.
5636		 */
5637		if (reader->real_end)
5638			local_set(&bpage->commit, reader->real_end);
5639	}
5640	ret = read;
5641
5642	cpu_buffer->lost_events = 0;
5643
5644	commit = local_read(&bpage->commit);
5645	/*
5646	 * Set a flag in the commit field if we lost events
5647	 */
5648	if (missed_events) {
5649		/* If there is room at the end of the page to save the
5650		 * missed events, then record it there.
5651		 */
5652		if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
5653			memcpy(&bpage->data[commit], &missed_events,
5654			       sizeof(missed_events));
5655			local_add(RB_MISSED_STORED, &bpage->commit);
5656			commit += sizeof(missed_events);
5657		}
5658		local_add(RB_MISSED_EVENTS, &bpage->commit);
5659	}
5660
5661	/*
5662	 * This page may be off to user land. Zero it out here.
5663	 */
5664	if (commit < BUF_PAGE_SIZE)
5665		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
5666
5667 out_unlock:
5668	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5669
5670 out:
5671	return ret;
5672}
5673EXPORT_SYMBOL_GPL(ring_buffer_read_page);
5674
5675/*
5676 * We only allocate new buffers, never free them if the CPU goes down.
5677 * If we were to free the buffer, then the user would lose any trace that was in
5678 * the buffer.
5679 */
5680int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
5681{
5682	struct trace_buffer *buffer;
5683	long nr_pages_same;
5684	int cpu_i;
5685	unsigned long nr_pages;
5686
5687	buffer = container_of(node, struct trace_buffer, node);
5688	if (cpumask_test_cpu(cpu, buffer->cpumask))
5689		return 0;
5690
5691	nr_pages = 0;
5692	nr_pages_same = 1;
5693	/* check if all cpu sizes are same */
5694	for_each_buffer_cpu(buffer, cpu_i) {
5695		/* fill in the size from first enabled cpu */
5696		if (nr_pages == 0)
5697			nr_pages = buffer->buffers[cpu_i]->nr_pages;
5698		if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
5699			nr_pages_same = 0;
5700			break;
5701		}
5702	}
5703	/* allocate the minimum number of pages; the user can expand it later */
5704	if (!nr_pages_same)
5705		nr_pages = 2;
5706	buffer->buffers[cpu] =
5707		rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
5708	if (!buffer->buffers[cpu]) {
5709		WARN(1, "failed to allocate ring buffer on CPU %u\n",
5710		     cpu);
5711		return -ENOMEM;
5712	}
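	/*
	 * Make sure the newly allocated buffer is fully initialized and
	 * visible before the cpumask bit below publishes it to readers.
	 */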
5713	smp_wmb();
5714	cpumask_set_cpu(cpu, buffer->cpumask);
5715	return 0;
5716}
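
/*
 * Illustrative sketch (not code from this file) of how this callback is
 * hooked into the CPU hotplug state machine elsewhere in the tracing
 * code, assuming the standard multi-instance cpuhp API; the state name
 * string here is a placeholder:
 *
 *	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
 *				      "trace/RB:prepare",
 *				      trace_rb_cpu_prepare, NULL);
 *	...
 *	// then, for each buffer that is allocated:
 *	ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE,
 *				       &buffer->node);
 */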
5717
5718#ifdef CONFIG_RING_BUFFER_STARTUP_TEST
5719/*
5720 * This is a basic integrity check of the ring buffer.
5721 * Late in the boot cycle this test will run when configured in.
5722 * It will kick off a thread per CPU that will go into a loop
5723 * writing to the per cpu ring buffer various sizes of data.
5724 * Some of the data will be large items, some small.
5725 *
5726 * Another thread is created that goes into a spin, sending out
5727 * IPIs to the other CPUs to also write into the ring buffer.
5728 * This is to test the nesting ability of the buffer.
5729 *
5730 * Basic stats are recorded and reported. If something unexpected
5731 * happens in the ring buffer, a big warning is displayed and all
5732 * ring buffers are disabled.
5733 */
5734static struct task_struct *rb_threads[NR_CPUS] __initdata;
5735
5736struct rb_test_data {
5737	struct trace_buffer *buffer;
5738	unsigned long		events;
5739	unsigned long		bytes_written;
5740	unsigned long		bytes_alloc;
5741	unsigned long		bytes_dropped;
5742	unsigned long		events_nested;
5743	unsigned long		bytes_written_nested;
5744	unsigned long		bytes_alloc_nested;
5745	unsigned long		bytes_dropped_nested;
5746	int			min_size_nested;
5747	int			max_size_nested;
5748	int			max_size;
5749	int			min_size;
5750	int			cpu;
5751	int			cnt;
5752};
5753
5754static struct rb_test_data rb_data[NR_CPUS] __initdata;
5755
5756/* 1 meg per cpu */
5757#define RB_TEST_BUFFER_SIZE	1048576
5758
5759static char rb_string[] __initdata =
5760	"abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
5761	"?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
5762	"!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
5763
5764static bool rb_test_started __initdata;
5765
5766struct rb_item {
5767	int size;
5768	char str[];
5769};
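
/*
 * str[] is a flexible array member: an item carrying N string bytes is
 * reserved as N + sizeof(struct rb_item) bytes, matching the "len"
 * computation in rb_write_something() below.
 */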
5770
5771static __init int rb_write_something(struct rb_test_data *data, bool nested)
5772{
5773	struct ring_buffer_event *event;
5774	struct rb_item *item;
5775	bool started;
5776	int event_len;
5777	int size;
5778	int len;
5779	int cnt;
5780
5781	/* Have nested writes differ from the normal writes */
5782	cnt = data->cnt + (nested ? 27 : 0);
5783
5784	/* Multiply cnt by ~e (68/25 ~= 2.72) to make some unique increment */
5785	size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
5786
5787	len = size + sizeof(struct rb_item);
5788
5789	started = rb_test_started;
5790	/* read rb_test_started before checking buffer enabled */
5791	smp_rmb();
5792
5793	event = ring_buffer_lock_reserve(data->buffer, len);
5794	if (!event) {
5795		/* Ignore dropped events before test starts. */
5796		if (started) {
5797			if (nested)
5798				data->bytes_dropped_nested += len;
5799			else
5800				data->bytes_dropped += len;
5801		}
5802		return len;
5803	}
5804
5805	event_len = ring_buffer_event_length(event);
5806
5807	if (RB_WARN_ON(data->buffer, event_len < len))
5808		goto out;
5809
5810	item = ring_buffer_event_data(event);
5811	item->size = size;
5812	memcpy(item->str, rb_string, size);
5813
5814	if (nested) {
5815		data->bytes_alloc_nested += event_len;
5816		data->bytes_written_nested += len;
5817		data->events_nested++;
5818		if (!data->min_size_nested || len < data->min_size_nested)
5819			data->min_size_nested = len;
5820		if (len > data->max_size_nested)
5821			data->max_size_nested = len;
5822	} else {
5823		data->bytes_alloc += event_len;
5824		data->bytes_written += len;
5825		data->events++;
5826		if (!data->min_size || len < data->min_size)
5827			data->min_size = len;
5828		if (len > data->max_size)
5829			data->max_size = len;
5830	}
5831
5832 out:
5833	ring_buffer_unlock_commit(data->buffer, event);
5834
5835	return 0;
5836}
5837
5838static __init int rb_test(void *arg)
5839{
5840	struct rb_test_data *data = arg;
5841
5842	while (!kthread_should_stop()) {
5843		rb_write_something(data, false);
5844		data->cnt++;
5845
5846		set_current_state(TASK_INTERRUPTIBLE);
5847		/* Now sleep between a min of 100-300us and a max of 1ms */
5848		usleep_range(((data->cnt % 3) + 1) * 100, 1000);
5849	}
5850
5851	return 0;
5852}
5853
5854static __init void rb_ipi(void *ignore)
5855{
5856	struct rb_test_data *data;
5857	int cpu = smp_processor_id();
5858
5859	data = &rb_data[cpu];
5860	rb_write_something(data, true);
5861}
5862
5863static __init int rb_hammer_test(void *arg)
5864{
5865	while (!kthread_should_stop()) {
5866
5867		/* Send an IPI to all other CPUs to write data! */
5868		smp_call_function(rb_ipi, NULL, 1);
5869		/* No sleep, but on non-preempt kernels, let others run */
5870		schedule();
5871	}
5872
5873	return 0;
5874}
5875
5876static __init int test_ringbuffer(void)
5877{
5878	struct task_struct *rb_hammer;
5879	struct trace_buffer *buffer;
5880	int cpu;
5881	int ret = 0;
5882
5883	if (security_locked_down(LOCKDOWN_TRACEFS)) {
5884		pr_warn("Lockdown is enabled, skipping ring buffer tests\n");
5885		return 0;
5886	}
5887
5888	pr_info("Running ring buffer tests...\n");
5889
5890	buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
5891	if (WARN_ON(!buffer))
5892		return 0;
5893
5894	/* Disable buffer so that threads can't write to it yet */
5895	ring_buffer_record_off(buffer);
5896
5897	for_each_online_cpu(cpu) {
5898		rb_data[cpu].buffer = buffer;
5899		rb_data[cpu].cpu = cpu;
5900		rb_data[cpu].cnt = cpu;
5901		rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
5902						 "rbtester/%d", cpu);
5903		if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
5904			pr_cont("FAILED\n");
5905			ret = PTR_ERR(rb_threads[cpu]);
5906			goto out_free;
5907		}
5908
5909		kthread_bind(rb_threads[cpu], cpu);
5910		wake_up_process(rb_threads[cpu]);
5911	}
5912
5913	/* Now create the rb hammer! */
5914	rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
5915	if (WARN_ON(IS_ERR(rb_hammer))) {
5916		pr_cont("FAILED\n");
5917		ret = PTR_ERR(rb_hammer);
5918		goto out_free;
5919	}
5920
5921	ring_buffer_record_on(buffer);
5922	/*
5923	 * Make sure the buffer is seen as enabled before setting rb_test_started.
5924	 * Yes, there's a small race window where events could be
5925	 * dropped and the thread won't catch it. But when a ring
5926	 * buffer gets enabled, there will always be some kind of
5927	 * delay before other CPUs see it. Thus, we don't care about
5928	 * those dropped events. We care about events dropped after
5929	 * the threads see that the buffer is active.
5930	 */
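	/* Pairs with the smp_rmb() in rb_write_something() */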
5931	smp_wmb();
5932	rb_test_started = true;
5933
5934	set_current_state(TASK_INTERRUPTIBLE);
5935	/* Just run for 10 seconds */
5936	schedule_timeout(10 * HZ);
5937
5938	kthread_stop(rb_hammer);
5939
5940 out_free:
5941	for_each_online_cpu(cpu) {
5942		if (!rb_threads[cpu])
5943			break;
5944		kthread_stop(rb_threads[cpu]);
5945	}
5946	if (ret) {
5947		ring_buffer_free(buffer);
5948		return ret;
5949	}
5950
5951	/* Report! */
5952	pr_info("finished\n");
5953	for_each_online_cpu(cpu) {
5954		struct ring_buffer_event *event;
5955		struct rb_test_data *data = &rb_data[cpu];
5956		struct rb_item *item;
5957		unsigned long total_events;
5958		unsigned long total_dropped;
5959		unsigned long total_written;
5960		unsigned long total_alloc;
5961		unsigned long total_read = 0;
5962		unsigned long total_size = 0;
5963		unsigned long total_len = 0;
5964		unsigned long total_lost = 0;
5965		unsigned long lost;
5966		int big_event_size;
5967		int small_event_size;
5968
5969		ret = -1;
5970
5971		total_events = data->events + data->events_nested;
5972		total_written = data->bytes_written + data->bytes_written_nested;
5973		total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
5974		total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
5975
5976		big_event_size = data->max_size + data->max_size_nested;
5977		small_event_size = data->min_size + data->min_size_nested;
5978
5979		pr_info("CPU %d:\n", cpu);
5980		pr_info("              events:    %ld\n", total_events);
5981		pr_info("       dropped bytes:    %ld\n", total_dropped);
5982		pr_info("       alloced bytes:    %ld\n", total_alloc);
5983		pr_info("       written bytes:    %ld\n", total_written);
5984		pr_info("       biggest event:    %d\n", big_event_size);
5985		pr_info("      smallest event:    %d\n", small_event_size);
5986
5987		if (RB_WARN_ON(buffer, total_dropped))
5988			break;
5989
5990		ret = 0;
5991
5992		while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
5993			total_lost += lost;
5994			item = ring_buffer_event_data(event);
5995			total_len += ring_buffer_event_length(event);
5996			total_size += item->size + sizeof(struct rb_item);
5997			if (memcmp(&item->str[0], rb_string, item->size) != 0) {
5998				pr_info("FAILED!\n");
5999				pr_info("buffer had: %.*s\n", item->size, item->str);
6000				pr_info("expected:   %.*s\n", item->size, rb_string);
6001				RB_WARN_ON(buffer, 1);
6002				ret = -1;
6003				break;
6004			}
6005			total_read++;
6006		}
6007		if (ret)
6008			break;
6009
6010		ret = -1;
6011
6012		pr_info("         read events:   %ld\n", total_read);
6013		pr_info("         lost events:   %ld\n", total_lost);
6014		pr_info("        total events:   %ld\n", total_lost + total_read);
6015		pr_info("  recorded len bytes:   %ld\n", total_len);
6016		pr_info(" recorded size bytes:   %ld\n", total_size);
6017		if (total_lost)
6018			pr_info(" With dropped events, record len and size may not match\n"
6019				" alloced and written from above\n");
6020		if (!total_lost) {
6021			if (RB_WARN_ON(buffer, total_len != total_alloc ||
6022				       total_size != total_written))
6023				break;
6024		}
6025		if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
6026			break;
6027
6028		ret = 0;
6029	}
6030	if (!ret)
6031		pr_info("Ring buffer PASSED!\n");
6032
6033	ring_buffer_free(buffer);
6034	return 0;
6035}
6036
6037late_initcall(test_ringbuffer);
6038#endif /* CONFIG_RING_BUFFER_STARTUP_TEST */