v6.9.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Generic ring buffer
   4 *
   5 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
   6 */
   7#include <linux/trace_recursion.h>
   8#include <linux/trace_events.h>
   9#include <linux/ring_buffer.h>
  10#include <linux/trace_clock.h>
  11#include <linux/sched/clock.h>
  12#include <linux/trace_seq.h>
  13#include <linux/spinlock.h>
  14#include <linux/irq_work.h>
  15#include <linux/security.h>
  16#include <linux/uaccess.h>
  17#include <linux/hardirq.h>
  18#include <linux/kthread.h>	/* for self test */
  19#include <linux/module.h>
  20#include <linux/percpu.h>
  21#include <linux/mutex.h>
  22#include <linux/delay.h>
  23#include <linux/slab.h>
  24#include <linux/init.h>
  25#include <linux/hash.h>
  26#include <linux/list.h>
  27#include <linux/cpu.h>
  28#include <linux/oom.h>
  29
  30#include <asm/local64.h>
  31#include <asm/local.h>
  32
  33/*
  34 * The "absolute" timestamp in the buffer is only 59 bits.
  35 * If a clock has the 5 MSBs set, it needs to be saved and
  36 * reinserted.
  37 */
  38#define TS_MSB		(0xf8ULL << 56)
  39#define ABS_TS_MASK	(~TS_MSB)
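/*
 * Worked example (editorial sketch, not part of the original source):
 * for a clock value with the 5 MSBs set, e.g. ts = 0xe123456789abcdef,
 *
 *   stored = ts & ABS_TS_MASK   (the 59 bits kept in the event)
 *   saved  = ts & TS_MSB        (the 5 MSBs remembered on the side)
 *   stored | saved == ts        (what rb_fix_abs_ts() below rebuilds)
 */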
  40
  41static void update_pages_handler(struct work_struct *work);
  42
  43/*
   44 * The ring buffer header is special. We must manually keep it up to date.
  45 */
  46int ring_buffer_print_entry_header(struct trace_seq *s)
  47{
  48	trace_seq_puts(s, "# compressed entry header\n");
  49	trace_seq_puts(s, "\ttype_len    :    5 bits\n");
  50	trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
  51	trace_seq_puts(s, "\tarray       :   32 bits\n");
  52	trace_seq_putc(s, '\n');
  53	trace_seq_printf(s, "\tpadding     : type == %d\n",
  54			 RINGBUF_TYPE_PADDING);
  55	trace_seq_printf(s, "\ttime_extend : type == %d\n",
  56			 RINGBUF_TYPE_TIME_EXTEND);
  57	trace_seq_printf(s, "\ttime_stamp : type == %d\n",
  58			 RINGBUF_TYPE_TIME_STAMP);
  59	trace_seq_printf(s, "\tdata max type_len  == %d\n",
  60			 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
  61
  62	return !trace_seq_has_overflowed(s);
  63}
  64
  65/*
  66 * The ring buffer is made up of a list of pages. A separate list of pages is
  67 * allocated for each CPU. A writer may only write to a buffer that is
  68 * associated with the CPU it is currently executing on.  A reader may read
  69 * from any per cpu buffer.
  70 *
  71 * The reader is special. For each per cpu buffer, the reader has its own
  72 * reader page. When a reader has read the entire reader page, this reader
  73 * page is swapped with another page in the ring buffer.
  74 *
   75 * Now, as long as the writer is off the reader page, the reader can do
   76 * whatever it wants with that page. The writer will never write to that page
   77 * again (as long as it is out of the ring buffer).
  78 *
  79 * Here's some silly ASCII art.
  80 *
  81 *   +------+
  82 *   |reader|          RING BUFFER
  83 *   |page  |
  84 *   +------+        +---+   +---+   +---+
  85 *                   |   |-->|   |-->|   |
  86 *                   +---+   +---+   +---+
  87 *                     ^               |
  88 *                     |               |
  89 *                     +---------------+
  90 *
  91 *
  92 *   +------+
  93 *   |reader|          RING BUFFER
  94 *   |page  |------------------v
  95 *   +------+        +---+   +---+   +---+
  96 *                   |   |-->|   |-->|   |
  97 *                   +---+   +---+   +---+
  98 *                     ^               |
  99 *                     |               |
 100 *                     +---------------+
 101 *
 102 *
 103 *   +------+
 104 *   |reader|          RING BUFFER
 105 *   |page  |------------------v
 106 *   +------+        +---+   +---+   +---+
 107 *      ^            |   |-->|   |-->|   |
 108 *      |            +---+   +---+   +---+
 109 *      |                              |
 110 *      |                              |
 111 *      +------------------------------+
 112 *
 113 *
 114 *   +------+
 115 *   |buffer|          RING BUFFER
 116 *   |page  |------------------v
 117 *   +------+        +---+   +---+   +---+
 118 *      ^            |   |   |   |-->|   |
 119 *      |   New      +---+   +---+   +---+
 120 *      |  Reader------^               |
 121 *      |   page                       |
 122 *      +------------------------------+
 123 *
 124 *
 125 * After we make this swap, the reader can hand this page off to the splice
 126 * code and be done with it. It can even allocate a new page if it needs to
 127 * and swap that into the ring buffer.
 128 *
 129 * We will be using cmpxchg soon to make all this lockless.
 130 *
 131 */
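/*
 * Illustrative sketch (editorial, not part of the original source):
 * a minimal user-space model of the reader-page swap shown in the
 * ASCII art above, on a singly linked ring. The real code uses a
 * doubly linked list and a cmpxchg on a HEAD-flagged pointer; only
 * the pointer motion is shown here.
 */
#if 0
struct page { struct page *next; };

static struct page *swap_reader_page(struct page *reader, struct page *head)
{
	struct page *prev = head;

	/* find the page that points at the head page */
	while (prev->next != head)
		prev = prev->next;

	reader->next = head->next;	/* reader page joins the ring */
	prev->next = reader;		/* ring now bypasses the old head */
	head->next = NULL;		/* old head becomes the new reader page */
	return head;
}
#endif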
 132
 133/* Used for individual buffers (after the counter) */
 134#define RB_BUFFER_OFF		(1 << 20)
 135
 136#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
 137
 138#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
 139#define RB_ALIGNMENT		4U
 140#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 141#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */
 142
 143#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
 144# define RB_FORCE_8BYTE_ALIGNMENT	0
 145# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
 146#else
 147# define RB_FORCE_8BYTE_ALIGNMENT	1
 148# define RB_ARCH_ALIGNMENT		8U
 149#endif
 150
 151#define RB_ALIGN_DATA		__aligned(RB_ARCH_ALIGNMENT)
 152
 153/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
 154#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
 155
 156enum {
 157	RB_LEN_TIME_EXTEND = 8,
 158	RB_LEN_TIME_STAMP =  8,
 159};
 160
 161#define skip_time_extend(event) \
 162	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
 163
 164#define extended_time(event) \
 165	(event->type_len >= RINGBUF_TYPE_TIME_EXTEND)
 166
 167static inline bool rb_null_event(struct ring_buffer_event *event)
 168{
 169	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
 170}
 171
 172static void rb_event_set_padding(struct ring_buffer_event *event)
 173{
 174	/* padding has a NULL time_delta */
 175	event->type_len = RINGBUF_TYPE_PADDING;
 176	event->time_delta = 0;
 177}
 178
 179static unsigned
 180rb_event_data_length(struct ring_buffer_event *event)
 181{
 182	unsigned length;
 183
 184	if (event->type_len)
 185		length = event->type_len * RB_ALIGNMENT;
 186	else
 187		length = event->array[0];
 188	return length + RB_EVNT_HDR_SIZE;
 189}
 190
 191/*
 192 * Return the length of the given event. Will return
 193 * the length of the time extend if the event is a
 194 * time extend.
 195 */
 196static inline unsigned
 197rb_event_length(struct ring_buffer_event *event)
 198{
 199	switch (event->type_len) {
 200	case RINGBUF_TYPE_PADDING:
 201		if (rb_null_event(event))
 202			/* undefined */
 203			return -1;
 204		return  event->array[0] + RB_EVNT_HDR_SIZE;
 205
 206	case RINGBUF_TYPE_TIME_EXTEND:
 207		return RB_LEN_TIME_EXTEND;
 208
 209	case RINGBUF_TYPE_TIME_STAMP:
 210		return RB_LEN_TIME_STAMP;
 211
 212	case RINGBUF_TYPE_DATA:
 213		return rb_event_data_length(event);
 214	default:
 215		WARN_ON_ONCE(1);
 216	}
 217	/* not hit */
 218	return 0;
 219}
 220
 221/*
 222 * Return total length of time extend and data,
 223 *   or just the event length for all other events.
 224 */
 225static inline unsigned
 226rb_event_ts_length(struct ring_buffer_event *event)
 227{
 228	unsigned len = 0;
 229
 230	if (extended_time(event)) {
 231		/* time extends include the data event after it */
 232		len = RB_LEN_TIME_EXTEND;
 233		event = skip_time_extend(event);
 234	}
 235	return len + rb_event_length(event);
 236}
 237
 238/**
 239 * ring_buffer_event_length - return the length of the event
 240 * @event: the event to get the length of
 241 *
 242 * Returns the size of the data load of a data event.
 243 * If the event is something other than a data event, it
 244 * returns the size of the event itself. With the exception
 245 * of a TIME EXTEND, where it still returns the size of the
 246 * data load of the data event after it.
 247 */
 248unsigned ring_buffer_event_length(struct ring_buffer_event *event)
 249{
 250	unsigned length;
 251
 252	if (extended_time(event))
 253		event = skip_time_extend(event);
 254
 255	length = rb_event_length(event);
 256	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 257		return length;
 258	length -= RB_EVNT_HDR_SIZE;
 259	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
  260		length -= sizeof(event->array[0]);
 261	return length;
 262}
 263EXPORT_SYMBOL_GPL(ring_buffer_event_length);
 264
 265/* inline for ring buffer fast paths */
 266static __always_inline void *
 267rb_event_data(struct ring_buffer_event *event)
 268{
 269	if (extended_time(event))
 270		event = skip_time_extend(event);
 271	WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
 272	/* If length is in len field, then array[0] has the data */
 273	if (event->type_len)
 274		return (void *)&event->array[0];
 275	/* Otherwise length is in array[0] and array[1] has the data */
 276	return (void *)&event->array[1];
 277}
 278
 279/**
 280 * ring_buffer_event_data - return the data of the event
 281 * @event: the event to get the data from
 282 */
 283void *ring_buffer_event_data(struct ring_buffer_event *event)
 284{
 285	return rb_event_data(event);
 286}
 287EXPORT_SYMBOL_GPL(ring_buffer_event_data);
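/*
 * Illustrative sketch (editorial, not part of the original source):
 * typical use of the two accessors above from a consumer. The
 * "struct my_record" payload layout and my_print() are hypothetical;
 * a real caller casts the data to whatever it reserved.
 */
#if 0
static void my_print(struct ring_buffer_event *event)
{
	struct my_record *rec = ring_buffer_event_data(event);
	unsigned int len = ring_buffer_event_length(event);

	pr_info("payload of %u bytes at %p\n", len, rec);
}
#endif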
 288
 289#define for_each_buffer_cpu(buffer, cpu)		\
 290	for_each_cpu(cpu, buffer->cpumask)
 291
 292#define for_each_online_buffer_cpu(buffer, cpu)		\
 293	for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
 294
 295#define TS_SHIFT	27
 296#define TS_MASK		((1ULL << TS_SHIFT) - 1)
 297#define TS_DELTA_TEST	(~TS_MASK)
 298
 299static u64 rb_event_time_stamp(struct ring_buffer_event *event)
 300{
 301	u64 ts;
 302
 303	ts = event->array[0];
 304	ts <<= TS_SHIFT;
 305	ts += event->time_delta;
 306
 307	return ts;
 308}
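/*
 * Worked example (editorial, not part of the original source): a time
 * extend stores the upper bits of the delta in array[0] and the low
 * 27 bits in time_delta. For delta = 0x12345678:
 *
 *   array[0]   = 0x12345678 >> TS_SHIFT = 0x2
 *   time_delta = 0x12345678 & TS_MASK   = 0x2345678
 *
 * and rb_event_time_stamp() recombines them:
 *   (0x2 << 27) + 0x2345678 = 0x12345678
 */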
 309
 310/* Flag when events were overwritten */
 311#define RB_MISSED_EVENTS	(1 << 31)
 312/* Missed count stored at end */
 313#define RB_MISSED_STORED	(1 << 30)
 314
 315struct buffer_data_page {
 316	u64		 time_stamp;	/* page time stamp */
 317	local_t		 commit;	/* write committed index */
 318	unsigned char	 data[] RB_ALIGN_DATA;	/* data of buffer page */
 319};
 320
 321struct buffer_data_read_page {
 322	unsigned		order;	/* order of the page */
 323	struct buffer_data_page	*data;	/* actual data, stored in this page */
 324};
 325
 326/*
 327 * Note, the buffer_page list must be first. The buffer pages
 328 * are allocated in cache lines, which means that each buffer
 329 * page will be at the beginning of a cache line, and thus
 330 * the least significant bits will be zero. We use this to
 331 * add flags in the list struct pointers, to make the ring buffer
 332 * lockless.
 333 */
 334struct buffer_page {
 335	struct list_head list;		/* list of buffer pages */
 336	local_t		 write;		/* index for next write */
 337	unsigned	 read;		/* index for next read */
 338	local_t		 entries;	/* entries on this page */
 339	unsigned long	 real_end;	/* real end of data */
 340	unsigned	 order;		/* order of the page */
 341	struct buffer_data_page *page;	/* Actual data page */
 342};
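/*
 * Illustrative sketch (editorial, not part of the original source):
 * the pointer-tagging trick described above. Since each buffer_page
 * starts on a cache line, the low two bits of any pointer to it are
 * zero and can carry flags; masking them off recovers the pointer
 * (this is what rb_list_head() below does).
 */
#if 0
static inline void *tag_ptr(void *ptr, unsigned long flag)
{
	/* flag must fit in the low two (always-zero) bits */
	return (void *)((unsigned long)ptr | flag);
}

static inline void *untag_ptr(void *ptr)
{
	return (void *)((unsigned long)ptr & ~3UL);
}
#endif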
 343
 344/*
 345 * The buffer page counters, write and entries, must be reset
 346 * atomically when crossing page boundaries. To synchronize this
  347 * update, two counters are packed into a single word. One is
 348 * the actual counter for the write position or count on the page.
 349 *
 350 * The other is a counter of updaters. Before an update happens
 351 * the update partition of the counter is incremented. This will
 352 * allow the updater to update the counter atomically.
 353 *
 354 * The counter is 20 bits, and the state data is 12.
 355 */
 356#define RB_WRITE_MASK		0xfffff
 357#define RB_WRITE_INTCNT		(1 << 20)
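/*
 * Illustrative sketch (editorial, not part of the original source):
 * how the split counter above is read. The low 20 bits carry the
 * write index; each nested updater adds RB_WRITE_INTCNT, so an
 * interrupted update is detected when a cmpxchg against the
 * remembered value fails.
 */
#if 0
static unsigned long write_index(unsigned long w)
{
	return w & RB_WRITE_MASK;	/* low 20 bits: position on the page */
}

static unsigned long updater_count(unsigned long w)
{
	return w >> 20;			/* high 12 bits: nested updaters */
}
#endif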
 358
 359static void rb_init_page(struct buffer_data_page *bpage)
 360{
 361	local_set(&bpage->commit, 0);
 362}
 363
 364static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
 365{
 366	return local_read(&bpage->page->commit);
 367}
 368
 369static void free_buffer_page(struct buffer_page *bpage)
 370{
 371	free_pages((unsigned long)bpage->page, bpage->order);
 372	kfree(bpage);
 373}
 374
 375/*
 376 * We need to fit the time_stamp delta into 27 bits.
 377 */
 378static inline bool test_time_stamp(u64 delta)
 379{
 380	return !!(delta & TS_DELTA_TEST);
 381}
 382
 383struct rb_irq_work {
 384	struct irq_work			work;
 385	wait_queue_head_t		waiters;
 386	wait_queue_head_t		full_waiters;
 387	atomic_t			seq;
 388	bool				waiters_pending;
 389	bool				full_waiters_pending;
 390	bool				wakeup_full;
 391};
 392
 393/*
 394 * Structure to hold event state and handle nested events.
 395 */
 396struct rb_event_info {
 397	u64			ts;
 398	u64			delta;
 399	u64			before;
 400	u64			after;
 401	unsigned long		length;
 402	struct buffer_page	*tail_page;
 403	int			add_timestamp;
 404};
 405
 406/*
  407 * Used for the add_timestamp field:
 408 *  NONE
 409 *  EXTEND - wants a time extend
 410 *  ABSOLUTE - the buffer requests all events to have absolute time stamps
 411 *  FORCE - force a full time stamp.
 412 */
 413enum {
 414	RB_ADD_STAMP_NONE		= 0,
 415	RB_ADD_STAMP_EXTEND		= BIT(1),
 416	RB_ADD_STAMP_ABSOLUTE		= BIT(2),
 417	RB_ADD_STAMP_FORCE		= BIT(3)
 418};
 419/*
 420 * Used for which event context the event is in.
 421 *  TRANSITION = 0
 422 *  NMI     = 1
 423 *  IRQ     = 2
 424 *  SOFTIRQ = 3
 425 *  NORMAL  = 4
 426 *
 427 * See trace_recursive_lock() comment below for more details.
 428 */
 429enum {
 430	RB_CTX_TRANSITION,
 431	RB_CTX_NMI,
 432	RB_CTX_IRQ,
 433	RB_CTX_SOFTIRQ,
 434	RB_CTX_NORMAL,
 435	RB_CTX_MAX
 436};
 437
 438struct rb_time_struct {
 439	local64_t	time;
 440};
 441typedef struct rb_time_struct rb_time_t;
 442
 443#define MAX_NEST	5
 444
 445/*
 446 * head_page == tail_page && head == tail then buffer is empty.
 447 */
 448struct ring_buffer_per_cpu {
 449	int				cpu;
 450	atomic_t			record_disabled;
 451	atomic_t			resize_disabled;
 452	struct trace_buffer	*buffer;
 453	raw_spinlock_t			reader_lock;	/* serialize readers */
 454	arch_spinlock_t			lock;
 455	struct lock_class_key		lock_key;
 456	struct buffer_data_page		*free_page;
 457	unsigned long			nr_pages;
 458	unsigned int			current_context;
 459	struct list_head		*pages;
 460	struct buffer_page		*head_page;	/* read from head */
 461	struct buffer_page		*tail_page;	/* write to tail */
 462	struct buffer_page		*commit_page;	/* committed pages */
 463	struct buffer_page		*reader_page;
 464	unsigned long			lost_events;
 465	unsigned long			last_overrun;
 466	unsigned long			nest;
 467	local_t				entries_bytes;
 468	local_t				entries;
 469	local_t				overrun;
 470	local_t				commit_overrun;
 471	local_t				dropped_events;
 472	local_t				committing;
 473	local_t				commits;
 474	local_t				pages_touched;
 475	local_t				pages_lost;
 476	local_t				pages_read;
 477	long				last_pages_touch;
 478	size_t				shortest_full;
 479	unsigned long			read;
 480	unsigned long			read_bytes;
 481	rb_time_t			write_stamp;
 482	rb_time_t			before_stamp;
 483	u64				event_stamp[MAX_NEST];
 484	u64				read_stamp;
 485	/* pages removed since last reset */
 486	unsigned long			pages_removed;
 487	/* ring buffer pages to update, > 0 to add, < 0 to remove */
 488	long				nr_pages_to_update;
 489	struct list_head		new_pages; /* new pages to add */
 490	struct work_struct		update_pages_work;
 491	struct completion		update_done;
 492
 493	struct rb_irq_work		irq_work;
 494};
 495
 496struct trace_buffer {
 497	unsigned			flags;
 498	int				cpus;
 499	atomic_t			record_disabled;
 500	atomic_t			resizing;
 501	cpumask_var_t			cpumask;
 502
 503	struct lock_class_key		*reader_lock_key;
 504
 505	struct mutex			mutex;
 506
 507	struct ring_buffer_per_cpu	**buffers;
 508
 509	struct hlist_node		node;
 510	u64				(*clock)(void);
 511
 512	struct rb_irq_work		irq_work;
 513	bool				time_stamp_abs;
 514
 515	unsigned int			subbuf_size;
 516	unsigned int			subbuf_order;
 517	unsigned int			max_data_size;
 518};
 519
 520struct ring_buffer_iter {
 521	struct ring_buffer_per_cpu	*cpu_buffer;
 522	unsigned long			head;
 523	unsigned long			next_event;
 524	struct buffer_page		*head_page;
 525	struct buffer_page		*cache_reader_page;
 526	unsigned long			cache_read;
 527	unsigned long			cache_pages_removed;
 528	u64				read_stamp;
 529	u64				page_stamp;
 530	struct ring_buffer_event	*event;
 531	size_t				event_size;
 532	int				missed_events;
 533};
 534
 535int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq *s)
 536{
 537	struct buffer_data_page field;
 538
 539	trace_seq_printf(s, "\tfield: u64 timestamp;\t"
 540			 "offset:0;\tsize:%u;\tsigned:%u;\n",
 541			 (unsigned int)sizeof(field.time_stamp),
 542			 (unsigned int)is_signed_type(u64));
 543
 544	trace_seq_printf(s, "\tfield: local_t commit;\t"
 545			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
 546			 (unsigned int)offsetof(typeof(field), commit),
 547			 (unsigned int)sizeof(field.commit),
 548			 (unsigned int)is_signed_type(long));
 549
 550	trace_seq_printf(s, "\tfield: int overwrite;\t"
 551			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
 552			 (unsigned int)offsetof(typeof(field), commit),
 553			 1,
 554			 (unsigned int)is_signed_type(long));
 555
 556	trace_seq_printf(s, "\tfield: char data;\t"
 557			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
 558			 (unsigned int)offsetof(typeof(field), data),
 559			 (unsigned int)buffer->subbuf_size,
 560			 (unsigned int)is_signed_type(char));
 561
 562	return !trace_seq_has_overflowed(s);
 563}
 564
 565static inline void rb_time_read(rb_time_t *t, u64 *ret)
 566{
 567	*ret = local64_read(&t->time);
 568}
 569static void rb_time_set(rb_time_t *t, u64 val)
 570{
 571	local64_set(&t->time, val);
 572}
 573
 574/*
 575 * Enable this to make sure that the event passed to
 576 * ring_buffer_event_time_stamp() is not committed and also
 577 * is on the buffer that it passed in.
 578 */
 579//#define RB_VERIFY_EVENT
 580#ifdef RB_VERIFY_EVENT
 581static struct list_head *rb_list_head(struct list_head *list);
 582static void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
 583			 void *event)
 584{
 585	struct buffer_page *page = cpu_buffer->commit_page;
 586	struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page);
 587	struct list_head *next;
 588	long commit, write;
 589	unsigned long addr = (unsigned long)event;
 590	bool done = false;
 591	int stop = 0;
 592
 593	/* Make sure the event exists and is not committed yet */
 594	do {
 595		if (page == tail_page || WARN_ON_ONCE(stop++ > 100))
 596			done = true;
 597		commit = local_read(&page->page->commit);
 598		write = local_read(&page->write);
 599		if (addr >= (unsigned long)&page->page->data[commit] &&
 600		    addr < (unsigned long)&page->page->data[write])
 601			return;
 602
 603		next = rb_list_head(page->list.next);
 604		page = list_entry(next, struct buffer_page, list);
 605	} while (!done);
 606	WARN_ON_ONCE(1);
 607}
 608#else
 609static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
 610			 void *event)
 611{
 612}
 613#endif
 614
 615/*
  616 * The absolute time stamp drops the 5 MSBs, and some clocks may
  617 * require them. rb_fix_abs_ts() takes a previous full time
  618 * stamp and adds the 5 MSBs of that time stamp to the saved
  619 * absolute time stamp. The two are then compared, in case of
  620 * the unlikely event that the latest time stamp incremented
  621 * the 5 MSBs.
 622 */
 623static inline u64 rb_fix_abs_ts(u64 abs, u64 save_ts)
 624{
 625	if (save_ts & TS_MSB) {
 626		abs |= save_ts & TS_MSB;
 627		/* Check for overflow */
 628		if (unlikely(abs < save_ts))
 629			abs += 1ULL << 59;
 630	}
 631	return abs;
 632}
 633
 634static inline u64 rb_time_stamp(struct trace_buffer *buffer);
 635
 636/**
 637 * ring_buffer_event_time_stamp - return the event's current time stamp
 638 * @buffer: The buffer that the event is on
 639 * @event: the event to get the time stamp of
 640 *
  641 * Note, this must be called after @event is reserved and before it is
  642 * committed to the ring buffer. It must also be called from the same
  643 * context where the event was reserved (normal, softirq, irq, etc).
 644 *
 645 * Returns the time stamp associated with the current event.
 646 * If the event has an extended time stamp, then that is used as
 647 * the time stamp to return.
  648 * In the highly unlikely case that the event was nested more than
  649 * the max nesting, the write_stamp of the buffer is returned;
  650 * otherwise the current time is returned. Neither of the last two
  651 * cases should ever happen in practice.
 652 */
 653u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
 654				 struct ring_buffer_event *event)
 655{
 656	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()];
 657	unsigned int nest;
 658	u64 ts;
 659
 660	/* If the event includes an absolute time, then just use that */
 661	if (event->type_len == RINGBUF_TYPE_TIME_STAMP) {
 662		ts = rb_event_time_stamp(event);
 663		return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp);
 664	}
 665
 666	nest = local_read(&cpu_buffer->committing);
 667	verify_event(cpu_buffer, event);
 668	if (WARN_ON_ONCE(!nest))
 669		goto fail;
 670
 671	/* Read the current saved nesting level time stamp */
 672	if (likely(--nest < MAX_NEST))
 673		return cpu_buffer->event_stamp[nest];
 674
 675	/* Shouldn't happen, warn if it does */
 676	WARN_ONCE(1, "nest (%d) greater than max", nest);
 677
 678 fail:
 679	rb_time_read(&cpu_buffer->write_stamp, &ts);
 680
 681	return ts;
 682}
 683
 684/**
 685 * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
 686 * @buffer: The ring_buffer to get the number of pages from
 687 * @cpu: The cpu of the ring_buffer to get the number of pages from
 688 *
 689 * Returns the number of pages used by a per_cpu buffer of the ring buffer.
 690 */
 691size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
 692{
 693	return buffer->buffers[cpu]->nr_pages;
 694}
 695
 696/**
 697 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
 698 * @buffer: The ring_buffer to get the number of pages from
 699 * @cpu: The cpu of the ring_buffer to get the number of pages from
 700 *
 701 * Returns the number of pages that have content in the ring buffer.
 702 */
 703size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
 704{
 705	size_t read;
 706	size_t lost;
 707	size_t cnt;
 708
 709	read = local_read(&buffer->buffers[cpu]->pages_read);
 710	lost = local_read(&buffer->buffers[cpu]->pages_lost);
 711	cnt = local_read(&buffer->buffers[cpu]->pages_touched);
 712
 713	if (WARN_ON_ONCE(cnt < lost))
 714		return 0;
 715
 716	cnt -= lost;
 717
 718	/* The reader can read an empty page, but not more than that */
 719	if (cnt < read) {
 720		WARN_ON_ONCE(read > cnt + 1);
 721		return 0;
 722	}
 723
 724	return cnt - read;
 725}
 726
 727static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full)
 728{
 729	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 730	size_t nr_pages;
 731	size_t dirty;
 732
 733	nr_pages = cpu_buffer->nr_pages;
 734	if (!nr_pages || !full)
 735		return true;
 736
 737	/*
 738	 * Add one as dirty will never equal nr_pages, as the sub-buffer
 739	 * that the writer is on is not counted as dirty.
 740	 * This is needed if "buffer_percent" is set to 100.
 741	 */
 742	dirty = ring_buffer_nr_dirty_pages(buffer, cpu) + 1;
 743
 744	return (dirty * 100) >= (full * nr_pages);
 745}
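/*
 * Worked example (editorial, not part of the original source): with
 * nr_pages = 10 and full = 100, nine sub-buffers of data plus the
 * "+ 1" for the writer's page give dirty = 10, and 10 * 100 >=
 * 100 * 10 holds, so a buffer_percent of 100 can still be reached.
 * With full = 50, dirty = 5 (four pages of data plus one) already
 * satisfies 5 * 100 >= 50 * 10.
 */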
 746
 747/*
 748 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 749 *
 750 * Schedules a delayed work to wake up any task that is blocked on the
 751 * ring buffer waiters queue.
 752 */
 753static void rb_wake_up_waiters(struct irq_work *work)
 754{
 755	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
 756
 757	/* For waiters waiting for the first wake up */
 758	(void)atomic_fetch_inc_release(&rbwork->seq);
 759
 760	wake_up_all(&rbwork->waiters);
 761	if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
 762		/* Only cpu_buffer sets the above flags */
 763		struct ring_buffer_per_cpu *cpu_buffer =
 764			container_of(rbwork, struct ring_buffer_per_cpu, irq_work);
 765
 766		/* Called from interrupt context */
 767		raw_spin_lock(&cpu_buffer->reader_lock);
 768		rbwork->wakeup_full = false;
 769		rbwork->full_waiters_pending = false;
 770
 771		/* Waking up all waiters, they will reset the shortest full */
 772		cpu_buffer->shortest_full = 0;
 773		raw_spin_unlock(&cpu_buffer->reader_lock);
 774
 775		wake_up_all(&rbwork->full_waiters);
 776	}
 777}
 778
 779/**
 780 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
 781 * @buffer: The ring buffer to wake waiters on
 782 * @cpu: The CPU buffer to wake waiters on
 783 *
  784 * When a file that represents a ring buffer is closing,
  785 * it is prudent to wake up any waiters that are on it.
 786 */
 787void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
 788{
 789	struct ring_buffer_per_cpu *cpu_buffer;
 790	struct rb_irq_work *rbwork;
 791
 792	if (!buffer)
 793		return;
 794
 795	if (cpu == RING_BUFFER_ALL_CPUS) {
 796
 797		/* Wake up individual ones too. One level recursion */
 798		for_each_buffer_cpu(buffer, cpu)
 799			ring_buffer_wake_waiters(buffer, cpu);
 800
 801		rbwork = &buffer->irq_work;
 802	} else {
 803		if (WARN_ON_ONCE(!buffer->buffers))
 804			return;
 805		if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
 806			return;
 807
 808		cpu_buffer = buffer->buffers[cpu];
 809		/* The CPU buffer may not have been initialized yet */
 810		if (!cpu_buffer)
 811			return;
 812		rbwork = &cpu_buffer->irq_work;
 813	}
 814
 815	/* This can be called in any context */
 816	irq_work_queue(&rbwork->work);
 817}
 818
 819static bool rb_watermark_hit(struct trace_buffer *buffer, int cpu, int full)
 820{
 821	struct ring_buffer_per_cpu *cpu_buffer;
 822	bool ret = false;
 823
  824	/* Reads of all CPUs always wait for any data */
 825	if (cpu == RING_BUFFER_ALL_CPUS)
 826		return !ring_buffer_empty(buffer);
 827
 828	cpu_buffer = buffer->buffers[cpu];
 829
 830	if (!ring_buffer_empty_cpu(buffer, cpu)) {
 831		unsigned long flags;
 832		bool pagebusy;
 833
 834		if (!full)
 835			return true;
 836
 837		raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 838		pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
 839		ret = !pagebusy && full_hit(buffer, cpu, full);
 840
 841		if (!ret && (!cpu_buffer->shortest_full ||
 842			     cpu_buffer->shortest_full > full)) {
 843		    cpu_buffer->shortest_full = full;
 844		}
 845		raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 846	}
 847	return ret;
 848}
 849
 850static inline bool
 851rb_wait_cond(struct rb_irq_work *rbwork, struct trace_buffer *buffer,
 852	     int cpu, int full, ring_buffer_cond_fn cond, void *data)
 853{
 854	if (rb_watermark_hit(buffer, cpu, full))
 855		return true;
 856
 857	if (cond(data))
 858		return true;
 859
 860	/*
 861	 * The events can happen in critical sections where
 862	 * checking a work queue can cause deadlocks.
 863	 * After adding a task to the queue, this flag is set
 864	 * only to notify events to try to wake up the queue
 865	 * using irq_work.
 866	 *
 867	 * We don't clear it even if the buffer is no longer
 868	 * empty. The flag only causes the next event to run
  869	 * irq_work to do the work queue wake up. The worst
 870	 * that can happen if we race with !trace_empty() is that
 871	 * an event will cause an irq_work to try to wake up
 872	 * an empty queue.
 873	 *
 874	 * There's no reason to protect this flag either, as
 875	 * the work queue and irq_work logic will do the necessary
 876	 * synchronization for the wake ups. The only thing
 877	 * that is necessary is that the wake up happens after
  878	 * a task has been queued. Spurious wake ups are OK.
 879	 */
 880	if (full)
 881		rbwork->full_waiters_pending = true;
 882	else
 883		rbwork->waiters_pending = true;
 884
 885	return false;
 886}
 887
 888struct rb_wait_data {
 889	struct rb_irq_work		*irq_work;
 890	int				seq;
 891};
 892
 893/*
  894 * The default wait condition for ring_buffer_wait() is just to exit the
 895 * wait loop the first time it is woken up.
 896 */
 897static bool rb_wait_once(void *data)
 898{
 899	struct rb_wait_data *rdata = data;
 900	struct rb_irq_work *rbwork = rdata->irq_work;
 901
 902	return atomic_read_acquire(&rbwork->seq) != rdata->seq;
 903}
 904
 905/**
 906 * ring_buffer_wait - wait for input to the ring buffer
 907 * @buffer: buffer to wait on
 908 * @cpu: the cpu buffer to wait on
 909 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
 910 * @cond: condition function to break out of wait (NULL to run once)
 911 * @data: the data to pass to @cond.
 912 *
 913 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 914 * as data is added to any of the @buffer's cpu buffers. Otherwise
 915 * it will wait for data to be added to a specific cpu buffer.
 916 */
 917int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full,
 918		     ring_buffer_cond_fn cond, void *data)
 919{
 920	struct ring_buffer_per_cpu *cpu_buffer;
 921	struct wait_queue_head *waitq;
 922	struct rb_irq_work *rbwork;
 923	struct rb_wait_data rdata;
 924	int ret = 0;
 925
 926	/*
 927	 * Depending on what the caller is waiting for, either any
 928	 * data in any cpu buffer, or a specific buffer, put the
 929	 * caller on the appropriate wait queue.
 930	 */
 931	if (cpu == RING_BUFFER_ALL_CPUS) {
 932		rbwork = &buffer->irq_work;
 933		/* Full only makes sense on per cpu reads */
 934		full = 0;
 935	} else {
 936		if (!cpumask_test_cpu(cpu, buffer->cpumask))
 937			return -ENODEV;
 938		cpu_buffer = buffer->buffers[cpu];
 939		rbwork = &cpu_buffer->irq_work;
 940	}
 941
 942	if (full)
 943		waitq = &rbwork->full_waiters;
 944	else
 945		waitq = &rbwork->waiters;
 946
 947	/* Set up to exit loop as soon as it is woken */
 948	if (!cond) {
 949		cond = rb_wait_once;
 950		rdata.irq_work = rbwork;
 951		rdata.seq = atomic_read_acquire(&rbwork->seq);
 952		data = &rdata;
 953	}
 954
 955	ret = wait_event_interruptible((*waitq),
 956				rb_wait_cond(rbwork, buffer, cpu, full, cond, data));
 957
 958	return ret;
 959}
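/*
 * Illustrative sketch (editorial, not part of the original source): a
 * minimal reader loop over ring_buffer_wait(). A NULL @cond selects
 * the default rb_wait_once behaviour, so each call returns after one
 * wake-up. my_consume() is a hypothetical consumer.
 */
#if 0
static int my_reader(struct trace_buffer *buffer, int cpu)
{
	int ret;

	for (;;) {
		/* sleep until data is added to this cpu buffer */
		ret = ring_buffer_wait(buffer, cpu, 0, NULL, NULL);
		if (ret)	/* interrupted or invalid cpu */
			return ret;
		my_consume(buffer, cpu);
	}
}
#endif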
 960
 961/**
 962 * ring_buffer_poll_wait - poll on buffer input
 963 * @buffer: buffer to wait on
 964 * @cpu: the cpu buffer to wait on
 965 * @filp: the file descriptor
 966 * @poll_table: The poll descriptor
 967 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
 968 *
 969 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 970 * as data is added to any of the @buffer's cpu buffers. Otherwise
 971 * it will wait for data to be added to a specific cpu buffer.
 972 *
 973 * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
 974 * zero otherwise.
 975 */
 976__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
 977			  struct file *filp, poll_table *poll_table, int full)
 978{
 979	struct ring_buffer_per_cpu *cpu_buffer;
 980	struct rb_irq_work *rbwork;
 981
 982	if (cpu == RING_BUFFER_ALL_CPUS) {
 983		rbwork = &buffer->irq_work;
 984		full = 0;
 985	} else {
 986		if (!cpumask_test_cpu(cpu, buffer->cpumask))
 987			return EPOLLERR;
 988
 989		cpu_buffer = buffer->buffers[cpu];
 990		rbwork = &cpu_buffer->irq_work;
 991	}
 992
 993	if (full) {
 994		poll_wait(filp, &rbwork->full_waiters, poll_table);
 995
 996		if (rb_watermark_hit(buffer, cpu, full))
 997			return EPOLLIN | EPOLLRDNORM;
 998		/*
 999		 * Only allow full_waiters_pending update to be seen after
1000		 * the shortest_full is set (in rb_watermark_hit). If the
1001		 * writer sees the full_waiters_pending flag set, it will
1002		 * compare the amount in the ring buffer to shortest_full.
1003		 * If the amount in the ring buffer is greater than the
1004		 * shortest_full percent, it will call the irq_work handler
1005		 * to wake up this list. The irq_handler will reset shortest_full
1006		 * back to zero. That's done under the reader_lock, but
1007		 * the below smp_mb() makes sure that the update to
1008		 * full_waiters_pending doesn't leak up into the above.
1009		 */
1010		smp_mb();
1011		rbwork->full_waiters_pending = true;
1012		return 0;
1013	}
1014
1015	poll_wait(filp, &rbwork->waiters, poll_table);
1016	rbwork->waiters_pending = true;
1017
1018	/*
1019	 * There's a tight race between setting the waiters_pending and
1020	 * checking if the ring buffer is empty.  Once the waiters_pending bit
1021	 * is set, the next event will wake the task up, but we can get stuck
 1022	 * if there's only a single event in the buffer.
1023	 *
1024	 * FIXME: Ideally, we need a memory barrier on the writer side as well,
1025	 * but adding a memory barrier to all events will cause too much of a
1026	 * performance hit in the fast path.  We only need a memory barrier when
1027	 * the buffer goes from empty to having content.  But as this race is
1028	 * extremely small, and it's not a problem if another event comes in, we
1029	 * will fix it later.
1030	 */
1031	smp_mb();
1032
1033	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
1034	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
1035		return EPOLLIN | EPOLLRDNORM;
1036	return 0;
1037}
1038
1039/* buffer may be either ring_buffer or ring_buffer_per_cpu */
1040#define RB_WARN_ON(b, cond)						\
1041	({								\
1042		int _____ret = unlikely(cond);				\
1043		if (_____ret) {						\
1044			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
1045				struct ring_buffer_per_cpu *__b =	\
1046					(void *)b;			\
1047				atomic_inc(&__b->buffer->record_disabled); \
1048			} else						\
1049				atomic_inc(&b->record_disabled);	\
1050			WARN_ON(1);					\
1051		}							\
1052		_____ret;						\
1053	})
1054
1055/* Up this if you want to test the TIME_EXTENTS and normalization */
1056#define DEBUG_SHIFT 0
1057
1058static inline u64 rb_time_stamp(struct trace_buffer *buffer)
1059{
1060	u64 ts;
1061
1062	/* Skip retpolines :-( */
1063	if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && likely(buffer->clock == trace_clock_local))
1064		ts = trace_clock_local();
1065	else
1066		ts = buffer->clock();
1067
1068	/* shift to debug/test normalization and TIME_EXTENTS */
1069	return ts << DEBUG_SHIFT;
1070}
1071
1072u64 ring_buffer_time_stamp(struct trace_buffer *buffer)
1073{
1074	u64 time;
1075
1076	preempt_disable_notrace();
1077	time = rb_time_stamp(buffer);
1078	preempt_enable_notrace();
1079
1080	return time;
1081}
1082EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
1083
1084void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
1085				      int cpu, u64 *ts)
1086{
1087	/* Just stupid testing the normalize function and deltas */
1088	*ts >>= DEBUG_SHIFT;
1089}
1090EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
1091
1092/*
1093 * Making the ring buffer lockless makes things tricky.
 1094 * Writes only happen on the CPU that they are on, and they
 1095 * only need to worry about interrupts. Reads, however, can
 1096 * happen on any CPU.
1097 *
1098 * The reader page is always off the ring buffer, but when the
1099 * reader finishes with a page, it needs to swap its page with
1100 * a new one from the buffer. The reader needs to take from
1101 * the head (writes go to the tail). But if a writer is in overwrite
1102 * mode and wraps, it must push the head page forward.
1103 *
1104 * Here lies the problem.
1105 *
1106 * The reader must be careful to replace only the head page, and
1107 * not another one. As described at the top of the file in the
1108 * ASCII art, the reader sets its old page to point to the next
1109 * page after head. It then sets the page after head to point to
1110 * the old reader page. But if the writer moves the head page
1111 * during this operation, the reader could end up with the tail.
1112 *
1113 * We use cmpxchg to help prevent this race. We also do something
1114 * special with the page before head. We set the LSB to 1.
1115 *
1116 * When the writer must push the page forward, it will clear the
1117 * bit that points to the head page, move the head, and then set
1118 * the bit that points to the new head page.
1119 *
1120 * We also don't want an interrupt coming in and moving the head
 1121 * page out from under another writer. Thus we use the second LSB
 1122 * to catch that too. Thus:
1123 *
1124 * head->list->prev->next        bit 1          bit 0
1125 *                              -------        -------
1126 * Normal page                     0              0
1127 * Points to head page             0              1
1128 * New head page                   1              0
1129 *
1130 * Note we can not trust the prev pointer of the head page, because:
1131 *
1132 * +----+       +-----+        +-----+
1133 * |    |------>|  T  |---X--->|  N  |
1134 * |    |<------|     |        |     |
1135 * +----+       +-----+        +-----+
1136 *   ^                           ^ |
1137 *   |          +-----+          | |
1138 *   +----------|  R  |----------+ |
1139 *              |     |<-----------+
1140 *              +-----+
1141 *
1142 * Key:  ---X-->  HEAD flag set in pointer
1143 *         T      Tail page
1144 *         R      Reader page
1145 *         N      Next page
1146 *
1147 * (see __rb_reserve_next() to see where this happens)
1148 *
1149 *  What the above shows is that the reader just swapped out
1150 *  the reader page with a page in the buffer, but before it
 1151 *  could make the new header point back to the new page added,
1152 *  it was preempted by a writer. The writer moved forward onto
1153 *  the new page added by the reader and is about to move forward
1154 *  again.
1155 *
1156 *  You can see, it is legitimate for the previous pointer of
1157 *  the head (or any page) not to point back to itself. But only
1158 *  temporarily.
1159 */
1160
1161#define RB_PAGE_NORMAL		0UL
1162#define RB_PAGE_HEAD		1UL
1163#define RB_PAGE_UPDATE		2UL
1164
1165
1166#define RB_FLAG_MASK		3UL
1167
1168/* PAGE_MOVED is not part of the mask */
1169#define RB_PAGE_MOVED		4UL
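/*
 * Illustrative sketch (editorial, not part of the original source):
 * decoding a tagged "next" pointer according to the table above. The
 * flag lives in the low two bits of the pointer value; rb_list_head()
 * below strips it off.
 */
#if 0
static const char *decode_next(unsigned long next)
{
	switch (next & RB_FLAG_MASK) {
	case RB_PAGE_NORMAL:	return "normal page";
	case RB_PAGE_HEAD:	return "next page is the head page";
	case RB_PAGE_UPDATE:	return "new head page being installed";
	default:		return "invalid";
	}
}
#endif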
1170
1171/*
1172 * rb_list_head - remove any bit
1173 */
1174static struct list_head *rb_list_head(struct list_head *list)
1175{
1176	unsigned long val = (unsigned long)list;
1177
1178	return (struct list_head *)(val & ~RB_FLAG_MASK);
1179}
1180
1181/*
1182 * rb_is_head_page - test if the given page is the head page
1183 *
1184 * Because the reader may move the head_page pointer, we can
1185 * not trust what the head page is (it may be pointing to
1186 * the reader page). But if the next page is a header page,
1187 * its flags will be non zero.
1188 */
1189static inline int
1190rb_is_head_page(struct buffer_page *page, struct list_head *list)
1191{
1192	unsigned long val;
1193
1194	val = (unsigned long)list->next;
1195
1196	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
1197		return RB_PAGE_MOVED;
1198
1199	return val & RB_FLAG_MASK;
1200}
1201
1202/*
1203 * rb_is_reader_page
1204 *
 1205 * The unique thing about the reader page is that, if the
1206 * writer is ever on it, the previous pointer never points
1207 * back to the reader page.
1208 */
1209static bool rb_is_reader_page(struct buffer_page *page)
1210{
1211	struct list_head *list = page->list.prev;
1212
1213	return rb_list_head(list->next) != &page->list;
1214}
1215
1216/*
1217 * rb_set_list_to_head - set a list_head to be pointing to head.
1218 */
1219static void rb_set_list_to_head(struct list_head *list)
1220{
1221	unsigned long *ptr;
1222
1223	ptr = (unsigned long *)&list->next;
1224	*ptr |= RB_PAGE_HEAD;
1225	*ptr &= ~RB_PAGE_UPDATE;
1226}
1227
1228/*
1229 * rb_head_page_activate - sets up head page
1230 */
1231static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
1232{
1233	struct buffer_page *head;
1234
1235	head = cpu_buffer->head_page;
1236	if (!head)
1237		return;
1238
1239	/*
1240	 * Set the previous list pointer to have the HEAD flag.
1241	 */
1242	rb_set_list_to_head(head->list.prev);
1243}
1244
1245static void rb_list_head_clear(struct list_head *list)
1246{
1247	unsigned long *ptr = (unsigned long *)&list->next;
1248
1249	*ptr &= ~RB_FLAG_MASK;
1250}
1251
1252/*
1253 * rb_head_page_deactivate - clears head page ptr (for free list)
1254 */
1255static void
1256rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
1257{
1258	struct list_head *hd;
1259
1260	/* Go through the whole list and clear any pointers found. */
1261	rb_list_head_clear(cpu_buffer->pages);
1262
1263	list_for_each(hd, cpu_buffer->pages)
1264		rb_list_head_clear(hd);
1265}
1266
1267static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
1268			    struct buffer_page *head,
1269			    struct buffer_page *prev,
1270			    int old_flag, int new_flag)
1271{
1272	struct list_head *list;
1273	unsigned long val = (unsigned long)&head->list;
1274	unsigned long ret;
1275
1276	list = &prev->list;
1277
1278	val &= ~RB_FLAG_MASK;
1279
1280	ret = cmpxchg((unsigned long *)&list->next,
1281		      val | old_flag, val | new_flag);
1282
1283	/* check if the reader took the page */
1284	if ((ret & ~RB_FLAG_MASK) != val)
1285		return RB_PAGE_MOVED;
1286
1287	return ret & RB_FLAG_MASK;
1288}
1289
1290static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
1291				   struct buffer_page *head,
1292				   struct buffer_page *prev,
1293				   int old_flag)
1294{
1295	return rb_head_page_set(cpu_buffer, head, prev,
1296				old_flag, RB_PAGE_UPDATE);
1297}
1298
1299static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
1300				 struct buffer_page *head,
1301				 struct buffer_page *prev,
1302				 int old_flag)
1303{
1304	return rb_head_page_set(cpu_buffer, head, prev,
1305				old_flag, RB_PAGE_HEAD);
1306}
1307
1308static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
1309				   struct buffer_page *head,
1310				   struct buffer_page *prev,
1311				   int old_flag)
1312{
1313	return rb_head_page_set(cpu_buffer, head, prev,
1314				old_flag, RB_PAGE_NORMAL);
1315}
1316
1317static inline void rb_inc_page(struct buffer_page **bpage)
1318{
1319	struct list_head *p = rb_list_head((*bpage)->list.next);
1320
1321	*bpage = list_entry(p, struct buffer_page, list);
1322}
1323
1324static struct buffer_page *
1325rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
1326{
1327	struct buffer_page *head;
1328	struct buffer_page *page;
1329	struct list_head *list;
1330	int i;
1331
1332	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
1333		return NULL;
1334
1335	/* sanity check */
1336	list = cpu_buffer->pages;
1337	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
1338		return NULL;
1339
1340	page = head = cpu_buffer->head_page;
1341	/*
1342	 * It is possible that the writer moves the header behind
1343	 * where we started, and we miss in one loop.
1344	 * A second loop should grab the header, but we'll do
1345	 * three loops just because I'm paranoid.
1346	 */
1347	for (i = 0; i < 3; i++) {
1348		do {
1349			if (rb_is_head_page(page, page->list.prev)) {
1350				cpu_buffer->head_page = page;
1351				return page;
1352			}
1353			rb_inc_page(&page);
1354		} while (page != head);
1355	}
1356
1357	RB_WARN_ON(cpu_buffer, 1);
1358
1359	return NULL;
1360}
1361
1362static bool rb_head_page_replace(struct buffer_page *old,
1363				struct buffer_page *new)
1364{
1365	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
1366	unsigned long val;
1367
1368	val = *ptr & ~RB_FLAG_MASK;
1369	val |= RB_PAGE_HEAD;
1370
1371	return try_cmpxchg(ptr, &val, (unsigned long)&new->list);
1372}
1373
1374/*
1375 * rb_tail_page_update - move the tail page forward
1376 */
1377static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
1378			       struct buffer_page *tail_page,
1379			       struct buffer_page *next_page)
1380{
1381	unsigned long old_entries;
1382	unsigned long old_write;
1383
1384	/*
1385	 * The tail page now needs to be moved forward.
1386	 *
1387	 * We need to reset the tail page, but without messing
1388	 * with possible erasing of data brought in by interrupts
1389	 * that have moved the tail page and are currently on it.
1390	 *
1391	 * We add a counter to the write field to denote this.
1392	 */
1393	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
1394	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
1395
1396	/*
1397	 * Just make sure we have seen our old_write and synchronize
1398	 * with any interrupts that come in.
1399	 */
1400	barrier();
1401
1402	/*
1403	 * If the tail page is still the same as what we think
1404	 * it is, then it is up to us to update the tail
1405	 * pointer.
1406	 */
1407	if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
1408		/* Zero the write counter */
1409		unsigned long val = old_write & ~RB_WRITE_MASK;
1410		unsigned long eval = old_entries & ~RB_WRITE_MASK;
1411
1412		/*
1413		 * This will only succeed if an interrupt did
 1414		 * not come in and change it, in which case we
1415		 * do not want to modify it.
1416		 *
1417		 * We add (void) to let the compiler know that we do not care
1418		 * about the return value of these functions. We use the
1419		 * cmpxchg to only update if an interrupt did not already
1420		 * do it for us. If the cmpxchg fails, we don't care.
1421		 */
1422		(void)local_cmpxchg(&next_page->write, old_write, val);
1423		(void)local_cmpxchg(&next_page->entries, old_entries, eval);
1424
1425		/*
1426		 * No need to worry about races with clearing out the commit.
 1427		 * It can only increment when a commit takes place, and that
 1428		 * only happens in the outermost nested commit.
1429		 */
1430		local_set(&next_page->page->commit, 0);
1431
1432		/* Either we update tail_page or an interrupt does */
1433		if (try_cmpxchg(&cpu_buffer->tail_page, &tail_page, next_page))
1434			local_inc(&cpu_buffer->pages_touched);
1435	}
1436}
1437
1438static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
1439			  struct buffer_page *bpage)
1440{
1441	unsigned long val = (unsigned long)bpage;
1442
1443	RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK);
1444}
1445
1446/**
1447 * rb_check_pages - integrity check of buffer pages
1448 * @cpu_buffer: CPU buffer with pages to test
1449 *
1450 * As a safety measure we check to make sure the data pages have not
1451 * been corrupted.
1452 *
1453 * Callers of this function need to guarantee that the list of pages doesn't get
1454 * modified during the check. In particular, if it's possible that the function
1455 * is invoked with concurrent readers which can swap in a new reader page then
1456 * the caller should take cpu_buffer->reader_lock.
1457 */
1458static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
1459{
1460	struct list_head *head = rb_list_head(cpu_buffer->pages);
1461	struct list_head *tmp;
1462
1463	if (RB_WARN_ON(cpu_buffer,
1464			rb_list_head(rb_list_head(head->next)->prev) != head))
1465		return;
1466
1467	if (RB_WARN_ON(cpu_buffer,
1468			rb_list_head(rb_list_head(head->prev)->next) != head))
1469		return;
1470
1471	for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) {
1472		if (RB_WARN_ON(cpu_buffer,
1473				rb_list_head(rb_list_head(tmp->next)->prev) != tmp))
1474			return;
1475
1476		if (RB_WARN_ON(cpu_buffer,
1477				rb_list_head(rb_list_head(tmp->prev)->next) != tmp))
1478			return;
1479	}
1480}
1481
1482static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1483		long nr_pages, struct list_head *pages)
1484{
1485	struct buffer_page *bpage, *tmp;
1486	bool user_thread = current->mm != NULL;
1487	gfp_t mflags;
1488	long i;
1489
1490	/*
1491	 * Check if the available memory is there first.
1492	 * Note, si_mem_available() only gives us a rough estimate of available
1493	 * memory. It may not be accurate. But we don't care, we just want
1494	 * to prevent doing any allocation when it is obvious that it is
1495	 * not going to succeed.
1496	 */
1497	i = si_mem_available();
1498	if (i < nr_pages)
1499		return -ENOMEM;
1500
1501	/*
1502	 * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
1503	 * gracefully without invoking oom-killer and the system is not
1504	 * destabilized.
1505	 */
1506	mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL;
1507
1508	/*
 1509	 * A user thread may allocate too much while si_mem_available()
1510	 * reports there's enough memory, even though there is not.
1511	 * Make sure the OOM killer kills this thread. This can happen
1512	 * even with RETRY_MAYFAIL because another task may be doing
1513	 * an allocation after this task has taken all memory.
1514	 * This is the task the OOM killer needs to take out during this
1515	 * loop, even if it was triggered by an allocation somewhere else.
1516	 */
1517	if (user_thread)
1518		set_current_oom_origin();
1519	for (i = 0; i < nr_pages; i++) {
1520		struct page *page;
1521
1522		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1523				    mflags, cpu_to_node(cpu_buffer->cpu));
1524		if (!bpage)
1525			goto free_pages;
1526
1527		rb_check_bpage(cpu_buffer, bpage);
1528
1529		list_add(&bpage->list, pages);
1530
1531		page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
1532					mflags | __GFP_ZERO,
1533					cpu_buffer->buffer->subbuf_order);
1534		if (!page)
1535			goto free_pages;
1536		bpage->page = page_address(page);
1537		bpage->order = cpu_buffer->buffer->subbuf_order;
1538		rb_init_page(bpage->page);
1539
1540		if (user_thread && fatal_signal_pending(current))
1541			goto free_pages;
1542	}
1543	if (user_thread)
1544		clear_current_oom_origin();
1545
1546	return 0;
1547
1548free_pages:
1549	list_for_each_entry_safe(bpage, tmp, pages, list) {
1550		list_del_init(&bpage->list);
1551		free_buffer_page(bpage);
1552	}
1553	if (user_thread)
1554		clear_current_oom_origin();
1555
1556	return -ENOMEM;
1557}
1558
1559static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1560			     unsigned long nr_pages)
1561{
1562	LIST_HEAD(pages);
1563
1564	WARN_ON(!nr_pages);
1565
1566	if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages))
1567		return -ENOMEM;
1568
1569	/*
1570	 * The ring buffer page list is a circular list that does not
1571	 * start and end with a list head. All page list items point to
1572	 * other pages.
1573	 */
1574	cpu_buffer->pages = pages.next;
1575	list_del(&pages);
1576
1577	cpu_buffer->nr_pages = nr_pages;
1578
1579	rb_check_pages(cpu_buffer);
1580
1581	return 0;
1582}
1583
1584static struct ring_buffer_per_cpu *
1585rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
1586{
1587	struct ring_buffer_per_cpu *cpu_buffer;
1588	struct buffer_page *bpage;
1589	struct page *page;
1590	int ret;
1591
1592	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1593				  GFP_KERNEL, cpu_to_node(cpu));
1594	if (!cpu_buffer)
1595		return NULL;
1596
1597	cpu_buffer->cpu = cpu;
1598	cpu_buffer->buffer = buffer;
1599	raw_spin_lock_init(&cpu_buffer->reader_lock);
1600	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1601	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1602	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
1603	init_completion(&cpu_buffer->update_done);
1604	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
1605	init_waitqueue_head(&cpu_buffer->irq_work.waiters);
1606	init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);
1607
1608	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1609			    GFP_KERNEL, cpu_to_node(cpu));
1610	if (!bpage)
1611		goto fail_free_buffer;
1612
1613	rb_check_bpage(cpu_buffer, bpage);
1614
1615	cpu_buffer->reader_page = bpage;
1616
1617	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL | __GFP_ZERO,
1618				cpu_buffer->buffer->subbuf_order);
1619	if (!page)
1620		goto fail_free_reader;
1621	bpage->page = page_address(page);
1622	rb_init_page(bpage->page);
1623
1624	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1625	INIT_LIST_HEAD(&cpu_buffer->new_pages);
1626
1627	ret = rb_allocate_pages(cpu_buffer, nr_pages);
1628	if (ret < 0)
1629		goto fail_free_reader;
1630
1631	cpu_buffer->head_page
1632		= list_entry(cpu_buffer->pages, struct buffer_page, list);
1633	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1634
1635	rb_head_page_activate(cpu_buffer);
1636
1637	return cpu_buffer;
1638
1639 fail_free_reader:
1640	free_buffer_page(cpu_buffer->reader_page);
1641
1642 fail_free_buffer:
1643	kfree(cpu_buffer);
1644	return NULL;
1645}
1646
1647static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1648{
1649	struct list_head *head = cpu_buffer->pages;
1650	struct buffer_page *bpage, *tmp;
1651
1652	irq_work_sync(&cpu_buffer->irq_work.work);
1653
1654	free_buffer_page(cpu_buffer->reader_page);
1655
1656	if (head) {
1657		rb_head_page_deactivate(cpu_buffer);
1658
1659		list_for_each_entry_safe(bpage, tmp, head, list) {
1660			list_del_init(&bpage->list);
1661			free_buffer_page(bpage);
1662		}
1663		bpage = list_entry(head, struct buffer_page, list);
1664		free_buffer_page(bpage);
1665	}
1666
1667	free_page((unsigned long)cpu_buffer->free_page);
1668
1669	kfree(cpu_buffer);
1670}
1671
1672/**
1673 * __ring_buffer_alloc - allocate a new ring_buffer
1674 * @size: the size in bytes per cpu that is needed.
1675 * @flags: attributes to set for the ring buffer.
1676 * @key: ring buffer reader_lock_key.
1677 *
1678 * Currently the only flag that is available is the RB_FL_OVERWRITE
1679 * flag. This flag means that the buffer will overwrite old data
1680 * when the buffer wraps. If this flag is not set, the buffer will
1681 * drop data when the tail hits the head.
1682 */
1683struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1684					struct lock_class_key *key)
1685{
1686	struct trace_buffer *buffer;
1687	long nr_pages;
1688	int bsize;
1689	int cpu;
1690	int ret;
1691
1692	/* keep it in its own cache line */
1693	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1694			 GFP_KERNEL);
1695	if (!buffer)
1696		return NULL;
1697
1698	if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1699		goto fail_free_buffer;
1700
1701	/* Default buffer page size - one system page */
1702	buffer->subbuf_order = 0;
1703	buffer->subbuf_size = PAGE_SIZE - BUF_PAGE_HDR_SIZE;
1704
 1705	/* Max payload is buffer page size - header (8 bytes) */
1706	buffer->max_data_size = buffer->subbuf_size - (sizeof(u32) * 2);
1707
1708	nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
1709	buffer->flags = flags;
1710	buffer->clock = trace_clock_local;
1711	buffer->reader_lock_key = key;
1712
1713	init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
1714	init_waitqueue_head(&buffer->irq_work.waiters);
1715
1716	/* need at least two pages */
1717	if (nr_pages < 2)
1718		nr_pages = 2;
1719
1720	buffer->cpus = nr_cpu_ids;
1721
1722	bsize = sizeof(void *) * nr_cpu_ids;
1723	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1724				  GFP_KERNEL);
1725	if (!buffer->buffers)
1726		goto fail_free_cpumask;
1727
1728	cpu = raw_smp_processor_id();
1729	cpumask_set_cpu(cpu, buffer->cpumask);
1730	buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
1731	if (!buffer->buffers[cpu])
1732		goto fail_free_buffers;
1733
1734	ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
1735	if (ret < 0)
1736		goto fail_free_buffers;
1737
1738	mutex_init(&buffer->mutex);
1739
1740	return buffer;
1741
1742 fail_free_buffers:
1743	for_each_buffer_cpu(buffer, cpu) {
1744		if (buffer->buffers[cpu])
1745			rb_free_cpu_buffer(buffer->buffers[cpu]);
1746	}
1747	kfree(buffer->buffers);
1748
1749 fail_free_cpumask:
1750	free_cpumask_var(buffer->cpumask);
1751
1752 fail_free_buffer:
1753	kfree(buffer);
1754	return NULL;
1755}
1756EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
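/*
 * Illustrative sketch (editorial, not part of the original source):
 * typical allocate/free usage. ring_buffer_alloc() is the wrapper
 * macro from <linux/ring_buffer.h> that supplies the lock_class_key
 * for __ring_buffer_alloc() above.
 */
#if 0
static struct trace_buffer *my_buffer;

static int __init my_init(void)
{
	/* one megabyte per CPU, overwrite old data when full */
	my_buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
	if (!my_buffer)
		return -ENOMEM;
	return 0;
}

static void __exit my_exit(void)
{
	ring_buffer_free(my_buffer);
}
#endif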
1757
1758/**
1759 * ring_buffer_free - free a ring buffer.
1760 * @buffer: the buffer to free.
1761 */
1762void
1763ring_buffer_free(struct trace_buffer *buffer)
1764{
1765	int cpu;
1766
1767	cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
1768
1769	irq_work_sync(&buffer->irq_work.work);
1770
1771	for_each_buffer_cpu(buffer, cpu)
1772		rb_free_cpu_buffer(buffer->buffers[cpu]);
1773
1774	kfree(buffer->buffers);
1775	free_cpumask_var(buffer->cpumask);
1776
1777	kfree(buffer);
1778}
1779EXPORT_SYMBOL_GPL(ring_buffer_free);
1780
1781void ring_buffer_set_clock(struct trace_buffer *buffer,
1782			   u64 (*clock)(void))
1783{
1784	buffer->clock = clock;
1785}
1786
1787void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs)
1788{
1789	buffer->time_stamp_abs = abs;
1790}
1791
1792bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer)
1793{
1794	return buffer->time_stamp_abs;
1795}
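
/*
 * Illustrative sketch (assuming the standard kernel trace clocks): a
 * tracer wanting timestamps comparable across CPUs could replace the
 * default trace_clock_local with trace_clock_global, and could make every
 * event carry a full timestamp instead of a delta:
 *
 *	ring_buffer_set_clock(buffer, trace_clock_global);
 *	ring_buffer_set_time_stamp_abs(buffer, true);
 */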
1796
1797static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1798
1799static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1800{
1801	return local_read(&bpage->entries) & RB_WRITE_MASK;
1802}
1803
1804static inline unsigned long rb_page_write(struct buffer_page *bpage)
1805{
1806	return local_read(&bpage->write) & RB_WRITE_MASK;
1807}
1808
1809static bool
1810rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
1811{
1812	struct list_head *tail_page, *to_remove, *next_page;
1813	struct buffer_page *to_remove_page, *tmp_iter_page;
1814	struct buffer_page *last_page, *first_page;
1815	unsigned long nr_removed;
1816	unsigned long head_bit;
1817	int page_entries;
1818
1819	head_bit = 0;
1820
1821	raw_spin_lock_irq(&cpu_buffer->reader_lock);
1822	atomic_inc(&cpu_buffer->record_disabled);
1823	/*
1824	 * We don't race with the readers since we have acquired the reader
1825	 * lock. We also don't race with writers after disabling recording.
1826	 * This makes it easy to figure out the first and the last page to be
1827	 * removed from the list. We unlink all the pages in between including
1828	 * the first and last pages. This is done in a busy loop so that we
1829	 * lose as few trace entries as possible.
1830	 * The pages are freed after we restart recording and unlock readers.
1831	 */
1832	tail_page = &cpu_buffer->tail_page->list;
1833
1834	/*
1835	 * The tail page might be on the reader page; in that case, remove
1836	 * the next page from the ring buffer instead.
1837	 */
1838	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1839		tail_page = rb_list_head(tail_page->next);
1840	to_remove = tail_page;
1841
1842	/* start of pages to remove */
1843	first_page = list_entry(rb_list_head(to_remove->next),
1844				struct buffer_page, list);
1845
1846	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
1847		to_remove = rb_list_head(to_remove)->next;
1848		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
1849	}
1850	/* Read iterators need to reset themselves when some pages are removed */
1851	cpu_buffer->pages_removed += nr_removed;
1852
1853	next_page = rb_list_head(to_remove)->next;
1854
1855	/*
1856	 * Now we remove all pages between tail_page and next_page.
1857	 * Make sure that the head_bit value is preserved for the
1858	 * next page.
1859	 */
1860	tail_page->next = (struct list_head *)((unsigned long)next_page |
1861						head_bit);
1862	next_page = rb_list_head(next_page);
1863	next_page->prev = tail_page;
1864
1865	/* make sure pages points to a valid page in the ring buffer */
1866	cpu_buffer->pages = next_page;
1867
1868	/* update head page */
1869	if (head_bit)
1870		cpu_buffer->head_page = list_entry(next_page,
1871						struct buffer_page, list);
1872
1873	/* pages are removed, resume tracing and then free the pages */
1874	atomic_dec(&cpu_buffer->record_disabled);
1875	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1876
1877	RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
1878
1879	/* last buffer page to remove */
1880	last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
1881				list);
1882	tmp_iter_page = first_page;
1883
1884	do {
1885		cond_resched();
1886
1887		to_remove_page = tmp_iter_page;
1888		rb_inc_page(&tmp_iter_page);
1889
1890		/* update the counters */
1891		page_entries = rb_page_entries(to_remove_page);
1892		if (page_entries) {
1893			/*
1894			 * If something was added to this page, it was full
1895			 * since it is not the tail page. So we deduct the
1896			 * bytes consumed in ring buffer from here.
1897			 * Increment overrun to account for the lost events.
1898			 */
1899			local_add(page_entries, &cpu_buffer->overrun);
1900			local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes);
1901			local_inc(&cpu_buffer->pages_lost);
1902		}
1903
1904		/*
1905		 * We have already removed references to this list item, just
1906		 * free up the buffer_page and its page
1907		 */
1908		free_buffer_page(to_remove_page);
1909		nr_removed--;
1910
1911	} while (to_remove_page != last_page);
1912
1913	RB_WARN_ON(cpu_buffer, nr_removed);
1914
1915	return nr_removed == 0;
1916}
1917
1918static bool
1919rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
1920{
1921	struct list_head *pages = &cpu_buffer->new_pages;
1922	unsigned long flags;
1923	bool success;
1924	int retries;
1925
1926	/* Can be called at early boot up, where interrupts must not be enabled */
1927	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1928	/*
1929	 * We are holding the reader lock, so the reader page won't be swapped
1930	 * in the ring buffer. Now we are racing with the writer trying to
1931	 * move head page and the tail page.
1932	 * We are going to adapt the reader page update process where:
1933	 * 1. We first splice the start and end of list of new pages between
1934	 *    the head page and its previous page.
1935	 * 2. We cmpxchg the prev_page->next to point from head page to the
1936	 *    start of new pages list.
1937	 * 3. Finally, we update the head->prev to the end of new list.
1938	 *
1939	 * We will try this process 10 times, to make sure that we don't keep
1940	 * spinning.
1941	 */
1942	retries = 10;
1943	success = false;
1944	while (retries--) {
1945		struct list_head *head_page, *prev_page;
1946		struct list_head *last_page, *first_page;
1947		struct list_head *head_page_with_bit;
1948		struct buffer_page *hpage = rb_set_head_page(cpu_buffer);
1949
1950		if (!hpage)
1951			break;
1952		head_page = &hpage->list;
1953		prev_page = head_page->prev;
1954
1955		first_page = pages->next;
1956		last_page  = pages->prev;
1957
1958		head_page_with_bit = (struct list_head *)
1959				     ((unsigned long)head_page | RB_PAGE_HEAD);
1960
1961		last_page->next = head_page_with_bit;
1962		first_page->prev = prev_page;
1963
1964		/* caution: head_page_with_bit gets updated on cmpxchg failure */
1965		if (try_cmpxchg(&prev_page->next,
1966				&head_page_with_bit, first_page)) {
1967			/*
1968			 * yay, we replaced the page pointer to our new list,
1969			 * now we just have to update the head page's prev
1970			 * pointer to point to the end of the list
1971			 */
1972			head_page->prev = last_page;
1973			success = true;
1974			break;
1975		}
1976	}
1977
1978	if (success)
1979		INIT_LIST_HEAD(pages);
1980	/*
1981	 * If we weren't successful in adding the new pages, warn and stop
1982	 * tracing.
1983	 */
1984	RB_WARN_ON(cpu_buffer, !success);
1985	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1986
1987	/* free pages if they weren't inserted */
1988	if (!success) {
1989		struct buffer_page *bpage, *tmp;
1990		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1991					 list) {
1992			list_del_init(&bpage->list);
1993			free_buffer_page(bpage);
1994		}
1995	}
1996	return success;
1997}
1998
1999static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
2000{
2001	bool success;
2002
2003	if (cpu_buffer->nr_pages_to_update > 0)
2004		success = rb_insert_pages(cpu_buffer);
2005	else
2006		success = rb_remove_pages(cpu_buffer,
2007					-cpu_buffer->nr_pages_to_update);
2008
2009	if (success)
2010		cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
2011}
2012
2013static void update_pages_handler(struct work_struct *work)
2014{
2015	struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
2016			struct ring_buffer_per_cpu, update_pages_work);
2017	rb_update_pages(cpu_buffer);
2018	complete(&cpu_buffer->update_done);
2019}
2020
2021/**
2022 * ring_buffer_resize - resize the ring buffer
2023 * @buffer: the buffer to resize.
2024 * @size: the new size.
2025 * @cpu_id: the cpu buffer to resize
2026 *
2027 * Minimum size is 2 * buffer->subbuf_size.
2028 *
2029 * Returns 0 on success and < 0 on failure.
2030 */
2031int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
2032			int cpu_id)
2033{
2034	struct ring_buffer_per_cpu *cpu_buffer;
2035	unsigned long nr_pages;
2036	int cpu, err;
2037
2038	/*
2039	 * Always succeed at resizing a non-existent buffer:
2040	 */
2041	if (!buffer)
2042		return 0;
2043
2044	/* Make sure the requested buffer exists */
2045	if (cpu_id != RING_BUFFER_ALL_CPUS &&
2046	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
2047		return 0;
2048
2049	nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
2050
2051	/* we need a minimum of two pages */
2052	if (nr_pages < 2)
2053		nr_pages = 2;
2054
2055	/* prevent another thread from changing buffer sizes */
2056	mutex_lock(&buffer->mutex);
2057	atomic_inc(&buffer->resizing);
2058
2059	if (cpu_id == RING_BUFFER_ALL_CPUS) {
2060		/*
2061		 * Don't succeed if resizing is disabled, as a reader might be
2062		 * manipulating the ring buffer and expecting a sane state for
2063		 * as long as resizing is disabled.
2064		 */
2065		for_each_buffer_cpu(buffer, cpu) {
2066			cpu_buffer = buffer->buffers[cpu];
2067			if (atomic_read(&cpu_buffer->resize_disabled)) {
2068				err = -EBUSY;
2069				goto out_err_unlock;
2070			}
2071		}
2072
2073		/* calculate the pages to update */
2074		for_each_buffer_cpu(buffer, cpu) {
2075			cpu_buffer = buffer->buffers[cpu];
2076
2077			cpu_buffer->nr_pages_to_update = nr_pages -
2078							cpu_buffer->nr_pages;
2079			/*
2080			 * nothing more to do when removing pages, or when there is no update
2081			 */
2082			if (cpu_buffer->nr_pages_to_update <= 0)
2083				continue;
2084			/*
2085			 * to add pages, make sure all new pages can be
2086			 * allocated without receiving ENOMEM
2087			 */
2088			INIT_LIST_HEAD(&cpu_buffer->new_pages);
2089			if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
2090						&cpu_buffer->new_pages)) {
2091				/* not enough memory for new pages */
2092				err = -ENOMEM;
2093				goto out_err;
2094			}
2095
2096			cond_resched();
2097		}
2098
2099		cpus_read_lock();
2100		/*
2101		 * Fire off all the required work handlers.
2102		 * We can't schedule on offline CPUs, but it's not necessary
2103		 * since we can change their buffer sizes without any race.
2104		 */
2105		for_each_buffer_cpu(buffer, cpu) {
2106			cpu_buffer = buffer->buffers[cpu];
2107			if (!cpu_buffer->nr_pages_to_update)
2108				continue;
2109
2110			/* Can't run something on an offline CPU. */
2111			if (!cpu_online(cpu)) {
2112				rb_update_pages(cpu_buffer);
2113				cpu_buffer->nr_pages_to_update = 0;
2114			} else {
2115				/* Run directly if possible. */
2116				migrate_disable();
2117				if (cpu != smp_processor_id()) {
2118					migrate_enable();
2119					schedule_work_on(cpu,
2120							 &cpu_buffer->update_pages_work);
2121				} else {
2122					update_pages_handler(&cpu_buffer->update_pages_work);
2123					migrate_enable();
2124				}
2125			}
2126		}
2127
2128		/* wait for all the updates to complete */
2129		for_each_buffer_cpu(buffer, cpu) {
2130			cpu_buffer = buffer->buffers[cpu];
2131			if (!cpu_buffer->nr_pages_to_update)
2132				continue;
2133
2134			if (cpu_online(cpu))
2135				wait_for_completion(&cpu_buffer->update_done);
2136			cpu_buffer->nr_pages_to_update = 0;
2137		}
2138
2139		cpus_read_unlock();
2140	} else {
2141		cpu_buffer = buffer->buffers[cpu_id];
2142
2143		if (nr_pages == cpu_buffer->nr_pages)
2144			goto out;
2145
2146		/*
2147		 * Don't succeed if resizing is disabled, as a reader might be
2148		 * manipulating the ring buffer and expecting a sane state for
2149		 * as long as resizing is disabled.
2150		 */
2151		if (atomic_read(&cpu_buffer->resize_disabled)) {
2152			err = -EBUSY;
2153			goto out_err_unlock;
2154		}
2155
2156		cpu_buffer->nr_pages_to_update = nr_pages -
2157						cpu_buffer->nr_pages;
2158
2159		INIT_LIST_HEAD(&cpu_buffer->new_pages);
2160		if (cpu_buffer->nr_pages_to_update > 0 &&
2161			__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
2162					    &cpu_buffer->new_pages)) {
2163			err = -ENOMEM;
2164			goto out_err;
2165		}
2166
2167		cpus_read_lock();
2168
2169		/* Can't run something on an offline CPU. */
2170		if (!cpu_online(cpu_id))
2171			rb_update_pages(cpu_buffer);
2172		else {
2173			/* Run directly if possible. */
2174			migrate_disable();
2175			if (cpu_id == smp_processor_id()) {
2176				rb_update_pages(cpu_buffer);
2177				migrate_enable();
2178			} else {
2179				migrate_enable();
2180				schedule_work_on(cpu_id,
2181						 &cpu_buffer->update_pages_work);
2182				wait_for_completion(&cpu_buffer->update_done);
2183			}
2184		}
2185
2186		cpu_buffer->nr_pages_to_update = 0;
2187		cpus_read_unlock();
2188	}
2189
2190 out:
2191	/*
2192	 * The ring buffer resize can happen with the ring buffer
2193	 * enabled, so that the update disturbs the tracing as little
2194	 * as possible. But if the buffer is disabled, we do not need
2195	 * to worry about that, and we can take the time to verify
2196	 * that the buffer is not corrupt.
2197	 */
2198	if (atomic_read(&buffer->record_disabled)) {
2199		atomic_inc(&buffer->record_disabled);
2200		/*
2201		 * Even though the buffer was disabled, we must make sure
2202		 * that it is truly disabled before calling rb_check_pages.
2203		 * There could have been a race between checking
2204		 * record_disabled and incrementing it.
2205		 */
2206		synchronize_rcu();
2207		for_each_buffer_cpu(buffer, cpu) {
2208			unsigned long flags;
2209
2210			cpu_buffer = buffer->buffers[cpu];
2211			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2212			rb_check_pages(cpu_buffer);
2213			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2214		}
2215		atomic_dec(&buffer->record_disabled);
2216	}
2217
2218	atomic_dec(&buffer->resizing);
2219	mutex_unlock(&buffer->mutex);
2220	return 0;
2221
2222 out_err:
2223	for_each_buffer_cpu(buffer, cpu) {
2224		struct buffer_page *bpage, *tmp;
2225
2226		cpu_buffer = buffer->buffers[cpu];
2227		cpu_buffer->nr_pages_to_update = 0;
2228
2229		if (list_empty(&cpu_buffer->new_pages))
2230			continue;
2231
2232		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
2233					list) {
2234			list_del_init(&bpage->list);
2235			free_buffer_page(bpage);
2236		}
2237	}
2238 out_err_unlock:
2239	atomic_dec(&buffer->resizing);
2240	mutex_unlock(&buffer->mutex);
2241	return err;
2242}
2243EXPORT_SYMBOL_GPL(ring_buffer_resize);
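
/*
 * Illustrative sketch (hypothetical caller code): growing every per-CPU
 * buffer to 2MB, then shrinking just CPU 1, might look like this. Sizes
 * are in bytes per CPU and get rounded up to sub-buffer pages with a
 * floor of two pages:
 *
 *	err = ring_buffer_resize(buffer, 2 * 1024 * 1024, RING_BUFFER_ALL_CPUS);
 *	if (err < 0)
 *		return err;
 *	err = ring_buffer_resize(buffer, 64 * 1024, 1);
 */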
2244
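/**
 * ring_buffer_change_overwrite - change the overwrite mode of the buffer
 * @buffer: the buffer to change
 * @val: non-zero to set RB_FL_OVERWRITE, zero to clear it
 *
 * When the flag is set, the buffer overwrites its oldest data on wrap;
 * when it is clear, new writes are dropped once the buffer fills up.
 */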
2245void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val)
2246{
2247	mutex_lock(&buffer->mutex);
2248	if (val)
2249		buffer->flags |= RB_FL_OVERWRITE;
2250	else
2251		buffer->flags &= ~RB_FL_OVERWRITE;
2252	mutex_unlock(&buffer->mutex);
2253}
2254EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
2255
2256static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
2257{
2258	return bpage->page->data + index;
2259}
2260
2261static __always_inline struct ring_buffer_event *
2262rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
2263{
2264	return __rb_page_index(cpu_buffer->reader_page,
2265			       cpu_buffer->reader_page->read);
2266}
2267
2268static struct ring_buffer_event *
2269rb_iter_head_event(struct ring_buffer_iter *iter)
2270{
2271	struct ring_buffer_event *event;
2272	struct buffer_page *iter_head_page = iter->head_page;
2273	unsigned long commit;
2274	unsigned length;
2275
2276	if (iter->head != iter->next_event)
2277		return iter->event;
2278
2279	/*
2280	 * When the writer goes across pages, it issues a cmpxchg which
2281	 * is a mb(), which will synchronize with the rmb here.
2282	 * (see rb_tail_page_update() and __rb_reserve_next())
2283	 */
2284	commit = rb_page_commit(iter_head_page);
2285	smp_rmb();
2286
2287	/* An event needs to be at least 8 bytes in size */
2288	if (iter->head > commit - 8)
2289		goto reset;
2290
2291	event = __rb_page_index(iter_head_page, iter->head);
2292	length = rb_event_length(event);
2293
2294	/*
2295	 * READ_ONCE() doesn't work on functions and we don't want the
2296	 * compiler doing any crazy optimizations with length.
2297	 */
2298	barrier();
2299
2300	if ((iter->head + length) > commit || length > iter->event_size)
2301		/* Writer corrupted the read? */
2302		goto reset;
2303
2304	memcpy(iter->event, event, length);
2305	/*
2306	 * If the page stamp is still the same after this rmb() then the
2307	 * event was safely copied without the writer entering the page.
2308	 */
2309	smp_rmb();
2310
2311	/* Make sure the page didn't change since we read this */
2312	if (iter->page_stamp != iter_head_page->page->time_stamp ||
2313	    commit > rb_page_commit(iter_head_page))
2314		goto reset;
2315
2316	iter->next_event = iter->head + length;
2317	return iter->event;
2318 reset:
2319	/* Reset to the beginning */
2320	iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
2321	iter->head = 0;
2322	iter->next_event = 0;
2323	iter->missed_events = 1;
2324	return NULL;
2325}
2326
2327/* Size is determined by what has been committed */
2328static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
2329{
2330	return rb_page_commit(bpage);
2331}
2332
2333static __always_inline unsigned
2334rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
2335{
2336	return rb_page_commit(cpu_buffer->commit_page);
2337}
2338
2339static __always_inline unsigned
2340rb_event_index(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event)
2341{
2342	unsigned long addr = (unsigned long)event;
2343
2344	addr &= (PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1;
2345
2346	return addr - BUF_PAGE_HDR_SIZE;
2347}
2348
2349static void rb_inc_iter(struct ring_buffer_iter *iter)
2350{
2351	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2352
2353	/*
2354	 * The iterator could be on the reader page (it starts there).
2355	 * But the head could have moved, since the reader was
2356	 * found. Check for this case and assign the iterator
2357	 * to the head page instead of next.
2358	 */
2359	if (iter->head_page == cpu_buffer->reader_page)
2360		iter->head_page = rb_set_head_page(cpu_buffer);
2361	else
2362		rb_inc_page(&iter->head_page);
2363
2364	iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
2365	iter->head = 0;
2366	iter->next_event = 0;
2367}
2368
2369/*
2370 * rb_handle_head_page - writer hit the head page
2371 *
2372 * Returns: +1 to retry page
2373 *           0 to continue
2374 *          -1 on error
2375 */
2376static int
2377rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
2378		    struct buffer_page *tail_page,
2379		    struct buffer_page *next_page)
2380{
2381	struct buffer_page *new_head;
2382	int entries;
2383	int type;
2384	int ret;
2385
2386	entries = rb_page_entries(next_page);
2387
2388	/*
2389	 * The hard part is here. We need to move the head
2390	 * forward, and protect against both readers on
2391	 * other CPUs and writers coming in via interrupts.
2392	 */
2393	type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
2394				       RB_PAGE_HEAD);
2395
2396	/*
2397	 * type can be one of four:
2398	 *  NORMAL - an interrupt already moved it for us
2399	 *  HEAD   - we are the first to get here.
2400	 *  UPDATE - we are the interrupt interrupting
2401	 *           a current move.
2402	 *  MOVED  - a reader on another CPU moved the next
2403	 *           pointer to its reader page. Give up
2404	 *           and try again.
2405	 */
2406
2407	switch (type) {
2408	case RB_PAGE_HEAD:
2409		/*
2410		 * We changed the head to UPDATE, thus
2411		 * it is our responsibility to update
2412		 * the counters.
2413		 */
2414		local_add(entries, &cpu_buffer->overrun);
2415		local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes);
2416		local_inc(&cpu_buffer->pages_lost);
2417
2418		/*
2419		 * The entries will be zeroed out when we move the
2420		 * tail page.
2421		 */
2422
2423		/* still more to do */
2424		break;
2425
2426	case RB_PAGE_UPDATE:
2427		/*
2428		 * This is an interrupt that interrupted the
2429		 * previous update. Still more to do.
2430		 */
2431		break;
2432	case RB_PAGE_NORMAL:
2433		/*
2434		 * An interrupt came in before the update
2435		 * and processed this for us.
2436		 * Nothing left to do.
2437		 */
2438		return 1;
2439	case RB_PAGE_MOVED:
2440		/*
2441		 * The reader is on another CPU and just did
2442		 * a swap with our next_page.
2443		 * Try again.
2444		 */
2445		return 1;
2446	default:
2447		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
2448		return -1;
2449	}
2450
2451	/*
2452	 * Now that we are here, the old head pointer is
2453	 * set to UPDATE. This will keep the reader from
2454	 * swapping the head page with the reader page.
2455	 * The reader (on another CPU) will spin till
2456	 * we are finished.
2457	 *
2458	 * We just need to protect against interrupts
2459	 * doing the job. We will set the next pointer
2460	 * to HEAD. After that, we set the old pointer
2461	 * to NORMAL, but only if it was HEAD before;
2462	 * otherwise we are an interrupt, and only
2463	 * want the outermost commit to reset it.
2464	 */
2465	new_head = next_page;
2466	rb_inc_page(&new_head);
2467
2468	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
2469				    RB_PAGE_NORMAL);
2470
2471	/*
2472	 * Valid returns are:
2473	 *  HEAD   - an interrupt came in and already set it.
2474	 *  NORMAL - One of two things:
2475	 *            1) We really set it.
2476	 *            2) A bunch of interrupts came in and moved
2477	 *               the page forward again.
2478	 */
2479	switch (ret) {
2480	case RB_PAGE_HEAD:
2481	case RB_PAGE_NORMAL:
2482		/* OK */
2483		break;
2484	default:
2485		RB_WARN_ON(cpu_buffer, 1);
2486		return -1;
2487	}
2488
2489	/*
2490	 * It is possible that an interrupt came in,
2491	 * set the head up, then more interrupts came in
2492	 * and moved it again. When we get back here,
2493	 * the page would have been set to NORMAL but we
2494	 * just set it back to HEAD.
2495	 *
2496	 * How do you detect this? Well, if that happened
2497	 * the tail page would have moved.
2498	 */
2499	if (ret == RB_PAGE_NORMAL) {
2500		struct buffer_page *buffer_tail_page;
2501
2502		buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
2503		/*
2504		 * If the tail had moved past next, then we need
2505		 * to reset the pointer.
2506		 */
2507		if (buffer_tail_page != tail_page &&
2508		    buffer_tail_page != next_page)
2509			rb_head_page_set_normal(cpu_buffer, new_head,
2510						next_page,
2511						RB_PAGE_HEAD);
2512	}
2513
2514	/*
2515	 * If this was the outermost commit (the one that
2516	 * changed the original pointer from HEAD to UPDATE),
2517	 * then it is up to us to reset it to NORMAL.
2518	 */
2519	if (type == RB_PAGE_HEAD) {
2520		ret = rb_head_page_set_normal(cpu_buffer, next_page,
2521					      tail_page,
2522					      RB_PAGE_UPDATE);
2523		if (RB_WARN_ON(cpu_buffer,
2524			       ret != RB_PAGE_UPDATE))
2525			return -1;
2526	}
2527
2528	return 0;
2529}
2530
2531static inline void
2532rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2533	      unsigned long tail, struct rb_event_info *info)
2534{
2535	unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size);
2536	struct buffer_page *tail_page = info->tail_page;
2537	struct ring_buffer_event *event;
2538	unsigned long length = info->length;
2539
2540	/*
2541	 * Only the event that crossed the page boundary
2542	 * must fill the old tail_page with padding.
2543	 */
2544	if (tail >= bsize) {
2545		/*
2546		 * If the page was filled, then we still need
2547		 * to update the real_end. Reset it to zero
2548		 * and the reader will ignore it.
2549		 */
2550		if (tail == bsize)
2551			tail_page->real_end = 0;
2552
2553		local_sub(length, &tail_page->write);
2554		return;
2555	}
2556
2557	event = __rb_page_index(tail_page, tail);
2558
2559	/*
2560	 * Save the original length in the meta data.
2561	 * This will be used by the reader to compute the
2562	 * lost event counter.
2563	 */
2564	tail_page->real_end = tail;
2565
2566	/*
2567	 * If this event is bigger than the minimum size, then
2568	 * we need to be careful that we don't subtract the
2569	 * write counter enough to allow another writer to slip
2570	 * in on this page.
2571	 * We put in a discarded commit instead, to make sure
2572	 * that this space is not used again, and this space will
2573	 * not be accounted into 'entries_bytes'.
2574	 *
2575	 * If we are less than the minimum size, we don't need to
2576	 * worry about it.
2577	 */
2578	if (tail > (bsize - RB_EVNT_MIN_SIZE)) {
2579		/* No room for any events */
2580
2581		/* Mark the rest of the page with padding */
2582		rb_event_set_padding(event);
2583
2584		/* Make sure the padding is visible before the write update */
2585		smp_wmb();
2586
2587		/* Set the write back to the previous setting */
2588		local_sub(length, &tail_page->write);
2589		return;
2590	}
2591
2592	/* Put in a discarded event */
2593	event->array[0] = (bsize - tail) - RB_EVNT_HDR_SIZE;
2594	event->type_len = RINGBUF_TYPE_PADDING;
2595	/* time delta must be non zero */
2596	event->time_delta = 1;
2597
2598	/* account for padding bytes */
2599	local_add(bsize - tail, &cpu_buffer->entries_bytes);
2600
2601	/* Make sure the padding is visible before the tail_page->write update */
2602	smp_wmb();
2603
2604	/* Set write to end of buffer */
2605	length = (tail + length) - bsize;
2606	local_sub(length, &tail_page->write);
2607}
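
/*
 * Worked example for rb_reset_tail() (assuming a 64-bit system with 4K
 * pages, so bsize is typically 4080 after the sub-buffer header, and
 * RB_EVNT_HDR_SIZE is 4): with tail = 4000 and length = 100, write went
 * to 4100 > bsize. The event at index 4000 becomes a PADDING event with
 * array[0] = (4080 - 4000) - 4 = 76 data bytes, 80 padding bytes are
 * accounted to entries_bytes, and write is pulled back by
 * (4000 + 100) - 4080 = 20 so that it lands exactly on the end of the
 * sub-buffer.
 */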
2608
2609static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
2610
2611/*
2612 * This is the slow path, force gcc not to inline it.
2613 */
2614static noinline struct ring_buffer_event *
2615rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2616	     unsigned long tail, struct rb_event_info *info)
2617{
2618	struct buffer_page *tail_page = info->tail_page;
2619	struct buffer_page *commit_page = cpu_buffer->commit_page;
2620	struct trace_buffer *buffer = cpu_buffer->buffer;
2621	struct buffer_page *next_page;
2622	int ret;
2623
2624	next_page = tail_page;
2625
2626	rb_inc_page(&next_page);
2627
2628	/*
2629	 * If for some reason, we had an interrupt storm that made
2630	 * it all the way around the buffer, bail, and warn
2631	 * about it.
2632	 */
2633	if (unlikely(next_page == commit_page)) {
2634		local_inc(&cpu_buffer->commit_overrun);
2635		goto out_reset;
2636	}
2637
2638	/*
2639	 * This is where the fun begins!
2640	 *
2641	 * We are fighting against races between a reader that
2642	 * could be on another CPU trying to swap its reader
2643	 * page with the buffer head.
2644	 *
2645	 * We are also fighting against interrupts coming in and
2646	 * moving the head or tail on us as well.
2647	 *
2648	 * If the next page is the head page then we have filled
2649	 * the buffer, unless the commit page is still on the
2650	 * reader page.
2651	 */
2652	if (rb_is_head_page(next_page, &tail_page->list)) {
2653
2654		/*
2655		 * If the commit is not on the reader page, then
2656		 * move the head page.
2657		 */
2658		if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2659			/*
2660			 * If we are not in overwrite mode,
2661			 * this is easy, just stop here.
2662			 */
2663			if (!(buffer->flags & RB_FL_OVERWRITE)) {
2664				local_inc(&cpu_buffer->dropped_events);
2665				goto out_reset;
2666			}
2667
2668			ret = rb_handle_head_page(cpu_buffer,
2669						  tail_page,
2670						  next_page);
2671			if (ret < 0)
2672				goto out_reset;
2673			if (ret)
2674				goto out_again;
2675		} else {
2676			/*
2677			 * We need to be careful here too. The
2678			 * commit page could still be on the reader
2679			 * page. We could have a small buffer, and
2680			 * have filled up the buffer with events
2681			 * from interrupts and such, and wrapped.
2682			 *
2683			 * Note, if the tail page is also on the
2684			 * reader_page, we let it move out.
2685			 */
2686			if (unlikely((cpu_buffer->commit_page !=
2687				      cpu_buffer->tail_page) &&
2688				     (cpu_buffer->commit_page ==
2689				      cpu_buffer->reader_page))) {
2690				local_inc(&cpu_buffer->commit_overrun);
2691				goto out_reset;
2692			}
2693		}
2694	}
2695
2696	rb_tail_page_update(cpu_buffer, tail_page, next_page);
2697
2698 out_again:
2699
2700	rb_reset_tail(cpu_buffer, tail, info);
2701
2702	/* Commit what we have for now. */
2703	rb_end_commit(cpu_buffer);
2704	/* rb_end_commit() decs committing */
2705	local_inc(&cpu_buffer->committing);
2706
2707	/* fail and let the caller try again */
2708	return ERR_PTR(-EAGAIN);
2709
2710 out_reset:
2711	/* reset write */
2712	rb_reset_tail(cpu_buffer, tail, info);
2713
2714	return NULL;
2715}
2716
2717/* Slow path */
2718static struct ring_buffer_event *
2719rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2720		  struct ring_buffer_event *event, u64 delta, bool abs)
2721{
2722	if (abs)
2723		event->type_len = RINGBUF_TYPE_TIME_STAMP;
2724	else
2725		event->type_len = RINGBUF_TYPE_TIME_EXTEND;
2726
2727	/* An absolute timestamp, or not the first event on the page? */
2728	if (abs || rb_event_index(cpu_buffer, event)) {
2729		event->time_delta = delta & TS_MASK;
2730		event->array[0] = delta >> TS_SHIFT;
2731	} else {
2732		/* nope, just zero it */
2733		event->time_delta = 0;
2734		event->array[0] = 0;
2735	}
2736
2737	return skip_time_extend(event);
2738}
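
/*
 * Worked example of the split above (TS_SHIFT is 27 and TS_MASK its
 * low-bit mask, as defined earlier in this file): a delta of 0x12345678
 * is stored as time_delta = 0x12345678 & TS_MASK = 0x2345678 and
 * array[0] = 0x12345678 >> 27 = 0x2; the reader reassembles the full
 * value as (array[0] << TS_SHIFT) + time_delta.
 */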
2739
2740#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2741static inline bool sched_clock_stable(void)
2742{
2743	return true;
2744}
2745#endif
2746
2747static void
2748rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2749		   struct rb_event_info *info)
2750{
2751	u64 write_stamp;
2752
2753	WARN_ONCE(1, "Delta way too big! %llu ts=%llu before=%llu after=%llu write stamp=%llu\n%s",
2754		  (unsigned long long)info->delta,
2755		  (unsigned long long)info->ts,
2756		  (unsigned long long)info->before,
2757		  (unsigned long long)info->after,
2758		  (unsigned long long)({rb_time_read(&cpu_buffer->write_stamp, &write_stamp); write_stamp;}),
2759		  sched_clock_stable() ? "" :
2760		  "If you just came from a suspend/resume,\n"
2761		  "please switch to the trace global clock:\n"
2762		  "  echo global > /sys/kernel/tracing/trace_clock\n"
2763		  "or add trace_clock=global to the kernel command line\n");
2764}
2765
2766static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2767				      struct ring_buffer_event **event,
2768				      struct rb_event_info *info,
2769				      u64 *delta,
2770				      unsigned int *length)
2771{
2772	bool abs = info->add_timestamp &
2773		(RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE);
2774
2775	if (unlikely(info->delta > (1ULL << 59))) {
2776		/*
2777		 * Some timers can use more than 59 bits, and when a timestamp
2778		 * is added to the buffer, it will lose those bits.
2779		 */
2780		if (abs && (info->ts & TS_MSB)) {
2781			info->delta &= ABS_TS_MASK;
2782
2783		/* did the clock go backwards */
2784		} else if (info->before == info->after && info->before > info->ts) {
2785			/* not interrupted */
2786			static int once;
2787
2788			/*
2789			 * This is possible with a recalibration of the TSC.
2790			 * Do not produce a call stack, but just report it.
2791			 */
2792			if (!once) {
2793				once++;
2794				pr_warn("Ring buffer clock went backwards: %llu -> %llu\n",
2795					info->before, info->ts);
2796			}
2797		} else
2798			rb_check_timestamp(cpu_buffer, info);
2799		if (!abs)
2800			info->delta = 0;
2801	}
2802	*event = rb_add_time_stamp(cpu_buffer, *event, info->delta, abs);
2803	*length -= RB_LEN_TIME_EXTEND;
2804	*delta = 0;
2805}
2806
2807/**
2808 * rb_update_event - update event type and data
2809 * @cpu_buffer: The per cpu buffer of the @event
2810 * @event: the event to update
2811 * @info: The info to update the @event with (contains length and delta)
2812 *
2813 * Update the type and data fields of the @event. The length
2814 * is the actual size that is written to the ring buffer,
2815 * and with this, we can determine what to place into the
2816 * data field.
2817 */
2818static void
2819rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
2820		struct ring_buffer_event *event,
2821		struct rb_event_info *info)
2822{
2823	unsigned length = info->length;
2824	u64 delta = info->delta;
2825	unsigned int nest = local_read(&cpu_buffer->committing) - 1;
2826
2827	if (!WARN_ON_ONCE(nest >= MAX_NEST))
2828		cpu_buffer->event_stamp[nest] = info->ts;
2829
2830	/*
2831	 * If we need to add a timestamp, then we
2832	 * add it to the start of the reserved space.
2833	 */
2834	if (unlikely(info->add_timestamp))
2835		rb_add_timestamp(cpu_buffer, &event, info, &delta, &length);
2836
2837	event->time_delta = delta;
2838	length -= RB_EVNT_HDR_SIZE;
2839	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
2840		event->type_len = 0;
2841		event->array[0] = length;
2842	} else
2843		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
2844}
2845
2846static unsigned rb_calculate_event_length(unsigned length)
2847{
2848	struct ring_buffer_event event; /* Used only for sizeof array */
2849
2850	/* zero length can cause confusion */
2851	if (!length)
2852		length++;
2853
2854	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
2855		length += sizeof(event.array[0]);
2856
2857	length += RB_EVNT_HDR_SIZE;
2858	length = ALIGN(length, RB_ARCH_ALIGNMENT);
2859
2860	/*
2861	 * In case the time delta is larger than the 27 bits for it
2862	 * in the header, we need to add a timestamp. If another
2863	 * event comes in when trying to discard this one to increase
2864	 * the length, then the timestamp will be added in the allocated
2865	 * space of this event. If length is bigger than the size needed
2866	 * for the TIME_EXTEND, then padding has to be used. The events
2867	 * length must be either RB_LEN_TIME_EXTEND, or greater than or equal
2868	 * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding.
2869	 * As length is a multiple of 4, we only need to worry if it
2870	 * is 12 (RB_LEN_TIME_EXTEND + 4).
2871	 */
2872	if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
2873		length += RB_ALIGNMENT;
2874
2875	return length;
2876}
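
/*
 * Worked example (assuming RB_ALIGNMENT = 4, RB_EVNT_HDR_SIZE = 4 and no
 * forced 8-byte alignment): a request for 7 bytes of data becomes
 * 7 + 4 = 11, aligned up to 12 - which is exactly RB_LEN_TIME_EXTEND + 4,
 * so it is bumped to 16 to leave room for valid padding.
 */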
2877
2878static inline bool
2879rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2880		  struct ring_buffer_event *event)
2881{
2882	unsigned long new_index, old_index;
2883	struct buffer_page *bpage;
2884	unsigned long addr;
2885
2886	new_index = rb_event_index(cpu_buffer, event);
2887	old_index = new_index + rb_event_ts_length(event);
2888	addr = (unsigned long)event;
2889	addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1);
2890
2891	bpage = READ_ONCE(cpu_buffer->tail_page);
2892
2893	/*
2894	 * Make sure the tail_page is still the same and
2895	 * the next write location is the end of this event
2896	 */
2897	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2898		unsigned long write_mask =
2899			local_read(&bpage->write) & ~RB_WRITE_MASK;
2900		unsigned long event_length = rb_event_length(event);
2901
2902		/*
2903		 * The before_stamp is made different from the write_stamp
2904		 * to make sure that the next event adds an absolute
2905		 * value and does not rely on the saved write stamp, which
2906		 * is now going to be bogus.
2907		 *
2908		 * By setting the before_stamp to zero, the next event
2909		 * is not going to use the write_stamp and will instead
2910		 * create an absolute timestamp. This means there's no
2911		 * reason to update the write_stamp!
2912		 */
2913		rb_time_set(&cpu_buffer->before_stamp, 0);
2914
2915		/*
2916		 * If an event were to come in now, it would see that the
2917		 * write_stamp and the before_stamp are different, and assume
2918		 * that this event just added itself before updating
2919		 * the write stamp. The interrupting event will fix the
2920		 * write stamp for us, and use an absolute timestamp.
2921		 */
2922
2923		/*
2924		 * This is on the tail page. It is possible that
2925		 * a write could come in and move the tail page
2926		 * and write to the next page. That is fine
2927		 * because we just shorten what is on this page.
2928		 */
2929		old_index += write_mask;
2930		new_index += write_mask;
2931
2932		/* caution: old_index gets updated on cmpxchg failure */
2933		if (local_try_cmpxchg(&bpage->write, &old_index, new_index)) {
2934			/* update counters */
2935			local_sub(event_length, &cpu_buffer->entries_bytes);
2936			return true;
2937		}
2938	}
2939
2940	/* could not discard */
2941	return false;
2942}
2943
2944static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2945{
2946	local_inc(&cpu_buffer->committing);
2947	local_inc(&cpu_buffer->commits);
2948}
2949
2950static __always_inline void
2951rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
2952{
2953	unsigned long max_count;
2954
2955	/*
2956	 * We only race with interrupts and NMIs on this CPU.
2957	 * If we own the commit event, then we can commit
2958	 * all others that interrupted us, since the interruptions
2959	 * are in stack format (they finish before they come
2960	 * back to us). This allows us to do a simple loop to
2961	 * assign the commit to the tail.
2962	 */
2963 again:
2964	max_count = cpu_buffer->nr_pages * 100;
2965
2966	while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
2967		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
2968			return;
2969		if (RB_WARN_ON(cpu_buffer,
2970			       rb_is_reader_page(cpu_buffer->tail_page)))
2971			return;
2972		/*
2973		 * No need for a memory barrier here, as the update
2974		 * of the tail_page did it for this page.
2975		 */
2976		local_set(&cpu_buffer->commit_page->page->commit,
2977			  rb_page_write(cpu_buffer->commit_page));
2978		rb_inc_page(&cpu_buffer->commit_page);
2979		/* add barrier to keep gcc from optimizing too much */
2980		barrier();
2981	}
2982	while (rb_commit_index(cpu_buffer) !=
2983	       rb_page_write(cpu_buffer->commit_page)) {
2984
2985		/* Make sure the readers see the content of what is committed. */
2986		smp_wmb();
2987		local_set(&cpu_buffer->commit_page->page->commit,
2988			  rb_page_write(cpu_buffer->commit_page));
2989		RB_WARN_ON(cpu_buffer,
2990			   local_read(&cpu_buffer->commit_page->page->commit) &
2991			   ~RB_WRITE_MASK);
2992		barrier();
2993	}
2994
2995	/* again, keep gcc from optimizing */
2996	barrier();
2997
2998	/*
2999	 * If an interrupt came in just after the first while loop
3000	 * and pushed the tail page forward, we will be left with
3001	 * a dangling commit that will never go forward.
3002	 */
3003	if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
3004		goto again;
3005}
3006
3007static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
3008{
3009	unsigned long commits;
3010
3011	if (RB_WARN_ON(cpu_buffer,
3012		       !local_read(&cpu_buffer->committing)))
3013		return;
3014
3015 again:
3016	commits = local_read(&cpu_buffer->commits);
3017	/* synchronize with interrupts */
3018	barrier();
3019	if (local_read(&cpu_buffer->committing) == 1)
3020		rb_set_commit_to_write(cpu_buffer);
3021
3022	local_dec(&cpu_buffer->committing);
3023
3024	/* synchronize with interrupts */
3025	barrier();
3026
3027	/*
3028	 * Need to account for interrupts coming in between the
3029	 * updating of the commit page and the clearing of the
3030	 * committing counter.
3031	 */
3032	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
3033	    !local_read(&cpu_buffer->committing)) {
3034		local_inc(&cpu_buffer->committing);
3035		goto again;
3036	}
3037}
3038
3039static inline void rb_event_discard(struct ring_buffer_event *event)
3040{
3041	if (extended_time(event))
3042		event = skip_time_extend(event);
3043
3044	/* array[0] holds the actual length for the discarded event */
3045	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
3046	event->type_len = RINGBUF_TYPE_PADDING;
3047	/* time delta must be non zero */
3048	if (!event->time_delta)
3049		event->time_delta = 1;
3050}
3051
3052static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer)
3053{
3054	local_inc(&cpu_buffer->entries);
3055	rb_end_commit(cpu_buffer);
3056}
3057
3058static __always_inline void
3059rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
3060{
3061	if (buffer->irq_work.waiters_pending) {
3062		buffer->irq_work.waiters_pending = false;
3063		/* irq_work_queue() supplies its own memory barriers */
3064		irq_work_queue(&buffer->irq_work.work);
3065	}
3066
3067	if (cpu_buffer->irq_work.waiters_pending) {
3068		cpu_buffer->irq_work.waiters_pending = false;
3069		/* irq_work_queue() supplies its own memory barriers */
3070		irq_work_queue(&cpu_buffer->irq_work.work);
3071	}
3072
3073	if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched))
3074		return;
3075
3076	if (cpu_buffer->reader_page == cpu_buffer->commit_page)
3077		return;
3078
3079	if (!cpu_buffer->irq_work.full_waiters_pending)
3080		return;
3081
3082	cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
3083
3084	if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full))
3085		return;
3086
3087	cpu_buffer->irq_work.wakeup_full = true;
3088	cpu_buffer->irq_work.full_waiters_pending = false;
3089	/* irq_work_queue() supplies its own memory barriers */
3090	irq_work_queue(&cpu_buffer->irq_work.work);
3091}
3092
3093#ifdef CONFIG_RING_BUFFER_RECORD_RECURSION
3094# define do_ring_buffer_record_recursion()	\
3095	do_ftrace_record_recursion(_THIS_IP_, _RET_IP_)
3096#else
3097# define do_ring_buffer_record_recursion() do { } while (0)
3098#endif
3099
3100/*
3101 * The lock and unlock are done within a preempt disable section.
3102 * The current_context per_cpu variable can only be modified
3103 * by the current task between lock and unlock. But it can
3104 * be modified more than once via an interrupt. To pass this
3105 * information from the lock to the unlock without having to
3106 * access the 'in_interrupt()' functions again (which do show
3107 * a bit of overhead in something as critical as function tracing),
3108 * we use a bitmask trick.
3109 *
3110 *  bit 1 =  NMI context
3111 *  bit 2 =  IRQ context
3112 *  bit 3 =  SoftIRQ context
3113 *  bit 4 =  normal context.
3114 *
3115 * This works because this is the order of contexts that can
3116 * preempt other contexts. A SoftIRQ never preempts an IRQ
3117 * context.
3118 *
3119 * When the context is determined, the corresponding bit is
3120 * checked and set (if it was set, then a recursion of that context
3121 * happened).
3122 *
3123 * On unlock, we need to clear this bit. To do so, just subtract
3124 * 1 from the current_context and AND it to itself.
3125 *
3126 * (binary)
3127 *  101 - 1 = 100
3128 *  101 & 100 = 100 (clearing bit zero)
3129 *
3130 *  1010 - 1 = 1001
3131 *  1010 & 1001 = 1000 (clearing bit 1)
3132 *
3133 * The least significant bit can be cleared this way, and it
3134 * just so happens that it is the same bit corresponding to
3135 * the current context.
3136 *
3137 * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit
3138 * is set when a recursion is detected at the current context, and if
3139 * the TRANSITION bit is already set, it will fail the recursion.
3140 * This is needed because there's a lag between the changing of
3141 * interrupt context and updating the preempt count. In this case,
3142 * a false positive will be found. To handle this, one extra recursion
3143 * is allowed, and this is done by the TRANSITION bit. If the TRANSITION
3144 * bit is already set, then it is considered a recursion and the function
3145 * ends. Otherwise, the TRANSITION bit is set, and that bit is returned.
3146 *
3147 * On the trace_recursive_unlock(), the TRANSITION bit will be the first
3148 * to be cleared. Even if it wasn't the context that set it. That is,
3149 * if an interrupt comes in while NORMAL bit is set and the ring buffer
3150 * is called before preempt_count() is updated, since the check will
3151 * be on the NORMAL bit, the TRANSITION bit will then be set. If an
3152 * NMI then comes in, it will set the NMI bit, but when the NMI code
3153 * does the trace_recursive_unlock() it will clear the TRANSITION bit
3154 * and leave the NMI bit set. But this is fine, because the interrupt
3155 * code that set the TRANSITION bit will then clear the NMI bit when it
3156 * calls trace_recursive_unlock(). If another NMI comes in, it will
3157 * set the TRANSITION bit and continue.
3158 *
3159 * Note: The TRANSITION bit only handles a single transition between contexts.
3160 */
3161
3162static __always_inline bool
3163trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
3164{
3165	unsigned int val = cpu_buffer->current_context;
3166	int bit = interrupt_context_level();
3167
3168	bit = RB_CTX_NORMAL - bit;
3169
3170	if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) {
3171		/*
3172		 * It is possible that this was called by transitioning
3173		 * between interrupt context, and preempt_count() has not
3174		 * been updated yet. In this case, use the TRANSITION bit.
3175		 */
3176		bit = RB_CTX_TRANSITION;
3177		if (val & (1 << (bit + cpu_buffer->nest))) {
3178			do_ring_buffer_record_recursion();
3179			return true;
3180		}
3181	}
3182
3183	val |= (1 << (bit + cpu_buffer->nest));
3184	cpu_buffer->current_context = val;
3185
3186	return false;
3187}
3188
3189static __always_inline void
3190trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
3191{
3192	cpu_buffer->current_context &=
3193		cpu_buffer->current_context - (1 << cpu_buffer->nest);
3194}
3195
3196/* The recursive locking above uses 5 bits */
3197#define NESTED_BITS 5
3198
3199/**
3200 * ring_buffer_nest_start - Allow tracing while nested
3201 * @buffer: The ring buffer to modify
3202 *
3203 * The ring buffer has a safety mechanism to prevent recursion.
3204 * But there may be a case where a trace needs to be done while
3205 * tracing something else. In this case, calling this function
3206 * will allow this function to nest within a currently active
3207 * ring_buffer_lock_reserve().
3208 *
3209 * Call this function before calling another ring_buffer_lock_reserve() and
3210 * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
3211 */
3212void ring_buffer_nest_start(struct trace_buffer *buffer)
3213{
3214	struct ring_buffer_per_cpu *cpu_buffer;
3215	int cpu;
3216
3217	/* Enabled by ring_buffer_nest_end() */
3218	preempt_disable_notrace();
3219	cpu = raw_smp_processor_id();
3220	cpu_buffer = buffer->buffers[cpu];
3221	/* This is the shift value for the above recursive locking */
3222	cpu_buffer->nest += NESTED_BITS;
3223}
3224
3225/**
3226 * ring_buffer_nest_end - End the section started by ring_buffer_nest_start()
3227 * @buffer: The ring buffer to modify
3228 *
3229 * Must be called after ring_buffer_nest_start() and after the
3230 * ring_buffer_unlock_commit().
3231 */
3232void ring_buffer_nest_end(struct trace_buffer *buffer)
3233{
3234	struct ring_buffer_per_cpu *cpu_buffer;
3235	int cpu;
3236
3237	/* disabled by ring_buffer_nest_start() */
3238	cpu = raw_smp_processor_id();
3239	cpu_buffer = buffer->buffers[cpu];
3240	/* This is the shift value for the above recursive locking */
3241	cpu_buffer->nest -= NESTED_BITS;
3242	preempt_enable_notrace();
3243}
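
/*
 * Illustrative sketch of the call ordering the two functions above expect
 * when a trace must be written while another reserve is already active
 * (hypothetical caller code):
 *
 *	ring_buffer_nest_start(buffer);
 *	event = ring_buffer_lock_reserve(buffer, len);
 *	if (event) {
 *		... fill in ring_buffer_event_data(event) ...
 *		ring_buffer_unlock_commit(buffer);
 *	}
 *	ring_buffer_nest_end(buffer);
 */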
3244
3245/**
3246 * ring_buffer_unlock_commit - commit a reserved event
3247 * @buffer: The buffer to commit to
3248 *
3249 * This commits the data to the ring buffer, and releases any locks held.
3250 *
3251 * Must be paired with ring_buffer_lock_reserve.
3252 */
3253int ring_buffer_unlock_commit(struct trace_buffer *buffer)
3254{
3255	struct ring_buffer_per_cpu *cpu_buffer;
3256	int cpu = raw_smp_processor_id();
3257
3258	cpu_buffer = buffer->buffers[cpu];
3259
3260	rb_commit(cpu_buffer);
3261
3262	rb_wakeups(buffer, cpu_buffer);
3263
3264	trace_recursive_unlock(cpu_buffer);
3265
3266	preempt_enable_notrace();
3267
3268	return 0;
3269}
3270EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
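
/*
 * Illustrative sketch of the reserve/commit pairing this function closes
 * (struct my_payload is a hypothetical caller-defined type):
 *
 *	struct ring_buffer_event *event;
 *	struct my_payload *p;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*p));
 *	if (!event)
 *		return -EBUSY;
 *	p = ring_buffer_event_data(event);
 *	p->value = 42;
 *	ring_buffer_unlock_commit(buffer);
 */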
3271
3272/* Special value to validate all deltas on a page. */
3273#define CHECK_FULL_PAGE		1L
3274
3275#ifdef CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS
3276
3277static const char *show_irq_str(int bits)
3278{
3279	const char *type[] = {
3280		".",	// 0
3281		"s",	// 1
3282		"h",	// 2
3283		"Hs",	// 3
3284		"n",	// 4
3285		"Ns",	// 5
3286		"Nh",	// 6
3287		"NHs",	// 7
3288	};
3289
3290	return type[bits];
3291}
3292
3293/* Assume this is a trace event */
3294static const char *show_flags(struct ring_buffer_event *event)
3295{
3296	struct trace_entry *entry;
3297	int bits = 0;
3298
3299	if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry))
3300		return "X";
3301
3302	entry = ring_buffer_event_data(event);
3303
3304	if (entry->flags & TRACE_FLAG_SOFTIRQ)
3305		bits |= 1;
3306
3307	if (entry->flags & TRACE_FLAG_HARDIRQ)
3308		bits |= 2;
3309
3310	if (entry->flags & TRACE_FLAG_NMI)
3311		bits |= 4;
3312
3313	return show_irq_str(bits);
3314}
3315
3316static const char *show_irq(struct ring_buffer_event *event)
3317{
3318	struct trace_entry *entry;
3319
3320	if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry))
3321		return "";
3322
3323	entry = ring_buffer_event_data(event);
3324	if (entry->flags & TRACE_FLAG_IRQS_OFF)
3325		return "d";
3326	return "";
3327}
3328
3329static const char *show_interrupt_level(void)
3330{
3331	unsigned long pc = preempt_count();
3332	unsigned char level = 0;
3333
3334	if (pc & SOFTIRQ_OFFSET)
3335		level |= 1;
3336
3337	if (pc & HARDIRQ_MASK)
3338		level |= 2;
3339
3340	if (pc & NMI_MASK)
3341		level |= 4;
3342
3343	return show_irq_str(level);
3344}
3345
3346static void dump_buffer_page(struct buffer_data_page *bpage,
3347			     struct rb_event_info *info,
3348			     unsigned long tail)
3349{
3350	struct ring_buffer_event *event;
3351	u64 ts, delta;
3352	int e;
3353
3354	ts = bpage->time_stamp;
3355	pr_warn("  [%lld] PAGE TIME STAMP\n", ts);
3356
3357	for (e = 0; e < tail; e += rb_event_length(event)) {
3358
3359		event = (struct ring_buffer_event *)(bpage->data + e);
3360
3361		switch (event->type_len) {
3362
3363		case RINGBUF_TYPE_TIME_EXTEND:
3364			delta = rb_event_time_stamp(event);
3365			ts += delta;
3366			pr_warn(" 0x%x: [%lld] delta:%lld TIME EXTEND\n",
3367				e, ts, delta);
3368			break;
3369
3370		case RINGBUF_TYPE_TIME_STAMP:
3371			delta = rb_event_time_stamp(event);
3372			ts = rb_fix_abs_ts(delta, ts);
3373			pr_warn(" 0x%x:  [%lld] absolute:%lld TIME STAMP\n",
3374				e, ts, delta);
3375			break;
3376
3377		case RINGBUF_TYPE_PADDING:
3378			ts += event->time_delta;
3379			pr_warn(" 0x%x:  [%lld] delta:%d PADDING\n",
3380				e, ts, event->time_delta);
3381			break;
3382
3383		case RINGBUF_TYPE_DATA:
3384			ts += event->time_delta;
3385			pr_warn(" 0x%x:  [%lld] delta:%d %s%s\n",
3386				e, ts, event->time_delta,
3387				show_flags(event), show_irq(event));
3388			break;
3389
3390		default:
3391			break;
3392		}
3393	}
3394	pr_warn("expected end:0x%lx last event actually ended at:0x%x\n", tail, e);
3395}
3396
3397static DEFINE_PER_CPU(atomic_t, checking);
3398static atomic_t ts_dump;
3399
3400#define buffer_warn_return(fmt, ...)					\
3401	do {								\
3402		/* If another report is happening, ignore this one */	\
3403		if (atomic_inc_return(&ts_dump) != 1) {			\
3404			atomic_dec(&ts_dump);				\
3405			goto out;					\
3406		}							\
3407		atomic_inc(&cpu_buffer->record_disabled);		\
3408		pr_warn(fmt, ##__VA_ARGS__);				\
3409		dump_buffer_page(bpage, info, tail);			\
3410		atomic_dec(&ts_dump);					\
3411		/* There are some cases in boot up where this can happen */ \
3412		if (WARN_ON_ONCE(system_state != SYSTEM_BOOTING))	\
3413			/* Do not re-enable checking */			\
3414			return;						\
3415	} while (0)
3416
3417/*
3418 * Check if the current event time stamp matches the deltas on
3419 * the buffer page.
3420 */
3421static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
3422			 struct rb_event_info *info,
3423			 unsigned long tail)
3424{
3425	struct ring_buffer_event *event;
3426	struct buffer_data_page *bpage;
3427	u64 ts, delta;
3428	bool full = false;
3429	int e;
3430
3431	bpage = info->tail_page->page;
3432
3433	if (tail == CHECK_FULL_PAGE) {
3434		full = true;
3435		tail = local_read(&bpage->commit);
3436	} else if (info->add_timestamp &
3437		   (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)) {
3438		/* Ignore events with absolute time stamps */
3439		return;
3440	}
3441
3442	/*
3443	 * Do not check the first event (skip possible extends too).
3444	 * Also do not check if previous events have not been committed.
3445	 */
3446	if (tail <= 8 || tail > local_read(&bpage->commit))
3447		return;
3448
3449	/*
3450	 * If this interrupted another event's check, skip this one.
3451	 */
3452	if (atomic_inc_return(this_cpu_ptr(&checking)) != 1)
3453		goto out;
3454
3455	ts = bpage->time_stamp;
3456
3457	for (e = 0; e < tail; e += rb_event_length(event)) {
3458
3459		event = (struct ring_buffer_event *)(bpage->data + e);
3460
3461		switch (event->type_len) {
3462
3463		case RINGBUF_TYPE_TIME_EXTEND:
3464			delta = rb_event_time_stamp(event);
3465			ts += delta;
3466			break;
3467
3468		case RINGBUF_TYPE_TIME_STAMP:
3469			delta = rb_event_time_stamp(event);
3470			delta = rb_fix_abs_ts(delta, ts);
3471			if (delta < ts) {
3472				buffer_warn_return("[CPU: %d]ABSOLUTE TIME WENT BACKWARDS: last ts: %lld absolute ts: %lld\n",
3473						   cpu_buffer->cpu, ts, delta);
3474			}
3475			ts = delta;
3476			break;
3477
3478		case RINGBUF_TYPE_PADDING:
3479			if (event->time_delta == 1)
3480				break;
3481			fallthrough;
3482		case RINGBUF_TYPE_DATA:
3483			ts += event->time_delta;
3484			break;
3485
3486		default:
3487			RB_WARN_ON(cpu_buffer, 1);
3488		}
3489	}
3490	if ((full && ts > info->ts) ||
3491	    (!full && ts + info->delta != info->ts)) {
3492		buffer_warn_return("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%s context:%s\n",
3493				   cpu_buffer->cpu,
3494				   ts + info->delta, info->ts, info->delta,
3495				   info->before, info->after,
3496				   full ? " (full)" : "", show_interrupt_level());
3497	}
3498out:
3499	atomic_dec(this_cpu_ptr(&checking));
3500}
3501#else
3502static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
3503			 struct rb_event_info *info,
3504			 unsigned long tail)
3505{
3506}
3507#endif /* CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS */
3508
3509static struct ring_buffer_event *
3510__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
3511		  struct rb_event_info *info)
3512{
3513	struct ring_buffer_event *event;
3514	struct buffer_page *tail_page;
3515	unsigned long tail, write, w;
3516
3517	/* Don't let the compiler play games with cpu_buffer->tail_page */
3518	tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
3519
3520 /*A*/	w = local_read(&tail_page->write) & RB_WRITE_MASK;
3521	barrier();
3522	rb_time_read(&cpu_buffer->before_stamp, &info->before);
3523	rb_time_read(&cpu_buffer->write_stamp, &info->after);
3524	barrier();
3525	info->ts = rb_time_stamp(cpu_buffer->buffer);
3526
3527	if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) {
3528		info->delta = info->ts;
3529	} else {
3530		/*
3531		 * If interrupting an event time update, we may need an
3532		 * absolute timestamp.
3533		 * Don't bother if this is the start of a new page (w == 0).
3534		 */
3535		if (!w) {
3536			/* Use the sub-buffer timestamp */
3537			info->delta = 0;
3538		} else if (unlikely(info->before != info->after)) {
3539			info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND;
3540			info->length += RB_LEN_TIME_EXTEND;
3541		} else {
3542			info->delta = info->ts - info->after;
3543			if (unlikely(test_time_stamp(info->delta))) {
3544				info->add_timestamp |= RB_ADD_STAMP_EXTEND;
3545				info->length += RB_LEN_TIME_EXTEND;
3546			}
3547		}
3548	}
3549
3550 /*B*/	rb_time_set(&cpu_buffer->before_stamp, info->ts);
3551
3552 /*C*/	write = local_add_return(info->length, &tail_page->write);
3553
3554	/* set write to only the index of the write */
3555	write &= RB_WRITE_MASK;
3556
3557	tail = write - info->length;
3558
3559	/* See if we shot past the end of this buffer page */
3560	if (unlikely(write > cpu_buffer->buffer->subbuf_size)) {
3561		check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
3562		return rb_move_tail(cpu_buffer, tail, info);
3563	}
3564
3565	if (likely(tail == w)) {
3566		/* Nothing interrupted us between A and C */
3567 /*D*/		rb_time_set(&cpu_buffer->write_stamp, info->ts);
3568		/*
3569		 * If something came in between C and D, the write stamp
3570		 * may now not be in sync. But that's fine as the before_stamp
3571		 * will be different and then next event will just be forced
3572		 * to use an absolute timestamp.
3573		 */
3574		if (likely(!(info->add_timestamp &
3575			     (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
3576			/* This did not interrupt any time update */
3577			info->delta = info->ts - info->after;
3578		else
3579			/* Just use full timestamp for interrupting event */
3580			info->delta = info->ts;
3581		check_buffer(cpu_buffer, info, tail);
3582	} else {
3583		u64 ts;
3584		/* SLOW PATH - Interrupted between A and C */
3585
3586		/* Save the old before_stamp */
3587		rb_time_read(&cpu_buffer->before_stamp, &info->before);
3588
3589		/*
3590		 * Read a new timestamp and update the before_stamp to make
3591		 * the next event after this one force using an absolute
3592		 * timestamp. This is in case an interrupt were to come in
3593		 * between E and F.
3594		 */
3595		ts = rb_time_stamp(cpu_buffer->buffer);
3596		rb_time_set(&cpu_buffer->before_stamp, ts);
3597
3598		barrier();
3599 /*E*/		rb_time_read(&cpu_buffer->write_stamp, &info->after);
3600		barrier();
3601 /*F*/		if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
3602		    info->after == info->before && info->after < ts) {
3603			/*
3604			 * Nothing came after this event between C and F, it is
3605			 * safe to use info->after for the delta as it
3606			 * matched info->before and is still valid.
3607			 */
3608			info->delta = ts - info->after;
3609		} else {
3610			/*
3611			 * Interrupted between C and F:
3612			 * Lost the previous event's time stamp. Just set the
3613			 * delta to zero, and this will be the same time as
3614			 * the event this event interrupted. And the events that
3615			 * came after this will still be correct (as they would
3616			 * have built their delta on the previous event).
3617			 */
3618			info->delta = 0;
3619		}
3620		info->ts = ts;
3621		info->add_timestamp &= ~RB_ADD_STAMP_FORCE;
3622	}
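	/*
	 * Illustrative interleaving for the slow path above (a
	 * hypothetical timeline, not taken from the source):
	 *
	 *   this writer:          A  B  C ............ E  F
	 *   interrupting writer:           A B C D
	 *
	 * The interrupting writer completed between our C and E, so the
	 * write index no longer matches at F and delta falls back to 0:
	 * precision is lost for this one event, but the recorded
	 * timestamps remain monotonic.
	 */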
3623
3624	/*
3625	 * If this is the first commit on the page, then it has the same
3626	 * timestamp as the page itself.
3627	 */
3628	if (unlikely(!tail && !(info->add_timestamp &
3629				(RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
3630		info->delta = 0;
3631
3632	/* We reserved something on the buffer */
3633
3634	event = __rb_page_index(tail_page, tail);
3635	rb_update_event(cpu_buffer, event, info);
3636
3637	local_inc(&tail_page->entries);
3638
3639	/*
3640	 * If this is the first commit on the page, then update
3641	 * its timestamp.
3642	 */
3643	if (unlikely(!tail))
3644		tail_page->page->time_stamp = info->ts;
3645
3646	/* account for these added bytes */
3647	local_add(info->length, &cpu_buffer->entries_bytes);
3648
3649	return event;
3650}
3651
3652static __always_inline struct ring_buffer_event *
3653rb_reserve_next_event(struct trace_buffer *buffer,
3654		      struct ring_buffer_per_cpu *cpu_buffer,
3655		      unsigned long length)
3656{
3657	struct ring_buffer_event *event;
3658	struct rb_event_info info;
3659	int nr_loops = 0;
3660	int add_ts_default;
3661
3662	/* ring buffer does cmpxchg, make sure it is safe in NMI context */
3663	if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) &&
3664	    (unlikely(in_nmi()))) {
3665		return NULL;
3666	}
3667
3668	rb_start_commit(cpu_buffer);
3669	/* The commit page cannot change after this */
3670
3671#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3672	/*
3673	 * Due to the ability to swap a cpu buffer from a buffer
3674	 * it is possible it was swapped before we committed.
3675	 * (committing stops a swap). We check for it here and
3676	 * if it happened, we have to fail the write.
3677	 */
3678	barrier();
3679	if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) {
3680		local_dec(&cpu_buffer->committing);
3681		local_dec(&cpu_buffer->commits);
3682		return NULL;
3683	}
3684#endif
3685
3686	info.length = rb_calculate_event_length(length);
3687
3688	if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) {
3689		add_ts_default = RB_ADD_STAMP_ABSOLUTE;
3690		info.length += RB_LEN_TIME_EXTEND;
3691		if (info.length > cpu_buffer->buffer->max_data_size)
3692			goto out_fail;
3693	} else {
3694		add_ts_default = RB_ADD_STAMP_NONE;
3695	}
3696
3697 again:
3698	info.add_timestamp = add_ts_default;
3699	info.delta = 0;
3700
3701	/*
3702	 * We allow for interrupts to reenter here and do a trace.
3703	 * If one does, it will cause this original code to loop
3704	 * back here. Even with heavy interrupts happening, this
3705	 * should only happen a few times in a row. If this happens
3706	 * 1000 times in a row, there must be either an interrupt
3707	 * storm or a bug somewhere.
3708	 * Bail!
3709	 */
3710	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
3711		goto out_fail;
3712
3713	event = __rb_reserve_next(cpu_buffer, &info);
3714
3715	if (unlikely(PTR_ERR(event) == -EAGAIN)) {
3716		if (info.add_timestamp & (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND))
3717			info.length -= RB_LEN_TIME_EXTEND;
3718		goto again;
3719	}
3720
3721	if (likely(event))
3722		return event;
3723 out_fail:
3724	rb_end_commit(cpu_buffer);
3725	return NULL;
3726}
3727
3728/**
3729 * ring_buffer_lock_reserve - reserve a part of the buffer
3730 * @buffer: the ring buffer to reserve from
3731 * @length: the length of the data to reserve (excluding event header)
3732 *
3733 * Returns a reserved event on the ring buffer to copy data directly into.
3734 * The user of this interface will need to get the body to write into
3735 * and can use the ring_buffer_event_data() interface.
3736 *
3737 * The length is the length of the data needed, not the event length
3738 * which also includes the event header.
3739 *
3740 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
3741 * If NULL is returned, then nothing has been allocated or locked.
3742 */
3743struct ring_buffer_event *
3744ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length)
3745{
3746	struct ring_buffer_per_cpu *cpu_buffer;
3747	struct ring_buffer_event *event;
3748	int cpu;
3749
3750	/* If we are tracing schedule, we don't want to recurse */
3751	preempt_disable_notrace();
3752
3753	if (unlikely(atomic_read(&buffer->record_disabled)))
3754		goto out;
3755
3756	cpu = raw_smp_processor_id();
3757
3758	if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
3759		goto out;
3760
3761	cpu_buffer = buffer->buffers[cpu];
3762
3763	if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
3764		goto out;
3765
3766	if (unlikely(length > buffer->max_data_size))
3767		goto out;
3768
3769	if (unlikely(trace_recursive_lock(cpu_buffer)))
3770		goto out;
3771
3772	event = rb_reserve_next_event(buffer, cpu_buffer, length);
3773	if (!event)
3774		goto out_unlock;
3775
3776	return event;
3777
3778 out_unlock:
3779	trace_recursive_unlock(cpu_buffer);
3780 out:
3781	preempt_enable_notrace();
3782	return NULL;
3783}
3784EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
3785
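/*
 * Minimal usage sketch (not part of this file): reserve an event, fill
 * its body, then commit. It assumes the v6.9 signature of
 * ring_buffer_unlock_commit(), which takes only the buffer; struct
 * my_event and write_my_event() are hypothetical names.
 */
struct my_event {
	u32 id;
	u64 value;
};

static int write_my_event(struct trace_buffer *buffer, u32 id, u64 value)
{
	struct ring_buffer_event *event;
	struct my_event *entry;

	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (!event)
		return -EBUSY;	/* recording disabled, or no room */

	entry = ring_buffer_event_data(event);
	entry->id = id;
	entry->value = value;

	/* Pairs with the reserve above; never call this after a discard. */
	return ring_buffer_unlock_commit(buffer);
}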
3786/*
3787 * Decrement the entry count of the page that an event is on.
3788 * The event does not even need to exist, only the pointer
3789 * to the page it is on. This may only be called before the commit
3790 * takes place.
3791 */
3792static inline void
3793rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
3794		   struct ring_buffer_event *event)
3795{
3796	unsigned long addr = (unsigned long)event;
3797	struct buffer_page *bpage = cpu_buffer->commit_page;
3798	struct buffer_page *start;
3799
3800	addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1);
3801
3802	/* Do the likely case first */
3803	if (likely(bpage->page == (void *)addr)) {
3804		local_dec(&bpage->entries);
3805		return;
3806	}
3807
3808	/*
3809	 * Because the commit page may be on the reader page we
3810	 * start with the next page and check the end loop there.
3811	 */
3812	rb_inc_page(&bpage);
3813	start = bpage;
3814	do {
3815		if (bpage->page == (void *)addr) {
3816			local_dec(&bpage->entries);
3817			return;
3818		}
3819		rb_inc_page(&bpage);
3820	} while (bpage != start);
3821
3822	/* commit not part of this buffer?? */
3823	RB_WARN_ON(cpu_buffer, 1);
3824}
3825
3826/**
3827 * ring_buffer_discard_commit - discard an event that has not been committed
3828 * @buffer: the ring buffer
3829 * @event: non committed event to discard
3830 *
3831 * Sometimes an event that is in the ring buffer needs to be ignored.
3832 * This function lets the user discard an event in the ring buffer
3833 * and then that event will not be read later.
3834 *
3835 * This function only works if it is called before the item has been
3836 * committed. It will try to free the event from the ring buffer
3837 * if another event has not been added behind it.
3838 *
3839 * If another event has been added behind it, it will set the event
3840 * up as discarded, and perform the commit.
3841 *
3842 * If this function is called, do not call ring_buffer_unlock_commit on
3843 * the event.
3844 */
3845void ring_buffer_discard_commit(struct trace_buffer *buffer,
3846				struct ring_buffer_event *event)
3847{
3848	struct ring_buffer_per_cpu *cpu_buffer;
3849	int cpu;
3850
3851	/* The event is discarded regardless */
3852	rb_event_discard(event);
3853
3854	cpu = smp_processor_id();
3855	cpu_buffer = buffer->buffers[cpu];
3856
3857	/*
3858	 * This must only be called if the event has not been
3859	 * committed yet. Thus we can assume that preemption
3860	 * is still disabled.
3861	 */
3862	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
3863
3864	rb_decrement_entry(cpu_buffer, event);
3865	if (rb_try_to_discard(cpu_buffer, event))
3866		goto out;
3867
3868 out:
3869	rb_end_commit(cpu_buffer);
3870
3871	trace_recursive_unlock(cpu_buffer);
3872
3873	preempt_enable_notrace();
3874
3875}
3876EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
3877
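/*
 * Usage sketch (hypothetical, not from this file): discarding a
 * reserved event instead of committing it, e.g. after a post-reserve
 * filter decides the event is uninteresting. fill_entry() is a
 * placeholder.
 */
static bool fill_entry(void *body);	/* hypothetical filter */

static void maybe_write(struct trace_buffer *buffer)
{
	struct ring_buffer_event *event;
	void *body;

	event = ring_buffer_lock_reserve(buffer, 64);
	if (!event)
		return;

	body = ring_buffer_event_data(event);
	if (!fill_entry(body))
		ring_buffer_discard_commit(buffer, event);
	else
		ring_buffer_unlock_commit(buffer);
}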
3878/**
3879 * ring_buffer_write - write data to the buffer without reserving
3880 * @buffer: The ring buffer to write to.
3881 * @length: The length of the data being written (excluding the event header)
3882 * @data: The data to write to the buffer.
3883 *
3884 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
3885 * one function. If you already have the data to write to the buffer, it
3886 * may be easier to simply call this function.
3887 *
3888 * Note, like ring_buffer_lock_reserve, the length is the length of the data
3889 * and not the length of the event which would hold the header.
3890 */
3891int ring_buffer_write(struct trace_buffer *buffer,
3892		      unsigned long length,
3893		      void *data)
3894{
3895	struct ring_buffer_per_cpu *cpu_buffer;
3896	struct ring_buffer_event *event;
3897	void *body;
3898	int ret = -EBUSY;
3899	int cpu;
3900
3901	preempt_disable_notrace();
3902
3903	if (atomic_read(&buffer->record_disabled))
3904		goto out;
3905
3906	cpu = raw_smp_processor_id();
3907
3908	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3909		goto out;
3910
3911	cpu_buffer = buffer->buffers[cpu];
3912
3913	if (atomic_read(&cpu_buffer->record_disabled))
3914		goto out;
3915
3916	if (length > buffer->max_data_size)
3917		goto out;
3918
3919	if (unlikely(trace_recursive_lock(cpu_buffer)))
3920		goto out;
3921
3922	event = rb_reserve_next_event(buffer, cpu_buffer, length);
3923	if (!event)
3924		goto out_unlock;
3925
3926	body = rb_event_data(event);
3927
3928	memcpy(body, data, length);
3929
3930	rb_commit(cpu_buffer);
3931
3932	rb_wakeups(buffer, cpu_buffer);
3933
3934	ret = 0;
3935
3936 out_unlock:
3937	trace_recursive_unlock(cpu_buffer);
3938
3939 out:
3940	preempt_enable_notrace();
3941
3942	return ret;
3943}
3944EXPORT_SYMBOL_GPL(ring_buffer_write);
3945
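/*
 * Usage sketch (hypothetical names): when the payload already exists,
 * ring_buffer_write() replaces the reserve/commit pair.
 */
struct my_payload {
	u32 id;
	u64 value;
};

static int log_payload(struct trace_buffer *buffer)
{
	struct my_payload p = { .id = 1, .value = 42 };

	/* Returns 0 on success, -EBUSY if the event could not be written. */
	return ring_buffer_write(buffer, sizeof(p), &p);
}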
3946static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
3947{
3948	struct buffer_page *reader = cpu_buffer->reader_page;
3949	struct buffer_page *head = rb_set_head_page(cpu_buffer);
3950	struct buffer_page *commit = cpu_buffer->commit_page;
3951
3952	/* In case of error, head will be NULL */
3953	if (unlikely(!head))
3954		return true;
3955
3956	/* Reader should exhaust content in reader page */
3957	if (reader->read != rb_page_commit(reader))
3958		return false;
3959
3960	/*
3961	 * If writers are committing on the reader page, then since all
3962	 * committed content has been read, the ring buffer is empty.
3963	 */
3964	if (commit == reader)
3965		return true;
3966
3967	/*
3968	 * If writers are committing on a page other than reader page
3969	 * and head page, there should always be content to read.
3970	 */
3971	if (commit != head)
3972		return false;
3973
3974	/*
3975	 * Writers are committing on the head page; we just need to
3976	 * check whether any data has been committed. The reader will
3977	 * swap the reader page with the head page when it reads data.
3978	 */
3979	return rb_page_commit(commit) == 0;
3980}
3981
3982/**
3983 * ring_buffer_record_disable - stop all writes into the buffer
3984 * @buffer: The ring buffer to stop writes to.
3985 *
3986 * This prevents all writes to the buffer. Any attempt to write
3987 * to the buffer after this will fail and return NULL.
3988 *
3989 * The caller should call synchronize_rcu() after this.
3990 */
3991void ring_buffer_record_disable(struct trace_buffer *buffer)
3992{
3993	atomic_inc(&buffer->record_disabled);
3994}
3995EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
3996
3997/**
3998 * ring_buffer_record_enable - enable writes to the buffer
3999 * @buffer: The ring buffer to enable writes
4000 *
4001 * Note, multiple disables will need the same number of enables
4002 * to truly enable the writing (much like preempt_disable).
4003 */
4004void ring_buffer_record_enable(struct trace_buffer *buffer)
4005{
4006	atomic_dec(&buffer->record_disabled);
4007}
4008EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
4009
4010/**
4011 * ring_buffer_record_off - stop all writes into the buffer
4012 * @buffer: The ring buffer to stop writes to.
4013 *
4014 * This prevents all writes to the buffer. Any attempt to write
4015 * to the buffer after this will fail and return NULL.
4016 *
4017 * This is different from ring_buffer_record_disable() as
4018 * it works like an on/off switch, whereas the disable() version
4019 * must be paired with an enable().
4020 */
4021void ring_buffer_record_off(struct trace_buffer *buffer)
4022{
4023	unsigned int rd;
4024	unsigned int new_rd;
4025
4026	rd = atomic_read(&buffer->record_disabled);
4027	do {
4028		new_rd = rd | RB_BUFFER_OFF;
4029	} while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd));
4030}
4031EXPORT_SYMBOL_GPL(ring_buffer_record_off);
4032
4033/**
4034 * ring_buffer_record_on - restart writes into the buffer
4035 * @buffer: The ring buffer to start writes to.
4036 *
4037 * This enables all writes to the buffer that was disabled by
4038 * ring_buffer_record_off().
4039 *
4040 * This is different from ring_buffer_record_enable() as
4041 * it works like an on/off switch, whereas the enable() version
4042 * must be paired with a disable().
4043 */
4044void ring_buffer_record_on(struct trace_buffer *buffer)
4045{
4046	unsigned int rd;
4047	unsigned int new_rd;
4048
4049	rd = atomic_read(&buffer->record_disabled);
4050	do {
4051		new_rd = rd & ~RB_BUFFER_OFF;
4052	} while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd));
4053}
4054EXPORT_SYMBOL_GPL(ring_buffer_record_on);
4055
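/*
 * Illustrative contrast (not from this file) between the counting
 * disable/enable pair and the on/off switch:
 */
static void toggle_examples(struct trace_buffer *buffer)
{
	ring_buffer_record_disable(buffer);
	ring_buffer_record_disable(buffer);
	ring_buffer_record_enable(buffer);	/* still disabled: calls nest */
	ring_buffer_record_enable(buffer);	/* enabled again */

	ring_buffer_record_off(buffer);
	ring_buffer_record_off(buffer);		/* idempotent: sets the same bit */
	ring_buffer_record_on(buffer);		/* one call turns it back on */
}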
4056/**
4057 * ring_buffer_record_is_on - return true if the ring buffer can write
4058 * @buffer: The ring buffer to see if write is enabled
4059 *
4060 * Returns true if the ring buffer is in a state that it accepts writes.
4061 */
4062bool ring_buffer_record_is_on(struct trace_buffer *buffer)
4063{
4064	return !atomic_read(&buffer->record_disabled);
4065}
4066
4067/**
4068 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
4069 * @buffer: The ring buffer to see if write is set enabled
4070 *
4071 * Returns true if the ring buffer is set writable by ring_buffer_record_on().
4072 * Note that this does NOT mean it is in a writable state.
4073 *
4074 * It may return true when the ring buffer has been disabled by
4075 * ring_buffer_record_disable(), as that is a temporary disabling of
4076 * the ring buffer.
4077 */
4078bool ring_buffer_record_is_set_on(struct trace_buffer *buffer)
4079{
4080	return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
4081}
4082
4083/**
4084 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
4085 * @buffer: The ring buffer to stop writes to.
4086 * @cpu: The CPU buffer to stop
4087 *
4088 * This prevents all writes to the buffer. Any attempt to write
4089 * to the buffer after this will fail and return NULL.
4090 *
4091 * The caller should call synchronize_rcu() after this.
4092 */
4093void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu)
4094{
4095	struct ring_buffer_per_cpu *cpu_buffer;
4096
4097	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4098		return;
4099
4100	cpu_buffer = buffer->buffers[cpu];
4101	atomic_inc(&cpu_buffer->record_disabled);
4102}
4103EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
4104
4105/**
4106 * ring_buffer_record_enable_cpu - enable writes to the buffer
4107 * @buffer: The ring buffer to enable writes
4108 * @cpu: The CPU to enable.
4109 *
4110 * Note, multiple disables will need the same number of enables
4111 * to truly enable the writing (much like preempt_disable).
4112 */
4113void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu)
4114{
4115	struct ring_buffer_per_cpu *cpu_buffer;
4116
4117	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4118		return;
4119
4120	cpu_buffer = buffer->buffers[cpu];
4121	atomic_dec(&cpu_buffer->record_disabled);
4122}
4123EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
4124
4125/*
4126 * The total entries in the ring buffer is the running counter
4127 * of entries entered into the ring buffer, minus the sum of
4128 * the entries read from the ring buffer and the number of
4129 * entries that were overwritten.
4130 */
4131static inline unsigned long
4132rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
4133{
4134	return local_read(&cpu_buffer->entries) -
4135		(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
4136}
4137
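/*
 * Worked example (illustrative numbers): with entries == 1000,
 * overrun == 200 and read == 300, rb_num_of_entries() returns
 * 1000 - (200 + 300) = 500 events still waiting to be consumed.
 */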
4138/**
4139 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
4140 * @buffer: The ring buffer
4141 * @cpu: The per CPU buffer to read from.
4142 */
4143u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
4144{
4145	unsigned long flags;
4146	struct ring_buffer_per_cpu *cpu_buffer;
4147	struct buffer_page *bpage;
4148	u64 ret = 0;
4149
4150	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4151		return 0;
4152
4153	cpu_buffer = buffer->buffers[cpu];
4154	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4155	/*
4156	 * If the tail is on the reader_page, the oldest time stamp is
4157	 * on the reader page.
4158	 */
4159	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
4160		bpage = cpu_buffer->reader_page;
4161	else
4162		bpage = rb_set_head_page(cpu_buffer);
4163	if (bpage)
4164		ret = bpage->page->time_stamp;
4165	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4166
4167	return ret;
4168}
4169EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
4170
4171/**
4172 * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer
4173 * @buffer: The ring buffer
4174 * @cpu: The per CPU buffer to read from.
4175 */
4176unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu)
4177{
4178	struct ring_buffer_per_cpu *cpu_buffer;
4179	unsigned long ret;
4180
4181	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4182		return 0;
4183
4184	cpu_buffer = buffer->buffers[cpu];
4185	ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
4186
4187	return ret;
4188}
4189EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
4190
4191/**
4192 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
4193 * @buffer: The ring buffer
4194 * @cpu: The per CPU buffer to get the entries from.
4195 */
4196unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu)
4197{
4198	struct ring_buffer_per_cpu *cpu_buffer;
4199
4200	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4201		return 0;
4202
4203	cpu_buffer = buffer->buffers[cpu];
4204
4205	return rb_num_of_entries(cpu_buffer);
4206}
4207EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
4208
4209/**
4210 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
4211 * buffer wrapping around (only if RB_FL_OVERWRITE is on).
4212 * @buffer: The ring buffer
4213 * @cpu: The per CPU buffer to get the number of overruns from
4214 */
4215unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu)
4216{
4217	struct ring_buffer_per_cpu *cpu_buffer;
4218	unsigned long ret;
4219
4220	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4221		return 0;
4222
4223	cpu_buffer = buffer->buffers[cpu];
4224	ret = local_read(&cpu_buffer->overrun);
4225
4226	return ret;
4227}
4228EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
4229
4230/**
4231 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
4232 * commits failing due to the buffer wrapping around while there are uncommitted
4233 * events, such as during an interrupt storm.
4234 * @buffer: The ring buffer
4235 * @cpu: The per CPU buffer to get the number of overruns from
4236 */
4237unsigned long
4238ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu)
4239{
4240	struct ring_buffer_per_cpu *cpu_buffer;
4241	unsigned long ret;
4242
4243	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4244		return 0;
4245
4246	cpu_buffer = buffer->buffers[cpu];
4247	ret = local_read(&cpu_buffer->commit_overrun);
4248
4249	return ret;
4250}
4251EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
4252
4253/**
4254 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
4255 * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
4256 * @buffer: The ring buffer
4257 * @cpu: The per CPU buffer to get the number of overruns from
4258 */
4259unsigned long
4260ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu)
4261{
4262	struct ring_buffer_per_cpu *cpu_buffer;
4263	unsigned long ret;
4264
4265	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4266		return 0;
4267
4268	cpu_buffer = buffer->buffers[cpu];
4269	ret = local_read(&cpu_buffer->dropped_events);
4270
4271	return ret;
4272}
4273EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
4274
4275/**
4276 * ring_buffer_read_events_cpu - get the number of events successfully read
4277 * @buffer: The ring buffer
4278 * @cpu: The per CPU buffer to get the number of events read
4279 */
4280unsigned long
4281ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu)
4282{
4283	struct ring_buffer_per_cpu *cpu_buffer;
4284
4285	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4286		return 0;
4287
4288	cpu_buffer = buffer->buffers[cpu];
4289	return cpu_buffer->read;
4290}
4291EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
4292
4293/**
4294 * ring_buffer_entries - get the number of entries in a buffer
4295 * @buffer: The ring buffer
4296 *
4297 * Returns the total number of entries in the ring buffer
4298 * (all CPU entries)
4299 */
4300unsigned long ring_buffer_entries(struct trace_buffer *buffer)
4301{
4302	struct ring_buffer_per_cpu *cpu_buffer;
4303	unsigned long entries = 0;
4304	int cpu;
4305
4306	/* if you care about this being correct, lock the buffer */
4307	for_each_buffer_cpu(buffer, cpu) {
4308		cpu_buffer = buffer->buffers[cpu];
4309		entries += rb_num_of_entries(cpu_buffer);
4310	}
4311
4312	return entries;
4313}
4314EXPORT_SYMBOL_GPL(ring_buffer_entries);
4315
4316/**
4317 * ring_buffer_overruns - get the number of overruns in buffer
4318 * @buffer: The ring buffer
4319 *
4320 * Returns the total number of overruns in the ring buffer
4321 * (all CPU entries)
4322 */
4323unsigned long ring_buffer_overruns(struct trace_buffer *buffer)
4324{
4325	struct ring_buffer_per_cpu *cpu_buffer;
4326	unsigned long overruns = 0;
4327	int cpu;
4328
4329	/* if you care about this being correct, lock the buffer */
4330	for_each_buffer_cpu(buffer, cpu) {
4331		cpu_buffer = buffer->buffers[cpu];
4332		overruns += local_read(&cpu_buffer->overrun);
4333	}
4334
4335	return overruns;
4336}
4337EXPORT_SYMBOL_GPL(ring_buffer_overruns);
4338
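/*
 * Usage sketch (not from this file): dumping the per-CPU counters
 * exposed above. print_rb_stats() is a hypothetical name.
 */
static void print_rb_stats(struct trace_buffer *buffer)
{
	int cpu;

	for_each_online_cpu(cpu) {
		pr_info("cpu%d: entries=%lu overrun=%lu commit_overrun=%lu read=%lu\n",
			cpu,
			ring_buffer_entries_cpu(buffer, cpu),
			ring_buffer_overrun_cpu(buffer, cpu),
			ring_buffer_commit_overrun_cpu(buffer, cpu),
			ring_buffer_read_events_cpu(buffer, cpu));
	}
}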
4339static void rb_iter_reset(struct ring_buffer_iter *iter)
4340{
4341	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4342
4343	/* Iterator usage is expected to have record disabled */
4344	iter->head_page = cpu_buffer->reader_page;
4345	iter->head = cpu_buffer->reader_page->read;
4346	iter->next_event = iter->head;
4347
4348	iter->cache_reader_page = iter->head_page;
4349	iter->cache_read = cpu_buffer->read;
4350	iter->cache_pages_removed = cpu_buffer->pages_removed;
4351
4352	if (iter->head) {
4353		iter->read_stamp = cpu_buffer->read_stamp;
4354		iter->page_stamp = cpu_buffer->reader_page->page->time_stamp;
4355	} else {
4356		iter->read_stamp = iter->head_page->page->time_stamp;
4357		iter->page_stamp = iter->read_stamp;
4358	}
4359}
4360
4361/**
4362 * ring_buffer_iter_reset - reset an iterator
4363 * @iter: The iterator to reset
4364 *
4365 * Resets the iterator, so that it will start from the beginning
4366 * again.
4367 */
4368void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
4369{
4370	struct ring_buffer_per_cpu *cpu_buffer;
4371	unsigned long flags;
4372
4373	if (!iter)
4374		return;
4375
4376	cpu_buffer = iter->cpu_buffer;
4377
4378	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4379	rb_iter_reset(iter);
4380	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4381}
4382EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
4383
4384/**
4385 * ring_buffer_iter_empty - check if an iterator has no more to read
4386 * @iter: The iterator to check
4387 */
4388int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
4389{
4390	struct ring_buffer_per_cpu *cpu_buffer;
4391	struct buffer_page *reader;
4392	struct buffer_page *head_page;
4393	struct buffer_page *commit_page;
4394	struct buffer_page *curr_commit_page;
4395	unsigned commit;
4396	u64 curr_commit_ts;
4397	u64 commit_ts;
4398
4399	cpu_buffer = iter->cpu_buffer;
4400	reader = cpu_buffer->reader_page;
4401	head_page = cpu_buffer->head_page;
4402	commit_page = READ_ONCE(cpu_buffer->commit_page);
4403	commit_ts = commit_page->page->time_stamp;
4404
4405	/*
4406	 * When the writer goes across pages, it issues a cmpxchg which
4407	 * is a mb(), which will synchronize with the rmb here.
4408	 * (see rb_tail_page_update())
4409	 */
4410	smp_rmb();
4411	commit = rb_page_commit(commit_page);
4412	/* We want to make sure that the commit page doesn't change */
4413	smp_rmb();
4414
4415	/* Make sure commit page didn't change */
4416	curr_commit_page = READ_ONCE(cpu_buffer->commit_page);
4417	curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp);
4418
4419	/* If the commit page changed, then there's more data */
4420	if (curr_commit_page != commit_page ||
4421	    curr_commit_ts != commit_ts)
4422		return 0;
4423
4424	/* Still racy, as it may return a false positive, but that's OK */
4425	return ((iter->head_page == commit_page && iter->head >= commit) ||
4426		(iter->head_page == reader && commit_page == head_page &&
4427		 head_page->read == commit &&
4428		 iter->head == rb_page_commit(cpu_buffer->reader_page)));
4429}
4430EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
4431
4432static void
4433rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
4434		     struct ring_buffer_event *event)
4435{
4436	u64 delta;
4437
4438	switch (event->type_len) {
4439	case RINGBUF_TYPE_PADDING:
4440		return;
4441
4442	case RINGBUF_TYPE_TIME_EXTEND:
4443		delta = rb_event_time_stamp(event);
4444		cpu_buffer->read_stamp += delta;
4445		return;
4446
4447	case RINGBUF_TYPE_TIME_STAMP:
4448		delta = rb_event_time_stamp(event);
4449		delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp);
4450		cpu_buffer->read_stamp = delta;
4451		return;
4452
4453	case RINGBUF_TYPE_DATA:
4454		cpu_buffer->read_stamp += event->time_delta;
4455		return;
4456
4457	default:
4458		RB_WARN_ON(cpu_buffer, 1);
4459	}
4460}
4461
4462static void
4463rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
4464			  struct ring_buffer_event *event)
4465{
4466	u64 delta;
4467
4468	switch (event->type_len) {
4469	case RINGBUF_TYPE_PADDING:
4470		return;
4471
4472	case RINGBUF_TYPE_TIME_EXTEND:
4473		delta = rb_event_time_stamp(event);
4474		iter->read_stamp += delta;
4475		return;
4476
4477	case RINGBUF_TYPE_TIME_STAMP:
4478		delta = rb_event_time_stamp(event);
4479		delta = rb_fix_abs_ts(delta, iter->read_stamp);
4480		iter->read_stamp = delta;
4481		return;
4482
4483	case RINGBUF_TYPE_DATA:
4484		iter->read_stamp += event->time_delta;
4485		return;
4486
4487	default:
4488		RB_WARN_ON(iter->cpu_buffer, 1);
4489	}
4490}
4491
4492static struct buffer_page *
4493rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
4494{
4495	struct buffer_page *reader = NULL;
4496	unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size);
4497	unsigned long overwrite;
4498	unsigned long flags;
4499	int nr_loops = 0;
4500	bool ret;
4501
4502	local_irq_save(flags);
4503	arch_spin_lock(&cpu_buffer->lock);
4504
4505 again:
4506	/*
4507	 * This should normally only loop twice. But because the
4508	 * start of the reader inserts an empty page, it causes
4509	 * a case where we will loop three times. There should be no
4510	 * reason to loop four times (that I know of).
4511	 */
4512	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
4513		reader = NULL;
4514		goto out;
4515	}
4516
4517	reader = cpu_buffer->reader_page;
4518
4519	/* If there's more to read, return this page */
4520	if (cpu_buffer->reader_page->read < rb_page_size(reader))
4521		goto out;
4522
4523	/* Never should we have an index greater than the size */
4524	if (RB_WARN_ON(cpu_buffer,
4525		       cpu_buffer->reader_page->read > rb_page_size(reader)))
4526		goto out;
4527
4528	/* check if we caught up to the tail */
4529	reader = NULL;
4530	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
4531		goto out;
4532
4533	/* Don't bother swapping if the ring buffer is empty */
4534	if (rb_num_of_entries(cpu_buffer) == 0)
4535		goto out;
4536
4537	/*
4538	 * Reset the reader page to size zero.
4539	 */
4540	local_set(&cpu_buffer->reader_page->write, 0);
4541	local_set(&cpu_buffer->reader_page->entries, 0);
4542	local_set(&cpu_buffer->reader_page->page->commit, 0);
4543	cpu_buffer->reader_page->real_end = 0;
4544
4545 spin:
4546	/*
4547	 * Splice the empty reader page into the list around the head.
4548	 */
4549	reader = rb_set_head_page(cpu_buffer);
4550	if (!reader)
4551		goto out;
4552	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
4553	cpu_buffer->reader_page->list.prev = reader->list.prev;
4554
4555	/*
4556	 * cpu_buffer->pages just needs to point to the buffer; it
4557	 * has no specific buffer page to point to. Let's move it out
4558	 * of our way so we don't accidentally swap it.
4559	 */
4560	cpu_buffer->pages = reader->list.prev;
4561
4562	/* The reader page will be pointing to the new head */
4563	rb_set_list_to_head(&cpu_buffer->reader_page->list);
4564
4565	/*
4566	 * We want to make sure we read the overruns after we set up our
4567	 * pointers to the next object. The writer side does a
4568	 * cmpxchg to cross pages which acts as the mb on the writer
4569	 * side. Note, the reader will constantly fail the swap
4570	 * while the writer is updating the pointers, so this
4571	 * guarantees that the overwrite recorded here is the one we
4572	 * want to compare with the last_overrun.
4573	 */
4574	smp_mb();
4575	overwrite = local_read(&(cpu_buffer->overrun));
4576
4577	/*
4578	 * Here's the tricky part.
4579	 *
4580	 * We need to move the pointer past the header page.
4581	 * But we can only do that if a writer is not currently
4582	 * moving it. The page before the header page has the
4583	 * flag bit '1' set if it is pointing to the page we want.
4584	 * But if the writer is in the process of moving it,
4585	 * then it will be '2', or already moved, '0'.
4586	 */
4587
4588	ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
4589
4590	/*
4591	 * If we did not convert it, then we must try again.
4592	 */
4593	if (!ret)
4594		goto spin;
4595
4596	/*
4597	 * Yay! We succeeded in replacing the page.
4598	 *
4599	 * Now make the new head point back to the reader page.
4600	 */
4601	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
4602	rb_inc_page(&cpu_buffer->head_page);
4603
4604	local_inc(&cpu_buffer->pages_read);
4605
4606	/* Finally update the reader page to the new head */
4607	cpu_buffer->reader_page = reader;
4608	cpu_buffer->reader_page->read = 0;
4609
4610	if (overwrite != cpu_buffer->last_overrun) {
4611		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
4612		cpu_buffer->last_overrun = overwrite;
4613	}
4614
4615	goto again;
4616
4617 out:
4618	/* Update the read_stamp on the first event */
4619	if (reader && reader->read == 0)
4620		cpu_buffer->read_stamp = reader->page->time_stamp;
4621
4622	arch_spin_unlock(&cpu_buffer->lock);
4623	local_irq_restore(flags);
4624
4625	/*
4626	 * The writer has preemption disabled; wait for it, but not forever.
4627	 * Although, 1 second is pretty much "forever".
4628	 */
4629#define USECS_WAIT	1000000
4630	for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) {
4631		/* If the write is past the end of page, a writer is still updating it */
4632		if (likely(!reader || rb_page_write(reader) <= bsize))
4633			break;
4634
4635		udelay(1);
4636
4637		/* Get the latest version of the reader write value */
4638		smp_rmb();
4639	}
4640
4641	/* The writer is not moving forward? Something is wrong */
4642	if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT))
4643		reader = NULL;
4644
4645	/*
4646	 * Make sure we see any padding after the write update
4647	 * (see rb_reset_tail()).
4648	 *
4649	 * In addition, a writer may be writing on the reader page
4650	 * if the page has not been fully filled, so the read barrier
4651	 * is also needed to make sure we see the content of what is
4652	 * committed by the writer (see rb_set_commit_to_write()).
4653	 */
4654	smp_rmb();
4655
4656
4657	return reader;
4658}
4659
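/*
 * Simplified sketch of the head-flag replace, not this file's actual
 * rb_head_page_replace(): the HEAD flag rides in the low bits of the
 * ->next pointer of the page before the head, so a single cmpxchg both
 * verifies the flag is still '1' and splices in the reader page.
 */
static bool head_flag_replace(unsigned long *prev_next,
			      struct list_head *old_head,
			      struct list_head *new_head)
{
	unsigned long old_val = (unsigned long)old_head | 1UL;	/* RB_PAGE_HEAD */
	unsigned long new_val = (unsigned long)new_head | 1UL;

	/* Fails if a writer already changed the flag to '2' or '0'. */
	return try_cmpxchg(prev_next, &old_val, new_val);
}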
4660static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
4661{
4662	struct ring_buffer_event *event;
4663	struct buffer_page *reader;
4664	unsigned length;
4665
4666	reader = rb_get_reader_page(cpu_buffer);
4667
4668	/* This function should not be called when buffer is empty */
4669	if (RB_WARN_ON(cpu_buffer, !reader))
4670		return;
4671
4672	event = rb_reader_event(cpu_buffer);
4673
4674	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
4675		cpu_buffer->read++;
4676
4677	rb_update_read_stamp(cpu_buffer, event);
4678
4679	length = rb_event_length(event);
4680	cpu_buffer->reader_page->read += length;
4681	cpu_buffer->read_bytes += length;
4682}
4683
4684static void rb_advance_iter(struct ring_buffer_iter *iter)
4685{
4686	struct ring_buffer_per_cpu *cpu_buffer;
4687
4688	cpu_buffer = iter->cpu_buffer;
4689
4690	/* If head == next_event then we need to jump to the next event */
4691	if (iter->head == iter->next_event) {
4692		/* If the event gets overwritten again, there's nothing to do */
4693		if (rb_iter_head_event(iter) == NULL)
4694			return;
4695	}
4696
4697	iter->head = iter->next_event;
4698
4699	/*
4700	 * Check if we are at the end of the buffer.
4701	 */
4702	if (iter->next_event >= rb_page_size(iter->head_page)) {
4703		/* discarded commits can make the page empty */
4704		if (iter->head_page == cpu_buffer->commit_page)
4705			return;
4706		rb_inc_iter(iter);
4707		return;
4708	}
4709
4710	rb_update_iter_read_stamp(iter, iter->event);
4711}
4712
4713static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
4714{
4715	return cpu_buffer->lost_events;
4716}
4717
4718static struct ring_buffer_event *
4719rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
4720	       unsigned long *lost_events)
4721{
4722	struct ring_buffer_event *event;
4723	struct buffer_page *reader;
4724	int nr_loops = 0;
4725
4726	if (ts)
4727		*ts = 0;
4728 again:
4729	/*
4730	 * We repeat when a time extend is encountered.
4731	 * Since the time extend is always attached to a data event,
4732	 * we should never loop more than once.
4733	 * (We never hit the following condition more than twice).
4734	 */
4735	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
4736		return NULL;
4737
4738	reader = rb_get_reader_page(cpu_buffer);
4739	if (!reader)
4740		return NULL;
4741
4742	event = rb_reader_event(cpu_buffer);
4743
4744	switch (event->type_len) {
4745	case RINGBUF_TYPE_PADDING:
4746		if (rb_null_event(event))
4747			RB_WARN_ON(cpu_buffer, 1);
4748		/*
4749		 * Because the writer could be discarding every
4750		 * event it creates (which would probably be bad)
4751		 * if we were to go back to "again" then we may never
4752		 * catch up, and will trigger the warn on, or lock
4753		 * the box. Return the padding, and we will release
4754		 * the current locks, and try again.
4755		 */
4756		return event;
4757
4758	case RINGBUF_TYPE_TIME_EXTEND:
4759		/* Internal data, OK to advance */
4760		rb_advance_reader(cpu_buffer);
4761		goto again;
4762
4763	case RINGBUF_TYPE_TIME_STAMP:
4764		if (ts) {
4765			*ts = rb_event_time_stamp(event);
4766			*ts = rb_fix_abs_ts(*ts, reader->page->time_stamp);
4767			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4768							 cpu_buffer->cpu, ts);
4769		}
4770		/* Internal data, OK to advance */
4771		rb_advance_reader(cpu_buffer);
4772		goto again;
4773
4774	case RINGBUF_TYPE_DATA:
4775		if (ts && !(*ts)) {
4776			*ts = cpu_buffer->read_stamp + event->time_delta;
4777			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4778							 cpu_buffer->cpu, ts);
4779		}
4780		if (lost_events)
4781			*lost_events = rb_lost_events(cpu_buffer);
4782		return event;
4783
4784	default:
4785		RB_WARN_ON(cpu_buffer, 1);
4786	}
4787
4788	return NULL;
4789}
4790EXPORT_SYMBOL_GPL(ring_buffer_peek);
4791
4792static struct ring_buffer_event *
4793rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
4794{
4795	struct trace_buffer *buffer;
4796	struct ring_buffer_per_cpu *cpu_buffer;
4797	struct ring_buffer_event *event;
4798	int nr_loops = 0;
4799
4800	if (ts)
4801		*ts = 0;
4802
4803	cpu_buffer = iter->cpu_buffer;
4804	buffer = cpu_buffer->buffer;
4805
4806	/*
4807	 * Check if someone performed a consuming read to the buffer
4808	 * or removed some pages from the buffer. In these cases,
4809	 * iterator was invalidated, and we need to reset it.
4810	 */
4811	if (unlikely(iter->cache_read != cpu_buffer->read ||
4812		     iter->cache_reader_page != cpu_buffer->reader_page ||
4813		     iter->cache_pages_removed != cpu_buffer->pages_removed))
4814		rb_iter_reset(iter);
4815
4816 again:
4817	if (ring_buffer_iter_empty(iter))
4818		return NULL;
4819
4820	/*
4821	 * As the writer can mess with what the iterator is trying
4822	 * to read, just give up if we fail to get an event after
4823	 * three tries. The iterator is not as reliable when reading
4824	 * the ring buffer with an active write as the consumer is.
4825	 * Do not warn when the three-failure limit is reached.
4826	 */
4827	if (++nr_loops > 3)
4828		return NULL;
4829
4830	if (rb_per_cpu_empty(cpu_buffer))
4831		return NULL;
4832
4833	if (iter->head >= rb_page_size(iter->head_page)) {
4834		rb_inc_iter(iter);
4835		goto again;
4836	}
4837
4838	event = rb_iter_head_event(iter);
4839	if (!event)
4840		goto again;
4841
4842	switch (event->type_len) {
4843	case RINGBUF_TYPE_PADDING:
4844		if (rb_null_event(event)) {
4845			rb_inc_iter(iter);
4846			goto again;
4847		}
4848		rb_advance_iter(iter);
4849		return event;
4850
4851	case RINGBUF_TYPE_TIME_EXTEND:
4852		/* Internal data, OK to advance */
4853		rb_advance_iter(iter);
4854		goto again;
4855
4856	case RINGBUF_TYPE_TIME_STAMP:
4857		if (ts) {
4858			*ts = rb_event_time_stamp(event);
4859			*ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp);
4860			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4861							 cpu_buffer->cpu, ts);
4862		}
4863		/* Internal data, OK to advance */
4864		rb_advance_iter(iter);
4865		goto again;
4866
4867	case RINGBUF_TYPE_DATA:
4868		if (ts && !(*ts)) {
4869			*ts = iter->read_stamp + event->time_delta;
4870			ring_buffer_normalize_time_stamp(buffer,
4871							 cpu_buffer->cpu, ts);
4872		}
4873		return event;
4874
4875	default:
4876		RB_WARN_ON(cpu_buffer, 1);
4877	}
4878
4879	return NULL;
4880}
4881EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
4882
4883static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
4884{
4885	if (likely(!in_nmi())) {
4886		raw_spin_lock(&cpu_buffer->reader_lock);
4887		return true;
4888	}
4889
4890	/*
4891	 * If an NMI die dump writes out the content of the ring buffer,
4892	 * trylock must be used to prevent a deadlock if the NMI
4893	 * preempted a task that holds the ring buffer locks. If
4894	 * we get the lock then all is fine; if not, then continue
4895	 * to do the read, but this can corrupt the ring buffer,
4896	 * so it must be permanently disabled from future writes.
4897	 * Reading from NMI is a one-shot deal.
4898	 */
4899	if (raw_spin_trylock(&cpu_buffer->reader_lock))
4900		return true;
4901
4902	/* Continue without locking, but disable the ring buffer */
4903	atomic_inc(&cpu_buffer->record_disabled);
4904	return false;
4905}
4906
4907static inline void
4908rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
4909{
4910	if (likely(locked))
4911		raw_spin_unlock(&cpu_buffer->reader_lock);
4912}
4913
4914/**
4915 * ring_buffer_peek - peek at the next event to be read
4916 * @buffer: The ring buffer to read
4917 * @cpu: The cpu to peek at
4918 * @ts: The timestamp counter of this event.
4919 * @lost_events: a variable to store if events were lost (may be NULL)
4920 *
4921 * This will return the event that will be read next, but does
4922 * not consume the data.
4923 */
4924struct ring_buffer_event *
4925ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
4926		 unsigned long *lost_events)
4927{
4928	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4929	struct ring_buffer_event *event;
4930	unsigned long flags;
4931	bool dolock;
4932
4933	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4934		return NULL;
4935
4936 again:
4937	local_irq_save(flags);
4938	dolock = rb_reader_lock(cpu_buffer);
4939	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
4940	if (event && event->type_len == RINGBUF_TYPE_PADDING)
4941		rb_advance_reader(cpu_buffer);
4942	rb_reader_unlock(cpu_buffer, dolock);
4943	local_irq_restore(flags);
4944
4945	if (event && event->type_len == RINGBUF_TYPE_PADDING)
4946		goto again;
4947
4948	return event;
4949}
4950
4951/** ring_buffer_iter_dropped - report if there are dropped events
4952 * @iter: The ring buffer iterator
4953 *
4954 * Returns true if there were dropped events since the last peek.
4955 */
4956bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter)
4957{
4958	bool ret = iter->missed_events != 0;
4959
4960	iter->missed_events = 0;
4961	return ret;
4962}
4963EXPORT_SYMBOL_GPL(ring_buffer_iter_dropped);
4964
4965/**
4966 * ring_buffer_iter_peek - peek at the next event to be read
4967 * @iter: The ring buffer iterator
4968 * @ts: The timestamp counter of this event.
4969 *
4970 * This will return the event that will be read next, but does
4971 * not increment the iterator.
4972 */
4973struct ring_buffer_event *
4974ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
4975{
4976	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4977	struct ring_buffer_event *event;
4978	unsigned long flags;
4979
4980 again:
4981	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4982	event = rb_iter_peek(iter, ts);
4983	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4984
4985	if (event && event->type_len == RINGBUF_TYPE_PADDING)
4986		goto again;
4987
4988	return event;
4989}
4990
4991/**
4992 * ring_buffer_consume - return an event and consume it
4993 * @buffer: The ring buffer to get the next event from
4994 * @cpu: the cpu to read the buffer from
4995 * @ts: a variable to store the timestamp (may be NULL)
4996 * @lost_events: a variable to store if events were lost (may be NULL)
4997 *
4998 * Returns the next event in the ring buffer, and that event is consumed.
4999 * Meaning that sequential reads will keep returning a different event,
5000 * and eventually empty the ring buffer if the producer is slower.
5001 */
5002struct ring_buffer_event *
5003ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
5004		    unsigned long *lost_events)
5005{
5006	struct ring_buffer_per_cpu *cpu_buffer;
5007	struct ring_buffer_event *event = NULL;
5008	unsigned long flags;
5009	bool dolock;
5010
5011 again:
5012	/* might be called in atomic context */
5013	preempt_disable();
5014
5015	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5016		goto out;
5017
5018	cpu_buffer = buffer->buffers[cpu];
5019	local_irq_save(flags);
5020	dolock = rb_reader_lock(cpu_buffer);
5021
5022	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
5023	if (event) {
5024		cpu_buffer->lost_events = 0;
5025		rb_advance_reader(cpu_buffer);
5026	}
5027
5028	rb_reader_unlock(cpu_buffer, dolock);
5029	local_irq_restore(flags);
5030
5031 out:
5032	preempt_enable();
5033
5034	if (event && event->type_len == RINGBUF_TYPE_PADDING)
5035		goto again;
5036
5037	return event;
5038}
5039EXPORT_SYMBOL_GPL(ring_buffer_consume);
5040
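/*
 * Usage sketch (not from this file): draining one CPU's events with
 * the consuming read API. process_event() is hypothetical; the other
 * calls use the signatures shown in this file.
 */
static void process_event(void *data, unsigned int len, u64 ts);	/* hypothetical */

static void drain_cpu(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	unsigned long lost;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
		if (lost)
			pr_info("cpu%d: lost %lu events\n", cpu, lost);
		process_event(ring_buffer_event_data(event),
			      ring_buffer_event_length(event), ts);
	}
}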
5041/**
5042 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
5043 * @buffer: The ring buffer to read from
5044 * @cpu: The cpu buffer to iterate over
5045 * @flags: gfp flags to use for memory allocation
5046 *
5047 * This performs the initial preparations necessary to iterate
5048 * through the buffer.  Memory is allocated, buffer resizing
5049 * is disabled, and the iterator pointer is returned to the caller.
5050 *
5051 * Disabling buffer resizing prevents the iterator's view of the
5052 * pages from being invalidated while it is in use. This is not a
5053 * consuming read, so a producer is not expected.
5054 *
5055 * After a sequence of ring_buffer_read_prepare calls, the user is
5056 * expected to make at least one call to ring_buffer_read_prepare_sync.
5057 * Afterwards, ring_buffer_read_start is invoked to get things going
5058 * for real.
5059 *
5060 * This overall must be paired with ring_buffer_read_finish.
5061 */
5062struct ring_buffer_iter *
5063ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
5064{
5065	struct ring_buffer_per_cpu *cpu_buffer;
5066	struct ring_buffer_iter *iter;
5067
5068	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5069		return NULL;
5070
5071	iter = kzalloc(sizeof(*iter), flags);
5072	if (!iter)
5073		return NULL;
5074
5075	/* Holds the entire event: data and meta data */
5076	iter->event_size = buffer->subbuf_size;
5077	iter->event = kmalloc(iter->event_size, flags);
5078	if (!iter->event) {
5079		kfree(iter);
5080		return NULL;
5081	}
5082
5083	cpu_buffer = buffer->buffers[cpu];
5084
5085	iter->cpu_buffer = cpu_buffer;
5086
5087	atomic_inc(&cpu_buffer->resize_disabled);
5088
5089	return iter;
5090}
5091EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
5092
5093/**
5094 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
5095 *
5096 * All previously invoked ring_buffer_read_prepare calls to prepare
5097 * iterators will be synchronized.  Afterwards, ring_buffer_read_start
5098 * calls on those iterators are allowed.
5099 */
5100void
5101ring_buffer_read_prepare_sync(void)
5102{
5103	synchronize_rcu();
5104}
5105EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
5106
5107/**
5108 * ring_buffer_read_start - start a non consuming read of the buffer
5109 * @iter: The iterator returned by ring_buffer_read_prepare
5110 *
5111 * This finalizes the startup of an iteration through the buffer.
5112 * The iterator comes from a call to ring_buffer_read_prepare and
5113 * an intervening ring_buffer_read_prepare_sync must have been
5114 * performed.
5115 *
5116 * Must be paired with ring_buffer_read_finish.
5117 */
5118void
5119ring_buffer_read_start(struct ring_buffer_iter *iter)
5120{
5121	struct ring_buffer_per_cpu *cpu_buffer;
5122	unsigned long flags;
5123
5124	if (!iter)
5125		return;
5126
5127	cpu_buffer = iter->cpu_buffer;
5128
5129	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5130	arch_spin_lock(&cpu_buffer->lock);
5131	rb_iter_reset(iter);
5132	arch_spin_unlock(&cpu_buffer->lock);
5133	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5134}
5135EXPORT_SYMBOL_GPL(ring_buffer_read_start);
5136
5137/**
5138 * ring_buffer_read_finish - finish reading the iterator of the buffer
5139 * @iter: The iterator retrieved by ring_buffer_read_prepare
5140 *
5141 * This re-enables resizing of the buffer, and frees the
5142 * iterator.
5143 */
5144void
5145ring_buffer_read_finish(struct ring_buffer_iter *iter)
5146{
5147	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5148	unsigned long flags;
5149
5150	/*
5151	 * Ring buffer is disabled from recording, here's a good place
5152	 * to check the integrity of the ring buffer.
5153	 * Must prevent readers from trying to read, as the check
5154	 * clears the HEAD page and readers require it.
5155	 */
5156	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5157	rb_check_pages(cpu_buffer);
5158	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5159
5160	atomic_dec(&cpu_buffer->resize_disabled);
5161	kfree(iter->event);
5162	kfree(iter);
5163}
5164EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
5165
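/*
 * Usage sketch (not from this file) of the full non-consuming read
 * sequence: prepare, sync, start, iterate, finish.
 */
static void dump_cpu(struct trace_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
	if (!iter)
		return;

	ring_buffer_read_prepare_sync();
	ring_buffer_read_start(iter);

	while ((event = ring_buffer_iter_peek(iter, &ts))) {
		/* inspect the event here, then move on */
		ring_buffer_iter_advance(iter);
	}

	ring_buffer_read_finish(iter);
}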
5166/**
5167 * ring_buffer_iter_advance - advance the iterator to the next location
5168 * @iter: The ring buffer iterator
5169 *
5170 * Move the iterator forward one event, so that the next read
5171 * will return the event that follows the current one.
5172 */
5173void ring_buffer_iter_advance(struct ring_buffer_iter *iter)
5174{
5175	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5176	unsigned long flags;
5177
5178	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5179
5180	rb_advance_iter(iter);
5181
5182	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5183}
5184EXPORT_SYMBOL_GPL(ring_buffer_iter_advance);
5185
5186/**
5187 * ring_buffer_size - return the size of the ring buffer (in bytes)
5188 * @buffer: The ring buffer.
5189 * @cpu: The CPU to get ring buffer size from.
5190 */
5191unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
5192{
5193	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5194		return 0;
5195
5196	return buffer->subbuf_size * buffer->buffers[cpu]->nr_pages;
5197}
5198EXPORT_SYMBOL_GPL(ring_buffer_size);
5199
5200/**
5201 * ring_buffer_max_event_size - return the max data size of an event
5202 * @buffer: The ring buffer.
5203 *
5204 * Returns the maximum size an event can be.
5205 */
5206unsigned long ring_buffer_max_event_size(struct trace_buffer *buffer)
5207{
5208	/* If abs timestamp is requested, events have a timestamp too */
5209	if (ring_buffer_time_stamp_abs(buffer))
5210		return buffer->max_data_size - RB_LEN_TIME_EXTEND;
5211	return buffer->max_data_size;
5212}
5213EXPORT_SYMBOL_GPL(ring_buffer_max_event_size);
5214
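/*
 * Usage sketch (hypothetical name): reject a payload up front if it
 * can never fit in a single event.
 */
static int check_fit(struct trace_buffer *buffer, unsigned long len)
{
	if (len > ring_buffer_max_event_size(buffer))
		return -E2BIG;
	return 0;
}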
5215static void rb_clear_buffer_page(struct buffer_page *page)
5216{
5217	local_set(&page->write, 0);
5218	local_set(&page->entries, 0);
5219	rb_init_page(page->page);
5220	page->read = 0;
5221}
5222
5223static void
5224rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
5225{
5226	struct buffer_page *page;
5227
5228	rb_head_page_deactivate(cpu_buffer);
5229
5230	cpu_buffer->head_page
5231		= list_entry(cpu_buffer->pages, struct buffer_page, list);
5232	rb_clear_buffer_page(cpu_buffer->head_page);
5233	list_for_each_entry(page, cpu_buffer->pages, list) {
5234		rb_clear_buffer_page(page);
5235	}
5236
5237	cpu_buffer->tail_page = cpu_buffer->head_page;
5238	cpu_buffer->commit_page = cpu_buffer->head_page;
5239
5240	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
5241	INIT_LIST_HEAD(&cpu_buffer->new_pages);
5242	rb_clear_buffer_page(cpu_buffer->reader_page);
5243
5244	local_set(&cpu_buffer->entries_bytes, 0);
5245	local_set(&cpu_buffer->overrun, 0);
5246	local_set(&cpu_buffer->commit_overrun, 0);
5247	local_set(&cpu_buffer->dropped_events, 0);
5248	local_set(&cpu_buffer->entries, 0);
5249	local_set(&cpu_buffer->committing, 0);
5250	local_set(&cpu_buffer->commits, 0);
5251	local_set(&cpu_buffer->pages_touched, 0);
5252	local_set(&cpu_buffer->pages_lost, 0);
5253	local_set(&cpu_buffer->pages_read, 0);
5254	cpu_buffer->last_pages_touch = 0;
5255	cpu_buffer->shortest_full = 0;
5256	cpu_buffer->read = 0;
5257	cpu_buffer->read_bytes = 0;
5258
5259	rb_time_set(&cpu_buffer->write_stamp, 0);
5260	rb_time_set(&cpu_buffer->before_stamp, 0);
5261
5262	memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp));
5263
5264	cpu_buffer->lost_events = 0;
5265	cpu_buffer->last_overrun = 0;
5266
5267	rb_head_page_activate(cpu_buffer);
5268	cpu_buffer->pages_removed = 0;
5269}
5270
5271/* Must have disabled the cpu buffer then done a synchronize_rcu */
5272static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
5273{
5274	unsigned long flags;
5275
5276	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5277
5278	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
5279		goto out;
5280
5281	arch_spin_lock(&cpu_buffer->lock);
5282
5283	rb_reset_cpu(cpu_buffer);
5284
5285	arch_spin_unlock(&cpu_buffer->lock);
5286
5287 out:
5288	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5289}
5290
5291/**
5292 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
5293 * @buffer: The ring buffer to reset a per cpu buffer of
5294 * @cpu: The CPU buffer to be reset
5295 */
5296void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
5297{
5298	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5299
5300	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5301		return;
5302
5303	/* prevent another thread from changing buffer sizes */
5304	mutex_lock(&buffer->mutex);
5305
5306	atomic_inc(&cpu_buffer->resize_disabled);
5307	atomic_inc(&cpu_buffer->record_disabled);
5308
5309	/* Make sure all commits have finished */
5310	synchronize_rcu();
5311
5312	reset_disabled_cpu_buffer(cpu_buffer);
5313
5314	atomic_dec(&cpu_buffer->record_disabled);
5315	atomic_dec(&cpu_buffer->resize_disabled);
5316
5317	mutex_unlock(&buffer->mutex);
5318}
5319EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
5320
5321/* Flag to ensure proper resetting of atomic variables */
5322#define RESET_BIT	(1 << 30)
5323
5324/**
5325 * ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer
5326 * @buffer: The ring buffer to reset a per cpu buffer of
5327 */
5328void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
5329{
5330	struct ring_buffer_per_cpu *cpu_buffer;
5331	int cpu;
5332
5333	/* prevent another thread from changing buffer sizes */
5334	mutex_lock(&buffer->mutex);
5335
5336	for_each_online_buffer_cpu(buffer, cpu) {
5337		cpu_buffer = buffer->buffers[cpu];
5338
5339		atomic_add(RESET_BIT, &cpu_buffer->resize_disabled);
5340		atomic_inc(&cpu_buffer->record_disabled);
5341	}
5342
5343	/* Make sure all commits have finished */
5344	synchronize_rcu();
5345
5346	for_each_buffer_cpu(buffer, cpu) {
5347		cpu_buffer = buffer->buffers[cpu];
5348
5349		/*
5350		 * If a CPU came online during the synchronize_rcu(), then
5351		 * ignore it.
5352		 */
5353		if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT))
5354			continue;
5355
5356		reset_disabled_cpu_buffer(cpu_buffer);
5357
5358		atomic_dec(&cpu_buffer->record_disabled);
5359		atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled);
5360	}
5361
5362	mutex_unlock(&buffer->mutex);
5363}
5364
5365/**
5366 * ring_buffer_reset - reset a ring buffer
5367 * @buffer: The ring buffer to reset all cpu buffers
5368 */
5369void ring_buffer_reset(struct trace_buffer *buffer)
5370{
5371	struct ring_buffer_per_cpu *cpu_buffer;
5372	int cpu;
5373
5374	/* prevent another thread from changing buffer sizes */
5375	mutex_lock(&buffer->mutex);
5376
5377	for_each_buffer_cpu(buffer, cpu) {
5378		cpu_buffer = buffer->buffers[cpu];
5379
5380		atomic_inc(&cpu_buffer->resize_disabled);
5381		atomic_inc(&cpu_buffer->record_disabled);
5382	}
5383
5384	/* Make sure all commits have finished */
5385	synchronize_rcu();
5386
5387	for_each_buffer_cpu(buffer, cpu) {
5388		cpu_buffer = buffer->buffers[cpu];
5389
5390		reset_disabled_cpu_buffer(cpu_buffer);
5391
5392		atomic_dec(&cpu_buffer->record_disabled);
5393		atomic_dec(&cpu_buffer->resize_disabled);
5394	}
5395
5396	mutex_unlock(&buffer->mutex);
5397}
5398EXPORT_SYMBOL_GPL(ring_buffer_reset);
5399
5400/**
5401 * ring_buffer_empty - is the ring buffer empty?
5402 * @buffer: The ring buffer to test
5403 */
5404bool ring_buffer_empty(struct trace_buffer *buffer)
5405{
5406	struct ring_buffer_per_cpu *cpu_buffer;
5407	unsigned long flags;
5408	bool dolock;
5409	bool ret;
5410	int cpu;
5411
5412	/* yes this is racy, but if you don't like the race, lock the buffer */
5413	for_each_buffer_cpu(buffer, cpu) {
5414		cpu_buffer = buffer->buffers[cpu];
5415		local_irq_save(flags);
5416		dolock = rb_reader_lock(cpu_buffer);
5417		ret = rb_per_cpu_empty(cpu_buffer);
5418		rb_reader_unlock(cpu_buffer, dolock);
5419		local_irq_restore(flags);
5420
5421		if (!ret)
5422			return false;
5423	}
5424
5425	return true;
5426}
5427EXPORT_SYMBOL_GPL(ring_buffer_empty);
5428
5429/**
5430 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
5431 * @buffer: The ring buffer
5432 * @cpu: The CPU buffer to test
5433 */
5434bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
5435{
5436	struct ring_buffer_per_cpu *cpu_buffer;
5437	unsigned long flags;
5438	bool dolock;
5439	bool ret;
5440
5441	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5442		return true;
5443
5444	cpu_buffer = buffer->buffers[cpu];
5445	local_irq_save(flags);
5446	dolock = rb_reader_lock(cpu_buffer);
5447	ret = rb_per_cpu_empty(cpu_buffer);
5448	rb_reader_unlock(cpu_buffer, dolock);
5449	local_irq_restore(flags);
5450
5451	return ret;
5452}
5453EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
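/*
 * For example, a consumer might drain one CPU's buffer like this
 * (a sketch; "process" is a hypothetical callback, and the emptiness
 * check is racy against writers as noted above):
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	while (!ring_buffer_empty_cpu(buffer, cpu)) {
 *		event = ring_buffer_consume(buffer, cpu, &ts, NULL);
 *		if (!event)
 *			break;
 *		process(ring_buffer_event_data(event),
 *			ring_buffer_event_length(event));
 *	}
 */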
5454
5455#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
5456/**
5457 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
5458 * @buffer_a: One buffer to swap with
5459 * @buffer_b: The other buffer to swap with
5460 * @cpu: the CPU of the buffers to swap
5461 *
5462 * This function is useful for tracers that want to take a "snapshot"
5463 * of a CPU buffer and have another backup buffer lying around.
5464 * It is expected that the tracer handles the cpu buffer not being
5465 * used at the moment.
5466 */
5467int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
5468			 struct trace_buffer *buffer_b, int cpu)
5469{
5470	struct ring_buffer_per_cpu *cpu_buffer_a;
5471	struct ring_buffer_per_cpu *cpu_buffer_b;
5472	int ret = -EINVAL;
5473
5474	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
5475	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
5476		goto out;
5477
5478	cpu_buffer_a = buffer_a->buffers[cpu];
5479	cpu_buffer_b = buffer_b->buffers[cpu];
5480
5481	/* At least make sure the two buffers are somewhat the same */
5482	if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
5483		goto out;
5484
5485	if (buffer_a->subbuf_order != buffer_b->subbuf_order)
5486		goto out;
5487
5488	ret = -EAGAIN;
5489
5490	if (atomic_read(&buffer_a->record_disabled))
5491		goto out;
5492
5493	if (atomic_read(&buffer_b->record_disabled))
5494		goto out;
5495
5496	if (atomic_read(&cpu_buffer_a->record_disabled))
5497		goto out;
5498
5499	if (atomic_read(&cpu_buffer_b->record_disabled))
5500		goto out;
5501
5502	/*
5503	 * We can't do a synchronize_rcu here because this
5504	 * function can be called in atomic context.
5505	 * Normally this will be called from the same CPU as cpu.
5506	 * If not it's up to the caller to protect this.
5507	 */
5508	atomic_inc(&cpu_buffer_a->record_disabled);
5509	atomic_inc(&cpu_buffer_b->record_disabled);
5510
5511	ret = -EBUSY;
5512	if (local_read(&cpu_buffer_a->committing))
5513		goto out_dec;
5514	if (local_read(&cpu_buffer_b->committing))
5515		goto out_dec;
5516
5517	/*
5518	 * When resize is in progress, we cannot swap it because
5519	 * it will mess up the state of the cpu buffer.
5520	 */
5521	if (atomic_read(&buffer_a->resizing))
5522		goto out_dec;
5523	if (atomic_read(&buffer_b->resizing))
5524		goto out_dec;
5525
5526	buffer_a->buffers[cpu] = cpu_buffer_b;
5527	buffer_b->buffers[cpu] = cpu_buffer_a;
5528
5529	cpu_buffer_b->buffer = buffer_a;
5530	cpu_buffer_a->buffer = buffer_b;
5531
5532	ret = 0;
5533
5534out_dec:
5535	atomic_dec(&cpu_buffer_a->record_disabled);
5536	atomic_dec(&cpu_buffer_b->record_disabled);
5537out:
5538	return ret;
5539}
5540EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
5541#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
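/*
 * For example, a snapshot might be taken like this (a sketch;
 * "main_buffer" and "snap_buffer" are hypothetical buffers that must
 * have the same number of pages and the same sub-buffer order, per the
 * checks in ring_buffer_swap_cpu() above, and this is assumed to run
 * on the CPU being swapped):
 *
 *	int err;
 *
 *	err = ring_buffer_swap_cpu(main_buffer, snap_buffer,
 *				   smp_processor_id());
 *	if (err)
 *		return err;
 *
 * err is -EINVAL, -EAGAIN or -EBUSY on failure. On success snap_buffer
 * holds the events recorded so far on this CPU, while main_buffer keeps
 * recording into the spare pages.
 */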
5542
5543/**
5544 * ring_buffer_alloc_read_page - allocate a page to read from buffer
5545 * @buffer: the buffer to allocate for.
5546 * @cpu: the cpu buffer to allocate.
5547 *
5548 * This function is used in conjunction with ring_buffer_read_page.
5549 * When reading a full page from the ring buffer, these functions
5550 * can be used to speed up the process. The calling function should
5551 * allocate a few pages first with this function. Then when it
5552 * needs to get pages from the ring buffer, it passes the result
5553 * of this function into ring_buffer_read_page, which will swap
5554 * the page that was allocated with the read page of the buffer.
5555 *
5556 * Returns:
5557 *  The page allocated, or ERR_PTR
5558 */
5559struct buffer_data_read_page *
5560ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
5561{
5562	struct ring_buffer_per_cpu *cpu_buffer;
5563	struct buffer_data_read_page *bpage = NULL;
5564	unsigned long flags;
5565	struct page *page;
5566
5567	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5568		return ERR_PTR(-ENODEV);
5569
5570	bpage = kzalloc(sizeof(*bpage), GFP_KERNEL);
5571	if (!bpage)
5572		return ERR_PTR(-ENOMEM);
5573
5574	bpage->order = buffer->subbuf_order;
5575	cpu_buffer = buffer->buffers[cpu];
5576	local_irq_save(flags);
5577	arch_spin_lock(&cpu_buffer->lock);
5578
5579	if (cpu_buffer->free_page) {
5580		bpage->data = cpu_buffer->free_page;
5581		cpu_buffer->free_page = NULL;
5582	}
5583
5584	arch_spin_unlock(&cpu_buffer->lock);
5585	local_irq_restore(flags);
5586
5587	if (bpage->data)
5588		goto out;
5589
5590	page = alloc_pages_node(cpu_to_node(cpu),
5591				GFP_KERNEL | __GFP_NORETRY | __GFP_ZERO,
5592				cpu_buffer->buffer->subbuf_order);
5593	if (!page) {
5594		kfree(bpage);
5595		return ERR_PTR(-ENOMEM);
5596	}
5597
5598	bpage->data = page_address(page);
5599
5600 out:
5601	rb_init_page(bpage->data);
5602
5603	return bpage;
5604}
5605EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
5606
5607/**
5608 * ring_buffer_free_read_page - free an allocated read page
5609 * @buffer: the buffer the page was allocated for
5610 * @cpu: the cpu buffer the page came from
5611 * @data_page: the page to free
5612 *
5613 * Free a page allocated from ring_buffer_alloc_read_page.
5614 */
5615void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu,
5616				struct buffer_data_read_page *data_page)
5617{
5618	struct ring_buffer_per_cpu *cpu_buffer;
5619	struct buffer_data_page *bpage = data_page->data;
5620	struct page *page = virt_to_page(bpage);
5621	unsigned long flags;
5622
5623	if (!buffer || !buffer->buffers || !buffer->buffers[cpu])
5624		return;
5625
5626	cpu_buffer = buffer->buffers[cpu];
5627
5628	/*
5629	 * If the page is still in use someplace else, or if the order of
5630	 * the page differs from the sub-buffer order of the buffer, we
5631	 * can't reuse it.
5632	 */
5633	if (page_ref_count(page) > 1 || data_page->order != buffer->subbuf_order)
5634		goto out;
5635
5636	local_irq_save(flags);
5637	arch_spin_lock(&cpu_buffer->lock);
5638
5639	if (!cpu_buffer->free_page) {
5640		cpu_buffer->free_page = bpage;
5641		bpage = NULL;
5642	}
5643
5644	arch_spin_unlock(&cpu_buffer->lock);
5645	local_irq_restore(flags);
5646
5647 out:
5648	free_pages((unsigned long)bpage, data_page->order);
5649	kfree(data_page);
5650}
5651EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
5652
5653/**
5654 * ring_buffer_read_page - extract a page from the ring buffer
5655 * @buffer: buffer to extract from
5656 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
5657 * @len: amount to extract
5658 * @cpu: the cpu of the buffer to extract
5659 * @full: should the extraction only happen when the page is full.
5660 *
5661 * This function will pull out a page from the ring buffer and consume it.
5662 * @data_page must be the address of the variable that was returned
5663 * from ring_buffer_alloc_read_page. This is because the page might be used
5664 * to swap with a page in the ring buffer.
5665 *
5666 * for example:
5667 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
5668 *	if (IS_ERR(rpage))
5669 *		return PTR_ERR(rpage);
5670 *	ret = ring_buffer_read_page(buffer, rpage, len, cpu, 0);
5671 *	if (ret >= 0)
5672 *		process_page(ring_buffer_read_page_data(rpage), ret);
5673 *	ring_buffer_free_read_page(buffer, cpu, rpage);
5674 *
5675 * When @full is set, the read will fail (returning -1) unless the
5676 * writer is off the reader page.
5677 *
5678 * Note: it is up to the calling functions to handle sleeps and wakeups.
5679 *  The ring buffer can be used anywhere in the kernel and can not
5680 *  blindly call wake_up. The layer that uses the ring buffer must be
5681 *  responsible for that.
5682 *
5683 * Returns:
5684 *  >=0 if data has been transferred, returns the offset of consumed data.
5685 *  <0 if no data has been transferred.
5686 */
5687int ring_buffer_read_page(struct trace_buffer *buffer,
5688			  struct buffer_data_read_page *data_page,
5689			  size_t len, int cpu, int full)
5690{
5691	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5692	struct ring_buffer_event *event;
5693	struct buffer_data_page *bpage;
5694	struct buffer_page *reader;
5695	unsigned long missed_events;
5696	unsigned long flags;
5697	unsigned int commit;
5698	unsigned int read;
5699	u64 save_timestamp;
5700	int ret = -1;
5701
5702	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5703		goto out;
5704
5705	/*
5706	 * If len is not big enough to hold the page header, then
5707	 * we can not copy anything.
5708	 */
5709	if (len <= BUF_PAGE_HDR_SIZE)
5710		goto out;
5711
5712	len -= BUF_PAGE_HDR_SIZE;
5713
5714	if (!data_page || !data_page->data)
5715		goto out;
5716	if (data_page->order != buffer->subbuf_order)
5717		goto out;
5718
5719	bpage = data_page->data;
5720	if (!bpage)
5721		goto out;
5722
5723	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5724
5725	reader = rb_get_reader_page(cpu_buffer);
5726	if (!reader)
5727		goto out_unlock;
5728
5729	event = rb_reader_event(cpu_buffer);
5730
5731	read = reader->read;
5732	commit = rb_page_commit(reader);
5733
5734	/* Check if any events were dropped */
5735	missed_events = cpu_buffer->lost_events;
5736
5737	/*
5738	 * If this page has been partially read or
5739	 * if len is not big enough to read the rest of the page or
5740	 * a writer is still on the page, then
5741	 * we must copy the data from the page to the buffer.
5742	 * Otherwise, we can simply swap the page with the one passed in.
5743	 */
5744	if (read || (len < (commit - read)) ||
5745	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
5746		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
5747		unsigned int rpos = read;
5748		unsigned int pos = 0;
5749		unsigned int size;
5750
5751		/*
5752		 * If a full page is expected, it can still be returned
5753		 * when there has been a previous partial read, the rest
5754		 * of the page can now be read, and the commit page is
5755		 * off the reader page.
5756		 */
5757		if (full &&
5758		    (!read || (len < (commit - read)) ||
5759		     cpu_buffer->reader_page == cpu_buffer->commit_page))
5760			goto out_unlock;
5761
5762		if (len > (commit - read))
5763			len = (commit - read);
5764
5765		/* Always keep the time extend and data together */
5766		size = rb_event_ts_length(event);
5767
5768		if (len < size)
5769			goto out_unlock;
5770
5771		/* save the current timestamp, since the user will need it */
5772		save_timestamp = cpu_buffer->read_stamp;
5773
5774		/* Need to copy one event at a time */
5775		do {
5776			/* We need the size of one event, because
5777			 * rb_advance_reader only advances by one event,
5778			 * whereas rb_event_ts_length may include the size of
5779			 * one or two events.
5780			 * We have already ensured there's enough space if this
5781			 * is a time extend. */
5782			size = rb_event_length(event);
5783			memcpy(bpage->data + pos, rpage->data + rpos, size);
5784
5785			len -= size;
5786
5787			rb_advance_reader(cpu_buffer);
5788			rpos = reader->read;
5789			pos += size;
5790
5791			if (rpos >= commit)
5792				break;
5793
5794			event = rb_reader_event(cpu_buffer);
5795			/* Always keep the time extend and data together */
5796			size = rb_event_ts_length(event);
5797		} while (len >= size);
5798
5799		/* update bpage */
5800		local_set(&bpage->commit, pos);
5801		bpage->time_stamp = save_timestamp;
5802
5803		/* we copied everything to the beginning */
5804		read = 0;
5805	} else {
5806		/* update the entry counter */
5807		cpu_buffer->read += rb_page_entries(reader);
5808		cpu_buffer->read_bytes += rb_page_commit(reader);
5809
5810		/* swap the pages */
5811		rb_init_page(bpage);
5812		bpage = reader->page;
5813		reader->page = data_page->data;
5814		local_set(&reader->write, 0);
5815		local_set(&reader->entries, 0);
5816		reader->read = 0;
5817		data_page->data = bpage;
5818
5819		/*
5820		 * Use the real_end for the data size.
5821		 * This gives us a chance to store the lost events
5822		 * on the page.
5823		 */
5824		if (reader->real_end)
5825			local_set(&bpage->commit, reader->real_end);
5826	}
5827	ret = read;
5828
5829	cpu_buffer->lost_events = 0;
5830
5831	commit = local_read(&bpage->commit);
5832	/*
5833	 * Set a flag in the commit field if we lost events
5834	 */
5835	if (missed_events) {
5836		/* If there is room at the end of the page to save the
5837		 * missed events, then record it there.
5838		 */
5839		if (buffer->subbuf_size - commit >= sizeof(missed_events)) {
5840			memcpy(&bpage->data[commit], &missed_events,
5841			       sizeof(missed_events));
5842			local_add(RB_MISSED_STORED, &bpage->commit);
5843			commit += sizeof(missed_events);
5844		}
5845		local_add(RB_MISSED_EVENTS, &bpage->commit);
5846	}
5847
5848	/*
5849	 * This page may be off to user land. Zero it out here.
5850	 */
5851	if (commit < buffer->subbuf_size)
5852		memset(&bpage->data[commit], 0, buffer->subbuf_size - commit);
5853
5854 out_unlock:
5855	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5856
5857 out:
5858	return ret;
5859}
5860EXPORT_SYMBOL_GPL(ring_buffer_read_page);
5861
5862/**
5863 * ring_buffer_read_page_data - get pointer to the data in the page.
5864 * @page:  the page to get the data from
5865 *
5866 * Returns pointer to the actual data in this page.
5867 */
5868void *ring_buffer_read_page_data(struct buffer_data_read_page *page)
5869{
5870	return page->data;
5871}
5872EXPORT_SYMBOL_GPL(ring_buffer_read_page_data);
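/*
 * For example, a consumer of ring_buffer_read_page() might decode the
 * lost-event information recorded above like this (a sketch; it mirrors
 * what ring_buffer_read_page() writes, using the RB_MISSED_EVENTS and
 * RB_MISSED_STORED flags defined earlier in this file):
 *
 *	struct buffer_data_page *bpage = ring_buffer_read_page_data(rpage);
 *	unsigned long commit = local_read(&bpage->commit);
 *	unsigned long missed = 0;
 *	unsigned long len = commit & ~(RB_MISSED_EVENTS | RB_MISSED_STORED);
 *
 *	if (commit & RB_MISSED_EVENTS) {
 *		if (commit & RB_MISSED_STORED)
 *			memcpy(&missed, &bpage->data[len], sizeof(missed));
 *		else
 *			missed = -1UL;
 *	}
 *
 * If RB_MISSED_STORED is not set, events were lost but the count did
 * not fit on the page, hence the -1UL sentinel in this sketch.
 */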
5873
5874/**
5875 * ring_buffer_subbuf_size_get - get size of the sub buffer.
5876 * @buffer: the buffer to get the sub buffer size from
5877 *
5878 * Returns size of the sub buffer, in bytes.
5879 */
5880int ring_buffer_subbuf_size_get(struct trace_buffer *buffer)
5881{
5882	return buffer->subbuf_size + BUF_PAGE_HDR_SIZE;
5883}
5884EXPORT_SYMBOL_GPL(ring_buffer_subbuf_size_get);
5885
5886/**
5887 * ring_buffer_subbuf_order_get - get order of system sub pages in one buffer page.
5888 * @buffer: The ring_buffer to get the system sub page order from
5889 *
5890 * By default, one ring buffer sub page equals one system page. This parameter
5891 * is configurable, per ring buffer. The size of a ring buffer sub page can be
5892 * extended, but must be a power-of-two multiple of the system page size.
5893 *
5894 * Returns the order of buffer sub page size, in system pages:
5895 * 0 means the sub buffer size is 1 system page and so forth.
5896 * In case of an error < 0 is returned.
5897 */
5898int ring_buffer_subbuf_order_get(struct trace_buffer *buffer)
5899{
5900	if (!buffer)
5901		return -EINVAL;
5902
5903	return buffer->subbuf_order;
5904}
5905EXPORT_SYMBOL_GPL(ring_buffer_subbuf_order_get);
5906
5907/**
5908 * ring_buffer_subbuf_order_set - set the size of ring buffer sub page.
5909 * @buffer: The ring_buffer to set the new page size.
5910 * @order: Order of the system pages in one sub buffer page
5911 *
5912 * By default, one ring buffer page equals one system page. This API can be
5913 * used to set a new size of the ring buffer page. The size must be a
5914 * power-of-two multiple of the system page size, which is why the input
5915 * parameter @order is the order of the system pages allocated for one
5916 * ring buffer page:
5917 *  0 - 1 system page
5918 *  1 - 2 system pages
5919 *  2 - 4 system pages
5919 *  ...
5920 *
5921 * Returns 0 on success or < 0 in case of an error.
5922 */
5923int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
5924{
5925	struct ring_buffer_per_cpu *cpu_buffer;
5926	struct buffer_page *bpage, *tmp;
5927	int old_order, old_size;
5928	int nr_pages;
5929	int psize;
5930	int err;
5931	int cpu;
5932
5933	if (!buffer || order < 0)
5934		return -EINVAL;
5935
5936	if (buffer->subbuf_order == order)
5937		return 0;
5938
5939	psize = (1 << order) * PAGE_SIZE;
5940	if (psize <= BUF_PAGE_HDR_SIZE)
5941		return -EINVAL;
5942
5943	/* Size of a subbuf cannot be greater than the write counter */
5944	if (psize > RB_WRITE_MASK + 1)
5945		return -EINVAL;
5946
5947	old_order = buffer->subbuf_order;
5948	old_size = buffer->subbuf_size;
5949
5950	/* prevent another thread from changing buffer sizes */
5951	mutex_lock(&buffer->mutex);
5952	atomic_inc(&buffer->record_disabled);
5953
5954	/* Make sure all commits have finished */
5955	synchronize_rcu();
5956
5957	buffer->subbuf_order = order;
5958	buffer->subbuf_size = psize - BUF_PAGE_HDR_SIZE;
5959
5960	/* Make sure all new buffers are allocated, before deleting the old ones */
5961	for_each_buffer_cpu(buffer, cpu) {
5962
5963		if (!cpumask_test_cpu(cpu, buffer->cpumask))
5964			continue;
5965
5966		cpu_buffer = buffer->buffers[cpu];
5967
5968		/* Update the number of pages to match the new size */
5969		nr_pages = old_size * buffer->buffers[cpu]->nr_pages;
5970		nr_pages = DIV_ROUND_UP(nr_pages, buffer->subbuf_size);
5971
5972		/* we need a minimum of two pages */
5973		if (nr_pages < 2)
5974			nr_pages = 2;
5975
5976		cpu_buffer->nr_pages_to_update = nr_pages;
5977
5978		/* Include the reader page */
5979		nr_pages++;
5980
5981		/* Allocate the new size buffer */
5982		INIT_LIST_HEAD(&cpu_buffer->new_pages);
5983		if (__rb_allocate_pages(cpu_buffer, nr_pages,
5984					&cpu_buffer->new_pages)) {
5985			/* not enough memory for new pages */
5986			err = -ENOMEM;
5987			goto error;
5988		}
5989	}
5990
5991	for_each_buffer_cpu(buffer, cpu) {
5992
5993		if (!cpumask_test_cpu(cpu, buffer->cpumask))
5994			continue;
5995
5996		cpu_buffer = buffer->buffers[cpu];
5997
5998		/* Clear the head bit to make the link list normal to read */
5999		rb_head_page_deactivate(cpu_buffer);
6000
6001		/* Now walk the list and free all the old sub buffers */
6002		list_for_each_entry_safe(bpage, tmp, cpu_buffer->pages, list) {
6003			list_del_init(&bpage->list);
6004			free_buffer_page(bpage);
6005		}
6006		/* The above loop stopped on the last page needing to be freed */
6007		bpage = list_entry(cpu_buffer->pages, struct buffer_page, list);
6008		free_buffer_page(bpage);
6009
6010		/* Free the current reader page */
6011		free_buffer_page(cpu_buffer->reader_page);
6012
6013		/* One page was allocated for the reader page */
6014		cpu_buffer->reader_page = list_entry(cpu_buffer->new_pages.next,
6015						     struct buffer_page, list);
6016		list_del_init(&cpu_buffer->reader_page->list);
6017
6018		/* The cpu_buffer pages are a linked list with no head */
6019		cpu_buffer->pages = cpu_buffer->new_pages.next;
6020		cpu_buffer->new_pages.next->prev = cpu_buffer->new_pages.prev;
6021		cpu_buffer->new_pages.prev->next = cpu_buffer->new_pages.next;
6022
6023		/* Clear the new_pages list */
6024		INIT_LIST_HEAD(&cpu_buffer->new_pages);
6025
6026		cpu_buffer->head_page
6027			= list_entry(cpu_buffer->pages, struct buffer_page, list);
6028		cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
6029
6030		cpu_buffer->nr_pages = cpu_buffer->nr_pages_to_update;
6031		cpu_buffer->nr_pages_to_update = 0;
6032
6033		free_pages((unsigned long)cpu_buffer->free_page, old_order);
6034		cpu_buffer->free_page = NULL;
6035
6036		rb_head_page_activate(cpu_buffer);
6037
6038		rb_check_pages(cpu_buffer);
6039	}
6040
6041	atomic_dec(&buffer->record_disabled);
6042	mutex_unlock(&buffer->mutex);
6043
6044	return 0;
6045
6046error:
6047	buffer->subbuf_order = old_order;
6048	buffer->subbuf_size = old_size;
6049
6050	atomic_dec(&buffer->record_disabled);
6051	mutex_unlock(&buffer->mutex);
6052
6053	for_each_buffer_cpu(buffer, cpu) {
6054		cpu_buffer = buffer->buffers[cpu];
6055
6056		if (!cpu_buffer->nr_pages_to_update)
6057			continue;
6058
6059		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, list) {
6060			list_del_init(&bpage->list);
6061			free_buffer_page(bpage);
6062		}
6063	}
6064
6065	return err;
6066}
6067EXPORT_SYMBOL_GPL(ring_buffer_subbuf_order_set);
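/*
 * For example (a sketch): switching a buffer to two-page sub-buffers
 * on a system with 4K pages gives 8K sub-buffers, of which
 * BUF_PAGE_HDR_SIZE bytes are the header:
 *
 *	int err;
 *
 *	err = ring_buffer_subbuf_order_set(buffer, 1);
 *	if (!err)
 *		pr_info("subbuf size is now %d\n",
 *			ring_buffer_subbuf_size_get(buffer));
 *
 * ring_buffer_subbuf_size_get() reports the full sub-buffer size
 * (8192 here), header included.
 */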
6068
6069/*
6070 * We only allocate new buffers, never free them if the CPU goes down.
6071 * If we were to free the buffer, then the user would lose any trace that was in
6072 * the buffer.
6073 */
6074int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
6075{
6076	struct trace_buffer *buffer;
6077	long nr_pages_same;
6078	int cpu_i;
6079	unsigned long nr_pages;
6080
6081	buffer = container_of(node, struct trace_buffer, node);
6082	if (cpumask_test_cpu(cpu, buffer->cpumask))
6083		return 0;
6084
6085	nr_pages = 0;
6086	nr_pages_same = 1;
6087	/* check if all cpu sizes are same */
6088	for_each_buffer_cpu(buffer, cpu_i) {
6089		/* fill in the size from first enabled cpu */
6090		if (nr_pages == 0)
6091			nr_pages = buffer->buffers[cpu_i]->nr_pages;
6092		if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
6093			nr_pages_same = 0;
6094			break;
6095		}
6096	}
6097	/* allocate minimum pages, user can later expand it */
6098	if (!nr_pages_same)
6099		nr_pages = 2;
6100	buffer->buffers[cpu] =
6101		rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
6102	if (!buffer->buffers[cpu]) {
6103		WARN(1, "failed to allocate ring buffer on CPU %u\n",
6104		     cpu);
6105		return -ENOMEM;
6106	}
6107	smp_wmb();
6108	cpumask_set_cpu(cpu, buffer->cpumask);
6109	return 0;
6110}
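/*
 * This callback is hooked into the CPU hotplug state machine from the
 * allocation path (outside this excerpt), roughly (a sketch of the
 * allocation code; the exact call site is not shown here):
 *
 *	cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
 *
 * so trace_rb_cpu_prepare() runs for each CPU that comes online after
 * the buffer was allocated.
 */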
6111
6112#ifdef CONFIG_RING_BUFFER_STARTUP_TEST
6113/*
6114 * This is a basic integrity check of the ring buffer.
6115 * Late in the boot cycle this test will run when configured in.
6116 * It will kick off a thread per CPU that will go into a loop
6117 * writing to the per cpu ring buffer various sizes of data.
6118 * Some of the data will be large items, some small.
6119 *
6120 * Another thread is created that goes into a spin, sending out
6121 * IPIs to the other CPUs to also write into the ring buffer.
6122 * This is to test the nesting ability of the buffer.
6123 *
6124 * Basic stats are recorded and reported. If something in the
6125 * ring buffer should happen that's not expected, a big warning
6126 * is displayed and all ring buffers are disabled.
6127 */
6128static struct task_struct *rb_threads[NR_CPUS] __initdata;
6129
6130struct rb_test_data {
6131	struct trace_buffer *buffer;
6132	unsigned long		events;
6133	unsigned long		bytes_written;
6134	unsigned long		bytes_alloc;
6135	unsigned long		bytes_dropped;
6136	unsigned long		events_nested;
6137	unsigned long		bytes_written_nested;
6138	unsigned long		bytes_alloc_nested;
6139	unsigned long		bytes_dropped_nested;
6140	int			min_size_nested;
6141	int			max_size_nested;
6142	int			max_size;
6143	int			min_size;
6144	int			cpu;
6145	int			cnt;
6146};
6147
6148static struct rb_test_data rb_data[NR_CPUS] __initdata;
6149
6150/* 1 meg per cpu */
6151#define RB_TEST_BUFFER_SIZE	1048576
6152
6153static char rb_string[] __initdata =
6154	"abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
6155	"?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
6156	"!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
6157
6158static bool rb_test_started __initdata;
6159
6160struct rb_item {
6161	int size;
6162	char str[];
6163};
6164
6165static __init int rb_write_something(struct rb_test_data *data, bool nested)
6166{
6167	struct ring_buffer_event *event;
6168	struct rb_item *item;
6169	bool started;
6170	int event_len;
6171	int size;
6172	int len;
6173	int cnt;
6174
6175	/* Have nested writes different than what is written */
6176	cnt = data->cnt + (nested ? 27 : 0);
6177
6178	/* Multiply cnt by ~e, to make some unique increment */
6179	size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
6180
6181	len = size + sizeof(struct rb_item);
6182
6183	started = rb_test_started;
6184	/* read rb_test_started before checking buffer enabled */
6185	smp_rmb();
6186
6187	event = ring_buffer_lock_reserve(data->buffer, len);
6188	if (!event) {
6189		/* Ignore dropped events before test starts. */
6190		if (started) {
6191			if (nested)
6192				data->bytes_dropped_nested += len;
6193			else
6194				data->bytes_dropped += len;
6195		}
6196		return len;
6197	}
6198
6199	event_len = ring_buffer_event_length(event);
6200
6201	if (RB_WARN_ON(data->buffer, event_len < len))
6202		goto out;
6203
6204	item = ring_buffer_event_data(event);
6205	item->size = size;
6206	memcpy(item->str, rb_string, size);
6207
6208	if (nested) {
6209		data->bytes_alloc_nested += event_len;
6210		data->bytes_written_nested += len;
6211		data->events_nested++;
6212		if (!data->min_size_nested || len < data->min_size_nested)
6213			data->min_size_nested = len;
6214		if (len > data->max_size_nested)
6215			data->max_size_nested = len;
6216	} else {
6217		data->bytes_alloc += event_len;
6218		data->bytes_written += len;
6219		data->events++;
6220		if (!data->min_size || len < data->min_size)
6221			data->min_size = len;
6222		if (len > data->max_size)
6223			data->max_size = len;
6224	}
6225
6226 out:
6227	ring_buffer_unlock_commit(data->buffer);
6228
6229	return 0;
6230}
6231
6232static __init int rb_test(void *arg)
6233{
6234	struct rb_test_data *data = arg;
6235
6236	while (!kthread_should_stop()) {
6237		rb_write_something(data, false);
6238		data->cnt++;
6239
6240		set_current_state(TASK_INTERRUPTIBLE);
6241		/* Now sleep between a min of 100-300us and a max of 1ms */
6242		usleep_range(((data->cnt % 3) + 1) * 100, 1000);
6243	}
6244
6245	return 0;
6246}
6247
6248static __init void rb_ipi(void *ignore)
6249{
6250	struct rb_test_data *data;
6251	int cpu = smp_processor_id();
6252
6253	data = &rb_data[cpu];
6254	rb_write_something(data, true);
6255}
6256
6257static __init int rb_hammer_test(void *arg)
6258{
6259	while (!kthread_should_stop()) {
6260
6261		/* Send an IPI to all cpus to write data! */
6262		smp_call_function(rb_ipi, NULL, 1);
6263		/* No sleep, but for non preempt, let others run */
6264		schedule();
6265	}
6266
6267	return 0;
6268}
6269
6270static __init int test_ringbuffer(void)
6271{
6272	struct task_struct *rb_hammer;
6273	struct trace_buffer *buffer;
6274	int cpu;
6275	int ret = 0;
6276
6277	if (security_locked_down(LOCKDOWN_TRACEFS)) {
6278		pr_warn("Lockdown is enabled, skipping ring buffer tests\n");
6279		return 0;
6280	}
6281
6282	pr_info("Running ring buffer tests...\n");
6283
6284	buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
6285	if (WARN_ON(!buffer))
6286		return 0;
6287
6288	/* Disable buffer so that threads can't write to it yet */
6289	ring_buffer_record_off(buffer);
6290
6291	for_each_online_cpu(cpu) {
6292		rb_data[cpu].buffer = buffer;
6293		rb_data[cpu].cpu = cpu;
6294		rb_data[cpu].cnt = cpu;
6295		rb_threads[cpu] = kthread_run_on_cpu(rb_test, &rb_data[cpu],
6296						     cpu, "rbtester/%u");
6297		if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
6298			pr_cont("FAILED\n");
6299			ret = PTR_ERR(rb_threads[cpu]);
6300			goto out_free;
6301		}
6302	}
6303
6304	/* Now create the rb hammer! */
6305	rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
6306	if (WARN_ON(IS_ERR(rb_hammer))) {
6307		pr_cont("FAILED\n");
6308		ret = PTR_ERR(rb_hammer);
6309		goto out_free;
6310	}
6311
6312	ring_buffer_record_on(buffer);
6313	/*
6314	 * Show buffer is enabled before setting rb_test_started.
6315	 * Yes there's a small race window where events could be
6316	 * dropped and the thread won't catch it. But when a ring
6317	 * buffer gets enabled, there will always be some kind of
6318	 * delay before other CPUs see it. Thus, we don't care about
6319	 * those dropped events. We care about events dropped after
6320	 * the threads see that the buffer is active.
6321	 */
6322	smp_wmb();
6323	rb_test_started = true;
6324
6325	set_current_state(TASK_INTERRUPTIBLE);
6326	/* Just run for 10 seconds */
6327	schedule_timeout(10 * HZ);
6328
6329	kthread_stop(rb_hammer);
6330
6331 out_free:
6332	for_each_online_cpu(cpu) {
6333		if (!rb_threads[cpu])
6334			break;
6335		kthread_stop(rb_threads[cpu]);
6336	}
6337	if (ret) {
6338		ring_buffer_free(buffer);
6339		return ret;
6340	}
6341
6342	/* Report! */
6343	pr_info("finished\n");
6344	for_each_online_cpu(cpu) {
6345		struct ring_buffer_event *event;
6346		struct rb_test_data *data = &rb_data[cpu];
6347		struct rb_item *item;
6348		unsigned long total_events;
6349		unsigned long total_dropped;
6350		unsigned long total_written;
6351		unsigned long total_alloc;
6352		unsigned long total_read = 0;
6353		unsigned long total_size = 0;
6354		unsigned long total_len = 0;
6355		unsigned long total_lost = 0;
6356		unsigned long lost;
6357		int big_event_size;
6358		int small_event_size;
6359
6360		ret = -1;
6361
6362		total_events = data->events + data->events_nested;
6363		total_written = data->bytes_written + data->bytes_written_nested;
6364		total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
6365		total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
6366
6367		big_event_size = data->max_size + data->max_size_nested;
6368		small_event_size = data->min_size + data->min_size_nested;
6369
6370		pr_info("CPU %d:\n", cpu);
6371		pr_info("              events:    %ld\n", total_events);
6372		pr_info("       dropped bytes:    %ld\n", total_dropped);
6373		pr_info("       alloced bytes:    %ld\n", total_alloc);
6374		pr_info("       written bytes:    %ld\n", total_written);
6375		pr_info("       biggest event:    %d\n", big_event_size);
6376		pr_info("      smallest event:    %d\n", small_event_size);
6377
6378		if (RB_WARN_ON(buffer, total_dropped))
6379			break;
6380
6381		ret = 0;
6382
6383		while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
6384			total_lost += lost;
6385			item = ring_buffer_event_data(event);
6386			total_len += ring_buffer_event_length(event);
6387			total_size += item->size + sizeof(struct rb_item);
6388			if (memcmp(&item->str[0], rb_string, item->size) != 0) {
6389				pr_info("FAILED!\n");
6390				pr_info("buffer had: %.*s\n", item->size, item->str);
6391				pr_info("expected:   %.*s\n", item->size, rb_string);
6392				RB_WARN_ON(buffer, 1);
6393				ret = -1;
6394				break;
6395			}
6396			total_read++;
6397		}
6398		if (ret)
6399			break;
6400
6401		ret = -1;
6402
6403		pr_info("         read events:   %ld\n", total_read);
6404		pr_info("         lost events:   %ld\n", total_lost);
6405		pr_info("        total events:   %ld\n", total_lost + total_read);
6406		pr_info("  recorded len bytes:   %ld\n", total_len);
6407		pr_info(" recorded size bytes:   %ld\n", total_size);
6408		if (total_lost) {
6409			pr_info(" With dropped events, record len and size may not match\n"
6410				" alloced and written from above\n");
6411		} else {
6412			if (RB_WARN_ON(buffer, total_len != total_alloc ||
6413				       total_size != total_written))
6414				break;
6415		}
6416		if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
6417			break;
6418
6419		ret = 0;
6420	}
6421	if (!ret)
6422		pr_info("Ring buffer PASSED!\n");
6423
6424	ring_buffer_free(buffer);
6425	return 0;
6426}
6427
6428late_initcall(test_ringbuffer);
6429#endif /* CONFIG_RING_BUFFER_STARTUP_TEST */
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Generic ring buffer
   4 *
   5 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
   6 */
   7#include <linux/trace_recursion.h>
   8#include <linux/trace_events.h>
   9#include <linux/ring_buffer.h>
  10#include <linux/trace_clock.h>
  11#include <linux/sched/clock.h>
  12#include <linux/trace_seq.h>
  13#include <linux/spinlock.h>
  14#include <linux/irq_work.h>
  15#include <linux/security.h>
  16#include <linux/uaccess.h>
  17#include <linux/hardirq.h>
  18#include <linux/kthread.h>	/* for self test */
  19#include <linux/module.h>
  20#include <linux/percpu.h>
  21#include <linux/mutex.h>
  22#include <linux/delay.h>
  23#include <linux/slab.h>
  24#include <linux/init.h>
  25#include <linux/hash.h>
  26#include <linux/list.h>
  27#include <linux/cpu.h>
  28#include <linux/oom.h>
  29
  30#include <asm/local64.h>
  31#include <asm/local.h>
  32
  33/*
  34 * The "absolute" timestamp in the buffer is only 59 bits.
  35 * If a clock has the 5 MSBs set, it needs to be saved and
  36 * reinserted.
  37 */
  38#define TS_MSB		(0xf8ULL << 56)
  39#define ABS_TS_MASK	(~TS_MSB)
  40
  41static void update_pages_handler(struct work_struct *work);
  42
  43/*
  44 * The ring buffer header is special. We must manually up keep it.
  45 */
  46int ring_buffer_print_entry_header(struct trace_seq *s)
  47{
  48	trace_seq_puts(s, "# compressed entry header\n");
  49	trace_seq_puts(s, "\ttype_len    :    5 bits\n");
  50	trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
  51	trace_seq_puts(s, "\tarray       :   32 bits\n");
  52	trace_seq_putc(s, '\n');
  53	trace_seq_printf(s, "\tpadding     : type == %d\n",
  54			 RINGBUF_TYPE_PADDING);
  55	trace_seq_printf(s, "\ttime_extend : type == %d\n",
  56			 RINGBUF_TYPE_TIME_EXTEND);
  57	trace_seq_printf(s, "\ttime_stamp : type == %d\n",
  58			 RINGBUF_TYPE_TIME_STAMP);
  59	trace_seq_printf(s, "\tdata max type_len  == %d\n",
  60			 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
  61
  62	return !trace_seq_has_overflowed(s);
  63}
  64
  65/*
  66 * The ring buffer is made up of a list of pages. A separate list of pages is
  67 * allocated for each CPU. A writer may only write to a buffer that is
  68 * associated with the CPU it is currently executing on.  A reader may read
  69 * from any per cpu buffer.
  70 *
  71 * The reader is special. For each per cpu buffer, the reader has its own
  72 * reader page. When a reader has read the entire reader page, this reader
  73 * page is swapped with another page in the ring buffer.
  74 *
  75 * Now, as long as the writer is off the reader page, the reader can do what
  76 * ever it wants with that page. The writer will never write to that page
  77 * again (as long as it is out of the ring buffer).
  78 *
  79 * Here's some silly ASCII art.
  80 *
  81 *   +------+
  82 *   |reader|          RING BUFFER
  83 *   |page  |
  84 *   +------+        +---+   +---+   +---+
  85 *                   |   |-->|   |-->|   |
  86 *                   +---+   +---+   +---+
  87 *                     ^               |
  88 *                     |               |
  89 *                     +---------------+
  90 *
  91 *
  92 *   +------+
  93 *   |reader|          RING BUFFER
  94 *   |page  |------------------v
  95 *   +------+        +---+   +---+   +---+
  96 *                   |   |-->|   |-->|   |
  97 *                   +---+   +---+   +---+
  98 *                     ^               |
  99 *                     |               |
 100 *                     +---------------+
 101 *
 102 *
 103 *   +------+
 104 *   |reader|          RING BUFFER
 105 *   |page  |------------------v
 106 *   +------+        +---+   +---+   +---+
 107 *      ^            |   |-->|   |-->|   |
 108 *      |            +---+   +---+   +---+
 109 *      |                              |
 110 *      |                              |
 111 *      +------------------------------+
 112 *
 113 *
 114 *   +------+
 115 *   |buffer|          RING BUFFER
 116 *   |page  |------------------v
 117 *   +------+        +---+   +---+   +---+
 118 *      ^            |   |   |   |-->|   |
 119 *      |   New      +---+   +---+   +---+
 120 *      |  Reader------^               |
 121 *      |   page                       |
 122 *      +------------------------------+
 123 *
 124 *
 125 * After we make this swap, the reader can hand this page off to the splice
 126 * code and be done with it. It can even allocate a new page if it needs to
 127 * and swap that into the ring buffer.
 128 *
 129 * We will be using cmpxchg soon to make all this lockless.
 130 *
 131 */
 132
 133/* Used for individual buffers (after the counter) */
 134#define RB_BUFFER_OFF		(1 << 20)
 135
 136#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
 137
 138#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
 139#define RB_ALIGNMENT		4U
 140#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 141#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */
 142
 143#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
 144# define RB_FORCE_8BYTE_ALIGNMENT	0
 145# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
 146#else
 147# define RB_FORCE_8BYTE_ALIGNMENT	1
 148# define RB_ARCH_ALIGNMENT		8U
 149#endif
 150
 151#define RB_ALIGN_DATA		__aligned(RB_ARCH_ALIGNMENT)
 152
 153/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
 154#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
 155
 156enum {
 157	RB_LEN_TIME_EXTEND = 8,
 158	RB_LEN_TIME_STAMP =  8,
 159};
 160
 161#define skip_time_extend(event) \
 162	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
 163
 164#define extended_time(event) \
 165	(event->type_len >= RINGBUF_TYPE_TIME_EXTEND)
 166
 167static inline bool rb_null_event(struct ring_buffer_event *event)
 168{
 169	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
 170}
 171
 172static void rb_event_set_padding(struct ring_buffer_event *event)
 173{
 174	/* padding has a NULL time_delta */
 175	event->type_len = RINGBUF_TYPE_PADDING;
 176	event->time_delta = 0;
 177}
 178
 179static unsigned
 180rb_event_data_length(struct ring_buffer_event *event)
 181{
 182	unsigned length;
 183
 184	if (event->type_len)
 185		length = event->type_len * RB_ALIGNMENT;
 186	else
 187		length = event->array[0];
 188	return length + RB_EVNT_HDR_SIZE;
 189}
 190
 191/*
 192 * Return the length of the given event. Will return
 193 * the length of the time extend if the event is a
 194 * time extend.
 195 */
 196static inline unsigned
 197rb_event_length(struct ring_buffer_event *event)
 198{
 199	switch (event->type_len) {
 200	case RINGBUF_TYPE_PADDING:
 201		if (rb_null_event(event))
 202			/* undefined */
 203			return -1;
 204		return  event->array[0] + RB_EVNT_HDR_SIZE;
 205
 206	case RINGBUF_TYPE_TIME_EXTEND:
 207		return RB_LEN_TIME_EXTEND;
 208
 209	case RINGBUF_TYPE_TIME_STAMP:
 210		return RB_LEN_TIME_STAMP;
 211
 212	case RINGBUF_TYPE_DATA:
 213		return rb_event_data_length(event);
 214	default:
 215		WARN_ON_ONCE(1);
 216	}
 217	/* not hit */
 218	return 0;
 219}
 220
 221/*
 222 * Return total length of time extend and data,
 223 *   or just the event length for all other events.
 224 */
 225static inline unsigned
 226rb_event_ts_length(struct ring_buffer_event *event)
 227{
 228	unsigned len = 0;
 229
 230	if (extended_time(event)) {
 231		/* time extends include the data event after it */
 232		len = RB_LEN_TIME_EXTEND;
 233		event = skip_time_extend(event);
 234	}
 235	return len + rb_event_length(event);
 236}
 237
 238/**
 239 * ring_buffer_event_length - return the length of the event
 240 * @event: the event to get the length of
 241 *
 242 * Returns the size of the data load of a data event.
 243 * If the event is something other than a data event, it
 244 * returns the size of the event itself. With the exception
 245 * of a TIME EXTEND, where it still returns the size of the
 246 * data load of the data event after it.
 247 */
 248unsigned ring_buffer_event_length(struct ring_buffer_event *event)
 249{
 250	unsigned length;
 251
 252	if (extended_time(event))
 253		event = skip_time_extend(event);
 254
 255	length = rb_event_length(event);
 256	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 257		return length;
 258	length -= RB_EVNT_HDR_SIZE;
 259	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
 260                length -= sizeof(event->array[0]);
 261	return length;
 262}
 263EXPORT_SYMBOL_GPL(ring_buffer_event_length);
 264
 265/* inline for ring buffer fast paths */
 266static __always_inline void *
 267rb_event_data(struct ring_buffer_event *event)
 268{
 269	if (extended_time(event))
 270		event = skip_time_extend(event);
 271	WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
 272	/* If length is in len field, then array[0] has the data */
 273	if (event->type_len)
 274		return (void *)&event->array[0];
 275	/* Otherwise length is in array[0] and array[1] has the data */
 276	return (void *)&event->array[1];
 277}
 278
 279/**
 280 * ring_buffer_event_data - return the data of the event
 281 * @event: the event to get the data from
 282 */
 283void *ring_buffer_event_data(struct ring_buffer_event *event)
 284{
 285	return rb_event_data(event);
 286}
 287EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 288
 289#define for_each_buffer_cpu(buffer, cpu)		\
 290	for_each_cpu(cpu, buffer->cpumask)
 291
 292#define for_each_online_buffer_cpu(buffer, cpu)		\
 293	for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
 294
 295#define TS_SHIFT	27
 296#define TS_MASK		((1ULL << TS_SHIFT) - 1)
 297#define TS_DELTA_TEST	(~TS_MASK)
 298
 299static u64 rb_event_time_stamp(struct ring_buffer_event *event)
 300{
 301	u64 ts;
 302
 303	ts = event->array[0];
 304	ts <<= TS_SHIFT;
 305	ts += event->time_delta;
 306
 307	return ts;
 308}
 309
 310/* Flag when events were overwritten */
 311#define RB_MISSED_EVENTS	(1 << 31)
 312/* Missed count stored at end */
 313#define RB_MISSED_STORED	(1 << 30)
 314
 315struct buffer_data_page {
 316	u64		 time_stamp;	/* page time stamp */
 317	local_t		 commit;	/* write committed index */
 318	unsigned char	 data[] RB_ALIGN_DATA;	/* data of buffer page */
 319};
 320
 321struct buffer_data_read_page {
 322	unsigned		order;	/* order of the page */
 323	struct buffer_data_page	*data;	/* actual data, stored in this page */
 324};
 325
 326/*
 327 * Note, the buffer_page list must be first. The buffer pages
 328 * are allocated in cache lines, which means that each buffer
 329 * page will be at the beginning of a cache line, and thus
 330 * the least significant bits will be zero. We use this to
 331 * add flags in the list struct pointers, to make the ring buffer
 332 * lockless.
 333 */
 334struct buffer_page {
 335	struct list_head list;		/* list of buffer pages */
 336	local_t		 write;		/* index for next write */
 337	unsigned	 read;		/* index for next read */
 338	local_t		 entries;	/* entries on this page */
 339	unsigned long	 real_end;	/* real end of data */
 340	unsigned	 order;		/* order of the page */
 341	struct buffer_data_page *page;	/* Actual data page */
 342};
 343
 344/*
 345 * The buffer page counters, write and entries, must be reset
 346 * atomically when crossing page boundaries. To synchronize this
 347 * update, two counters are inserted into the number. One is
 348 * the actual counter for the write position or count on the page.
 349 *
 350 * The other is a counter of updaters. Before an update happens
 351 * the update partition of the counter is incremented. This will
 352 * allow the updater to update the counter atomically.
 353 *
 354 * The counter is 20 bits, and the state data is 12.
 355 */
 356#define RB_WRITE_MASK		0xfffff
 357#define RB_WRITE_INTCNT		(1 << 20)
 358
 359static void rb_init_page(struct buffer_data_page *bpage)
 360{
 361	local_set(&bpage->commit, 0);
 362}
 363
 364static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
 365{
 366	return local_read(&bpage->page->commit);
 367}
 368
 369static void free_buffer_page(struct buffer_page *bpage)
 370{
 371	free_pages((unsigned long)bpage->page, bpage->order);
 372	kfree(bpage);
 373}
 374
 375/*
 376 * We need to fit the time_stamp delta into 27 bits.
 377 */
 378static inline bool test_time_stamp(u64 delta)
 379{
 380	return !!(delta & TS_DELTA_TEST);
 381}
 382
 383struct rb_irq_work {
 384	struct irq_work			work;
 385	wait_queue_head_t		waiters;
 386	wait_queue_head_t		full_waiters;
 
 387	bool				waiters_pending;
 388	bool				full_waiters_pending;
 389	bool				wakeup_full;
 390};
 391
 392/*
 393 * Structure to hold event state and handle nested events.
 394 */
 395struct rb_event_info {
 396	u64			ts;
 397	u64			delta;
 398	u64			before;
 399	u64			after;
 400	unsigned long		length;
 401	struct buffer_page	*tail_page;
 402	int			add_timestamp;
 403};
 404
 405/*
 406 * Used for the add_timestamp
 407 *  NONE
 408 *  EXTEND - wants a time extend
 409 *  ABSOLUTE - the buffer requests all events to have absolute time stamps
 410 *  FORCE - force a full time stamp.
 411 */
 412enum {
 413	RB_ADD_STAMP_NONE		= 0,
 414	RB_ADD_STAMP_EXTEND		= BIT(1),
 415	RB_ADD_STAMP_ABSOLUTE		= BIT(2),
 416	RB_ADD_STAMP_FORCE		= BIT(3)
 417};
 418/*
 419 * Used for which event context the event is in.
 420 *  TRANSITION = 0
 421 *  NMI     = 1
 422 *  IRQ     = 2
 423 *  SOFTIRQ = 3
 424 *  NORMAL  = 4
 425 *
 426 * See trace_recursive_lock() comment below for more details.
 427 */
 428enum {
 429	RB_CTX_TRANSITION,
 430	RB_CTX_NMI,
 431	RB_CTX_IRQ,
 432	RB_CTX_SOFTIRQ,
 433	RB_CTX_NORMAL,
 434	RB_CTX_MAX
 435};
 436
 437struct rb_time_struct {
 438	local64_t	time;
 439};
 440typedef struct rb_time_struct rb_time_t;
 441
 442#define MAX_NEST	5
 443
 444/*
 445 * head_page == tail_page && head == tail then buffer is empty.
 446 */
 447struct ring_buffer_per_cpu {
 448	int				cpu;
 449	atomic_t			record_disabled;
 450	atomic_t			resize_disabled;
 451	struct trace_buffer	*buffer;
 452	raw_spinlock_t			reader_lock;	/* serialize readers */
 453	arch_spinlock_t			lock;
 454	struct lock_class_key		lock_key;
 455	struct buffer_data_page		*free_page;
 456	unsigned long			nr_pages;
 457	unsigned int			current_context;
 458	struct list_head		*pages;
 459	struct buffer_page		*head_page;	/* read from head */
 460	struct buffer_page		*tail_page;	/* write to tail */
 461	struct buffer_page		*commit_page;	/* committed pages */
 462	struct buffer_page		*reader_page;
 463	unsigned long			lost_events;
 464	unsigned long			last_overrun;
 465	unsigned long			nest;
 466	local_t				entries_bytes;
 467	local_t				entries;
 468	local_t				overrun;
 469	local_t				commit_overrun;
 470	local_t				dropped_events;
 471	local_t				committing;
 472	local_t				commits;
 473	local_t				pages_touched;
 474	local_t				pages_lost;
 475	local_t				pages_read;
 476	long				last_pages_touch;
 477	size_t				shortest_full;
 478	unsigned long			read;
 479	unsigned long			read_bytes;
 480	rb_time_t			write_stamp;
 481	rb_time_t			before_stamp;
 482	u64				event_stamp[MAX_NEST];
 483	u64				read_stamp;
 484	/* pages removed since last reset */
 485	unsigned long			pages_removed;
 486	/* ring buffer pages to update, > 0 to add, < 0 to remove */
 487	long				nr_pages_to_update;
 488	struct list_head		new_pages; /* new pages to add */
 489	struct work_struct		update_pages_work;
 490	struct completion		update_done;
 491
 492	struct rb_irq_work		irq_work;
 493};
 494
 495struct trace_buffer {
 496	unsigned			flags;
 497	int				cpus;
 498	atomic_t			record_disabled;
 499	atomic_t			resizing;
 500	cpumask_var_t			cpumask;
 501
 502	struct lock_class_key		*reader_lock_key;
 503
 504	struct mutex			mutex;
 505
 506	struct ring_buffer_per_cpu	**buffers;
 507
 508	struct hlist_node		node;
 509	u64				(*clock)(void);
 510
 511	struct rb_irq_work		irq_work;
 512	bool				time_stamp_abs;
 513
 514	unsigned int			subbuf_size;
 515	unsigned int			subbuf_order;
 516	unsigned int			max_data_size;
 517};
 518
 519struct ring_buffer_iter {
 520	struct ring_buffer_per_cpu	*cpu_buffer;
 521	unsigned long			head;
 522	unsigned long			next_event;
 523	struct buffer_page		*head_page;
 524	struct buffer_page		*cache_reader_page;
 525	unsigned long			cache_read;
 526	unsigned long			cache_pages_removed;
 527	u64				read_stamp;
 528	u64				page_stamp;
 529	struct ring_buffer_event	*event;
 530	size_t				event_size;
 531	int				missed_events;
 532};
 533
 534int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq *s)
 535{
 536	struct buffer_data_page field;
 537
 538	trace_seq_printf(s, "\tfield: u64 timestamp;\t"
 539			 "offset:0;\tsize:%u;\tsigned:%u;\n",
 540			 (unsigned int)sizeof(field.time_stamp),
 541			 (unsigned int)is_signed_type(u64));
 542
 543	trace_seq_printf(s, "\tfield: local_t commit;\t"
 544			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
 545			 (unsigned int)offsetof(typeof(field), commit),
 546			 (unsigned int)sizeof(field.commit),
 547			 (unsigned int)is_signed_type(long));
 548
 549	trace_seq_printf(s, "\tfield: int overwrite;\t"
 550			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
 551			 (unsigned int)offsetof(typeof(field), commit),
 552			 1,
 553			 (unsigned int)is_signed_type(long));
 554
 555	trace_seq_printf(s, "\tfield: char data;\t"
 556			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
 557			 (unsigned int)offsetof(typeof(field), data),
 558			 (unsigned int)buffer->subbuf_size,
 559			 (unsigned int)is_signed_type(char));
 560
 561	return !trace_seq_has_overflowed(s);
 562}
 563
 564static inline void rb_time_read(rb_time_t *t, u64 *ret)
 565{
 566	*ret = local64_read(&t->time);
 567}
 568static void rb_time_set(rb_time_t *t, u64 val)
 569{
 570	local64_set(&t->time, val);
 571}
 572
 573/*
 574 * Enable this to make sure that the event passed to
 575 * ring_buffer_event_time_stamp() is not committed and also
 576 * is on the buffer that it passed in.
 577 */
 578//#define RB_VERIFY_EVENT
 579#ifdef RB_VERIFY_EVENT
 580static struct list_head *rb_list_head(struct list_head *list);
 581static void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
 582			 void *event)
 583{
 584	struct buffer_page *page = cpu_buffer->commit_page;
 585	struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page);
 586	struct list_head *next;
 587	long commit, write;
 588	unsigned long addr = (unsigned long)event;
 589	bool done = false;
 590	int stop = 0;
 591
 592	/* Make sure the event exists and is not committed yet */
 593	do {
 594		if (page == tail_page || WARN_ON_ONCE(stop++ > 100))
 595			done = true;
 596		commit = local_read(&page->page->commit);
 597		write = local_read(&page->write);
 598		if (addr >= (unsigned long)&page->page->data[commit] &&
 599		    addr < (unsigned long)&page->page->data[write])
 600			return;
 601
 602		next = rb_list_head(page->list.next);
 603		page = list_entry(next, struct buffer_page, list);
 604	} while (!done);
 605	WARN_ON_ONCE(1);
 606}
 607#else
 608static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
 609			 void *event)
 610{
 611}
 612#endif
 613
 614/*
 615 * The absolute time stamp drops the 5 MSBs and some clocks may
 616 * require them. The rb_fix_abs_ts() will take a previous full
 617 * time stamp, and add the 5 MSB of that time stamp on to the
 618 * saved absolute time stamp. Then they are compared in case of
 619 * the unlikely event that the latest time stamp incremented
 620 * the 5 MSB.
 621 */
 622static inline u64 rb_fix_abs_ts(u64 abs, u64 save_ts)
 623{
 624	if (save_ts & TS_MSB) {
 625		abs |= save_ts & TS_MSB;
 626		/* Check for overflow */
 627		if (unlikely(abs < save_ts))
 628			abs += 1ULL << 59;
 629	}
 630	return abs;
 631}
 632
 633static inline u64 rb_time_stamp(struct trace_buffer *buffer);
 634
 635/**
 636 * ring_buffer_event_time_stamp - return the event's current time stamp
 637 * @buffer: The buffer that the event is on
 638 * @event: the event to get the time stamp of
 639 *
 640 * Note, this must be called after @event is reserved, and before it is
 641 * committed to the ring buffer. And must be called from the same
 642 * context where the event was reserved (normal, softirq, irq, etc).
 643 *
 644 * Returns the time stamp associated with the current event.
 645 * If the event has an extended time stamp, then that is used as
 646 * the time stamp to return.
 647 * In the highly unlikely case that the event was nested more than
 648 * the max nesting, then the write_stamp of the buffer is returned,
 649 * otherwise  current time is returned, but that really neither of
 650 * the last two cases should ever happen.
 651 */
 652u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
 653				 struct ring_buffer_event *event)
 654{
 655	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()];
 656	unsigned int nest;
 657	u64 ts;
 658
 659	/* If the event includes an absolute time, then just use that */
 660	if (event->type_len == RINGBUF_TYPE_TIME_STAMP) {
 661		ts = rb_event_time_stamp(event);
 662		return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp);
 663	}
 664
 665	nest = local_read(&cpu_buffer->committing);
 666	verify_event(cpu_buffer, event);
 667	if (WARN_ON_ONCE(!nest))
 668		goto fail;
 669
 670	/* Read the current saved nesting level time stamp */
 671	if (likely(--nest < MAX_NEST))
 672		return cpu_buffer->event_stamp[nest];
 673
 674	/* Shouldn't happen, warn if it does */
 675	WARN_ONCE(1, "nest (%d) greater than max", nest);
 676
 677 fail:
 678	rb_time_read(&cpu_buffer->write_stamp, &ts);
 679
 680	return ts;
 681}
 682
 683/**
 684 * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
 685 * @buffer: The ring_buffer to get the number of pages from
 686 * @cpu: The cpu of the ring_buffer to get the number of pages from
 687 *
 688 * Returns the number of pages used by a per_cpu buffer of the ring buffer.
 689 */
 690size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
 691{
 692	return buffer->buffers[cpu]->nr_pages;
 693}
 694
 695/**
 696 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
 697 * @buffer: The ring_buffer to get the number of pages from
 698 * @cpu: The cpu of the ring_buffer to get the number of pages from
 699 *
 700 * Returns the number of pages that have content in the ring buffer.
 701 */
 702size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
 703{
 704	size_t read;
 705	size_t lost;
 706	size_t cnt;
 707
 708	read = local_read(&buffer->buffers[cpu]->pages_read);
 709	lost = local_read(&buffer->buffers[cpu]->pages_lost);
 710	cnt = local_read(&buffer->buffers[cpu]->pages_touched);
 711
 712	if (WARN_ON_ONCE(cnt < lost))
 713		return 0;
 714
 715	cnt -= lost;
 716
 717	/* The reader can read an empty page, but not more than that */
 718	if (cnt < read) {
 719		WARN_ON_ONCE(read > cnt + 1);
 720		return 0;
 721	}
 722
 723	return cnt - read;
 724}
 725
 726static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full)
 727{
 728	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 729	size_t nr_pages;
 730	size_t dirty;
 731
 732	nr_pages = cpu_buffer->nr_pages;
 733	if (!nr_pages || !full)
 734		return true;
 735
 736	/*
 737	 * Add one as dirty will never equal nr_pages, as the sub-buffer
 738	 * that the writer is on is not counted as dirty.
 739	 * This is needed if "buffer_percent" is set to 100.
 740	 */
 741	dirty = ring_buffer_nr_dirty_pages(buffer, cpu) + 1;
 742
 743	return (dirty * 100) >= (full * nr_pages);
 744}
 745
 746/*
 747 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 748 *
 749 * Schedules a delayed work to wake up any task that is blocked on the
 750 * ring buffer waiters queue.
 751 */
 752static void rb_wake_up_waiters(struct irq_work *work)
 753{
 754	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
 755
 
 
 
 756	wake_up_all(&rbwork->waiters);
 757	if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
 758		/* Only cpu_buffer sets the above flags */
 759		struct ring_buffer_per_cpu *cpu_buffer =
 760			container_of(rbwork, struct ring_buffer_per_cpu, irq_work);
 761
 762		/* Called from interrupt context */
 763		raw_spin_lock(&cpu_buffer->reader_lock);
 764		rbwork->wakeup_full = false;
 765		rbwork->full_waiters_pending = false;
 766
 767		/* Waking up all waiters, they will reset the shortest full */
 768		cpu_buffer->shortest_full = 0;
 769		raw_spin_unlock(&cpu_buffer->reader_lock);
 770
 771		wake_up_all(&rbwork->full_waiters);
 772	}
 773}
 774
 775/**
 776 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
 777 * @buffer: The ring buffer to wake waiters on
 778 * @cpu: The CPU buffer to wake waiters on
 779 *
 780 * In the case of a file that represents a ring buffer is closing,
 781 * it is prudent to wake up any waiters that are on this.
 782 */
 783void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
 784{
 785	struct ring_buffer_per_cpu *cpu_buffer;
 786	struct rb_irq_work *rbwork;
 787
 788	if (!buffer)
 789		return;
 790
 791	if (cpu == RING_BUFFER_ALL_CPUS) {
 792
 793		/* Wake up individual ones too. One level recursion */
 794		for_each_buffer_cpu(buffer, cpu)
 795			ring_buffer_wake_waiters(buffer, cpu);
 796
 797		rbwork = &buffer->irq_work;
 798	} else {
 799		if (WARN_ON_ONCE(!buffer->buffers))
 800			return;
 801		if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
 802			return;
 803
 804		cpu_buffer = buffer->buffers[cpu];
 805		/* The CPU buffer may not have been initialized yet */
 806		if (!cpu_buffer)
 807			return;
 808		rbwork = &cpu_buffer->irq_work;
 809	}
 810
 811	/* This can be called in any context */
 812	irq_work_queue(&rbwork->work);
 813}
 814
 815static bool rb_watermark_hit(struct trace_buffer *buffer, int cpu, int full)
 816{
 817	struct ring_buffer_per_cpu *cpu_buffer;
 818	bool ret = false;
 819
 820	/* A read of all CPUs always waits for any data */
 821	if (cpu == RING_BUFFER_ALL_CPUS)
 822		return !ring_buffer_empty(buffer);
 823
 824	cpu_buffer = buffer->buffers[cpu];
 825
 826	if (!ring_buffer_empty_cpu(buffer, cpu)) {
 827		unsigned long flags;
 828		bool pagebusy;
 829
 830		if (!full)
 831			return true;
 832
 833		raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 834		pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
 835		ret = !pagebusy && full_hit(buffer, cpu, full);
 836
 837		if (!cpu_buffer->shortest_full ||
 838		    cpu_buffer->shortest_full > full)
 839			cpu_buffer->shortest_full = full;
 840		raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 841	}
 842	return ret;
 843}
 844
 845/**
 846 * ring_buffer_wait - wait for input to the ring buffer
 847 * @buffer: buffer to wait on
 848 * @cpu: the cpu buffer to wait on
 849 * @full: wait until this percentage of pages are filled, if @cpu != RING_BUFFER_ALL_CPUS
 850 *
 851 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 852 * as data is added to any of the @buffer's cpu buffers. Otherwise
 853 * it will wait for data to be added to a specific cpu buffer.
 854 */
 855int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
 856{
 857	struct ring_buffer_per_cpu *cpu_buffer;
 858	DEFINE_WAIT(wait);
 859	struct rb_irq_work *work;
 860	int ret = 0;
 861
 862	/*
 863	 * Depending on what the caller is waiting for (any data in
 864	 * any cpu buffer, or data in a specific buffer), put the
 865	 * caller on the appropriate wait queue.
 866	 */
 867	if (cpu == RING_BUFFER_ALL_CPUS) {
 868		work = &buffer->irq_work;
 869		/* Full only makes sense on per cpu reads */
 870		full = 0;
 871	} else {
 872		if (!cpumask_test_cpu(cpu, buffer->cpumask))
 873			return -ENODEV;
 874		cpu_buffer = buffer->buffers[cpu];
 875		work = &cpu_buffer->irq_work;
 876	}
 877
 878	if (full)
 879		prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
 880	else
 881		prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
 882
 883	/*
 884	 * The events can happen in critical sections where
 885	 * checking a work queue can cause deadlocks.
 886	 * After adding a task to the queue, this flag is set
 887	 * only to notify events to try to wake up the queue
 888	 * using irq_work.
 889	 *
 890	 * We don't clear it even if the buffer is no longer
 891	 * empty. The flag only causes the next event to run
 892	 * irq_work to do the work queue wake up. The worst
 893	 * that can happen if we race with !trace_empty() is that
 894	 * an event will cause an irq_work to try to wake up
 895	 * an empty queue.
 896	 *
 897	 * There's no reason to protect this flag either, as
 898	 * the work queue and irq_work logic will do the necessary
 899	 * synchronization for the wake ups. The only thing
 900	 * that is necessary is that the wake up happens after
 901	 * a task has been queued. Spurious wake ups are OK.
 902	 */
 903	if (full)
 904		work->full_waiters_pending = true;
 905	else
 906		work->waiters_pending = true;
 907
 908	if (rb_watermark_hit(buffer, cpu, full))
 909		goto out;
 910
 911	if (signal_pending(current)) {
 912		ret = -EINTR;
 913		goto out;
 914	}
 915
 916	schedule();
 917 out:
 918	if (full)
 919		finish_wait(&work->full_waiters, &wait);
 920	else
 921		finish_wait(&work->waiters, &wait);
 922
 923	if (!ret && !rb_watermark_hit(buffer, cpu, full) && signal_pending(current))
 924		ret = -EINTR;
 925
 926	return ret;
 927}
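/*
 * Hedged usage sketch (illustrative only, not from this file): a
 * reader that wants to block until a specific CPU buffer is at least
 * half full could do
 *
 *	ret = ring_buffer_wait(buffer, cpu, 50);
 *	if (ret == -EINTR)
 *		return ret;
 *
 * where -EINTR means a signal cut the wait short. Note the @full
 * watermark only applies to a single @cpu; with RING_BUFFER_ALL_CPUS
 * it is forced to zero above.
 */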
 928
 929/**
 930 * ring_buffer_poll_wait - poll on buffer input
 931 * @buffer: buffer to wait on
 932 * @cpu: the cpu buffer to wait on
 933 * @filp: the file descriptor
 934 * @poll_table: The poll descriptor
 935 * @full: wait until this percentage of pages are filled, if @cpu != RING_BUFFER_ALL_CPUS
 936 *
 937 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 938 * as data is added to any of the @buffer's cpu buffers. Otherwise
 939 * it will wait for data to be added to a specific cpu buffer.
 940 *
 941 * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
 942 * zero otherwise.
 943 */
 944__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
 945			  struct file *filp, poll_table *poll_table, int full)
 946{
 947	struct ring_buffer_per_cpu *cpu_buffer;
 948	struct rb_irq_work *rbwork;
 949
 950	if (cpu == RING_BUFFER_ALL_CPUS) {
 951		rbwork = &buffer->irq_work;
 952		full = 0;
 953	} else {
 954		if (!cpumask_test_cpu(cpu, buffer->cpumask))
 955			return EPOLLERR;
 956
 957		cpu_buffer = buffer->buffers[cpu];
 958		rbwork = &cpu_buffer->irq_work;
 959	}
 960
 961	if (full) {
 962		unsigned long flags;
 963
 964		poll_wait(filp, &rbwork->full_waiters, poll_table);
 965
 966		raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 967		rbwork->full_waiters_pending = true;
 968		if (!cpu_buffer->shortest_full ||
 969		    cpu_buffer->shortest_full > full)
 970			cpu_buffer->shortest_full = full;
 971		raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 972	} else {
 973		poll_wait(filp, &rbwork->waiters, poll_table);
 974		rbwork->waiters_pending = true;
 975	}
 976
 977	/*
 978	 * There's a tight race between setting the waiters_pending and
 979	 * checking if the ring buffer is empty.  Once the waiters_pending bit
 980	 * is set, the next event will wake the task up, but we can get stuck
 981	 * if there's only a single event in.
 982	 *
 983	 * FIXME: Ideally, we need a memory barrier on the writer side as well,
 984	 * but adding a memory barrier to all events will cause too much of a
 985	 * performance hit in the fast path.  We only need a memory barrier when
 986	 * the buffer goes from empty to having content.  But as this race is
 987	 * extremely small, and it's not a problem if another event comes in, we
 988	 * will fix it later.
 989	 */
 990	smp_mb();
 991
 992	if (full)
 993		return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0;
 994
 995	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
 996	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
 997		return EPOLLIN | EPOLLRDNORM;
 998	return 0;
 999}
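/*
 * Hedged sketch of a typical f_op->poll implementation built on the
 * function above (my_buffer and my_cpu are illustrative placeholders,
 * not names from this file):
 *
 *	static __poll_t my_trace_poll(struct file *filp, poll_table *pt)
 *	{
 *		return ring_buffer_poll_wait(my_buffer, my_cpu, filp, pt, 0);
 *	}
 *
 * Passing full == 0 reports EPOLLIN as soon as any event is present;
 * a non-zero percentage defers readiness until full_hit() succeeds.
 */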
1000
1001/* buffer may be either ring_buffer or ring_buffer_per_cpu */
1002#define RB_WARN_ON(b, cond)						\
1003	({								\
1004		int _____ret = unlikely(cond);				\
1005		if (_____ret) {						\
1006			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
1007				struct ring_buffer_per_cpu *__b =	\
1008					(void *)b;			\
1009				atomic_inc(&__b->buffer->record_disabled); \
1010			} else						\
1011				atomic_inc(&b->record_disabled);	\
1012			WARN_ON(1);					\
1013		}							\
1014		_____ret;						\
1015	})
1016
1017/* Up this if you want to test the TIME_EXTENTS and normalization */
1018#define DEBUG_SHIFT 0
1019
1020static inline u64 rb_time_stamp(struct trace_buffer *buffer)
1021{
1022	u64 ts;
1023
1024	/* Skip retpolines :-( */
1025	if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local))
1026		ts = trace_clock_local();
1027	else
1028		ts = buffer->clock();
1029
1030	/* shift to debug/test normalization and TIME_EXTENTS */
1031	return ts << DEBUG_SHIFT;
1032}
1033
1034u64 ring_buffer_time_stamp(struct trace_buffer *buffer)
1035{
1036	u64 time;
1037
1038	preempt_disable_notrace();
1039	time = rb_time_stamp(buffer);
1040	preempt_enable_notrace();
1041
1042	return time;
1043}
1044EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
1045
1046void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
1047				      int cpu, u64 *ts)
1048{
1049	/* Just stupid testing the normalize function and deltas */
1050	*ts >>= DEBUG_SHIFT;
1051}
1052EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
1053
1054/*
1055 * Making the ring buffer lockless makes things tricky.
1056 * Writes only happen on the CPU that they are on, so they
1057 * only need to worry about interrupts. Reads, however, can
1058 * happen on any CPU.
1059 *
1060 * The reader page is always off the ring buffer, but when the
1061 * reader finishes with a page, it needs to swap its page with
1062 * a new one from the buffer. The reader needs to take from
1063 * the head (writes go to the tail). But if a writer is in overwrite
1064 * mode and wraps, it must push the head page forward.
1065 *
1066 * Here lies the problem.
1067 *
1068 * The reader must be careful to replace only the head page, and
1069 * not another one. As described at the top of the file in the
1070 * ASCII art, the reader sets its old page to point to the next
1071 * page after head. It then sets the page after head to point to
1072 * the old reader page. But if the writer moves the head page
1073 * during this operation, the reader could end up with the tail.
1074 *
1075 * We use cmpxchg to help prevent this race. We also do something
1076 * special with the page before head. We set the LSB to 1.
1077 *
1078 * When the writer must push the page forward, it will clear the
1079 * bit that points to the head page, move the head, and then set
1080 * the bit that points to the new head page.
1081 *
1082 * We also don't want an interrupt coming in and moving the head
1083 * page on another writer. Thus we use the second LSB to catch
1084 * that too. Thus:
1085 *
1086 * head->list->prev->next        bit 1          bit 0
1087 *                              -------        -------
1088 * Normal page                     0              0
1089 * Points to head page             0              1
1090 * New head page                   1              0
1091 *
1092 * Note we cannot trust the prev pointer of the head page, because:
1093 *
1094 * +----+       +-----+        +-----+
1095 * |    |------>|  T  |---X--->|  N  |
1096 * |    |<------|     |        |     |
1097 * +----+       +-----+        +-----+
1098 *   ^                           ^ |
1099 *   |          +-----+          | |
1100 *   +----------|  R  |----------+ |
1101 *              |     |<-----------+
1102 *              +-----+
1103 *
1104 * Key:  ---X-->  HEAD flag set in pointer
1105 *         T      Tail page
1106 *         R      Reader page
1107 *         N      Next page
1108 *
1109 * (see __rb_reserve_next() to see where this happens)
1110 *
1111 *  What the above shows is that the reader just swapped out
1112 *  the reader page with a page in the buffer, but before it
1113 *  could make the new header point back to the new page added
1114 *  it was preempted by a writer. The writer moved forward onto
1115 *  the new page added by the reader and is about to move forward
1116 *  again.
1117 *
1118 *  You can see, it is legitimate for the previous pointer of
1119 *  the head (or any page) not to point back to itself. But only
1120 *  temporarily.
1121 */
1122
1123#define RB_PAGE_NORMAL		0UL
1124#define RB_PAGE_HEAD		1UL
1125#define RB_PAGE_UPDATE		2UL
1126
1127
1128#define RB_FLAG_MASK		3UL
1129
1130/* PAGE_MOVED is not part of the mask */
1131#define RB_PAGE_MOVED		4UL
1132
1133/*
1134 * rb_list_head - remove any bit
1135 */
1136static struct list_head *rb_list_head(struct list_head *list)
1137{
1138	unsigned long val = (unsigned long)list;
1139
1140	return (struct list_head *)(val & ~RB_FLAG_MASK);
1141}
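/*
 * Illustrative user-space model of the tagged-pointer trick (an
 * exposition aid, not kernel code): list_head pointers are at least
 * four-byte aligned, so the two low bits are free to carry the
 * HEAD/UPDATE flags.
 *
 *	#include <stdint.h>
 *
 *	#define FLAG_MASK	3UL
 *
 *	static inline void *ptr_strip(void *p)
 *	{
 *		return (void *)((uintptr_t)p & ~FLAG_MASK);
 *	}
 *
 *	static inline void *ptr_tag(void *p, unsigned long flag)
 *	{
 *		return (void *)(((uintptr_t)p & ~FLAG_MASK) | flag);
 *	}
 *
 * rb_list_head() above is the strip half of this pair.
 */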
1142
1143/*
1144 * rb_is_head_page - test if the given page is the head page
1145 *
1146 * Because the reader may move the head_page pointer, we
1147 * cannot trust what the head page is (it may be pointing to
1148 * the reader page). But if the next page is a header page,
1149 * its flags will be non-zero.
1150 */
1151static inline int
1152rb_is_head_page(struct buffer_page *page, struct list_head *list)
1153{
1154	unsigned long val;
1155
1156	val = (unsigned long)list->next;
1157
1158	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
1159		return RB_PAGE_MOVED;
1160
1161	return val & RB_FLAG_MASK;
1162}
1163
1164/*
1165 * rb_is_reader_page
1166 *
1167 * The unique thing about the reader page, is that, if the
1168 * writer is ever on it, the previous pointer never points
1169 * back to the reader page.
1170 */
1171static bool rb_is_reader_page(struct buffer_page *page)
1172{
1173	struct list_head *list = page->list.prev;
1174
1175	return rb_list_head(list->next) != &page->list;
1176}
1177
1178/*
1179 * rb_set_list_to_head - set a list_head to be pointing to head.
1180 */
1181static void rb_set_list_to_head(struct list_head *list)
1182{
1183	unsigned long *ptr;
1184
1185	ptr = (unsigned long *)&list->next;
1186	*ptr |= RB_PAGE_HEAD;
1187	*ptr &= ~RB_PAGE_UPDATE;
1188}
1189
1190/*
1191 * rb_head_page_activate - sets up head page
1192 */
1193static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
1194{
1195	struct buffer_page *head;
1196
1197	head = cpu_buffer->head_page;
1198	if (!head)
1199		return;
1200
1201	/*
1202	 * Set the previous list pointer to have the HEAD flag.
1203	 */
1204	rb_set_list_to_head(head->list.prev);
1205}
1206
1207static void rb_list_head_clear(struct list_head *list)
1208{
1209	unsigned long *ptr = (unsigned long *)&list->next;
1210
1211	*ptr &= ~RB_FLAG_MASK;
1212}
1213
1214/*
1215 * rb_head_page_deactivate - clears head page ptr (for free list)
1216 */
1217static void
1218rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
1219{
1220	struct list_head *hd;
1221
1222	/* Go through the whole list and clear any pointers found. */
1223	rb_list_head_clear(cpu_buffer->pages);
1224
1225	list_for_each(hd, cpu_buffer->pages)
1226		rb_list_head_clear(hd);
1227}
1228
1229static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
1230			    struct buffer_page *head,
1231			    struct buffer_page *prev,
1232			    int old_flag, int new_flag)
1233{
1234	struct list_head *list;
1235	unsigned long val = (unsigned long)&head->list;
1236	unsigned long ret;
1237
1238	list = &prev->list;
1239
1240	val &= ~RB_FLAG_MASK;
1241
1242	ret = cmpxchg((unsigned long *)&list->next,
1243		      val | old_flag, val | new_flag);
1244
1245	/* check if the reader took the page */
1246	if ((ret & ~RB_FLAG_MASK) != val)
1247		return RB_PAGE_MOVED;
1248
1249	return ret & RB_FLAG_MASK;
1250}
1251
1252static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
1253				   struct buffer_page *head,
1254				   struct buffer_page *prev,
1255				   int old_flag)
1256{
1257	return rb_head_page_set(cpu_buffer, head, prev,
1258				old_flag, RB_PAGE_UPDATE);
1259}
1260
1261static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
1262				 struct buffer_page *head,
1263				 struct buffer_page *prev,
1264				 int old_flag)
1265{
1266	return rb_head_page_set(cpu_buffer, head, prev,
1267				old_flag, RB_PAGE_HEAD);
1268}
1269
1270static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
1271				   struct buffer_page *head,
1272				   struct buffer_page *prev,
1273				   int old_flag)
1274{
1275	return rb_head_page_set(cpu_buffer, head, prev,
1276				old_flag, RB_PAGE_NORMAL);
1277}
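/*
 * Taken together, the three wrappers above implement the writer's
 * head-move handshake. Roughly (illustrative ordering; see
 * rb_handle_head_page() below for the real sequence):
 *
 *	type = rb_head_page_set_update(cpu_buffer, next, tail, RB_PAGE_HEAD);
 *	... move the head page forward ...
 *	rb_head_page_set_head(cpu_buffer, new_head, next, RB_PAGE_NORMAL);
 *	rb_head_page_set_normal(cpu_buffer, next, tail, RB_PAGE_UPDATE);
 *
 * Because each step is a cmpxchg on the flag bits, a reader swapping
 * pages concurrently makes a step return RB_PAGE_MOVED instead of
 * silently corrupting the list.
 */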
1278
1279static inline void rb_inc_page(struct buffer_page **bpage)
1280{
1281	struct list_head *p = rb_list_head((*bpage)->list.next);
1282
1283	*bpage = list_entry(p, struct buffer_page, list);
1284}
1285
1286static struct buffer_page *
1287rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
1288{
1289	struct buffer_page *head;
1290	struct buffer_page *page;
1291	struct list_head *list;
1292	int i;
1293
1294	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
1295		return NULL;
1296
1297	/* sanity check */
1298	list = cpu_buffer->pages;
1299	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
1300		return NULL;
1301
1302	page = head = cpu_buffer->head_page;
1303	/*
1304	 * It is possible that the writer moves the head page behind
1305	 * where we started, and we miss it in one loop.
1306	 * A second loop should grab the head page, but we'll do
1307	 * three loops just because I'm paranoid.
1308	 */
1309	for (i = 0; i < 3; i++) {
1310		do {
1311			if (rb_is_head_page(page, page->list.prev)) {
1312				cpu_buffer->head_page = page;
1313				return page;
1314			}
1315			rb_inc_page(&page);
1316		} while (page != head);
1317	}
1318
1319	RB_WARN_ON(cpu_buffer, 1);
1320
1321	return NULL;
1322}
1323
1324static bool rb_head_page_replace(struct buffer_page *old,
1325				struct buffer_page *new)
1326{
1327	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
1328	unsigned long val;
1329
1330	val = *ptr & ~RB_FLAG_MASK;
1331	val |= RB_PAGE_HEAD;
1332
1333	return try_cmpxchg(ptr, &val, (unsigned long)&new->list);
1334}
1335
1336/*
1337 * rb_tail_page_update - move the tail page forward
1338 */
1339static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
1340			       struct buffer_page *tail_page,
1341			       struct buffer_page *next_page)
1342{
1343	unsigned long old_entries;
1344	unsigned long old_write;
1345
1346	/*
1347	 * The tail page now needs to be moved forward.
1348	 *
1349	 * We need to reset the tail page, but we must not erase
1350	 * data brought in by interrupts that have already moved
1351	 * the tail page forward and are currently writing on it.
1352	 *
1353	 * We add a counter to the write field to denote this.
1354	 */
1355	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
1356	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
1357
1358	local_inc(&cpu_buffer->pages_touched);
1359	/*
1360	 * Just make sure we have seen our old_write and synchronize
1361	 * with any interrupts that come in.
1362	 */
1363	barrier();
1364
1365	/*
1366	 * If the tail page is still the same as what we think
1367	 * it is, then it is up to us to update the tail
1368	 * pointer.
1369	 */
1370	if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
1371		/* Zero the write counter */
1372		unsigned long val = old_write & ~RB_WRITE_MASK;
1373		unsigned long eval = old_entries & ~RB_WRITE_MASK;
1374
1375		/*
1376		 * This will only succeed if an interrupt did
1377		 * not come in and change it. In which case, we
1378		 * do not want to modify it.
1379		 *
1380		 * We add (void) to let the compiler know that we do not care
1381		 * about the return value of these functions. We use the
1382		 * cmpxchg to only update if an interrupt did not already
1383		 * do it for us. If the cmpxchg fails, we don't care.
1384		 */
1385		(void)local_cmpxchg(&next_page->write, old_write, val);
1386		(void)local_cmpxchg(&next_page->entries, old_entries, eval);
1387
1388		/*
1389		 * No need to worry about races with clearing out the commit.
1390	 * It can only increment when a commit takes place. But that
1391		 * only happens in the outer most nested commit.
1392		 */
1393		local_set(&next_page->page->commit, 0);
1394
1395		/* Again, either we update tail_page or an interrupt does */
1396		(void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
1397	}
1398}
1399
1400static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
1401			  struct buffer_page *bpage)
1402{
1403	unsigned long val = (unsigned long)bpage;
1404
1405	RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK);
1406}
1407
1408/**
1409 * rb_check_pages - integrity check of buffer pages
1410 * @cpu_buffer: CPU buffer with pages to test
1411 *
1412 * As a safety measure we check to make sure the data pages have not
1413 * been corrupted.
1414 */
1415static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
1416{
1417	struct list_head *head = rb_list_head(cpu_buffer->pages);
1418	struct list_head *tmp;
1419
1420	if (RB_WARN_ON(cpu_buffer,
1421			rb_list_head(rb_list_head(head->next)->prev) != head))
1422		return;
1423
1424	if (RB_WARN_ON(cpu_buffer,
1425			rb_list_head(rb_list_head(head->prev)->next) != head))
1426		return;
1427
1428	for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) {
1429		if (RB_WARN_ON(cpu_buffer,
1430				rb_list_head(rb_list_head(tmp->next)->prev) != tmp))
1431			return;
1432
1433		if (RB_WARN_ON(cpu_buffer,
1434				rb_list_head(rb_list_head(tmp->prev)->next) != tmp))
1435			return;
1436	}
1437}
1438
1439static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1440		long nr_pages, struct list_head *pages)
1441{
1442	struct buffer_page *bpage, *tmp;
1443	bool user_thread = current->mm != NULL;
1444	gfp_t mflags;
1445	long i;
1446
1447	/*
1448	 * First check that enough memory is available.
1449	 * Note, si_mem_available() only gives us a rough estimate of available
1450	 * memory. It may not be accurate. But we don't care, we just want
1451	 * to prevent doing any allocation when it is obvious that it is
1452	 * not going to succeed.
1453	 */
1454	i = si_mem_available();
1455	if (i < nr_pages)
1456		return -ENOMEM;
1457
1458	/*
1459	 * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
1460	 * gracefully without invoking oom-killer and the system is not
1461	 * destabilized.
1462	 */
1463	mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL;
1464
1465	/*
1466	 * A user thread may allocate too much while si_mem_available()
1467	 * still reports enough memory, even though there is not.
1468	 * Make sure the OOM killer kills this thread. This can happen
1469	 * even with RETRY_MAYFAIL because another task may be doing
1470	 * an allocation after this task has taken all memory.
1471	 * This is the task the OOM killer needs to take out during this
1472	 * loop, even if it was triggered by an allocation somewhere else.
1473	 */
1474	if (user_thread)
1475		set_current_oom_origin();
1476	for (i = 0; i < nr_pages; i++) {
1477		struct page *page;
1478
1479		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1480				    mflags, cpu_to_node(cpu_buffer->cpu));
1481		if (!bpage)
1482			goto free_pages;
1483
1484		rb_check_bpage(cpu_buffer, bpage);
1485
1486		list_add(&bpage->list, pages);
1487
1488		page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags,
1489					cpu_buffer->buffer->subbuf_order);
1490		if (!page)
1491			goto free_pages;
1492		bpage->page = page_address(page);
1493		bpage->order = cpu_buffer->buffer->subbuf_order;
1494		rb_init_page(bpage->page);
1495
1496		if (user_thread && fatal_signal_pending(current))
1497			goto free_pages;
1498	}
1499	if (user_thread)
1500		clear_current_oom_origin();
1501
1502	return 0;
1503
1504free_pages:
1505	list_for_each_entry_safe(bpage, tmp, pages, list) {
1506		list_del_init(&bpage->list);
1507		free_buffer_page(bpage);
1508	}
1509	if (user_thread)
1510		clear_current_oom_origin();
1511
1512	return -ENOMEM;
1513}
1514
1515static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1516			     unsigned long nr_pages)
1517{
1518	LIST_HEAD(pages);
1519
1520	WARN_ON(!nr_pages);
1521
1522	if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages))
1523		return -ENOMEM;
1524
1525	/*
1526	 * The ring buffer page list is a circular list that does not
1527	 * start and end with a list head. All page list items point to
1528	 * other pages.
1529	 */
1530	cpu_buffer->pages = pages.next;
1531	list_del(&pages);
1532
1533	cpu_buffer->nr_pages = nr_pages;
1534
1535	rb_check_pages(cpu_buffer);
1536
1537	return 0;
1538}
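/*
 * Illustrative note: after the list_del(&pages) above, the sub-buffer
 * list is circular with no list_head sentinel. With three pages A, B
 * and C:
 *
 *	A->next == B, B->next == C, C->next == A
 *
 * cpu_buffer->pages is merely a starting element, which is why the
 * traversals in this file strip flag bits with rb_list_head() rather
 * than terminating on a dedicated head node.
 */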
1539
1540static struct ring_buffer_per_cpu *
1541rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
1542{
1543	struct ring_buffer_per_cpu *cpu_buffer;
1544	struct buffer_page *bpage;
1545	struct page *page;
1546	int ret;
1547
1548	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1549				  GFP_KERNEL, cpu_to_node(cpu));
1550	if (!cpu_buffer)
1551		return NULL;
1552
1553	cpu_buffer->cpu = cpu;
1554	cpu_buffer->buffer = buffer;
1555	raw_spin_lock_init(&cpu_buffer->reader_lock);
1556	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1557	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1558	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
1559	init_completion(&cpu_buffer->update_done);
1560	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
1561	init_waitqueue_head(&cpu_buffer->irq_work.waiters);
1562	init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);
1563
1564	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1565			    GFP_KERNEL, cpu_to_node(cpu));
1566	if (!bpage)
1567		goto fail_free_buffer;
1568
1569	rb_check_bpage(cpu_buffer, bpage);
1570
1571	cpu_buffer->reader_page = bpage;
1572
1573	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, cpu_buffer->buffer->subbuf_order);
1574	if (!page)
1575		goto fail_free_reader;
1576	bpage->page = page_address(page);
1577	rb_init_page(bpage->page);
1578
1579	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1580	INIT_LIST_HEAD(&cpu_buffer->new_pages);
1581
1582	ret = rb_allocate_pages(cpu_buffer, nr_pages);
1583	if (ret < 0)
1584		goto fail_free_reader;
1585
1586	cpu_buffer->head_page
1587		= list_entry(cpu_buffer->pages, struct buffer_page, list);
1588	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1589
1590	rb_head_page_activate(cpu_buffer);
1591
1592	return cpu_buffer;
1593
1594 fail_free_reader:
1595	free_buffer_page(cpu_buffer->reader_page);
1596
1597 fail_free_buffer:
1598	kfree(cpu_buffer);
1599	return NULL;
1600}
1601
1602static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1603{
1604	struct list_head *head = cpu_buffer->pages;
1605	struct buffer_page *bpage, *tmp;
1606
1607	irq_work_sync(&cpu_buffer->irq_work.work);
1608
1609	free_buffer_page(cpu_buffer->reader_page);
1610
1611	if (head) {
1612		rb_head_page_deactivate(cpu_buffer);
1613
1614		list_for_each_entry_safe(bpage, tmp, head, list) {
1615			list_del_init(&bpage->list);
1616			free_buffer_page(bpage);
1617		}
1618		bpage = list_entry(head, struct buffer_page, list);
1619		free_buffer_page(bpage);
1620	}
1621
1622	free_page((unsigned long)cpu_buffer->free_page);
1623
1624	kfree(cpu_buffer);
1625}
1626
1627/**
1628 * __ring_buffer_alloc - allocate a new ring_buffer
1629 * @size: the size in bytes per cpu that is needed.
1630 * @flags: attributes to set for the ring buffer.
1631 * @key: ring buffer reader_lock_key.
1632 *
1633 * Currently the only flag that is available is the RB_FL_OVERWRITE
1634 * flag. This flag means that the buffer will overwrite old data
1635 * when the buffer wraps. If this flag is not set, the buffer will
1636 * drop data when the tail hits the head.
1637 */
1638struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1639					struct lock_class_key *key)
1640{
1641	struct trace_buffer *buffer;
1642	long nr_pages;
1643	int bsize;
1644	int cpu;
1645	int ret;
1646
1647	/* keep it in its own cache line */
1648	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1649			 GFP_KERNEL);
1650	if (!buffer)
1651		return NULL;
1652
1653	if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1654		goto fail_free_buffer;
1655
1656	/* Default buffer page size - one system page */
1657	buffer->subbuf_order = 0;
1658	buffer->subbuf_size = PAGE_SIZE - BUF_PAGE_HDR_SIZE;
1659
1660	/* Max payload is buffer page size - header (8bytes) */
1661	buffer->max_data_size = buffer->subbuf_size - (sizeof(u32) * 2);
1662
1663	nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
1664	buffer->flags = flags;
1665	buffer->clock = trace_clock_local;
1666	buffer->reader_lock_key = key;
1667
1668	init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
1669	init_waitqueue_head(&buffer->irq_work.waiters);
1670
1671	/* need at least two pages */
1672	if (nr_pages < 2)
1673		nr_pages = 2;
1674
1675	buffer->cpus = nr_cpu_ids;
1676
1677	bsize = sizeof(void *) * nr_cpu_ids;
1678	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1679				  GFP_KERNEL);
1680	if (!buffer->buffers)
1681		goto fail_free_cpumask;
1682
1683	cpu = raw_smp_processor_id();
1684	cpumask_set_cpu(cpu, buffer->cpumask);
1685	buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
1686	if (!buffer->buffers[cpu])
1687		goto fail_free_buffers;
1688
1689	ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
1690	if (ret < 0)
1691		goto fail_free_buffers;
1692
1693	mutex_init(&buffer->mutex);
1694
1695	return buffer;
1696
1697 fail_free_buffers:
1698	for_each_buffer_cpu(buffer, cpu) {
1699		if (buffer->buffers[cpu])
1700			rb_free_cpu_buffer(buffer->buffers[cpu]);
1701	}
1702	kfree(buffer->buffers);
1703
1704 fail_free_cpumask:
1705	free_cpumask_var(buffer->cpumask);
1706
1707 fail_free_buffer:
1708	kfree(buffer);
1709	return NULL;
1710}
1711EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
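/*
 * Hedged usage sketch (illustrative; callers normally go through the
 * ring_buffer_alloc() wrapper macro from <linux/ring_buffer.h>, which
 * supplies the lock class key):
 *
 *	struct trace_buffer *buf;
 *
 *	buf = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(buf);
 *
 * The requested 1 MiB per-CPU size is rounded up to whole sub-buffers
 * by the DIV_ROUND_UP() above and clamped to at least two pages.
 */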
1712
1713/**
1714 * ring_buffer_free - free a ring buffer.
1715 * @buffer: the buffer to free.
1716 */
1717void
1718ring_buffer_free(struct trace_buffer *buffer)
1719{
1720	int cpu;
1721
1722	cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
1723
1724	irq_work_sync(&buffer->irq_work.work);
1725
1726	for_each_buffer_cpu(buffer, cpu)
1727		rb_free_cpu_buffer(buffer->buffers[cpu]);
1728
1729	kfree(buffer->buffers);
1730	free_cpumask_var(buffer->cpumask);
1731
1732	kfree(buffer);
1733}
1734EXPORT_SYMBOL_GPL(ring_buffer_free);
1735
1736void ring_buffer_set_clock(struct trace_buffer *buffer,
1737			   u64 (*clock)(void))
1738{
1739	buffer->clock = clock;
1740}
1741
1742void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs)
1743{
1744	buffer->time_stamp_abs = abs;
1745}
1746
1747bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer)
1748{
1749	return buffer->time_stamp_abs;
1750}
1751
1752static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1753
1754static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1755{
1756	return local_read(&bpage->entries) & RB_WRITE_MASK;
1757}
1758
1759static inline unsigned long rb_page_write(struct buffer_page *bpage)
1760{
1761	return local_read(&bpage->write) & RB_WRITE_MASK;
1762}
1763
1764static bool
1765rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
1766{
1767	struct list_head *tail_page, *to_remove, *next_page;
1768	struct buffer_page *to_remove_page, *tmp_iter_page;
1769	struct buffer_page *last_page, *first_page;
1770	unsigned long nr_removed;
1771	unsigned long head_bit;
1772	int page_entries;
1773
1774	head_bit = 0;
1775
1776	raw_spin_lock_irq(&cpu_buffer->reader_lock);
1777	atomic_inc(&cpu_buffer->record_disabled);
1778	/*
1779	 * We don't race with the readers since we have acquired the reader
1780	 * lock. We also don't race with writers after disabling recording.
1781	 * This makes it easy to figure out the first and the last page to be
1782	 * removed from the list. We unlink all the pages in between including
1783	 * the first and last pages. This is done in a busy loop so that we
1784	 * lose the least number of traces.
1785	 * The pages are freed after we restart recording and unlock readers.
1786	 */
1787	tail_page = &cpu_buffer->tail_page->list;
1788
1789	/*
1790	 * The tail page might be on the reader page; if so, remove
1791	 * the next page from the ring buffer instead.
1792	 */
1793	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1794		tail_page = rb_list_head(tail_page->next);
1795	to_remove = tail_page;
1796
1797	/* start of pages to remove */
1798	first_page = list_entry(rb_list_head(to_remove->next),
1799				struct buffer_page, list);
1800
1801	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
1802		to_remove = rb_list_head(to_remove)->next;
1803		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
1804	}
1805	/* Read iterators need to reset themselves when some pages removed */
1806	cpu_buffer->pages_removed += nr_removed;
1807
1808	next_page = rb_list_head(to_remove)->next;
1809
1810	/*
1811	 * Now we remove all pages between tail_page and next_page.
1812	 * Make sure that we have head_bit value preserved for the
1813	 * next page
1814	 */
1815	tail_page->next = (struct list_head *)((unsigned long)next_page |
1816						head_bit);
1817	next_page = rb_list_head(next_page);
1818	next_page->prev = tail_page;
1819
1820	/* make sure pages points to a valid page in the ring buffer */
1821	cpu_buffer->pages = next_page;
1822
1823	/* update head page */
1824	if (head_bit)
1825		cpu_buffer->head_page = list_entry(next_page,
1826						struct buffer_page, list);
1827
1828	/* pages are removed, resume tracing and then free the pages */
1829	atomic_dec(&cpu_buffer->record_disabled);
1830	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1831
1832	RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
1833
1834	/* last buffer page to remove */
1835	last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
1836				list);
1837	tmp_iter_page = first_page;
1838
1839	do {
1840		cond_resched();
1841
1842		to_remove_page = tmp_iter_page;
1843		rb_inc_page(&tmp_iter_page);
1844
1845		/* update the counters */
1846		page_entries = rb_page_entries(to_remove_page);
1847		if (page_entries) {
1848			/*
1849			 * If something was added to this page, it was full
1850			 * since it is not the tail page. So we deduct the
1851			 * bytes consumed in ring buffer from here.
1852			 * Increment overrun to account for the lost events.
1853			 */
1854			local_add(page_entries, &cpu_buffer->overrun);
1855			local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes);
1856			local_inc(&cpu_buffer->pages_lost);
1857		}
1858
1859		/*
1860		 * We have already removed references to this list item, just
1861		 * free up the buffer_page and its page
1862		 */
1863		free_buffer_page(to_remove_page);
1864		nr_removed--;
1865
1866	} while (to_remove_page != last_page);
1867
1868	RB_WARN_ON(cpu_buffer, nr_removed);
1869
1870	return nr_removed == 0;
1871}
1872
1873static bool
1874rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
1875{
1876	struct list_head *pages = &cpu_buffer->new_pages;
1877	unsigned long flags;
1878	bool success;
1879	int retries;
1880
1881	/* Can be called at early boot up, where interrupts must not be enabled */
1882	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1883	/*
1884	 * We are holding the reader lock, so the reader page won't be swapped
1885	 * in the ring buffer. Now we are racing with the writer trying to
1886	 * move head page and the tail page.
1887	 * We are going to adapt the reader page update process where:
1888	 * 1. We first splice the start and end of list of new pages between
1889	 *    the head page and its previous page.
1890	 * 2. We cmpxchg the prev_page->next to point from head page to the
1891	 *    start of new pages list.
1892	 * 3. Finally, we update the head->prev to the end of new list.
1893	 *
1894	 * We will try this process 10 times, to make sure that we don't keep
1895	 * spinning.
1896	 */
1897	retries = 10;
1898	success = false;
1899	while (retries--) {
1900		struct list_head *head_page, *prev_page;
1901		struct list_head *last_page, *first_page;
1902		struct list_head *head_page_with_bit;
1903		struct buffer_page *hpage = rb_set_head_page(cpu_buffer);
1904
1905		if (!hpage)
1906			break;
1907		head_page = &hpage->list;
1908		prev_page = head_page->prev;
1909
1910		first_page = pages->next;
1911		last_page  = pages->prev;
1912
1913		head_page_with_bit = (struct list_head *)
1914				     ((unsigned long)head_page | RB_PAGE_HEAD);
1915
1916		last_page->next = head_page_with_bit;
1917		first_page->prev = prev_page;
1918
1919		/* caution: head_page_with_bit gets updated on cmpxchg failure */
1920		if (try_cmpxchg(&prev_page->next,
1921				&head_page_with_bit, first_page)) {
1922			/*
1923			 * yay, we replaced the page pointer to our new list,
1924			 * now, we just have to update to head page's prev
1925			 * pointer to point to end of list
1926			 */
1927			head_page->prev = last_page;
1928			success = true;
1929			break;
1930		}
1931	}
1932
1933	if (success)
1934		INIT_LIST_HEAD(pages);
1935	/*
1936	 * If we weren't successful in adding in new pages, warn and stop
1937	 * tracing
1938	 */
1939	RB_WARN_ON(cpu_buffer, !success);
1940	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1941
1942	/* free pages if they weren't inserted */
1943	if (!success) {
1944		struct buffer_page *bpage, *tmp;
1945		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1946					 list) {
1947			list_del_init(&bpage->list);
1948			free_buffer_page(bpage);
1949		}
1950	}
1951	return success;
1952}
1953
1954static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
1955{
1956	bool success;
1957
1958	if (cpu_buffer->nr_pages_to_update > 0)
1959		success = rb_insert_pages(cpu_buffer);
1960	else
1961		success = rb_remove_pages(cpu_buffer,
1962					-cpu_buffer->nr_pages_to_update);
1963
1964	if (success)
1965		cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
1966}
1967
1968static void update_pages_handler(struct work_struct *work)
1969{
1970	struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
1971			struct ring_buffer_per_cpu, update_pages_work);
1972	rb_update_pages(cpu_buffer);
1973	complete(&cpu_buffer->update_done);
1974}
1975
1976/**
1977 * ring_buffer_resize - resize the ring buffer
1978 * @buffer: the buffer to resize.
1979 * @size: the new size.
1980 * @cpu_id: the cpu buffer to resize
1981 *
1982 * Minimum size is 2 * buffer->subbuf_size.
1983 *
1984 * Returns 0 on success and < 0 on failure.
1985 */
1986int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
1987			int cpu_id)
1988{
1989	struct ring_buffer_per_cpu *cpu_buffer;
1990	unsigned long nr_pages;
1991	int cpu, err;
1992
1993	/*
1994	 * Always succeed at resizing a non-existent buffer:
1995	 */
1996	if (!buffer)
1997		return 0;
1998
1999	/* Make sure the requested buffer exists */
2000	if (cpu_id != RING_BUFFER_ALL_CPUS &&
2001	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
2002		return 0;
2003
2004	nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
2005
2006	/* we need a minimum of two pages */
2007	if (nr_pages < 2)
2008		nr_pages = 2;
2009
2010	/* prevent another thread from changing buffer sizes */
2011	mutex_lock(&buffer->mutex);
2012	atomic_inc(&buffer->resizing);
2013
2014	if (cpu_id == RING_BUFFER_ALL_CPUS) {
2015		/*
2016		 * Don't succeed if resizing is disabled, as a reader might be
2017		 * manipulating the ring buffer and is expecting a sane state while
2018		 * this is true.
2019		 */
2020		for_each_buffer_cpu(buffer, cpu) {
2021			cpu_buffer = buffer->buffers[cpu];
2022			if (atomic_read(&cpu_buffer->resize_disabled)) {
2023				err = -EBUSY;
2024				goto out_err_unlock;
2025			}
2026		}
2027
2028		/* calculate the pages to update */
2029		for_each_buffer_cpu(buffer, cpu) {
2030			cpu_buffer = buffer->buffers[cpu];
2031
2032			cpu_buffer->nr_pages_to_update = nr_pages -
2033							cpu_buffer->nr_pages;
2034			/*
2035			 * nothing more to do for removing pages or no update
2036			 */
2037			if (cpu_buffer->nr_pages_to_update <= 0)
2038				continue;
2039			/*
2040			 * to add pages, make sure all new pages can be
2041			 * allocated without receiving ENOMEM
2042			 */
2043			INIT_LIST_HEAD(&cpu_buffer->new_pages);
2044			if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
2045						&cpu_buffer->new_pages)) {
2046				/* not enough memory for new pages */
2047				err = -ENOMEM;
2048				goto out_err;
2049			}
2050
2051			cond_resched();
2052		}
2053
2054		cpus_read_lock();
2055		/*
2056		 * Fire off all the required work handlers.
2057		 * We can't schedule on offline CPUs, but it's not necessary
2058		 * since we can change their buffer sizes without any race.
2059		 */
2060		for_each_buffer_cpu(buffer, cpu) {
2061			cpu_buffer = buffer->buffers[cpu];
2062			if (!cpu_buffer->nr_pages_to_update)
2063				continue;
2064
2065			/* Can't run something on an offline CPU. */
2066			if (!cpu_online(cpu)) {
2067				rb_update_pages(cpu_buffer);
2068				cpu_buffer->nr_pages_to_update = 0;
2069			} else {
2070				/* Run directly if possible. */
2071				migrate_disable();
2072				if (cpu != smp_processor_id()) {
2073					migrate_enable();
2074					schedule_work_on(cpu,
2075							 &cpu_buffer->update_pages_work);
2076				} else {
2077					update_pages_handler(&cpu_buffer->update_pages_work);
2078					migrate_enable();
2079				}
2080			}
2081		}
2082
2083		/* wait for all the updates to complete */
2084		for_each_buffer_cpu(buffer, cpu) {
2085			cpu_buffer = buffer->buffers[cpu];
2086			if (!cpu_buffer->nr_pages_to_update)
2087				continue;
2088
2089			if (cpu_online(cpu))
2090				wait_for_completion(&cpu_buffer->update_done);
2091			cpu_buffer->nr_pages_to_update = 0;
2092		}
2093
2094		cpus_read_unlock();
2095	} else {
2096		cpu_buffer = buffer->buffers[cpu_id];
2097
2098		if (nr_pages == cpu_buffer->nr_pages)
2099			goto out;
2100
2101		/*
2102		 * Don't succeed if resizing is disabled, as a reader might be
2103		 * manipulating the ring buffer and is expecting a sane state while
2104		 * this is true.
2105		 */
2106		if (atomic_read(&cpu_buffer->resize_disabled)) {
2107			err = -EBUSY;
2108			goto out_err_unlock;
2109		}
2110
2111		cpu_buffer->nr_pages_to_update = nr_pages -
2112						cpu_buffer->nr_pages;
2113
2114		INIT_LIST_HEAD(&cpu_buffer->new_pages);
2115		if (cpu_buffer->nr_pages_to_update > 0 &&
2116			__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
2117					    &cpu_buffer->new_pages)) {
2118			err = -ENOMEM;
2119			goto out_err;
2120		}
2121
2122		cpus_read_lock();
2123
2124		/* Can't run something on an offline CPU. */
2125		if (!cpu_online(cpu_id))
2126			rb_update_pages(cpu_buffer);
2127		else {
2128			/* Run directly if possible. */
2129			migrate_disable();
2130			if (cpu_id == smp_processor_id()) {
2131				rb_update_pages(cpu_buffer);
2132				migrate_enable();
2133			} else {
2134				migrate_enable();
2135				schedule_work_on(cpu_id,
2136						 &cpu_buffer->update_pages_work);
2137				wait_for_completion(&cpu_buffer->update_done);
2138			}
2139		}
2140
2141		cpu_buffer->nr_pages_to_update = 0;
2142		cpus_read_unlock();
2143	}
2144
2145 out:
2146	/*
2147	 * The ring buffer resize can happen with the ring buffer
2148	 * enabled, so that the update disturbs the tracing as little
2149	 * as possible. But if the buffer is disabled, we do not need
2150	 * to worry about that, and we can take the time to verify
2151	 * that the buffer is not corrupt.
2152	 */
2153	if (atomic_read(&buffer->record_disabled)) {
2154		atomic_inc(&buffer->record_disabled);
2155		/*
2156		 * Even though the buffer was disabled, we must make sure
2157		 * that it is truly disabled before calling rb_check_pages.
2158		 * There could have been a race between checking
2159		 * record_disable and incrementing it.
2160		 */
2161		synchronize_rcu();
2162		for_each_buffer_cpu(buffer, cpu) {
2163			cpu_buffer = buffer->buffers[cpu];
2164			rb_check_pages(cpu_buffer);
2165		}
2166		atomic_dec(&buffer->record_disabled);
2167	}
2168
2169	atomic_dec(&buffer->resizing);
2170	mutex_unlock(&buffer->mutex);
2171	return 0;
2172
2173 out_err:
2174	for_each_buffer_cpu(buffer, cpu) {
2175		struct buffer_page *bpage, *tmp;
2176
2177		cpu_buffer = buffer->buffers[cpu];
2178		cpu_buffer->nr_pages_to_update = 0;
2179
2180		if (list_empty(&cpu_buffer->new_pages))
2181			continue;
2182
2183		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
2184					list) {
2185			list_del_init(&bpage->list);
2186			free_buffer_page(bpage);
2187		}
2188	}
2189 out_err_unlock:
2190	atomic_dec(&buffer->resizing);
2191	mutex_unlock(&buffer->mutex);
2192	return err;
2193}
2194EXPORT_SYMBOL_GPL(ring_buffer_resize);
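/*
 * Illustrative call (an assumption about a typical caller, not code
 * from this file): growing every per-CPU buffer to 2 MiB would be
 *
 *	err = ring_buffer_resize(buffer, 2 * 1024 * 1024,
 *				 RING_BUFFER_ALL_CPUS);
 *
 * while passing a CPU number instead resizes only that CPU's page
 * list. A negative return (-EBUSY, -ENOMEM) leaves the old size in
 * place.
 */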
2195
2196void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val)
2197{
2198	mutex_lock(&buffer->mutex);
2199	if (val)
2200		buffer->flags |= RB_FL_OVERWRITE;
2201	else
2202		buffer->flags &= ~RB_FL_OVERWRITE;
2203	mutex_unlock(&buffer->mutex);
2204}
2205EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
2206
2207static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
2208{
2209	return bpage->page->data + index;
2210}
2211
2212static __always_inline struct ring_buffer_event *
2213rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
2214{
2215	return __rb_page_index(cpu_buffer->reader_page,
2216			       cpu_buffer->reader_page->read);
2217}
2218
2219static struct ring_buffer_event *
2220rb_iter_head_event(struct ring_buffer_iter *iter)
2221{
2222	struct ring_buffer_event *event;
2223	struct buffer_page *iter_head_page = iter->head_page;
2224	unsigned long commit;
2225	unsigned length;
2226
2227	if (iter->head != iter->next_event)
2228		return iter->event;
2229
2230	/*
2231	 * When the writer goes across pages, it issues a cmpxchg which
2232	 * is a mb(), which will synchronize with the rmb here.
2233	 * (see rb_tail_page_update() and __rb_reserve_next())
2234	 */
2235	commit = rb_page_commit(iter_head_page);
2236	smp_rmb();
2237
2238	/* An event needs to be at least 8 bytes in size */
2239	if (iter->head > commit - 8)
2240		goto reset;
2241
2242	event = __rb_page_index(iter_head_page, iter->head);
2243	length = rb_event_length(event);
2244
2245	/*
2246	 * READ_ONCE() doesn't work on functions and we don't want the
2247	 * compiler doing any crazy optimizations with length.
2248	 */
2249	barrier();
2250
2251	if ((iter->head + length) > commit || length > iter->event_size)
2252		/* Writer corrupted the read? */
2253		goto reset;
2254
2255	memcpy(iter->event, event, length);
2256	/*
2257	 * If the page stamp is still the same after this rmb() then the
2258	 * event was safely copied without the writer entering the page.
2259	 */
2260	smp_rmb();
2261
2262	/* Make sure the page didn't change since we read this */
2263	if (iter->page_stamp != iter_head_page->page->time_stamp ||
2264	    commit > rb_page_commit(iter_head_page))
2265		goto reset;
2266
2267	iter->next_event = iter->head + length;
2268	return iter->event;
2269 reset:
2270	/* Reset to the beginning */
2271	iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
2272	iter->head = 0;
2273	iter->next_event = 0;
2274	iter->missed_events = 1;
2275	return NULL;
2276}
2277
2278/* Size is determined by what has been committed */
2279static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
2280{
2281	return rb_page_commit(bpage);
2282}
2283
2284static __always_inline unsigned
2285rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
2286{
2287	return rb_page_commit(cpu_buffer->commit_page);
2288}
2289
2290static __always_inline unsigned
2291rb_event_index(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event)
2292{
2293	unsigned long addr = (unsigned long)event;
2294
2295	addr &= (PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1;
2296
2297	return addr - BUF_PAGE_HDR_SIZE;
2298}
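/*
 * Worked example (illustrative, assuming subbuf_order == 0 and a 4 KiB
 * PAGE_SIZE): for an event whose address ends in 0x240,
 *
 *	addr & (PAGE_SIZE - 1)	== 0x240
 *	0x240 - BUF_PAGE_HDR_SIZE
 *
 * gives the event's offset past the sub-buffer header. The timestamp
 * code below compares this index against zero to detect the first
 * event on a page.
 */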
2299
2300static void rb_inc_iter(struct ring_buffer_iter *iter)
2301{
2302	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2303
2304	/*
2305	 * The iterator could be on the reader page (it starts there).
2306	 * But the head could have moved, since the reader was
2307	 * found. Check for this case and assign the iterator
2308	 * to the head page instead of next.
2309	 */
2310	if (iter->head_page == cpu_buffer->reader_page)
2311		iter->head_page = rb_set_head_page(cpu_buffer);
2312	else
2313		rb_inc_page(&iter->head_page);
2314
2315	iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
2316	iter->head = 0;
2317	iter->next_event = 0;
2318}
2319
2320/*
2321 * rb_handle_head_page - writer hit the head page
2322 *
2323 * Returns: +1 to retry page
2324 *           0 to continue
2325 *          -1 on error
2326 */
2327static int
2328rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
2329		    struct buffer_page *tail_page,
2330		    struct buffer_page *next_page)
2331{
2332	struct buffer_page *new_head;
2333	int entries;
2334	int type;
2335	int ret;
2336
2337	entries = rb_page_entries(next_page);
2338
2339	/*
2340	 * The hard part is here. We need to move the head
2341	 * forward, and protect against both readers on
2342	 * other CPUs and writers coming in via interrupts.
2343	 */
2344	type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
2345				       RB_PAGE_HEAD);
2346
2347	/*
2348	 * type can be one of four:
2349	 *  NORMAL - an interrupt already moved it for us
2350	 *  HEAD   - we are the first to get here.
2351	 *  UPDATE - we are the interrupt interrupting
2352	 *           a current move.
2353	 *  MOVED  - a reader on another CPU moved the next
2354	 *           pointer to its reader page. Give up
2355	 *           and try again.
2356	 */
2357
2358	switch (type) {
2359	case RB_PAGE_HEAD:
2360		/*
2361		 * We changed the head to UPDATE, thus
2362		 * it is our responsibility to update
2363		 * the counters.
2364		 */
2365		local_add(entries, &cpu_buffer->overrun);
2366		local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes);
2367		local_inc(&cpu_buffer->pages_lost);
2368
2369		/*
2370		 * The entries will be zeroed out when we move the
2371		 * tail page.
2372		 */
2373
2374		/* still more to do */
2375		break;
2376
2377	case RB_PAGE_UPDATE:
2378		/*
2379		 * This is an interrupt that interrupted the
2380		 * previous update. Still more to do.
2381		 */
2382		break;
2383	case RB_PAGE_NORMAL:
2384		/*
2385		 * An interrupt came in before the update
2386		 * and processed this for us.
2387		 * Nothing left to do.
2388		 */
2389		return 1;
2390	case RB_PAGE_MOVED:
2391		/*
2392		 * The reader is on another CPU and just did
2393		 * a swap with our next_page.
2394		 * Try again.
2395		 */
2396		return 1;
2397	default:
2398		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
2399		return -1;
2400	}
2401
2402	/*
2403	 * Now that we are here, the old head pointer is
2404	 * set to UPDATE. This will keep the reader from
2405	 * swapping the head page with the reader page.
2406	 * The reader (on another CPU) will spin till
2407	 * we are finished.
2408	 *
2409	 * We just need to protect against interrupts
2410	 * doing the job. We will set the next pointer
2411	 * to HEAD. After that, we set the old pointer
2412	 * to NORMAL, but only if it was HEAD before;
2413	 * otherwise we are an interrupt, and only
2414	 * want the outermost commit to reset it.
2415	 */
2416	new_head = next_page;
2417	rb_inc_page(&new_head);
2418
2419	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
2420				    RB_PAGE_NORMAL);
2421
2422	/*
2423	 * Valid returns are:
2424	 *  HEAD   - an interrupt came in and already set it.
2425	 *  NORMAL - One of two things:
2426	 *            1) We really set it.
2427	 *            2) A bunch of interrupts came in and moved
2428	 *               the page forward again.
2429	 */
2430	switch (ret) {
2431	case RB_PAGE_HEAD:
2432	case RB_PAGE_NORMAL:
2433		/* OK */
2434		break;
2435	default:
2436		RB_WARN_ON(cpu_buffer, 1);
2437		return -1;
2438	}
2439
2440	/*
2441	 * It is possible that an interrupt came in,
2442	 * set the head up, then more interrupts came in
2443	 * and moved it again. When we get back here,
2444	 * the page would have been set to NORMAL but we
2445	 * just set it back to HEAD.
2446	 *
2447	 * How do you detect this? Well, if that happened
2448	 * the tail page would have moved.
2449	 */
2450	if (ret == RB_PAGE_NORMAL) {
2451		struct buffer_page *buffer_tail_page;
2452
2453		buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
2454		/*
2455		 * If the tail had moved past next, then we need
2456		 * to reset the pointer.
2457		 */
2458		if (buffer_tail_page != tail_page &&
2459		    buffer_tail_page != next_page)
2460			rb_head_page_set_normal(cpu_buffer, new_head,
2461						next_page,
2462						RB_PAGE_HEAD);
2463	}
2464
2465	/*
2466	 * If this was the outer most commit (the one that
2467	 * changed the original pointer from HEAD to UPDATE),
2468	 * then it is up to us to reset it to NORMAL.
2469	 */
2470	if (type == RB_PAGE_HEAD) {
2471		ret = rb_head_page_set_normal(cpu_buffer, next_page,
2472					      tail_page,
2473					      RB_PAGE_UPDATE);
2474		if (RB_WARN_ON(cpu_buffer,
2475			       ret != RB_PAGE_UPDATE))
2476			return -1;
2477	}
2478
2479	return 0;
2480}
2481
2482static inline void
2483rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2484	      unsigned long tail, struct rb_event_info *info)
2485{
2486	unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size);
2487	struct buffer_page *tail_page = info->tail_page;
2488	struct ring_buffer_event *event;
2489	unsigned long length = info->length;
2490
2491	/*
2492	 * Only the event that crossed the page boundary
2493	 * must fill the old tail_page with padding.
2494	 */
2495	if (tail >= bsize) {
2496		/*
2497		 * If the page was filled, then we still need
2498		 * to update the real_end. Reset it to zero
2499		 * and the reader will ignore it.
2500		 */
2501		if (tail == bsize)
2502			tail_page->real_end = 0;
2503
2504		local_sub(length, &tail_page->write);
2505		return;
2506	}
2507
2508	event = __rb_page_index(tail_page, tail);
2509
2510	/*
2511	 * Save the original length to the metadata.
2512	 * This will be used by the reader to add the lost event
2513	 * counter.
2514	 */
2515	tail_page->real_end = tail;
2516
2517	/*
2518	 * If this event is bigger than the minimum size, then
2519	 * we need to be careful that we don't subtract the
2520	 * write counter enough to allow another writer to slip
2521	 * in on this page.
2522	 * We put in a discarded commit instead, to make sure
2523	 * that this space is not used again, and this space will
2524	 * not be accounted into 'entries_bytes'.
2525	 *
2526	 * If we are less than the minimum size, we don't need to
2527	 * worry about it.
2528	 */
2529	if (tail > (bsize - RB_EVNT_MIN_SIZE)) {
2530		/* No room for any events */
2531
2532		/* Mark the rest of the page with padding */
2533		rb_event_set_padding(event);
2534
2535		/* Make sure the padding is visible before the write update */
2536		smp_wmb();
2537
2538		/* Set the write back to the previous setting */
2539		local_sub(length, &tail_page->write);
2540		return;
2541	}
2542
2543	/* Put in a discarded event */
2544	event->array[0] = (bsize - tail) - RB_EVNT_HDR_SIZE;
2545	event->type_len = RINGBUF_TYPE_PADDING;
2546	/* time delta must be non zero */
2547	event->time_delta = 1;
2548
2549	/* account for padding bytes */
2550	local_add(bsize - tail, &cpu_buffer->entries_bytes);
2551
2552	/* Make sure the padding is visible before the tail_page->write update */
2553	smp_wmb();
2554
2555	/* Set write to end of buffer */
2556	length = (tail + length) - bsize;
2557	local_sub(length, &tail_page->write);
2558}
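/*
 * Worked example (illustrative numbers): with bsize = 4080 data bytes,
 * a reservation of length = 96 starting at tail = 4000 crosses the
 * sub-buffer boundary. The 80 bytes left on the old page become one
 * RINGBUF_TYPE_PADDING event (array[0] = 80 - RB_EVNT_HDR_SIZE), and
 * only (4000 + 96) - 4080 = 16 bytes are subtracted from the write
 * counter, leaving it pointing exactly at the end of the old page.
 */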
2559
2560static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
2561
2562/*
2563 * This is the slow path, force gcc not to inline it.
2564 */
2565static noinline struct ring_buffer_event *
2566rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2567	     unsigned long tail, struct rb_event_info *info)
2568{
2569	struct buffer_page *tail_page = info->tail_page;
2570	struct buffer_page *commit_page = cpu_buffer->commit_page;
2571	struct trace_buffer *buffer = cpu_buffer->buffer;
2572	struct buffer_page *next_page;
2573	int ret;
2574
2575	next_page = tail_page;
2576
2577	rb_inc_page(&next_page);
2578
2579	/*
2580	 * If for some reason, we had an interrupt storm that made
2581	 * it all the way around the buffer, bail, and warn
2582	 * about it.
2583	 */
2584	if (unlikely(next_page == commit_page)) {
2585		local_inc(&cpu_buffer->commit_overrun);
2586		goto out_reset;
2587	}
2588
2589	/*
2590	 * This is where the fun begins!
2591	 *
2592	 * We are fighting against races between a reader that
2593	 * could be on another CPU trying to swap its reader
2594	 * page with the buffer head.
2595	 *
2596	 * We are also fighting against interrupts coming in and
2597	 * moving the head or tail on us as well.
2598	 *
2599	 * If the next page is the head page then we have filled
2600	 * the buffer, unless the commit page is still on the
2601	 * reader page.
2602	 */
2603	if (rb_is_head_page(next_page, &tail_page->list)) {
2604
2605		/*
2606		 * If the commit is not on the reader page, then
2607		 * move the header page.
2608		 */
2609		if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2610			/*
2611			 * If we are not in overwrite mode,
2612			 * this is easy, just stop here.
2613			 */
2614			if (!(buffer->flags & RB_FL_OVERWRITE)) {
2615				local_inc(&cpu_buffer->dropped_events);
2616				goto out_reset;
2617			}
2618
2619			ret = rb_handle_head_page(cpu_buffer,
2620						  tail_page,
2621						  next_page);
2622			if (ret < 0)
2623				goto out_reset;
2624			if (ret)
2625				goto out_again;
2626		} else {
2627			/*
2628			 * We need to be careful here too. The
2629			 * commit page could still be on the reader
2630			 * page. We could have a small buffer, and
2631			 * have filled up the buffer with events
2632			 * from interrupts and such, and wrapped.
2633			 *
2634			 * Note, if the tail page is also on the
2635			 * reader_page, we let it move out.
2636			 */
2637			if (unlikely((cpu_buffer->commit_page !=
2638				      cpu_buffer->tail_page) &&
2639				     (cpu_buffer->commit_page ==
2640				      cpu_buffer->reader_page))) {
2641				local_inc(&cpu_buffer->commit_overrun);
2642				goto out_reset;
2643			}
2644		}
2645	}
2646
2647	rb_tail_page_update(cpu_buffer, tail_page, next_page);
2648
2649 out_again:
2650
2651	rb_reset_tail(cpu_buffer, tail, info);
2652
2653	/* Commit what we have for now. */
2654	rb_end_commit(cpu_buffer);
2655	/* rb_end_commit() decs committing */
2656	local_inc(&cpu_buffer->committing);
2657
2658	/* fail and let the caller try again */
2659	return ERR_PTR(-EAGAIN);
2660
2661 out_reset:
2662	/* reset write */
2663	rb_reset_tail(cpu_buffer, tail, info);
2664
2665	return NULL;
2666}
2667
2668/* Slow path */
2669static struct ring_buffer_event *
2670rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2671		  struct ring_buffer_event *event, u64 delta, bool abs)
2672{
2673	if (abs)
2674		event->type_len = RINGBUF_TYPE_TIME_STAMP;
2675	else
2676		event->type_len = RINGBUF_TYPE_TIME_EXTEND;
2677
2678	/* Not the first event on the page, or not delta? */
2679	if (abs || rb_event_index(cpu_buffer, event)) {
2680		event->time_delta = delta & TS_MASK;
2681		event->array[0] = delta >> TS_SHIFT;
2682	} else {
2683		/* nope, just zero it */
2684		event->time_delta = 0;
2685		event->array[0] = 0;
2686	}
2687
2688	return skip_time_extend(event);
2689}
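
/*
 * Worked example (illustrative): with TS_SHIFT == 27 and
 * TS_MASK == (1 << 27) - 1, as defined earlier in this file, a delta
 * of 0x12345678 is stored as
 *
 *   event->time_delta = 0x12345678 & TS_MASK   = 0x02345678
 *   event->array[0]   = 0x12345678 >> TS_SHIFT = 0x2
 *
 * and a reader reconstructs it as
 *
 *   delta = ((u64)event->array[0] << TS_SHIFT) + event->time_delta;
 */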
2690
2691#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2692static inline bool sched_clock_stable(void)
2693{
2694	return true;
2695}
2696#endif
2697
2698static void
2699rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2700		   struct rb_event_info *info)
2701{
2702	u64 write_stamp;
2703
2704	WARN_ONCE(1, "Delta way too big! %llu ts=%llu before=%llu after=%llu write stamp=%llu\n%s",
2705		  (unsigned long long)info->delta,
2706		  (unsigned long long)info->ts,
2707		  (unsigned long long)info->before,
2708		  (unsigned long long)info->after,
2709		  (unsigned long long)({rb_time_read(&cpu_buffer->write_stamp, &write_stamp); write_stamp;}),
2710		  sched_clock_stable() ? "" :
2711		  "If you just came from a suspend/resume,\n"
2712		  "please switch to the trace global clock:\n"
2713		  "  echo global > /sys/kernel/tracing/trace_clock\n"
2714		  "or add trace_clock=global to the kernel command line\n");
2715}
2716
2717static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2718				      struct ring_buffer_event **event,
2719				      struct rb_event_info *info,
2720				      u64 *delta,
2721				      unsigned int *length)
2722{
2723	bool abs = info->add_timestamp &
2724		(RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE);
2725
2726	if (unlikely(info->delta > (1ULL << 59))) {
2727		/*
2728		 * Some timers can use more than 59 bits, and when a timestamp
2729		 * is added to the buffer, it will lose those bits.
2730		 */
2731		if (abs && (info->ts & TS_MSB)) {
2732			info->delta &= ABS_TS_MASK;
2733
2734		/* did the clock go backwards */
2735		} else if (info->before == info->after && info->before > info->ts) {
2736			/* not interrupted */
2737			static int once;
2738
2739			/*
2740			 * This is possible with a recalibrating of the TSC.
2741			 * Do not produce a call stack, but just report it.
2742			 */
2743			if (!once) {
2744				once++;
2745				pr_warn("Ring buffer clock went backwards: %llu -> %llu\n",
2746					info->before, info->ts);
2747			}
2748		} else
2749			rb_check_timestamp(cpu_buffer, info);
2750		if (!abs)
2751			info->delta = 0;
2752	}
2753	*event = rb_add_time_stamp(cpu_buffer, *event, info->delta, abs);
2754	*length -= RB_LEN_TIME_EXTEND;
2755	*delta = 0;
2756}
2757
2758/**
2759 * rb_update_event - update event type and data
2760 * @cpu_buffer: The per cpu buffer of the @event
2761 * @event: the event to update
2762 * @info: The info to update the @event with (contains length and delta)
2763 *
2764 * Update the type and data fields of the @event. The length
2765 * is the actual size that is written to the ring buffer,
2766 * and with this, we can determine what to place into the
2767 * data field.
2768 */
2769static void
2770rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
2771		struct ring_buffer_event *event,
2772		struct rb_event_info *info)
2773{
2774	unsigned length = info->length;
2775	u64 delta = info->delta;
2776	unsigned int nest = local_read(&cpu_buffer->committing) - 1;
2777
2778	if (!WARN_ON_ONCE(nest >= MAX_NEST))
2779		cpu_buffer->event_stamp[nest] = info->ts;
2780
2781	/*
2782	 * If we need to add a timestamp, then we
2783	 * add it to the start of the reserved space.
2784	 */
2785	if (unlikely(info->add_timestamp))
2786		rb_add_timestamp(cpu_buffer, &event, info, &delta, &length);
2787
2788	event->time_delta = delta;
2789	length -= RB_EVNT_HDR_SIZE;
2790	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
2791		event->type_len = 0;
2792		event->array[0] = length;
2793	} else
2794		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
2795}
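
/*
 * Illustration (assuming RB_ALIGNMENT == 4 and RB_EVNT_HDR_SIZE == 4):
 * a 16-byte event carries 12 bytes of data and is encoded with
 *
 *   type_len = DIV_ROUND_UP(16 - 4, 4) = 3
 *
 * so a reader recovers the data length as type_len * RB_ALIGNMENT.
 * Only lengths above RB_MAX_SMALL_DATA (or a forced 8-byte alignment)
 * fall back to type_len == 0 with the length stored in array[0].
 */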
2796
2797static unsigned rb_calculate_event_length(unsigned length)
2798{
2799	struct ring_buffer_event event; /* Used only for sizeof array */
2800
2801	/* zero length can cause confusions */
2802	if (!length)
2803		length++;
2804
2805	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
2806		length += sizeof(event.array[0]);
2807
2808	length += RB_EVNT_HDR_SIZE;
2809	length = ALIGN(length, RB_ARCH_ALIGNMENT);
2810
2811	/*
2812	 * In case the time delta is larger than the 27 bits for it
2813	 * in the header, we need to add a timestamp. If another
2814	 * event comes in when trying to discard this one to increase
2815	 * the length, then the timestamp will be added in the allocated
2816	 * space of this event. If length is bigger than the size needed
2817	 * for the TIME_EXTEND, then padding has to be used. The event's
2818	 * length must be either RB_LEN_TIME_EXTEND, or greater than or equal
2819	 * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding.
2820	 * As length is a multiple of 4, we only need to worry if it
2821	 * is 12 (RB_LEN_TIME_EXTEND + 4).
2822	 */
2823	if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
2824		length += RB_ALIGNMENT;
2825
2826	return length;
2827}
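
/*
 * Worked example (illustrative, assuming 4-byte alignment without
 * RB_FORCE_8BYTE_ALIGNMENT): a request for 5 bytes of data becomes
 *
 *   5 + RB_EVNT_HDR_SIZE (4)  =  9
 *   ALIGN(9, 4)               = 12  == RB_LEN_TIME_EXTEND + 4
 *   12 + RB_ALIGNMENT         = 16
 *
 * where the final bump avoids the 12-byte size that could hold a
 * TIME_EXTEND (8 bytes) but not the 8-byte minimum padding after it.
 */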
2828
2829static inline bool
2830rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2831		  struct ring_buffer_event *event)
2832{
2833	unsigned long new_index, old_index;
2834	struct buffer_page *bpage;
2835	unsigned long addr;
2836
2837	new_index = rb_event_index(cpu_buffer, event);
2838	old_index = new_index + rb_event_ts_length(event);
2839	addr = (unsigned long)event;
2840	addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1);
2841
2842	bpage = READ_ONCE(cpu_buffer->tail_page);
2843
2844	/*
2845	 * Make sure the tail_page is still the same and
2846	 * the next write location is the end of this event
2847	 */
2848	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2849		unsigned long write_mask =
2850			local_read(&bpage->write) & ~RB_WRITE_MASK;
2851		unsigned long event_length = rb_event_length(event);
2852
2853		/*
2854		 * Force the before_stamp to be different from the write_stamp
2855		 * to make sure that the next event adds an absolute
2856		 * value and does not rely on the saved write stamp, which
2857		 * is now going to be bogus.
2858		 *
2859		 * By setting the before_stamp to zero, the next event
2860		 * is not going to use the write_stamp and will instead
2861		 * create an absolute timestamp. This means there's no
2862		 * reason to update the write_stamp!
2863		 */
2864		rb_time_set(&cpu_buffer->before_stamp, 0);
2865
2866		/*
2867		 * If an event were to come in now, it would see that the
2868		 * write_stamp and the before_stamp are different, and assume
2869		 * that this event just added itself before updating
2870		 * the write stamp. The interrupting event will fix the
2871		 * write stamp for us, and use an absolute timestamp.
2872		 */
2873
2874		/*
2875		 * This is on the tail page. It is possible that
2876		 * a write could come in and move the tail page
2877		 * and write to the next page. That is fine
2878		 * because we just shorten what is on this page.
2879		 */
2880		old_index += write_mask;
2881		new_index += write_mask;
2882
2883		/* caution: old_index gets updated on cmpxchg failure */
2884		if (local_try_cmpxchg(&bpage->write, &old_index, new_index)) {
2885			/* update counters */
2886			local_sub(event_length, &cpu_buffer->entries_bytes);
2887			return true;
2888		}
2889	}
2890
2891	/* could not discard */
2892	return false;
2893}
2894
2895static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2896{
2897	local_inc(&cpu_buffer->committing);
2898	local_inc(&cpu_buffer->commits);
2899}
2900
2901static __always_inline void
2902rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
2903{
2904	unsigned long max_count;
2905
2906	/*
2907	 * We only race with interrupts and NMIs on this CPU.
2908	 * If we own the commit event, then we can commit
2909	 * all others that interrupted us, since the interruptions
2910	 * are in stack format (they finish before they come
2911	 * back to us). This allows us to do a simple loop to
2912	 * assign the commit to the tail.
2913	 */
2914 again:
2915	max_count = cpu_buffer->nr_pages * 100;
2916
2917	while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
2918		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
2919			return;
2920		if (RB_WARN_ON(cpu_buffer,
2921			       rb_is_reader_page(cpu_buffer->tail_page)))
2922			return;
2923		/*
2924		 * No need for a memory barrier here, as the update
2925		 * of the tail_page did it for this page.
2926		 */
2927		local_set(&cpu_buffer->commit_page->page->commit,
2928			  rb_page_write(cpu_buffer->commit_page));
2929		rb_inc_page(&cpu_buffer->commit_page);
2930		/* add barrier to keep gcc from optimizing too much */
2931		barrier();
2932	}
2933	while (rb_commit_index(cpu_buffer) !=
2934	       rb_page_write(cpu_buffer->commit_page)) {
2935
2936		/* Make sure the readers see the content of what is committed. */
2937		smp_wmb();
2938		local_set(&cpu_buffer->commit_page->page->commit,
2939			  rb_page_write(cpu_buffer->commit_page));
2940		RB_WARN_ON(cpu_buffer,
2941			   local_read(&cpu_buffer->commit_page->page->commit) &
2942			   ~RB_WRITE_MASK);
2943		barrier();
2944	}
2945
2946	/* again, keep gcc from optimizing */
2947	barrier();
2948
2949	/*
2950	 * If an interrupt came in just after the first while loop
2951	 * and pushed the tail page forward, we will be left with
2952	 * a dangling commit that will never go forward.
2953	 */
2954	if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
2955		goto again;
2956}
2957
2958static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2959{
2960	unsigned long commits;
2961
2962	if (RB_WARN_ON(cpu_buffer,
2963		       !local_read(&cpu_buffer->committing)))
2964		return;
2965
2966 again:
2967	commits = local_read(&cpu_buffer->commits);
2968	/* synchronize with interrupts */
2969	barrier();
2970	if (local_read(&cpu_buffer->committing) == 1)
2971		rb_set_commit_to_write(cpu_buffer);
2972
2973	local_dec(&cpu_buffer->committing);
2974
2975	/* synchronize with interrupts */
2976	barrier();
2977
2978	/*
2979	 * Need to account for interrupts coming in between the
2980	 * updating of the commit page and the clearing of the
2981	 * committing counter.
2982	 */
2983	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2984	    !local_read(&cpu_buffer->committing)) {
2985		local_inc(&cpu_buffer->committing);
2986		goto again;
2987	}
2988}
2989
2990static inline void rb_event_discard(struct ring_buffer_event *event)
2991{
2992	if (extended_time(event))
2993		event = skip_time_extend(event);
2994
2995	/* array[0] holds the actual length for the discarded event */
2996	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2997	event->type_len = RINGBUF_TYPE_PADDING;
2998	/* time delta must be non zero */
2999	if (!event->time_delta)
3000		event->time_delta = 1;
3001}
3002
3003static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer)
3004{
3005	local_inc(&cpu_buffer->entries);
3006	rb_end_commit(cpu_buffer);
3007}
3008
3009static __always_inline void
3010rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
3011{
3012	if (buffer->irq_work.waiters_pending) {
3013		buffer->irq_work.waiters_pending = false;
3014		/* irq_work_queue() supplies its own memory barriers */
3015		irq_work_queue(&buffer->irq_work.work);
3016	}
3017
3018	if (cpu_buffer->irq_work.waiters_pending) {
3019		cpu_buffer->irq_work.waiters_pending = false;
3020		/* irq_work_queue() supplies its own memory barriers */
3021		irq_work_queue(&cpu_buffer->irq_work.work);
3022	}
3023
3024	if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched))
3025		return;
3026
3027	if (cpu_buffer->reader_page == cpu_buffer->commit_page)
3028		return;
3029
3030	if (!cpu_buffer->irq_work.full_waiters_pending)
3031		return;
3032
3033	cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
3034
3035	if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full))
3036		return;
3037
3038	cpu_buffer->irq_work.wakeup_full = true;
3039	cpu_buffer->irq_work.full_waiters_pending = false;
3040	/* irq_work_queue() supplies its own memory barriers */
3041	irq_work_queue(&cpu_buffer->irq_work.work);
3042}
3043
3044#ifdef CONFIG_RING_BUFFER_RECORD_RECURSION
3045# define do_ring_buffer_record_recursion()	\
3046	do_ftrace_record_recursion(_THIS_IP_, _RET_IP_)
3047#else
3048# define do_ring_buffer_record_recursion() do { } while (0)
3049#endif
3050
3051/*
3052 * The lock and unlock are done within a preempt disable section.
3053 * The current_context per_cpu variable can only be modified
3054 * by the current task between lock and unlock. But it can
3055 * be modified more than once via an interrupt. To pass this
3056 * information from the lock to the unlock without having to
3057 * access the 'in_interrupt()' functions again (which do show
3058 * a bit of overhead in something as critical as function tracing),
3059 * we use a bitmask trick.
3060 *
3061 *  bit 1 =  NMI context
3062 *  bit 2 =  IRQ context
3063 *  bit 3 =  SoftIRQ context
3064 *  bit 4 =  normal context.
3065 *
3066 * This works because this is the order of contexts that can
3067 * preempt other contexts. A SoftIRQ never preempts an IRQ
3068 * context.
3069 *
3070 * When the context is determined, the corresponding bit is
3071 * checked and set (if it was set, then a recursion of that context
3072 * happened).
3073 *
3074 * On unlock, we need to clear this bit. To do so, just subtract
3075 * 1 from the current_context and AND it to itself.
3076 *
3077 * (binary)
3078 *  101 - 1 = 100
3079 *  101 & 100 = 100 (clearing bit zero)
3080 *
3081 *  1010 - 1 = 1001
3082 *  1010 & 1001 = 1000 (clearing bit 1)
3083 *
3084 * The least significant bit can be cleared this way, and it
3085 * just so happens that it is the same bit corresponding to
3086 * the current context.
3087 *
3088 * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit
3089 * is set when a recursion is detected at the current context, and if
3090 * the TRANSITION bit is already set, it will fail the recursion.
3091 * This is needed because there's a lag between the changing of
3092 * interrupt context and updating the preempt count. In this case,
3093 * a false positive will be found. To handle this, one extra recursion
3094 * is allowed, and this is done by the TRANSITION bit. If the TRANSITION
3095 * bit is already set, then it is considered a recursion and the function
3096 * ends. Otherwise, the TRANSITION bit is set, and that bit is returned.
3097 *
3098 * On the trace_recursive_unlock(), the TRANSITION bit will be the first
3099 * to be cleared. Even if it wasn't the context that set it. That is,
3100 * if an interrupt comes in while NORMAL bit is set and the ring buffer
3101 * is called before preempt_count() is updated, since the check will
3102 * be on the NORMAL bit, the TRANSITION bit will then be set. If an
3103 * NMI then comes in, it will set the NMI bit, but when the NMI code
3104 * does the trace_recursive_unlock() it will clear the TRANSITION bit
3105 * and leave the NMI bit set. But this is fine, because the interrupt
3106 * code that set the TRANSITION bit will then clear the NMI bit when it
3107 * calls trace_recursive_unlock(). If another NMI comes in, it will
3108 * set the TRANSITION bit and continue.
3109 *
3110 * Note: The TRANSITION bit only handles a single transition between contexts.
3111 */
3112
3113static __always_inline bool
3114trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
3115{
3116	unsigned int val = cpu_buffer->current_context;
3117	int bit = interrupt_context_level();
3118
3119	bit = RB_CTX_NORMAL - bit;
3120
3121	if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) {
3122		/*
3123		 * It is possible that this was called by transitioning
3124		 * between interrupt context, and preempt_count() has not
3125		 * been updated yet. In this case, use the TRANSITION bit.
3126		 */
3127		bit = RB_CTX_TRANSITION;
3128		if (val & (1 << (bit + cpu_buffer->nest))) {
3129			do_ring_buffer_record_recursion();
3130			return true;
3131		}
3132	}
3133
3134	val |= (1 << (bit + cpu_buffer->nest));
3135	cpu_buffer->current_context = val;
3136
3137	return false;
3138}
3139
3140static __always_inline void
3141trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
3142{
3143	cpu_buffer->current_context &=
3144		cpu_buffer->current_context - (1 << cpu_buffer->nest);
3145}
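
/*
 * Minimal userspace model of the bitmask trick above (illustrative,
 * not part of the kernel source): lock sets the bit for the current
 * context, unlock clears the least significant set bit, which is
 * exactly the bit the matching lock set.
 */
#if 0	/* example only */
static unsigned int ctx;

static bool model_lock(unsigned int bit)
{
	if (ctx & (1U << bit))
		return false;		/* recursion detected */
	ctx |= 1U << bit;
	return true;
}

static void model_unlock(void)
{
	ctx &= ctx - 1;			/* clear the lowest set bit */
}
#endif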
3146
3147/* The recursive locking above uses 5 bits */
3148#define NESTED_BITS 5
3149
3150/**
3151 * ring_buffer_nest_start - Allow tracing while nested
3152 * @buffer: The ring buffer to modify
3153 *
3154 * The ring buffer has a safety mechanism to prevent recursion.
3155 * But there may be a case where a trace needs to be done while
3156 * tracing something else. In this case, calling this function
3157 * will allow another ring_buffer_lock_reserve() to nest within
3158 * the currently active one.
3159 *
3160 * Call this function before calling another ring_buffer_lock_reserve() and
3161 * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
3162 */
3163void ring_buffer_nest_start(struct trace_buffer *buffer)
3164{
3165	struct ring_buffer_per_cpu *cpu_buffer;
3166	int cpu;
3167
3168	/* Enabled by ring_buffer_nest_end() */
3169	preempt_disable_notrace();
3170	cpu = raw_smp_processor_id();
3171	cpu_buffer = buffer->buffers[cpu];
3172	/* This is the shift value for the above recursive locking */
3173	cpu_buffer->nest += NESTED_BITS;
3174}
3175
3176/**
3177 * ring_buffer_nest_end - End a nested tracing section
3178 * @buffer: The ring buffer to modify
3179 *
3180 * Must be called after ring_buffer_nest_start() and after the
3181 * ring_buffer_unlock_commit().
3182 */
3183void ring_buffer_nest_end(struct trace_buffer *buffer)
3184{
3185	struct ring_buffer_per_cpu *cpu_buffer;
3186	int cpu;
3187
3188	/* disabled by ring_buffer_nest_start() */
3189	cpu = raw_smp_processor_id();
3190	cpu_buffer = buffer->buffers[cpu];
3191	/* This is the shift value for the above recursive locking */
3192	cpu_buffer->nest -= NESTED_BITS;
3193	preempt_enable_notrace();
3194}
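
/*
 * Illustrative usage of the nesting API (hypothetical caller): emit an
 * event from code that already sits inside an active
 * ring_buffer_lock_reserve() section on this CPU.
 */
#if 0	/* example only */
	struct ring_buffer_event *event;
	u32 *data;

	ring_buffer_nest_start(buffer);
	event = ring_buffer_lock_reserve(buffer, sizeof(*data));
	if (event) {
		data = ring_buffer_event_data(event);
		*data = 42;
		ring_buffer_unlock_commit(buffer);
	}
	ring_buffer_nest_end(buffer);
#endif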
3195
3196/**
3197 * ring_buffer_unlock_commit - commit a reserved event
3198 * @buffer: The buffer to commit to
3199 *
3200 * This commits the data to the ring buffer, and releases any locks held.
3201 *
3202 * Must be paired with ring_buffer_lock_reserve.
3203 */
3204int ring_buffer_unlock_commit(struct trace_buffer *buffer)
3205{
3206	struct ring_buffer_per_cpu *cpu_buffer;
3207	int cpu = raw_smp_processor_id();
3208
3209	cpu_buffer = buffer->buffers[cpu];
3210
3211	rb_commit(cpu_buffer);
3212
3213	rb_wakeups(buffer, cpu_buffer);
3214
3215	trace_recursive_unlock(cpu_buffer);
3216
3217	preempt_enable_notrace();
3218
3219	return 0;
3220}
3221EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
3222
3223/* Special value to validate all deltas on a page. */
3224#define CHECK_FULL_PAGE		1L
3225
3226#ifdef CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS
3227
3228static const char *show_irq_str(int bits)
3229{
3230	const char *type[] = {
3231		".",	// 0
3232		"s",	// 1
3233		"h",	// 2
3234		"Hs",	// 3
3235		"n",	// 4
3236		"Ns",	// 5
3237		"Nh",	// 6
3238		"NHs",	// 7
3239	};
3240
3241	return type[bits];
3242}
3243
3244/* Assume this is a trace event */
3245static const char *show_flags(struct ring_buffer_event *event)
3246{
3247	struct trace_entry *entry;
3248	int bits = 0;
3249
3250	if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry))
3251		return "X";
3252
3253	entry = ring_buffer_event_data(event);
3254
3255	if (entry->flags & TRACE_FLAG_SOFTIRQ)
3256		bits |= 1;
3257
3258	if (entry->flags & TRACE_FLAG_HARDIRQ)
3259		bits |= 2;
3260
3261	if (entry->flags & TRACE_FLAG_NMI)
3262		bits |= 4;
3263
3264	return show_irq_str(bits);
3265}
3266
3267static const char *show_irq(struct ring_buffer_event *event)
3268{
3269	struct trace_entry *entry;
3270
3271	if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry))
3272		return "";
3273
3274	entry = ring_buffer_event_data(event);
3275	if (entry->flags & TRACE_FLAG_IRQS_OFF)
3276		return "d";
3277	return "";
3278}
3279
3280static const char *show_interrupt_level(void)
3281{
3282	unsigned long pc = preempt_count();
3283	unsigned char level = 0;
3284
3285	if (pc & SOFTIRQ_OFFSET)
3286		level |= 1;
3287
3288	if (pc & HARDIRQ_MASK)
3289		level |= 2;
3290
3291	if (pc & NMI_MASK)
3292		level |= 4;
3293
3294	return show_irq_str(level);
3295}
3296
3297static void dump_buffer_page(struct buffer_data_page *bpage,
3298			     struct rb_event_info *info,
3299			     unsigned long tail)
3300{
3301	struct ring_buffer_event *event;
3302	u64 ts, delta;
3303	int e;
3304
3305	ts = bpage->time_stamp;
3306	pr_warn("  [%lld] PAGE TIME STAMP\n", ts);
3307
3308	for (e = 0; e < tail; e += rb_event_length(event)) {
3309
3310		event = (struct ring_buffer_event *)(bpage->data + e);
3311
3312		switch (event->type_len) {
3313
3314		case RINGBUF_TYPE_TIME_EXTEND:
3315			delta = rb_event_time_stamp(event);
3316			ts += delta;
3317			pr_warn(" 0x%x: [%lld] delta:%lld TIME EXTEND\n",
3318				e, ts, delta);
3319			break;
3320
3321		case RINGBUF_TYPE_TIME_STAMP:
3322			delta = rb_event_time_stamp(event);
3323			ts = rb_fix_abs_ts(delta, ts);
3324			pr_warn(" 0x%x:  [%lld] absolute:%lld TIME STAMP\n",
3325				e, ts, delta);
3326			break;
3327
3328		case RINGBUF_TYPE_PADDING:
3329			ts += event->time_delta;
3330			pr_warn(" 0x%x:  [%lld] delta:%d PADDING\n",
3331				e, ts, event->time_delta);
3332			break;
3333
3334		case RINGBUF_TYPE_DATA:
3335			ts += event->time_delta;
3336			pr_warn(" 0x%x:  [%lld] delta:%d %s%s\n",
3337				e, ts, event->time_delta,
3338				show_flags(event), show_irq(event));
3339			break;
3340
3341		default:
3342			break;
3343		}
3344	}
3345	pr_warn("expected end:0x%lx last event actually ended at:0x%x\n", tail, e);
3346}
3347
3348static DEFINE_PER_CPU(atomic_t, checking);
3349static atomic_t ts_dump;
3350
3351#define buffer_warn_return(fmt, ...)					\
3352	do {								\
3353		/* If another report is happening, ignore this one */	\
3354		if (atomic_inc_return(&ts_dump) != 1) {			\
3355			atomic_dec(&ts_dump);				\
3356			goto out;					\
3357		}							\
3358		atomic_inc(&cpu_buffer->record_disabled);		\
3359		pr_warn(fmt, ##__VA_ARGS__);				\
3360		dump_buffer_page(bpage, info, tail);			\
3361		atomic_dec(&ts_dump);					\
3362		/* There's some cases in boot up that this can happen */ \
3363		if (WARN_ON_ONCE(system_state != SYSTEM_BOOTING))	\
3364			/* Do not re-enable checking */			\
3365			return;						\
3366	} while (0)
3367
3368/*
3369 * Check if the current event time stamp matches the deltas on
3370 * the buffer page.
3371 */
3372static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
3373			 struct rb_event_info *info,
3374			 unsigned long tail)
3375{
3376	struct ring_buffer_event *event;
3377	struct buffer_data_page *bpage;
3378	u64 ts, delta;
3379	bool full = false;
3380	int e;
3381
3382	bpage = info->tail_page->page;
3383
3384	if (tail == CHECK_FULL_PAGE) {
3385		full = true;
3386		tail = local_read(&bpage->commit);
3387	} else if (info->add_timestamp &
3388		   (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)) {
3389		/* Ignore events with absolute time stamps */
3390		return;
3391	}
3392
3393	/*
3394	 * Do not check the first event (skip possible extends too).
3395	 * Also do not check if previous events have not been committed.
3396	 */
3397	if (tail <= 8 || tail > local_read(&bpage->commit))
3398		return;
3399
3400	/*
3401	 * If this interrupted another event's check, skip this one.
3402	 */
3403	if (atomic_inc_return(this_cpu_ptr(&checking)) != 1)
3404		goto out;
3405
3406	ts = bpage->time_stamp;
3407
3408	for (e = 0; e < tail; e += rb_event_length(event)) {
3409
3410		event = (struct ring_buffer_event *)(bpage->data + e);
3411
3412		switch (event->type_len) {
3413
3414		case RINGBUF_TYPE_TIME_EXTEND:
3415			delta = rb_event_time_stamp(event);
3416			ts += delta;
3417			break;
3418
3419		case RINGBUF_TYPE_TIME_STAMP:
3420			delta = rb_event_time_stamp(event);
3421			delta = rb_fix_abs_ts(delta, ts);
3422			if (delta < ts) {
3423				buffer_warn_return("[CPU: %d]ABSOLUTE TIME WENT BACKWARDS: last ts: %lld absolute ts: %lld\n",
3424						   cpu_buffer->cpu, ts, delta);
3425			}
3426			ts = delta;
3427			break;
3428
3429		case RINGBUF_TYPE_PADDING:
3430			if (event->time_delta == 1)
3431				break;
3432			fallthrough;
3433		case RINGBUF_TYPE_DATA:
3434			ts += event->time_delta;
3435			break;
3436
3437		default:
3438			RB_WARN_ON(cpu_buffer, 1);
3439		}
3440	}
3441	if ((full && ts > info->ts) ||
3442	    (!full && ts + info->delta != info->ts)) {
3443		buffer_warn_return("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%s context:%s\n",
3444				   cpu_buffer->cpu,
3445				   ts + info->delta, info->ts, info->delta,
3446				   info->before, info->after,
3447				   full ? " (full)" : "", show_interrupt_level());
3448	}
3449out:
3450	atomic_dec(this_cpu_ptr(&checking));
3451}
3452#else
3453static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
3454			 struct rb_event_info *info,
3455			 unsigned long tail)
3456{
3457}
3458#endif /* CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS */
3459
3460static struct ring_buffer_event *
3461__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
3462		  struct rb_event_info *info)
3463{
3464	struct ring_buffer_event *event;
3465	struct buffer_page *tail_page;
3466	unsigned long tail, write, w;
3467
3468	/* Don't let the compiler play games with cpu_buffer->tail_page */
3469	tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
3470
3471 /*A*/	w = local_read(&tail_page->write) & RB_WRITE_MASK;
3472	barrier();
3473	rb_time_read(&cpu_buffer->before_stamp, &info->before);
3474	rb_time_read(&cpu_buffer->write_stamp, &info->after);
3475	barrier();
3476	info->ts = rb_time_stamp(cpu_buffer->buffer);
3477
3478	if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) {
3479		info->delta = info->ts;
3480	} else {
3481		/*
3482		 * If interrupting an event time update, we may need an
3483		 * absolute timestamp.
3484		 * Don't bother if this is the start of a new page (w == 0).
3485		 */
3486		if (!w) {
3487			/* Use the sub-buffer timestamp */
3488			info->delta = 0;
3489		} else if (unlikely(info->before != info->after)) {
3490			info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND;
3491			info->length += RB_LEN_TIME_EXTEND;
3492		} else {
3493			info->delta = info->ts - info->after;
3494			if (unlikely(test_time_stamp(info->delta))) {
3495				info->add_timestamp |= RB_ADD_STAMP_EXTEND;
3496				info->length += RB_LEN_TIME_EXTEND;
3497			}
3498		}
3499	}
3500
3501 /*B*/	rb_time_set(&cpu_buffer->before_stamp, info->ts);
3502
3503 /*C*/	write = local_add_return(info->length, &tail_page->write);
3504
3505	/* set write to only the index of the write */
3506	write &= RB_WRITE_MASK;
3507
3508	tail = write - info->length;
3509
3510	/* See if we shot past the end of this buffer page */
3511	if (unlikely(write > cpu_buffer->buffer->subbuf_size)) {
3512		check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
3513		return rb_move_tail(cpu_buffer, tail, info);
3514	}
3515
3516	if (likely(tail == w)) {
3517		/* Nothing interrupted us between A and C */
3518 /*D*/		rb_time_set(&cpu_buffer->write_stamp, info->ts);
3519		/*
3520		 * If something came in between C and D, the write stamp
3521		 * may now not be in sync. But that's fine as the before_stamp
3522		 * will be different and the next event will just be forced
3523		 * to use an absolute timestamp.
3524		 */
3525		if (likely(!(info->add_timestamp &
3526			     (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
3527			/* This did not interrupt any time update */
3528			info->delta = info->ts - info->after;
3529		else
3530			/* Just use full timestamp for interrupting event */
3531			info->delta = info->ts;
3532		check_buffer(cpu_buffer, info, tail);
3533	} else {
3534		u64 ts;
3535		/* SLOW PATH - Interrupted between A and C */
3536
3537		/* Save the old before_stamp */
3538		rb_time_read(&cpu_buffer->before_stamp, &info->before);
3539
3540		/*
3541		 * Read a new timestamp and update the before_stamp to make
3542		 * the next event after this one force using an absolute
3543		 * timestamp. This is in case an interrupt were to come in
3544		 * between E and F.
3545		 */
3546		ts = rb_time_stamp(cpu_buffer->buffer);
3547		rb_time_set(&cpu_buffer->before_stamp, ts);
3548
3549		barrier();
3550 /*E*/		rb_time_read(&cpu_buffer->write_stamp, &info->after);
3551		barrier();
3552 /*F*/		if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
3553		    info->after == info->before && info->after < ts) {
3554			/*
3555			 * Nothing came after this event between C and F, it is
3556			 * safe to use info->after for the delta as it
3557			 * matched info->before and is still valid.
3558			 */
3559			info->delta = ts - info->after;
3560		} else {
3561			/*
3562			 * Interrupted between C and F:
3563			 * Lost the previous event's time stamp. Just set the
3564			 * delta to zero, and this will be the same time as
3565			 * the event this event interrupted. And the events that
3566			 * came after this will still be correct (as they would
3567			 * have built their delta on the previous event).
3568			 */
3569			info->delta = 0;
3570		}
3571		info->ts = ts;
3572		info->add_timestamp &= ~RB_ADD_STAMP_FORCE;
3573	}
3574
3575	/*
3576	 * If this is the first commit on the page, then it has the same
3577	 * timestamp as the page itself.
3578	 */
3579	if (unlikely(!tail && !(info->add_timestamp &
3580				(RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
3581		info->delta = 0;
3582
3583	/* We reserved something on the buffer */
3584
3585	event = __rb_page_index(tail_page, tail);
3586	rb_update_event(cpu_buffer, event, info);
3587
3588	local_inc(&tail_page->entries);
3589
3590	/*
3591	 * If this is the first commit on the page, then update
3592	 * its timestamp.
3593	 */
3594	if (unlikely(!tail))
3595		tail_page->page->time_stamp = info->ts;
3596
3597	/* account for these added bytes */
3598	local_add(info->length, &cpu_buffer->entries_bytes);
3599
3600	return event;
3601}
3602
3603static __always_inline struct ring_buffer_event *
3604rb_reserve_next_event(struct trace_buffer *buffer,
3605		      struct ring_buffer_per_cpu *cpu_buffer,
3606		      unsigned long length)
3607{
3608	struct ring_buffer_event *event;
3609	struct rb_event_info info;
3610	int nr_loops = 0;
3611	int add_ts_default;
3612
3613	/* ring buffer does cmpxchg, make sure it is safe in NMI context */
3614	if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) &&
3615	    (unlikely(in_nmi()))) {
3616		return NULL;
3617	}
3618
3619	rb_start_commit(cpu_buffer);
3620	/* The commit page can not change after this */
3621
3622#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3623	/*
3624	 * Due to the ability to swap a cpu buffer from a buffer
3625	 * it is possible it was swapped before we committed.
3626	 * (committing stops a swap). We check for it here and
3627	 * if it happened, we have to fail the write.
3628	 */
3629	barrier();
3630	if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) {
3631		local_dec(&cpu_buffer->committing);
3632		local_dec(&cpu_buffer->commits);
3633		return NULL;
3634	}
3635#endif
3636
3637	info.length = rb_calculate_event_length(length);
3638
3639	if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) {
3640		add_ts_default = RB_ADD_STAMP_ABSOLUTE;
3641		info.length += RB_LEN_TIME_EXTEND;
3642		if (info.length > cpu_buffer->buffer->max_data_size)
3643			goto out_fail;
3644	} else {
3645		add_ts_default = RB_ADD_STAMP_NONE;
3646	}
3647
3648 again:
3649	info.add_timestamp = add_ts_default;
3650	info.delta = 0;
3651
3652	/*
3653	 * We allow for interrupts to reenter here and do a trace.
3654	 * If one does, it will cause this original code to loop
3655	 * back here. Even with heavy interrupts happening, this
3656	 * should only happen a few times in a row. If this happens
3657	 * 1000 times in a row, there must be either an interrupt
3658	 * storm or we have something buggy.
3659	 * Bail!
3660	 */
3661	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
3662		goto out_fail;
3663
3664	event = __rb_reserve_next(cpu_buffer, &info);
3665
3666	if (unlikely(PTR_ERR(event) == -EAGAIN)) {
3667		if (info.add_timestamp & (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND))
3668			info.length -= RB_LEN_TIME_EXTEND;
3669		goto again;
3670	}
3671
3672	if (likely(event))
3673		return event;
3674 out_fail:
3675	rb_end_commit(cpu_buffer);
3676	return NULL;
3677}
3678
3679/**
3680 * ring_buffer_lock_reserve - reserve a part of the buffer
3681 * @buffer: the ring buffer to reserve from
3682 * @length: the length of the data to reserve (excluding event header)
3683 *
3684 * Returns a reserved event on the ring buffer to copy directly to.
3685 * The user of this interface will need to get the body to write into
3686 * and can use the ring_buffer_event_data() interface.
3687 *
3688 * The length is the length of the data needed, not the event length
3689 * which also includes the event header.
3690 *
3691 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
3692 * If NULL is returned, then nothing has been allocated or locked.
3693 */
3694struct ring_buffer_event *
3695ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length)
3696{
3697	struct ring_buffer_per_cpu *cpu_buffer;
3698	struct ring_buffer_event *event;
3699	int cpu;
3700
3701	/* If we are tracing schedule, we don't want to recurse */
3702	preempt_disable_notrace();
3703
3704	if (unlikely(atomic_read(&buffer->record_disabled)))
3705		goto out;
3706
3707	cpu = raw_smp_processor_id();
3708
3709	if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
3710		goto out;
3711
3712	cpu_buffer = buffer->buffers[cpu];
3713
3714	if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
3715		goto out;
3716
3717	if (unlikely(length > buffer->max_data_size))
3718		goto out;
3719
3720	if (unlikely(trace_recursive_lock(cpu_buffer)))
3721		goto out;
3722
3723	event = rb_reserve_next_event(buffer, cpu_buffer, length);
3724	if (!event)
3725		goto out_unlock;
3726
3727	return event;
3728
3729 out_unlock:
3730	trace_recursive_unlock(cpu_buffer);
3731 out:
3732	preempt_enable_notrace();
3733	return NULL;
3734}
3735EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
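
/*
 * Illustrative usage (hypothetical caller and payload type), showing
 * the required pairing with ring_buffer_unlock_commit():
 */
#if 0	/* example only */
struct my_payload {		/* hypothetical */
	int	value;
};

static int my_write(struct trace_buffer *buffer, int value)
{
	struct ring_buffer_event *event;
	struct my_payload *p;

	event = ring_buffer_lock_reserve(buffer, sizeof(*p));
	if (!event)
		return -EBUSY;

	p = ring_buffer_event_data(event);
	p->value = value;

	return ring_buffer_unlock_commit(buffer);
}
#endif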
3736
3737/*
3738 * Decrement the entries to the page that an event is on.
3739 * The event does not even need to exist, only the pointer
3740 * to the page it is on. This may only be called before the commit
3741 * takes place.
3742 */
3743static inline void
3744rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
3745		   struct ring_buffer_event *event)
3746{
3747	unsigned long addr = (unsigned long)event;
3748	struct buffer_page *bpage = cpu_buffer->commit_page;
3749	struct buffer_page *start;
3750
3751	addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1);
3752
3753	/* Do the likely case first */
3754	if (likely(bpage->page == (void *)addr)) {
3755		local_dec(&bpage->entries);
3756		return;
3757	}
3758
3759	/*
3760	 * Because the commit page may be on the reader page we
3761	 * start with the next page and check the end loop there.
3762	 */
3763	rb_inc_page(&bpage);
3764	start = bpage;
3765	do {
3766		if (bpage->page == (void *)addr) {
3767			local_dec(&bpage->entries);
3768			return;
3769		}
3770		rb_inc_page(&bpage);
3771	} while (bpage != start);
3772
3773	/* commit not part of this buffer?? */
3774	RB_WARN_ON(cpu_buffer, 1);
3775}
3776
3777/**
3778 * ring_buffer_discard_commit - discard an event that has not been committed
3779 * @buffer: the ring buffer
3780 * @event: non committed event to discard
3781 *
3782 * Sometimes an event that is in the ring buffer needs to be ignored.
3783 * This function lets the user discard an event in the ring buffer
3784 * and then that event will not be read later.
3785 *
3786 * This function only works if it is called before the item has been
3787 * committed. It will try to free the event from the ring buffer
3788 * if another event has not been added behind it.
3789 *
3790 * If another event has been added behind it, it will set the event
3791 * up as discarded, and perform the commit.
3792 *
3793 * If this function is called, do not call ring_buffer_unlock_commit on
3794 * the event.
3795 */
3796void ring_buffer_discard_commit(struct trace_buffer *buffer,
3797				struct ring_buffer_event *event)
3798{
3799	struct ring_buffer_per_cpu *cpu_buffer;
3800	int cpu;
3801
3802	/* The event is discarded regardless */
3803	rb_event_discard(event);
3804
3805	cpu = smp_processor_id();
3806	cpu_buffer = buffer->buffers[cpu];
3807
3808	/*
3809	 * This must only be called if the event has not been
3810	 * committed yet. Thus we can assume that preemption
3811	 * is still disabled.
3812	 */
3813	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
3814
3815	rb_decrement_entry(cpu_buffer, event);
3816	/* End the commit regardless of whether the discard succeeded */
3817	rb_try_to_discard(cpu_buffer, event);
3818
3820	rb_end_commit(cpu_buffer);
3821
3822	trace_recursive_unlock(cpu_buffer);
3823
3824	preempt_enable_notrace();
3825
3826}
3827EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
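
/*
 * Illustrative usage (hypothetical): reserve an event, then drop it
 * instead of committing when it turns out not to be wanted. Exactly
 * one of discard_commit or unlock_commit must be called per reserve.
 */
#if 0	/* example only */
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, size);
	if (event) {
		if (unwanted)	/* hypothetical condition */
			ring_buffer_discard_commit(buffer, event);
		else
			ring_buffer_unlock_commit(buffer);
	}
#endif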
3828
3829/**
3830 * ring_buffer_write - write data to the buffer without reserving
3831 * @buffer: The ring buffer to write to.
3832 * @length: The length of the data being written (excluding the event header)
3833 * @data: The data to write to the buffer.
3834 *
3835 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
3836 * one function. If you already have the data to write to the buffer, it
3837 * may be easier to simply call this function.
3838 *
3839 * Note, like ring_buffer_lock_reserve, the length is the length of the data
3840 * and not the length of the event which would hold the header.
3841 */
3842int ring_buffer_write(struct trace_buffer *buffer,
3843		      unsigned long length,
3844		      void *data)
3845{
3846	struct ring_buffer_per_cpu *cpu_buffer;
3847	struct ring_buffer_event *event;
3848	void *body;
3849	int ret = -EBUSY;
3850	int cpu;
3851
3852	preempt_disable_notrace();
3853
3854	if (atomic_read(&buffer->record_disabled))
3855		goto out;
3856
3857	cpu = raw_smp_processor_id();
3858
3859	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3860		goto out;
3861
3862	cpu_buffer = buffer->buffers[cpu];
3863
3864	if (atomic_read(&cpu_buffer->record_disabled))
3865		goto out;
3866
3867	if (length > buffer->max_data_size)
3868		goto out;
3869
3870	if (unlikely(trace_recursive_lock(cpu_buffer)))
3871		goto out;
3872
3873	event = rb_reserve_next_event(buffer, cpu_buffer, length);
3874	if (!event)
3875		goto out_unlock;
3876
3877	body = rb_event_data(event);
3878
3879	memcpy(body, data, length);
3880
3881	rb_commit(cpu_buffer);
3882
3883	rb_wakeups(buffer, cpu_buffer);
3884
3885	ret = 0;
3886
3887 out_unlock:
3888	trace_recursive_unlock(cpu_buffer);
3889
3890 out:
3891	preempt_enable_notrace();
3892
3893	return ret;
3894}
3895EXPORT_SYMBOL_GPL(ring_buffer_write);
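
/*
 * Illustrative usage (hypothetical): when the data is already in hand,
 * a single call replaces the reserve/copy/commit sequence.
 */
#if 0	/* example only */
	u32 value = 42;

	if (ring_buffer_write(buffer, sizeof(value), &value))
		pr_debug("ring buffer write failed\n");
#endif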
3896
3897static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
3898{
3899	struct buffer_page *reader = cpu_buffer->reader_page;
3900	struct buffer_page *head = rb_set_head_page(cpu_buffer);
3901	struct buffer_page *commit = cpu_buffer->commit_page;
3902
3903	/* In case of error, head will be NULL */
3904	if (unlikely(!head))
3905		return true;
3906
3907	/* Reader should exhaust content in reader page */
3908	if (reader->read != rb_page_commit(reader))
3909		return false;
3910
3911	/*
3912	 * If writers are committing on the reader page, knowing all
3913	 * committed content has been read, the ring buffer is empty.
3914	 */
3915	if (commit == reader)
3916		return true;
3917
3918	/*
3919	 * If writers are committing on a page other than reader page
3920	 * and head page, there should always be content to read.
3921	 */
3922	if (commit != head)
3923		return false;
3924
3925	/*
3926	 * Writers are committing on the head page; we just need to
3927	 * check whether any data has been committed. The reader will
3928	 * swap the reader page with the head page when it reads data.
3929	 */
3930	return rb_page_commit(commit) == 0;
3931}
3932
3933/**
3934 * ring_buffer_record_disable - stop all writes into the buffer
3935 * @buffer: The ring buffer to stop writes to.
3936 *
3937 * This prevents all writes to the buffer. Any attempt to write
3938 * to the buffer after this will fail and return NULL.
3939 *
3940 * The caller should call synchronize_rcu() after this.
3941 */
3942void ring_buffer_record_disable(struct trace_buffer *buffer)
3943{
3944	atomic_inc(&buffer->record_disabled);
3945}
3946EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
3947
3948/**
3949 * ring_buffer_record_enable - enable writes to the buffer
3950 * @buffer: The ring buffer to enable writes
3951 *
3952 * Note, multiple disables will need the same number of enables
3953 * to truly enable the writing (much like preempt_disable).
3954 */
3955void ring_buffer_record_enable(struct trace_buffer *buffer)
3956{
3957	atomic_dec(&buffer->record_disabled);
3958}
3959EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
3960
3961/**
3962 * ring_buffer_record_off - stop all writes into the buffer
3963 * @buffer: The ring buffer to stop writes to.
3964 *
3965 * This prevents all writes to the buffer. Any attempt to write
3966 * to the buffer after this will fail and return NULL.
3967 *
3968 * This is different from ring_buffer_record_disable() as
3969 * it works like an on/off switch, whereas the disable() version
3970 * must be paired with an enable().
3971 */
3972void ring_buffer_record_off(struct trace_buffer *buffer)
3973{
3974	unsigned int rd;
3975	unsigned int new_rd;
3976
3977	rd = atomic_read(&buffer->record_disabled);
3978	do {
3979		new_rd = rd | RB_BUFFER_OFF;
3980	} while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd));
3981}
3982EXPORT_SYMBOL_GPL(ring_buffer_record_off);
3983
3984/**
3985 * ring_buffer_record_on - restart writes into the buffer
3986 * @buffer: The ring buffer to start writes to.
3987 *
3988 * This enables all writes to the buffer that was disabled by
3989 * ring_buffer_record_off().
3990 *
3991 * This is different from ring_buffer_record_enable() as
3992 * it works like an on/off switch, whereas the enable() version
3993 * must be paired with a disable().
3994 */
3995void ring_buffer_record_on(struct trace_buffer *buffer)
3996{
3997	unsigned int rd;
3998	unsigned int new_rd;
3999
4000	rd = atomic_read(&buffer->record_disabled);
4001	do {
4002		new_rd = rd & ~RB_BUFFER_OFF;
4003	} while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd));
4004}
4005EXPORT_SYMBOL_GPL(ring_buffer_record_on);
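
/*
 * The on/off helpers above use the canonical atomic read-modify-write
 * loop; in the abstract (illustrative):
 *
 *	old = atomic_read(&v);
 *	do {
 *		new = <old with the flag bit set or cleared>;
 *	} while (!atomic_try_cmpxchg(&v, &old, new));
 *
 * atomic_try_cmpxchg() refreshes 'old' with the current value when it
 * fails, so each retry works on an up-to-date snapshot.
 */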
4006
4007/**
4008 * ring_buffer_record_is_on - return true if the ring buffer can write
4009 * @buffer: The ring buffer to see if write is enabled
4010 *
4011 * Returns true if the ring buffer is in a state that it accepts writes.
4012 */
4013bool ring_buffer_record_is_on(struct trace_buffer *buffer)
4014{
4015	return !atomic_read(&buffer->record_disabled);
4016}
4017
4018/**
4019 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
4020 * @buffer: The ring buffer to see if write is set enabled
4021 *
4022 * Returns true if the ring buffer is set writable by ring_buffer_record_on().
4023 * Note that this does NOT mean it is in a writable state.
4024 *
4025 * It may return true when the ring buffer has been disabled by
4026 * ring_buffer_record_disable(), as that is a temporary disabling of
4027 * the ring buffer.
4028 */
4029bool ring_buffer_record_is_set_on(struct trace_buffer *buffer)
4030{
4031	return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
4032}
4033
4034/**
4035 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
4036 * @buffer: The ring buffer to stop writes to.
4037 * @cpu: The CPU buffer to stop
4038 *
4039 * This prevents all writes to the buffer. Any attempt to write
4040 * to the buffer after this will fail and return NULL.
4041 *
4042 * The caller should call synchronize_rcu() after this.
4043 */
4044void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu)
4045{
4046	struct ring_buffer_per_cpu *cpu_buffer;
4047
4048	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4049		return;
4050
4051	cpu_buffer = buffer->buffers[cpu];
4052	atomic_inc(&cpu_buffer->record_disabled);
4053}
4054EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
4055
4056/**
4057 * ring_buffer_record_enable_cpu - enable writes to the buffer
4058 * @buffer: The ring buffer to enable writes
4059 * @cpu: The CPU to enable.
4060 *
4061 * Note, multiple disables will need the same number of enables
4062 * to truly enable the writing (much like preempt_disable).
4063 */
4064void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu)
4065{
4066	struct ring_buffer_per_cpu *cpu_buffer;
4067
4068	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4069		return;
4070
4071	cpu_buffer = buffer->buffers[cpu];
4072	atomic_dec(&cpu_buffer->record_disabled);
4073}
4074EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
4075
4076/*
4077 * The total entries in the ring buffer is the running counter
4078 * of entries entered into the ring buffer, minus the sum of
4079 * the entries read from the ring buffer and the number of
4080 * entries that were overwritten.
4081 */
4082static inline unsigned long
4083rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
4084{
4085	return local_read(&cpu_buffer->entries) -
4086		(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
4087}
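
/*
 * Worked example (illustrative): if 1000 events were written, 300 were
 * lost to overwrite and 200 have already been read, then
 *
 *	entries = 1000 - (300 + 200) = 500
 *
 * events remain to be consumed.
 */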
4088
4089/**
4090 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
4091 * @buffer: The ring buffer
4092 * @cpu: The per CPU buffer to read from.
4093 */
4094u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
4095{
4096	unsigned long flags;
4097	struct ring_buffer_per_cpu *cpu_buffer;
4098	struct buffer_page *bpage;
4099	u64 ret = 0;
4100
4101	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4102		return 0;
4103
4104	cpu_buffer = buffer->buffers[cpu];
4105	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4106	/*
4107	 * if the tail is on reader_page, oldest time stamp is on the reader
4108	 * page
4109	 */
4110	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
4111		bpage = cpu_buffer->reader_page;
4112	else
4113		bpage = rb_set_head_page(cpu_buffer);
4114	if (bpage)
4115		ret = bpage->page->time_stamp;
4116	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4117
4118	return ret;
4119}
4120EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
4121
4122/**
4123 * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer
4124 * @buffer: The ring buffer
4125 * @cpu: The per CPU buffer to read from.
4126 */
4127unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu)
4128{
4129	struct ring_buffer_per_cpu *cpu_buffer;
4130	unsigned long ret;
4131
4132	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4133		return 0;
4134
4135	cpu_buffer = buffer->buffers[cpu];
4136	ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
4137
4138	return ret;
4139}
4140EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
4141
4142/**
4143 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
4144 * @buffer: The ring buffer
4145 * @cpu: The per CPU buffer to get the entries from.
4146 */
4147unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu)
4148{
4149	struct ring_buffer_per_cpu *cpu_buffer;
4150
4151	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4152		return 0;
4153
4154	cpu_buffer = buffer->buffers[cpu];
4155
4156	return rb_num_of_entries(cpu_buffer);
4157}
4158EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
4159
4160/**
4161 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
4162 * buffer wrapping around (only if RB_FL_OVERWRITE is on).
4163 * @buffer: The ring buffer
4164 * @cpu: The per CPU buffer to get the number of overruns from
4165 */
4166unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu)
4167{
4168	struct ring_buffer_per_cpu *cpu_buffer;
4169	unsigned long ret;
4170
4171	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4172		return 0;
4173
4174	cpu_buffer = buffer->buffers[cpu];
4175	ret = local_read(&cpu_buffer->overrun);
4176
4177	return ret;
4178}
4179EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
4180
4181/**
4182 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
4183 * commits failing due to the buffer wrapping around while there are uncommitted
4184 * events, such as during an interrupt storm.
4185 * @buffer: The ring buffer
4186 * @cpu: The per CPU buffer to get the number of overruns from
4187 */
4188unsigned long
4189ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu)
4190{
4191	struct ring_buffer_per_cpu *cpu_buffer;
4192	unsigned long ret;
4193
4194	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4195		return 0;
4196
4197	cpu_buffer = buffer->buffers[cpu];
4198	ret = local_read(&cpu_buffer->commit_overrun);
4199
4200	return ret;
4201}
4202EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
4203
4204/**
4205 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
4206 * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
4207 * @buffer: The ring buffer
4208 * @cpu: The per CPU buffer to get the number of overruns from
4209 */
4210unsigned long
4211ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu)
4212{
4213	struct ring_buffer_per_cpu *cpu_buffer;
4214	unsigned long ret;
4215
4216	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4217		return 0;
4218
4219	cpu_buffer = buffer->buffers[cpu];
4220	ret = local_read(&cpu_buffer->dropped_events);
4221
4222	return ret;
4223}
4224EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
4225
4226/**
4227 * ring_buffer_read_events_cpu - get the number of events successfully read
4228 * @buffer: The ring buffer
4229 * @cpu: The per CPU buffer to get the number of events read
4230 */
4231unsigned long
4232ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu)
4233{
4234	struct ring_buffer_per_cpu *cpu_buffer;
4235
4236	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4237		return 0;
4238
4239	cpu_buffer = buffer->buffers[cpu];
4240	return cpu_buffer->read;
4241}
4242EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
4243
4244/**
4245 * ring_buffer_entries - get the number of entries in a buffer
4246 * @buffer: The ring buffer
4247 *
4248 * Returns the total number of entries in the ring buffer
4249 * (all CPU entries)
4250 */
4251unsigned long ring_buffer_entries(struct trace_buffer *buffer)
4252{
4253	struct ring_buffer_per_cpu *cpu_buffer;
4254	unsigned long entries = 0;
4255	int cpu;
4256
4257	/* if you care about this being correct, lock the buffer */
4258	for_each_buffer_cpu(buffer, cpu) {
4259		cpu_buffer = buffer->buffers[cpu];
4260		entries += rb_num_of_entries(cpu_buffer);
4261	}
4262
4263	return entries;
4264}
4265EXPORT_SYMBOL_GPL(ring_buffer_entries);
4266
4267/**
4268 * ring_buffer_overruns - get the number of overruns in buffer
4269 * @buffer: The ring buffer
4270 *
4271 * Returns the total number of overruns in the ring buffer
4272 * (all CPU entries)
4273 */
4274unsigned long ring_buffer_overruns(struct trace_buffer *buffer)
4275{
4276	struct ring_buffer_per_cpu *cpu_buffer;
4277	unsigned long overruns = 0;
4278	int cpu;
4279
4280	/* if you care about this being correct, lock the buffer */
4281	for_each_buffer_cpu(buffer, cpu) {
4282		cpu_buffer = buffer->buffers[cpu];
4283		overruns += local_read(&cpu_buffer->overrun);
4284	}
4285
4286	return overruns;
4287}
4288EXPORT_SYMBOL_GPL(ring_buffer_overruns);
4289
4290static void rb_iter_reset(struct ring_buffer_iter *iter)
4291{
4292	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4293
4294	/* Iterator usage is expected to have record disabled */
4295	iter->head_page = cpu_buffer->reader_page;
4296	iter->head = cpu_buffer->reader_page->read;
4297	iter->next_event = iter->head;
4298
4299	iter->cache_reader_page = iter->head_page;
4300	iter->cache_read = cpu_buffer->read;
4301	iter->cache_pages_removed = cpu_buffer->pages_removed;
4302
4303	if (iter->head) {
4304		iter->read_stamp = cpu_buffer->read_stamp;
4305		iter->page_stamp = cpu_buffer->reader_page->page->time_stamp;
4306	} else {
4307		iter->read_stamp = iter->head_page->page->time_stamp;
4308		iter->page_stamp = iter->read_stamp;
4309	}
4310}
4311
4312/**
4313 * ring_buffer_iter_reset - reset an iterator
4314 * @iter: The iterator to reset
4315 *
4316 * Resets the iterator, so that it will start from the beginning
4317 * again.
4318 */
4319void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
4320{
4321	struct ring_buffer_per_cpu *cpu_buffer;
4322	unsigned long flags;
4323
4324	if (!iter)
4325		return;
4326
4327	cpu_buffer = iter->cpu_buffer;
4328
4329	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4330	rb_iter_reset(iter);
4331	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4332}
4333EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
4334
4335/**
4336 * ring_buffer_iter_empty - check if an iterator has no more to read
4337 * @iter: The iterator to check
4338 */
4339int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
4340{
4341	struct ring_buffer_per_cpu *cpu_buffer;
4342	struct buffer_page *reader;
4343	struct buffer_page *head_page;
4344	struct buffer_page *commit_page;
4345	struct buffer_page *curr_commit_page;
4346	unsigned commit;
4347	u64 curr_commit_ts;
4348	u64 commit_ts;
4349
4350	cpu_buffer = iter->cpu_buffer;
4351	reader = cpu_buffer->reader_page;
4352	head_page = cpu_buffer->head_page;
4353	commit_page = cpu_buffer->commit_page;
4354	commit_ts = commit_page->page->time_stamp;
4355
4356	/*
4357	 * When the writer goes across pages, it issues a cmpxchg which
4358	 * is a mb(), which will synchronize with the rmb here.
4359	 * (see rb_tail_page_update())
4360	 */
4361	smp_rmb();
4362	commit = rb_page_commit(commit_page);
4363	/* We want to make sure that the commit page doesn't change */
4364	smp_rmb();
4365
4366	/* Make sure commit page didn't change */
4367	curr_commit_page = READ_ONCE(cpu_buffer->commit_page);
4368	curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp);
4369
4370	/* If the commit page changed, then there's more data */
4371	if (curr_commit_page != commit_page ||
4372	    curr_commit_ts != commit_ts)
4373		return 0;
4374
4375	/* Still racy, as it may return a false positive, but that's OK */
4376	return ((iter->head_page == commit_page && iter->head >= commit) ||
4377		(iter->head_page == reader && commit_page == head_page &&
4378		 head_page->read == commit &&
4379		 iter->head == rb_page_commit(cpu_buffer->reader_page)));
4380}
4381EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
4382
4383static void
4384rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
4385		     struct ring_buffer_event *event)
4386{
4387	u64 delta;
4388
4389	switch (event->type_len) {
4390	case RINGBUF_TYPE_PADDING:
4391		return;
4392
4393	case RINGBUF_TYPE_TIME_EXTEND:
4394		delta = rb_event_time_stamp(event);
4395		cpu_buffer->read_stamp += delta;
4396		return;
4397
4398	case RINGBUF_TYPE_TIME_STAMP:
4399		delta = rb_event_time_stamp(event);
4400		delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp);
4401		cpu_buffer->read_stamp = delta;
4402		return;
4403
4404	case RINGBUF_TYPE_DATA:
4405		cpu_buffer->read_stamp += event->time_delta;
4406		return;
4407
4408	default:
4409		RB_WARN_ON(cpu_buffer, 1);
4410	}
4411}
4412
4413static void
4414rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
4415			  struct ring_buffer_event *event)
4416{
4417	u64 delta;
4418
4419	switch (event->type_len) {
4420	case RINGBUF_TYPE_PADDING:
4421		return;
4422
4423	case RINGBUF_TYPE_TIME_EXTEND:
4424		delta = rb_event_time_stamp(event);
4425		iter->read_stamp += delta;
4426		return;
4427
4428	case RINGBUF_TYPE_TIME_STAMP:
4429		delta = rb_event_time_stamp(event);
4430		delta = rb_fix_abs_ts(delta, iter->read_stamp);
4431		iter->read_stamp = delta;
4432		return;
4433
4434	case RINGBUF_TYPE_DATA:
4435		iter->read_stamp += event->time_delta;
4436		return;
4437
4438	default:
4439		RB_WARN_ON(iter->cpu_buffer, 1);
4440	}
4441}
4442
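/*
 * Overview of the swap performed below (summarizing the comments in
 * the function body): the reader page is reset to size zero and
 * spliced into the list in front of the current head page. A cmpxchg
 * on the head-page flag (rb_head_page_replace()) then atomically
 * turns the old head into the new reader page; if a writer is moving
 * the head at that moment the cmpxchg fails, and we spin and retry
 * from the splice step.
 */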
4443static struct buffer_page *
4444rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
4445{
4446	struct buffer_page *reader = NULL;
4447	unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size);
4448	unsigned long overwrite;
4449	unsigned long flags;
4450	int nr_loops = 0;
4451	bool ret;
4452
4453	local_irq_save(flags);
4454	arch_spin_lock(&cpu_buffer->lock);
4455
4456 again:
4457	/*
4458	 * This should normally only loop twice. But because the
4459	 * start of the reader inserts an empty page, it causes
4460	 * a case where we will loop three times. There should be no
4461	 * reason to loop four times (that I know of).
4462	 */
4463	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
4464		reader = NULL;
4465		goto out;
4466	}
4467
4468	reader = cpu_buffer->reader_page;
4469
4470	/* If there's more to read, return this page */
4471	if (cpu_buffer->reader_page->read < rb_page_size(reader))
4472		goto out;
4473
4474	/* Never should we have an index greater than the size */
4475	if (RB_WARN_ON(cpu_buffer,
4476		       cpu_buffer->reader_page->read > rb_page_size(reader)))
4477		goto out;
4478
4479	/* check if we caught up to the tail */
4480	reader = NULL;
4481	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
4482		goto out;
4483
4484	/* Don't bother swapping if the ring buffer is empty */
4485	if (rb_num_of_entries(cpu_buffer) == 0)
4486		goto out;
4487
4488	/*
4489	 * Reset the reader page to size zero.
4490	 */
4491	local_set(&cpu_buffer->reader_page->write, 0);
4492	local_set(&cpu_buffer->reader_page->entries, 0);
4493	local_set(&cpu_buffer->reader_page->page->commit, 0);
4494	cpu_buffer->reader_page->real_end = 0;
4495
4496 spin:
4497	/*
4498	 * Splice the empty reader page into the list around the head.
4499	 */
4500	reader = rb_set_head_page(cpu_buffer);
4501	if (!reader)
4502		goto out;
4503	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
4504	cpu_buffer->reader_page->list.prev = reader->list.prev;
4505
4506	/*
4507	 * cpu_buffer->pages just needs to point to the buffer, it
4508	 * has no specific buffer page to point to. Let's move it out
4509	 * of our way so we don't accidentally swap it.
4510	 */
4511	cpu_buffer->pages = reader->list.prev;
4512
4513	/* The reader page will be pointing to the new head */
4514	rb_set_list_to_head(&cpu_buffer->reader_page->list);
4515
4516	/*
4517	 * We want to make sure we read the overruns after we set up our
4518	 * pointers to the next object. The writer side does a
4519	 * cmpxchg to cross pages which acts as the mb on the writer
4520	 * side. Note, the reader will constantly fail the swap
4521	 * while the writer is updating the pointers, so this
4522	 * guarantees that the overwrite recorded here is the one we
4523	 * want to compare with the last_overrun.
4524	 */
4525	smp_mb();
4526	overwrite = local_read(&(cpu_buffer->overrun));
4527
4528	/*
4529	 * Here's the tricky part.
4530	 *
4531	 * We need to move the pointer past the header page.
4532	 * But we can only do that if a writer is not currently
4533	 * moving it. The page before the header page has the
4534	 * flag bit '1' set if it is pointing to the page we want,
4535	 * but if the writer is in the process of moving it
4536	 * then it will be '2', or '0' if already moved.
4537	 */
4538
4539	ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
4540
4541	/*
4542	 * If we did not convert it, then we must try again.
4543	 */
4544	if (!ret)
4545		goto spin;
4546
4547	/*
4548	 * Yay! We succeeded in replacing the page.
4549	 *
4550	 * Now make the new head point back to the reader page.
4551	 */
4552	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
4553	rb_inc_page(&cpu_buffer->head_page);
4554
4555	local_inc(&cpu_buffer->pages_read);
4556
4557	/* Finally update the reader page to the new head */
4558	cpu_buffer->reader_page = reader;
4559	cpu_buffer->reader_page->read = 0;
4560
4561	if (overwrite != cpu_buffer->last_overrun) {
4562		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
4563		cpu_buffer->last_overrun = overwrite;
4564	}
4565
4566	goto again;
4567
4568 out:
4569	/* Update the read_stamp on the first event */
4570	if (reader && reader->read == 0)
4571		cpu_buffer->read_stamp = reader->page->time_stamp;
4572
4573	arch_spin_unlock(&cpu_buffer->lock);
4574	local_irq_restore(flags);
4575
4576	/*
4577	 * The writer has preemption disabled; wait for it, but not
4578	 * forever. Although, 1 second is pretty much "forever".
4579	 */
4580#define USECS_WAIT	1000000
4581	for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) {
4582		/* If the write is past the end of page, a writer is still updating it */
4583		if (likely(!reader || rb_page_write(reader) <= bsize))
4584			break;
4585
4586		udelay(1);
4587
4588		/* Get the latest version of the reader write value */
4589		smp_rmb();
4590	}
4591
4592	/* The writer is not moving forward? Something is wrong */
4593	if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT))
4594		reader = NULL;
4595
4596	/*
4597	 * Make sure we see any padding after the write update
4598	 * (see rb_reset_tail()).
4599	 *
4600	 * In addition, a writer may be writing on the reader page
4601	 * if the page has not been fully filled, so the read barrier
4602	 * is also needed to make sure we see the content of what is
4603	 * committed by the writer (see rb_set_commit_to_write()).
4604	 */
4605	smp_rmb();
4606
4608	return reader;
4609}
4610
4611static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
4612{
4613	struct ring_buffer_event *event;
4614	struct buffer_page *reader;
4615	unsigned length;
4616
4617	reader = rb_get_reader_page(cpu_buffer);
4618
4619	/* This function should not be called when buffer is empty */
4620	if (RB_WARN_ON(cpu_buffer, !reader))
4621		return;
4622
4623	event = rb_reader_event(cpu_buffer);
4624
4625	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
4626		cpu_buffer->read++;
4627
4628	rb_update_read_stamp(cpu_buffer, event);
4629
4630	length = rb_event_length(event);
4631	cpu_buffer->reader_page->read += length;
4632	cpu_buffer->read_bytes += length;
4633}
4634
4635static void rb_advance_iter(struct ring_buffer_iter *iter)
4636{
4637	struct ring_buffer_per_cpu *cpu_buffer;
4638
4639	cpu_buffer = iter->cpu_buffer;
4640
4641	/* If head == next_event then we need to jump to the next event */
4642	if (iter->head == iter->next_event) {
4643		/* If the event gets overwritten again, there's nothing to do */
4644		if (rb_iter_head_event(iter) == NULL)
4645			return;
4646	}
4647
4648	iter->head = iter->next_event;
4649
4650	/*
4651	 * Check if we are at the end of the buffer.
4652	 */
4653	if (iter->next_event >= rb_page_size(iter->head_page)) {
4654		/* discarded commits can make the page empty */
4655		if (iter->head_page == cpu_buffer->commit_page)
4656			return;
4657		rb_inc_iter(iter);
4658		return;
4659	}
4660
4661	rb_update_iter_read_stamp(iter, iter->event);
4662}
4663
4664static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
4665{
4666	return cpu_buffer->lost_events;
4667}
4668
4669static struct ring_buffer_event *
4670rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
4671	       unsigned long *lost_events)
4672{
4673	struct ring_buffer_event *event;
4674	struct buffer_page *reader;
4675	int nr_loops = 0;
4676
4677	if (ts)
4678		*ts = 0;
4679 again:
4680	/*
4681	 * We repeat when a time extend is encountered.
4682	 * Since the time extend is always attached to a data event,
4683	 * we should never loop more than once.
4684	 * (We never hit the following condition more than twice).
4685	 */
4686	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
4687		return NULL;
4688
4689	reader = rb_get_reader_page(cpu_buffer);
4690	if (!reader)
4691		return NULL;
4692
4693	event = rb_reader_event(cpu_buffer);
4694
4695	switch (event->type_len) {
4696	case RINGBUF_TYPE_PADDING:
4697		if (rb_null_event(event))
4698			RB_WARN_ON(cpu_buffer, 1);
4699		/*
4700		 * Because the writer could be discarding every
4701		 * event it creates (which would probably be bad),
4702		 * if we were to go back to "again" we might never
4703		 * catch up, and would trigger the warning, or lock
4704		 * up the box. Return the padding, and we will release
4705		 * the current locks, and try again.
4706		 */
4707		return event;
4708
4709	case RINGBUF_TYPE_TIME_EXTEND:
4710		/* Internal data, OK to advance */
4711		rb_advance_reader(cpu_buffer);
4712		goto again;
4713
4714	case RINGBUF_TYPE_TIME_STAMP:
4715		if (ts) {
4716			*ts = rb_event_time_stamp(event);
4717			*ts = rb_fix_abs_ts(*ts, reader->page->time_stamp);
4718			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4719							 cpu_buffer->cpu, ts);
4720		}
4721		/* Internal data, OK to advance */
4722		rb_advance_reader(cpu_buffer);
4723		goto again;
4724
4725	case RINGBUF_TYPE_DATA:
4726		if (ts && !(*ts)) {
4727			*ts = cpu_buffer->read_stamp + event->time_delta;
4728			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4729							 cpu_buffer->cpu, ts);
4730		}
4731		if (lost_events)
4732			*lost_events = rb_lost_events(cpu_buffer);
4733		return event;
4734
4735	default:
4736		RB_WARN_ON(cpu_buffer, 1);
4737	}
4738
4739	return NULL;
4740}
4742
4743static struct ring_buffer_event *
4744rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
4745{
4746	struct trace_buffer *buffer;
4747	struct ring_buffer_per_cpu *cpu_buffer;
4748	struct ring_buffer_event *event;
4749	int nr_loops = 0;
4750
4751	if (ts)
4752		*ts = 0;
4753
4754	cpu_buffer = iter->cpu_buffer;
4755	buffer = cpu_buffer->buffer;
4756
4757	/*
4758	 * Check if someone performed a consuming read to the buffer
4759	 * or removed some pages from the buffer. In these cases,
4760	 * the iterator was invalidated and we need to reset it.
4761	 */
4762	if (unlikely(iter->cache_read != cpu_buffer->read ||
4763		     iter->cache_reader_page != cpu_buffer->reader_page ||
4764		     iter->cache_pages_removed != cpu_buffer->pages_removed))
4765		rb_iter_reset(iter);
4766
4767 again:
4768	if (ring_buffer_iter_empty(iter))
4769		return NULL;
4770
4771	/*
4772	 * As the writer can mess with what the iterator is trying
4773	 * to read, just give up if we fail to get an event after
4774	 * three tries. The iterator is not as reliable when reading
4775	 * the ring buffer with an active writer as the consumer is.
4776	 * Do not warn if three failures are reached.
4777	 */
4778	if (++nr_loops > 3)
4779		return NULL;
4780
4781	if (rb_per_cpu_empty(cpu_buffer))
4782		return NULL;
4783
4784	if (iter->head >= rb_page_size(iter->head_page)) {
4785		rb_inc_iter(iter);
4786		goto again;
4787	}
4788
4789	event = rb_iter_head_event(iter);
4790	if (!event)
4791		goto again;
4792
4793	switch (event->type_len) {
4794	case RINGBUF_TYPE_PADDING:
4795		if (rb_null_event(event)) {
4796			rb_inc_iter(iter);
4797			goto again;
4798		}
4799		rb_advance_iter(iter);
4800		return event;
4801
4802	case RINGBUF_TYPE_TIME_EXTEND:
4803		/* Internal data, OK to advance */
4804		rb_advance_iter(iter);
4805		goto again;
4806
4807	case RINGBUF_TYPE_TIME_STAMP:
4808		if (ts) {
4809			*ts = rb_event_time_stamp(event);
4810			*ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp);
4811			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4812							 cpu_buffer->cpu, ts);
4813		}
4814		/* Internal data, OK to advance */
4815		rb_advance_iter(iter);
4816		goto again;
4817
4818	case RINGBUF_TYPE_DATA:
4819		if (ts && !(*ts)) {
4820			*ts = iter->read_stamp + event->time_delta;
4821			ring_buffer_normalize_time_stamp(buffer,
4822							 cpu_buffer->cpu, ts);
4823		}
4824		return event;
4825
4826	default:
4827		RB_WARN_ON(cpu_buffer, 1);
4828	}
4829
4830	return NULL;
4831}
4833
4834static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
4835{
4836	if (likely(!in_nmi())) {
4837		raw_spin_lock(&cpu_buffer->reader_lock);
4838		return true;
4839	}
4840
4841	/*
4842	 * If an NMI die dump is reading out the ring buffer content,
4843	 * trylock must be used to prevent a deadlock if the NMI
4844	 * preempted a task that holds the ring buffer locks. If
4845	 * we get the lock then all is fine, if not, then continue
4846	 * to do the read, but this can corrupt the ring buffer,
4847	 * so it must be permanently disabled from future writes.
4848	 * Reading from an NMI is a one-shot deal.
4849	 */
4850	if (raw_spin_trylock(&cpu_buffer->reader_lock))
4851		return true;
4852
4853	/* Continue without locking, but disable the ring buffer */
4854	atomic_inc(&cpu_buffer->record_disabled);
4855	return false;
4856}
4857
4858static inline void
4859rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
4860{
4861	if (likely(locked))
4862		raw_spin_unlock(&cpu_buffer->reader_lock);
4863}
4864
4865/**
4866 * ring_buffer_peek - peek at the next event to be read
4867 * @buffer: The ring buffer to read
4868 * @cpu: The cpu to peek at
4869 * @ts: The timestamp counter of this event.
4870 * @lost_events: a variable to store if events were lost (may be NULL)
4871 *
4872 * This will return the event that will be read next, but does
4873 * not consume the data.
4874 */
4875struct ring_buffer_event *
4876ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
4877		 unsigned long *lost_events)
4878{
4879	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4880	struct ring_buffer_event *event;
4881	unsigned long flags;
4882	bool dolock;
4883
4884	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4885		return NULL;
4886
4887 again:
4888	local_irq_save(flags);
4889	dolock = rb_reader_lock(cpu_buffer);
4890	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
4891	if (event && event->type_len == RINGBUF_TYPE_PADDING)
4892		rb_advance_reader(cpu_buffer);
4893	rb_reader_unlock(cpu_buffer, dolock);
4894	local_irq_restore(flags);
4895
4896	if (event && event->type_len == RINGBUF_TYPE_PADDING)
4897		goto again;
4898
4899	return event;
4900}
EXPORT_SYMBOL_GPL(ring_buffer_peek);
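
/*
 * A minimal usage sketch (illustrative; process() is a made-up
 * consumer, the rest is the API above):
 *
 *	u64 ts;
 *	unsigned long lost;
 *	struct ring_buffer_event *event;
 *
 *	event = ring_buffer_peek(buffer, cpu, &ts, &lost);
 *	if (event)
 *		process(ring_buffer_event_data(event), ts);
 *
 * The event is not consumed: a following ring_buffer_consume() call
 * will return this same event again.
 */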
4901
4902/**
 * ring_buffer_iter_dropped - report if there are dropped events
4903 * @iter: The ring buffer iterator
4904 *
4905 * Returns true if there were dropped events since the last peek.
4906 */
4907bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter)
4908{
4909	bool ret = iter->missed_events != 0;
4910
4911	iter->missed_events = 0;
4912	return ret;
4913}
4914EXPORT_SYMBOL_GPL(ring_buffer_iter_dropped);
4915
4916/**
4917 * ring_buffer_iter_peek - peek at the next event to be read
4918 * @iter: The ring buffer iterator
4919 * @ts: The timestamp counter of this event.
4920 *
4921 * This will return the event that will be read next, but does
4922 * not increment the iterator.
4923 */
4924struct ring_buffer_event *
4925ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
4926{
4927	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4928	struct ring_buffer_event *event;
4929	unsigned long flags;
4930
4931 again:
4932	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4933	event = rb_iter_peek(iter, ts);
4934	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4935
4936	if (event && event->type_len == RINGBUF_TYPE_PADDING)
4937		goto again;
4938
4939	return event;
4940}
EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
4941
4942/**
4943 * ring_buffer_consume - return an event and consume it
4944 * @buffer: The ring buffer to get the next event from
4945 * @cpu: the cpu to read the buffer from
4946 * @ts: a variable to store the timestamp (may be NULL)
4947 * @lost_events: a variable to store if events were lost (may be NULL)
4948 *
4949 * Returns the next event in the ring buffer, and that event is consumed.
4950 * Meaning, that sequential reads will keep returning a different event,
4951 * and eventually empty the ring buffer if the producer is slower.
4952 */
4953struct ring_buffer_event *
4954ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
4955		    unsigned long *lost_events)
4956{
4957	struct ring_buffer_per_cpu *cpu_buffer;
4958	struct ring_buffer_event *event = NULL;
4959	unsigned long flags;
4960	bool dolock;
4961
4962 again:
4963	/* might be called in atomic */
4964	preempt_disable();
4965
4966	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4967		goto out;
4968
4969	cpu_buffer = buffer->buffers[cpu];
4970	local_irq_save(flags);
4971	dolock = rb_reader_lock(cpu_buffer);
4972
4973	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
4974	if (event) {
4975		cpu_buffer->lost_events = 0;
4976		rb_advance_reader(cpu_buffer);
4977	}
4978
4979	rb_reader_unlock(cpu_buffer, dolock);
4980	local_irq_restore(flags);
4981
4982 out:
4983	preempt_enable();
4984
4985	if (event && event->type_len == RINGBUF_TYPE_PADDING)
4986		goto again;
4987
4988	return event;
4989}
4990EXPORT_SYMBOL_GPL(ring_buffer_consume);
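
/*
 * A minimal consuming-read loop (a sketch; it mirrors the loop in the
 * startup test at the bottom of this file, handle() is made up):
 *
 *	unsigned long lost;
 *	struct ring_buffer_event *event;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
 *		if (lost)
 *			pr_info("%lu events lost\n", lost);
 *		handle(ring_buffer_event_data(event));
 *	}
 */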
4991
4992/**
4993 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
4994 * @buffer: The ring buffer to read from
4995 * @cpu: The cpu buffer to iterate over
4996 * @flags: gfp flags to use for memory allocation
4997 *
4998 * This performs the initial preparations necessary to iterate
4999 * through the buffer.  Memory is allocated, buffer resizing
5000 * is disabled, and the iterator pointer is returned to the caller.
5001 *
5002 * Disabling buffer resizing prevents the reading from being
5003 * corrupted. This is not a consuming read, so a producer is not
5004 * expected.
5005 *
5006 * After a sequence of ring_buffer_read_prepare calls, the user is
5007 * expected to make at least one call to ring_buffer_read_prepare_sync.
5008 * Afterwards, ring_buffer_read_start is invoked to get things going
5009 * for real.
5010 *
5011 * This overall must be paired with ring_buffer_read_finish.
5012 */
5013struct ring_buffer_iter *
5014ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
5015{
5016	struct ring_buffer_per_cpu *cpu_buffer;
5017	struct ring_buffer_iter *iter;
5018
5019	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5020		return NULL;
5021
5022	iter = kzalloc(sizeof(*iter), flags);
5023	if (!iter)
5024		return NULL;
5025
5026	/* Holds the entire event: data and meta data */
5027	iter->event_size = buffer->subbuf_size;
5028	iter->event = kmalloc(iter->event_size, flags);
5029	if (!iter->event) {
5030		kfree(iter);
5031		return NULL;
5032	}
5033
5034	cpu_buffer = buffer->buffers[cpu];
5035
5036	iter->cpu_buffer = cpu_buffer;
5037
5038	atomic_inc(&cpu_buffer->resize_disabled);
5039
5040	return iter;
5041}
5042EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
5043
5044/**
5045 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
5046 *
5047 * All previously invoked ring_buffer_read_prepare calls to prepare
5048 * iterators will be synchronized.  Afterwards, ring_buffer_read_start
5049 * calls on those iterators are allowed.
5050 */
5051void
5052ring_buffer_read_prepare_sync(void)
5053{
5054	synchronize_rcu();
5055}
5056EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
5057
5058/**
5059 * ring_buffer_read_start - start a non consuming read of the buffer
5060 * @iter: The iterator returned by ring_buffer_read_prepare
5061 *
5062 * This finalizes the startup of an iteration through the buffer.
5063 * The iterator comes from a call to ring_buffer_read_prepare and
5064 * an intervening ring_buffer_read_prepare_sync must have been
5065 * performed.
5066 *
5067 * Must be paired with ring_buffer_read_finish.
5068 */
5069void
5070ring_buffer_read_start(struct ring_buffer_iter *iter)
5071{
5072	struct ring_buffer_per_cpu *cpu_buffer;
5073	unsigned long flags;
5074
5075	if (!iter)
5076		return;
5077
5078	cpu_buffer = iter->cpu_buffer;
5079
5080	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5081	arch_spin_lock(&cpu_buffer->lock);
5082	rb_iter_reset(iter);
5083	arch_spin_unlock(&cpu_buffer->lock);
5084	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5085}
5086EXPORT_SYMBOL_GPL(ring_buffer_read_start);
5087
5088/**
5089 * ring_buffer_read_finish - finish reading the iterator of the buffer
5090 * @iter: The iterator retrieved by ring_buffer_read_prepare
5091 *
5092 * This re-enables resizing of the buffer, and frees the
5093 * iterator.
5094 */
5095void
5096ring_buffer_read_finish(struct ring_buffer_iter *iter)
5097{
5098	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5099	unsigned long flags;
5100
5101	/*
5102	 * Ring buffer is disabled from recording, here's a good place
5103	 * to check the integrity of the ring buffer.
5104	 * Must prevent readers from trying to read, as the check
5105	 * clears the HEAD page and readers require it.
5106	 */
5107	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5108	rb_check_pages(cpu_buffer);
5109	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5110
5111	atomic_dec(&cpu_buffer->resize_disabled);
5112	kfree(iter->event);
5113	kfree(iter);
5114}
5115EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
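
/*
 * The expected call sequence for the iterator API above (a sketch,
 * with error handling trimmed):
 *
 *	struct ring_buffer_iter *iter;
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *
 *	while ((event = ring_buffer_iter_peek(iter, &ts)))
 *		ring_buffer_iter_advance(iter);
 *
 *	ring_buffer_read_finish(iter);
 *
 * The walk is non-consuming; ring_buffer_read_finish() re-enables
 * resizing and frees the iterator.
 */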
5116
5117/**
5118 * ring_buffer_iter_advance - advance the iterator to the next location
5119 * @iter: The ring buffer iterator
5120 *
5121 * Advance the iterator, so that the next read will return the
5122 * event that follows the current location.
5123 */
5124void ring_buffer_iter_advance(struct ring_buffer_iter *iter)
5125{
5126	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5127	unsigned long flags;
5128
5129	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5130
5131	rb_advance_iter(iter);
5132
5133	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5134}
5135EXPORT_SYMBOL_GPL(ring_buffer_iter_advance);
5136
5137/**
5138 * ring_buffer_size - return the size of the ring buffer (in bytes)
5139 * @buffer: The ring buffer.
5140 * @cpu: The CPU to get ring buffer size from.
5141 */
5142unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
5143{
5144	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5145		return 0;
5146
5147	return buffer->subbuf_size * buffer->buffers[cpu]->nr_pages;
5148}
5149EXPORT_SYMBOL_GPL(ring_buffer_size);
5150
5151/**
5152 * ring_buffer_max_event_size - return the max data size of an event
5153 * @buffer: The ring buffer.
5154 *
5155 * Returns the maximum size an event can be.
5156 */
5157unsigned long ring_buffer_max_event_size(struct trace_buffer *buffer)
5158{
5159	/* If abs timestamp is requested, events have a timestamp too */
5160	if (ring_buffer_time_stamp_abs(buffer))
5161		return buffer->max_data_size - RB_LEN_TIME_EXTEND;
5162	return buffer->max_data_size;
5163}
5164EXPORT_SYMBOL_GPL(ring_buffer_max_event_size);
5165
5166static void rb_clear_buffer_page(struct buffer_page *page)
5167{
5168	local_set(&page->write, 0);
5169	local_set(&page->entries, 0);
5170	rb_init_page(page->page);
5171	page->read = 0;
5172}
5173
5174static void
5175rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
5176{
5177	struct buffer_page *page;
5178
5179	rb_head_page_deactivate(cpu_buffer);
5180
5181	cpu_buffer->head_page
5182		= list_entry(cpu_buffer->pages, struct buffer_page, list);
5183	rb_clear_buffer_page(cpu_buffer->head_page);
5184	list_for_each_entry(page, cpu_buffer->pages, list) {
5185		rb_clear_buffer_page(page);
5186	}
5187
5188	cpu_buffer->tail_page = cpu_buffer->head_page;
5189	cpu_buffer->commit_page = cpu_buffer->head_page;
5190
5191	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
5192	INIT_LIST_HEAD(&cpu_buffer->new_pages);
5193	rb_clear_buffer_page(cpu_buffer->reader_page);
5194
5195	local_set(&cpu_buffer->entries_bytes, 0);
5196	local_set(&cpu_buffer->overrun, 0);
5197	local_set(&cpu_buffer->commit_overrun, 0);
5198	local_set(&cpu_buffer->dropped_events, 0);
5199	local_set(&cpu_buffer->entries, 0);
5200	local_set(&cpu_buffer->committing, 0);
5201	local_set(&cpu_buffer->commits, 0);
5202	local_set(&cpu_buffer->pages_touched, 0);
5203	local_set(&cpu_buffer->pages_lost, 0);
5204	local_set(&cpu_buffer->pages_read, 0);
5205	cpu_buffer->last_pages_touch = 0;
5206	cpu_buffer->shortest_full = 0;
5207	cpu_buffer->read = 0;
5208	cpu_buffer->read_bytes = 0;
5209
5210	rb_time_set(&cpu_buffer->write_stamp, 0);
5211	rb_time_set(&cpu_buffer->before_stamp, 0);
5212
5213	memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp));
5214
5215	cpu_buffer->lost_events = 0;
5216	cpu_buffer->last_overrun = 0;
5217
5218	rb_head_page_activate(cpu_buffer);
5219	cpu_buffer->pages_removed = 0;
5220}
5221
5222/* Must have disabled the cpu buffer then done a synchronize_rcu */
5223static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
5224{
5225	unsigned long flags;
5226
5227	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5228
5229	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
5230		goto out;
5231
5232	arch_spin_lock(&cpu_buffer->lock);
5233
5234	rb_reset_cpu(cpu_buffer);
5235
5236	arch_spin_unlock(&cpu_buffer->lock);
5237
5238 out:
5239	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5240}
5241
5242/**
5243 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
5244 * @buffer: The ring buffer to reset a per cpu buffer of
5245 * @cpu: The CPU buffer to be reset
5246 */
5247void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
5248{
5249	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5250
5251	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5252		return;
5253
5254	/* prevent another thread from changing buffer sizes */
5255	mutex_lock(&buffer->mutex);
5256
5257	atomic_inc(&cpu_buffer->resize_disabled);
5258	atomic_inc(&cpu_buffer->record_disabled);
5259
5260	/* Make sure all commits have finished */
5261	synchronize_rcu();
5262
5263	reset_disabled_cpu_buffer(cpu_buffer);
5264
5265	atomic_dec(&cpu_buffer->record_disabled);
5266	atomic_dec(&cpu_buffer->resize_disabled);
5267
5268	mutex_unlock(&buffer->mutex);
5269}
5270EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
5271
5272/* Flag to ensure proper resetting of atomic variables */
5273#define RESET_BIT	(1 << 30)
5274
5275/**
5276 * ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer
5277 * @buffer: The ring buffer to reset a per cpu buffer of
5278 */
5279void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
5280{
5281	struct ring_buffer_per_cpu *cpu_buffer;
5282	int cpu;
5283
5284	/* prevent another thread from changing buffer sizes */
5285	mutex_lock(&buffer->mutex);
5286
5287	for_each_online_buffer_cpu(buffer, cpu) {
5288		cpu_buffer = buffer->buffers[cpu];
5289
5290		atomic_add(RESET_BIT, &cpu_buffer->resize_disabled);
5291		atomic_inc(&cpu_buffer->record_disabled);
5292	}
5293
5294	/* Make sure all commits have finished */
5295	synchronize_rcu();
5296
5297	for_each_buffer_cpu(buffer, cpu) {
5298		cpu_buffer = buffer->buffers[cpu];
5299
5300		/*
5301		 * If a CPU came online during the synchronize_rcu(), then
5302		 * ignore it.
5303		 */
5304		if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT))
5305			continue;
5306
5307		reset_disabled_cpu_buffer(cpu_buffer);
5308
5309		atomic_dec(&cpu_buffer->record_disabled);
5310		atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled);
5311	}
5312
5313	mutex_unlock(&buffer->mutex);
5314}
5315
5316/**
5317 * ring_buffer_reset - reset a ring buffer
5318 * @buffer: The ring buffer to reset all cpu buffers
5319 */
5320void ring_buffer_reset(struct trace_buffer *buffer)
5321{
5322	struct ring_buffer_per_cpu *cpu_buffer;
5323	int cpu;
5324
5325	/* prevent another thread from changing buffer sizes */
5326	mutex_lock(&buffer->mutex);
5327
5328	for_each_buffer_cpu(buffer, cpu) {
5329		cpu_buffer = buffer->buffers[cpu];
5330
5331		atomic_inc(&cpu_buffer->resize_disabled);
5332		atomic_inc(&cpu_buffer->record_disabled);
5333	}
5334
5335	/* Make sure all commits have finished */
5336	synchronize_rcu();
5337
5338	for_each_buffer_cpu(buffer, cpu) {
5339		cpu_buffer = buffer->buffers[cpu];
5340
5341		reset_disabled_cpu_buffer(cpu_buffer);
5342
5343		atomic_dec(&cpu_buffer->record_disabled);
5344		atomic_dec(&cpu_buffer->resize_disabled);
5345	}
5346
5347	mutex_unlock(&buffer->mutex);
5348}
5349EXPORT_SYMBOL_GPL(ring_buffer_reset);
5350
5351/**
5352 * ring_buffer_empty - is the ring buffer empty?
5353 * @buffer: The ring buffer to test
5354 */
5355bool ring_buffer_empty(struct trace_buffer *buffer)
5356{
5357	struct ring_buffer_per_cpu *cpu_buffer;
5358	unsigned long flags;
5359	bool dolock;
5360	bool ret;
5361	int cpu;
5362
5363	/* yes this is racy, but if you don't like the race, lock the buffer */
5364	for_each_buffer_cpu(buffer, cpu) {
5365		cpu_buffer = buffer->buffers[cpu];
5366		local_irq_save(flags);
5367		dolock = rb_reader_lock(cpu_buffer);
5368		ret = rb_per_cpu_empty(cpu_buffer);
5369		rb_reader_unlock(cpu_buffer, dolock);
5370		local_irq_restore(flags);
5371
5372		if (!ret)
5373			return false;
5374	}
5375
5376	return true;
5377}
5378EXPORT_SYMBOL_GPL(ring_buffer_empty);
5379
5380/**
5381 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
5382 * @buffer: The ring buffer
5383 * @cpu: The CPU buffer to test
5384 */
5385bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
5386{
5387	struct ring_buffer_per_cpu *cpu_buffer;
5388	unsigned long flags;
5389	bool dolock;
5390	bool ret;
5391
5392	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5393		return true;
5394
5395	cpu_buffer = buffer->buffers[cpu];
5396	local_irq_save(flags);
5397	dolock = rb_reader_lock(cpu_buffer);
5398	ret = rb_per_cpu_empty(cpu_buffer);
5399	rb_reader_unlock(cpu_buffer, dolock);
5400	local_irq_restore(flags);
5401
5402	return ret;
5403}
5404EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
5405
5406#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
5407/**
5408 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
5409 * @buffer_a: One buffer to swap with
5410 * @buffer_b: The other buffer to swap with
5411 * @cpu: the CPU of the buffers to swap
5412 *
5413 * This function is useful for tracers that want to take a "snapshot"
5414 * of a CPU buffer and have another backup buffer lying around.
5415 * It is expected that the tracer handles the cpu buffer not being
5416 * used at the moment.
5417 */
5418int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
5419			 struct trace_buffer *buffer_b, int cpu)
5420{
5421	struct ring_buffer_per_cpu *cpu_buffer_a;
5422	struct ring_buffer_per_cpu *cpu_buffer_b;
5423	int ret = -EINVAL;
5424
5425	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
5426	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
5427		goto out;
5428
5429	cpu_buffer_a = buffer_a->buffers[cpu];
5430	cpu_buffer_b = buffer_b->buffers[cpu];
5431
5432	/* At least make sure the two buffers are somewhat the same */
5433	if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
5434		goto out;
5435
5436	if (buffer_a->subbuf_order != buffer_b->subbuf_order)
5437		goto out;
5438
5439	ret = -EAGAIN;
5440
5441	if (atomic_read(&buffer_a->record_disabled))
5442		goto out;
5443
5444	if (atomic_read(&buffer_b->record_disabled))
5445		goto out;
5446
5447	if (atomic_read(&cpu_buffer_a->record_disabled))
5448		goto out;
5449
5450	if (atomic_read(&cpu_buffer_b->record_disabled))
5451		goto out;
5452
5453	/*
5454	 * We can't do a synchronize_rcu here because this
5455	 * function can be called in atomic context.
5456	 * Normally this will be called from the same CPU as cpu.
5457	 * If not it's up to the caller to protect this.
5458	 */
5459	atomic_inc(&cpu_buffer_a->record_disabled);
5460	atomic_inc(&cpu_buffer_b->record_disabled);
5461
5462	ret = -EBUSY;
5463	if (local_read(&cpu_buffer_a->committing))
5464		goto out_dec;
5465	if (local_read(&cpu_buffer_b->committing))
5466		goto out_dec;
5467
5468	/*
5469	 * When resize is in progress, we cannot swap it because
5470	 * it will mess up the state of the cpu buffer.
5471	 */
5472	if (atomic_read(&buffer_a->resizing))
5473		goto out_dec;
5474	if (atomic_read(&buffer_b->resizing))
5475		goto out_dec;
5476
5477	buffer_a->buffers[cpu] = cpu_buffer_b;
5478	buffer_b->buffers[cpu] = cpu_buffer_a;
5479
5480	cpu_buffer_b->buffer = buffer_a;
5481	cpu_buffer_a->buffer = buffer_b;
5482
5483	ret = 0;
5484
5485out_dec:
5486	atomic_dec(&cpu_buffer_a->record_disabled);
5487	atomic_dec(&cpu_buffer_b->record_disabled);
5488out:
5489	return ret;
5490}
5491EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
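
/*
 * A sketch of the snapshot pattern described above ("max_buffer" is a
 * made-up name for the spare buffer):
 *
 *	if (ring_buffer_swap_cpu(max_buffer, buffer, cpu) == 0)
 *		read_snapshot(max_buffer, cpu);
 *
 * read_snapshot() is hypothetical; on -EAGAIN or -EBUSY the caller
 * may simply retry later.
 */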
5492#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
5493
5494/**
5495 * ring_buffer_alloc_read_page - allocate a page to read from buffer
5496 * @buffer: the buffer to allocate for.
5497 * @cpu: the cpu buffer to allocate.
5498 *
5499 * This function is used in conjunction with ring_buffer_read_page.
5500 * When reading a full page from the ring buffer, these functions
5501 * can be used to speed up the process. The calling function should
5502 * allocate a few pages first with this function. Then when it
5503 * needs to get pages from the ring buffer, it passes the result
5504 * of this function into ring_buffer_read_page, which will swap
5505 * the page that was allocated, with the read page of the buffer.
5506 *
5507 * Returns:
5508 *  The page allocated, or ERR_PTR
5509 */
5510struct buffer_data_read_page *
5511ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
5512{
5513	struct ring_buffer_per_cpu *cpu_buffer;
5514	struct buffer_data_read_page *bpage = NULL;
5515	unsigned long flags;
5516	struct page *page;
5517
5518	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5519		return ERR_PTR(-ENODEV);
5520
5521	bpage = kzalloc(sizeof(*bpage), GFP_KERNEL);
5522	if (!bpage)
5523		return ERR_PTR(-ENOMEM);
5524
5525	bpage->order = buffer->subbuf_order;
5526	cpu_buffer = buffer->buffers[cpu];
5527	local_irq_save(flags);
5528	arch_spin_lock(&cpu_buffer->lock);
5529
5530	if (cpu_buffer->free_page) {
5531		bpage->data = cpu_buffer->free_page;
5532		cpu_buffer->free_page = NULL;
5533	}
5534
5535	arch_spin_unlock(&cpu_buffer->lock);
5536	local_irq_restore(flags);
5537
5538	if (bpage->data)
5539		goto out;
5540
5541	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL | __GFP_NORETRY,
5542				cpu_buffer->buffer->subbuf_order);
5543	if (!page) {
5544		kfree(bpage);
5545		return ERR_PTR(-ENOMEM);
5546	}
5547
5548	bpage->data = page_address(page);
5549
5550 out:
5551	rb_init_page(bpage->data);
5552
5553	return bpage;
5554}
5555EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
5556
5557/**
5558 * ring_buffer_free_read_page - free an allocated read page
5559 * @buffer: the buffer the page was allocated for
5560 * @cpu: the cpu buffer the page came from
5561 * @data_page: the page to free
5562 *
5563 * Free a page allocated from ring_buffer_alloc_read_page.
5564 */
5565void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu,
5566				struct buffer_data_read_page *data_page)
5567{
5568	struct ring_buffer_per_cpu *cpu_buffer;
5569	struct buffer_data_page *bpage = data_page->data;
5570	struct page *page = virt_to_page(bpage);
5571	unsigned long flags;
5572
5573	if (!buffer || !buffer->buffers || !buffer->buffers[cpu])
5574		return;
5575
5576	cpu_buffer = buffer->buffers[cpu];
5577
5578	/*
5579	 * If the page is still in use someplace else, or the order of the page
5580	 * is different from the subbuffer order of the buffer,
5581	 * we can't reuse it.
5582	 */
5583	if (page_ref_count(page) > 1 || data_page->order != buffer->subbuf_order)
5584		goto out;
5585
5586	local_irq_save(flags);
5587	arch_spin_lock(&cpu_buffer->lock);
5588
5589	if (!cpu_buffer->free_page) {
5590		cpu_buffer->free_page = bpage;
5591		bpage = NULL;
5592	}
5593
5594	arch_spin_unlock(&cpu_buffer->lock);
5595	local_irq_restore(flags);
5596
5597 out:
5598	free_pages((unsigned long)bpage, data_page->order);
5599	kfree(data_page);
5600}
5601EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
5602
5603/**
5604 * ring_buffer_read_page - extract a page from the ring buffer
5605 * @buffer: buffer to extract from
5606 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
5607 * @len: amount to extract
5608 * @cpu: the cpu of the buffer to extract
5609 * @full: should the extraction only happen when the page is full.
5610 *
5611 * This function will pull out a page from the ring buffer and consume it.
5612 * @data_page must be the address of the variable that was returned
5613 * from ring_buffer_alloc_read_page. This is because the page might be used
5614 * to swap with a page in the ring buffer.
5615 *
5616 * for example:
5617 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
5618 *	if (IS_ERR(rpage))
5619 *		return PTR_ERR(rpage);
5620 *	ret = ring_buffer_read_page(buffer, rpage, len, cpu, 0);
5621 *	if (ret >= 0)
5622 *		process_page(ring_buffer_read_page_data(rpage), ret);
5623 *	ring_buffer_free_read_page(buffer, cpu, rpage);
5624 *
5625 * When @full is set, the function will not succeed unless
5626 * the writer is off the reader page.
5627 *
5628 * Note: it is up to the calling functions to handle sleeps and wakeups.
5629 *  The ring buffer can be used anywhere in the kernel and can not
5630 *  blindly call wake_up. The layer that uses the ring buffer must be
5631 *  responsible for that.
5632 *
5633 * Returns:
5634 *  >=0 if data has been transferred, returns the offset of consumed data.
5635 *  <0 if no data has been transferred.
5636 */
5637int ring_buffer_read_page(struct trace_buffer *buffer,
5638			  struct buffer_data_read_page *data_page,
5639			  size_t len, int cpu, int full)
5640{
5641	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5642	struct ring_buffer_event *event;
5643	struct buffer_data_page *bpage;
5644	struct buffer_page *reader;
5645	unsigned long missed_events;
5646	unsigned long flags;
5647	unsigned int commit;
5648	unsigned int read;
5649	u64 save_timestamp;
5650	int ret = -1;
5651
5652	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5653		goto out;
5654
5655	/*
5656	 * If len is not big enough to hold the page header, then
5657	 * we can not copy anything.
5658	 */
5659	if (len <= BUF_PAGE_HDR_SIZE)
5660		goto out;
5661
5662	len -= BUF_PAGE_HDR_SIZE;
5663
5664	if (!data_page || !data_page->data)
5665		goto out;
5666	if (data_page->order != buffer->subbuf_order)
5667		goto out;
5668
5669	bpage = data_page->data;
5670	if (!bpage)
5671		goto out;
5672
5673	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5674
5675	reader = rb_get_reader_page(cpu_buffer);
5676	if (!reader)
5677		goto out_unlock;
5678
5679	event = rb_reader_event(cpu_buffer);
5680
5681	read = reader->read;
5682	commit = rb_page_commit(reader);
5683
5684	/* Check if any events were dropped */
5685	missed_events = cpu_buffer->lost_events;
5686
5687	/*
5688	 * If this page has been partially read or
5689	 * if len is not big enough to read the rest of the page or
5690	 * a writer is still on the page, then
5691	 * we must copy the data from the page to the buffer.
5692	 * Otherwise, we can simply swap the page with the one passed in.
5693	 */
5694	if (read || (len < (commit - read)) ||
5695	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
5696		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
5697		unsigned int rpos = read;
5698		unsigned int pos = 0;
5699		unsigned int size;
5700
5701		/*
5702		 * If a full page is expected, this can still be returned
5703		 * if there's been a previous partial read and the
5704		 * rest of the page can be read and the commit page is off
5705		 * the reader page.
5706		 */
5707		if (full &&
5708		    (!read || (len < (commit - read)) ||
5709		     cpu_buffer->reader_page == cpu_buffer->commit_page))
5710			goto out_unlock;
5711
5712		if (len > (commit - read))
5713			len = (commit - read);
5714
5715		/* Always keep the time extend and data together */
5716		size = rb_event_ts_length(event);
5717
5718		if (len < size)
5719			goto out_unlock;
5720
5721		/* save the current timestamp, since the user will need it */
5722		save_timestamp = cpu_buffer->read_stamp;
5723
5724		/* Need to copy one event at a time */
5725		do {
5726			/* We need the size of one event, because
5727			 * rb_advance_reader only advances by one event,
5728			 * whereas rb_event_ts_length may include the size of
5729			 * one or two events.
5730			 * We have already ensured there's enough space if this
5731			 * is a time extend. */
5732			size = rb_event_length(event);
5733			memcpy(bpage->data + pos, rpage->data + rpos, size);
5734
5735			len -= size;
5736
5737			rb_advance_reader(cpu_buffer);
5738			rpos = reader->read;
5739			pos += size;
5740
5741			if (rpos >= commit)
5742				break;
5743
5744			event = rb_reader_event(cpu_buffer);
5745			/* Always keep the time extend and data together */
5746			size = rb_event_ts_length(event);
5747		} while (len >= size);
5748
5749		/* update bpage */
5750		local_set(&bpage->commit, pos);
5751		bpage->time_stamp = save_timestamp;
5752
5753		/* we copied everything to the beginning */
5754		read = 0;
5755	} else {
5756		/* update the entry counter */
5757		cpu_buffer->read += rb_page_entries(reader);
5758		cpu_buffer->read_bytes += rb_page_commit(reader);
5759
5760		/* swap the pages */
5761		rb_init_page(bpage);
5762		bpage = reader->page;
5763		reader->page = data_page->data;
5764		local_set(&reader->write, 0);
5765		local_set(&reader->entries, 0);
5766		reader->read = 0;
5767		data_page->data = bpage;
5768
5769		/*
5770		 * Use the real_end for the data size.
5771		 * This gives us a chance to store the lost events
5772		 * on the page.
5773		 */
5774		if (reader->real_end)
5775			local_set(&bpage->commit, reader->real_end);
5776	}
5777	ret = read;
5778
5779	cpu_buffer->lost_events = 0;
5780
5781	commit = local_read(&bpage->commit);
5782	/*
5783	 * Set a flag in the commit field if we lost events
5784	 */
5785	if (missed_events) {
5786		/* If there is room at the end of the page to save the
5787		 * missed events, then record them there.
5788		 */
5789		if (buffer->subbuf_size - commit >= sizeof(missed_events)) {
5790			memcpy(&bpage->data[commit], &missed_events,
5791			       sizeof(missed_events));
5792			local_add(RB_MISSED_STORED, &bpage->commit);
5793			commit += sizeof(missed_events);
5794		}
5795		local_add(RB_MISSED_EVENTS, &bpage->commit);
5796	}
5797
5798	/*
5799	 * This page may be off to user land. Zero it out here.
5800	 */
5801	if (commit < buffer->subbuf_size)
5802		memset(&bpage->data[commit], 0, buffer->subbuf_size - commit);
5803
5804 out_unlock:
5805	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5806
5807 out:
5808	return ret;
5809}
5810EXPORT_SYMBOL_GPL(ring_buffer_read_page);
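
/*
 * Note for consumers of the copied page: when events were lost, the
 * RB_MISSED_EVENTS flag is set in the commit field of the returned
 * page, and if RB_MISSED_STORED is also set, the count of missed
 * events is stored after the last event (see the missed_events
 * handling above). A raw reader should mask those flags off the
 * commit value before treating it as a data length.
 */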
5811
5812/**
5813 * ring_buffer_read_page_data - get pointer to the data in the page.
5814 * @page:  the page to get the data from
5815 *
5816 * Returns pointer to the actual data in this page.
5817 */
5818void *ring_buffer_read_page_data(struct buffer_data_read_page *page)
5819{
5820	return page->data;
5821}
5822EXPORT_SYMBOL_GPL(ring_buffer_read_page_data);
5823
5824/**
5825 * ring_buffer_subbuf_size_get - get size of the sub buffer.
5826 * @buffer: the buffer to get the sub buffer size from
5827 *
5828 * Returns size of the sub buffer, in bytes.
5829 */
5830int ring_buffer_subbuf_size_get(struct trace_buffer *buffer)
5831{
5832	return buffer->subbuf_size + BUF_PAGE_HDR_SIZE;
5833}
5834EXPORT_SYMBOL_GPL(ring_buffer_subbuf_size_get);
5835
5836/**
5837 * ring_buffer_subbuf_order_get - get order of system sub pages in one buffer page.
5838 * @buffer: The ring_buffer to get the system sub page order from
5839 *
5840 * By default, one ring buffer sub page equals one system page. This parameter
5841 * is configurable, per ring buffer. The size of the ring buffer sub page can be
5842 * extended, but must be a power-of-two number of system pages.
5843 *
5844 * Returns the order of buffer sub page size, in system pages:
5845 * 0 means the sub buffer size is 1 system page and so forth.
5846 * In case of an error < 0 is returned.
5847 */
5848int ring_buffer_subbuf_order_get(struct trace_buffer *buffer)
5849{
5850	if (!buffer)
5851		return -EINVAL;
5852
5853	return buffer->subbuf_order;
5854}
5855EXPORT_SYMBOL_GPL(ring_buffer_subbuf_order_get);
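
/*
 * For example, with 4K system pages: order 2 means one sub buffer
 * spans 4 system pages, ring_buffer_subbuf_size_get() reports
 * 16384 bytes, and the writable portion of each sub buffer is
 * 16384 - BUF_PAGE_HDR_SIZE bytes.
 */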
5856
5857/**
5858 * ring_buffer_subbuf_order_set - set the size of ring buffer sub page.
5859 * @buffer: The ring_buffer to set the new page size.
5860 * @order: Order of the system pages in one sub buffer page
5861 *
5862 * By default, one ring buffer page equals one system page. This API can be
5863 * used to set a new size of the ring buffer page. The size must be a
5864 * power-of-two number of system pages, which is why the input parameter @order
5865 * is the order of system pages that are allocated for one ring buffer page:
5866 *  0 - 1 system page
5867 *  1 - 2 system pages
5868 *  2 - 4 system pages
5869 *  ...
5870 *
5871 * Returns 0 on success or < 0 in case of an error.
5872 */
5873int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
5874{
5875	struct ring_buffer_per_cpu *cpu_buffer;
5876	struct buffer_page *bpage, *tmp;
5877	int old_order, old_size;
5878	int nr_pages;
5879	int psize;
5880	int err;
5881	int cpu;
5882
5883	if (!buffer || order < 0)
5884		return -EINVAL;
5885
5886	if (buffer->subbuf_order == order)
5887		return 0;
5888
5889	psize = (1 << order) * PAGE_SIZE;
5890	if (psize <= BUF_PAGE_HDR_SIZE)
5891		return -EINVAL;
5892
5893	/* Size of a subbuf cannot be greater than the write counter */
5894	if (psize > RB_WRITE_MASK + 1)
5895		return -EINVAL;
5896
5897	old_order = buffer->subbuf_order;
5898	old_size = buffer->subbuf_size;
5899
5900	/* prevent another thread from changing buffer sizes */
5901	mutex_lock(&buffer->mutex);
5902	atomic_inc(&buffer->record_disabled);
5903
5904	/* Make sure all commits have finished */
5905	synchronize_rcu();
5906
5907	buffer->subbuf_order = order;
5908	buffer->subbuf_size = psize - BUF_PAGE_HDR_SIZE;
5909
5910	/* Make sure all new buffers are allocated, before deleting the old ones */
5911	for_each_buffer_cpu(buffer, cpu) {
5912
5913		if (!cpumask_test_cpu(cpu, buffer->cpumask))
5914			continue;
5915
5916		cpu_buffer = buffer->buffers[cpu];
5917
5918		/* Update the number of pages to match the new size */
5919		nr_pages = old_size * buffer->buffers[cpu]->nr_pages;
5920		nr_pages = DIV_ROUND_UP(nr_pages, buffer->subbuf_size);
5921
5922		/* we need a minimum of two pages */
5923		if (nr_pages < 2)
5924			nr_pages = 2;
5925
5926		cpu_buffer->nr_pages_to_update = nr_pages;
5927
5928		/* Include the reader page */
5929		nr_pages++;
5930
5931		/* Allocate the new size buffer */
5932		INIT_LIST_HEAD(&cpu_buffer->new_pages);
5933		if (__rb_allocate_pages(cpu_buffer, nr_pages,
5934					&cpu_buffer->new_pages)) {
5935			/* not enough memory for new pages */
5936			err = -ENOMEM;
5937			goto error;
5938		}
5939	}
5940
5941	for_each_buffer_cpu(buffer, cpu) {
5942
5943		if (!cpumask_test_cpu(cpu, buffer->cpumask))
5944			continue;
5945
5946		cpu_buffer = buffer->buffers[cpu];
5947
5948		/* Clear the head bit to make the linked list normal to read */
5949		rb_head_page_deactivate(cpu_buffer);
5950
5951		/* Now walk the list and free all the old sub buffers */
5952		list_for_each_entry_safe(bpage, tmp, cpu_buffer->pages, list) {
5953			list_del_init(&bpage->list);
5954			free_buffer_page(bpage);
5955		}
5956		/* The above loop stopped at the last page needing to be freed */
5957		bpage = list_entry(cpu_buffer->pages, struct buffer_page, list);
5958		free_buffer_page(bpage);
5959
5960		/* Free the current reader page */
5961		free_buffer_page(cpu_buffer->reader_page);
5962
5963		/* One page was allocated for the reader page */
5964		cpu_buffer->reader_page = list_entry(cpu_buffer->new_pages.next,
5965						     struct buffer_page, list);
5966		list_del_init(&cpu_buffer->reader_page->list);
5967
5968		/* The cpu_buffer pages are a linked list with no head */
5969		cpu_buffer->pages = cpu_buffer->new_pages.next;
5970		cpu_buffer->new_pages.next->prev = cpu_buffer->new_pages.prev;
5971		cpu_buffer->new_pages.prev->next = cpu_buffer->new_pages.next;
5972
5973		/* Clear the new_pages list */
5974		INIT_LIST_HEAD(&cpu_buffer->new_pages);
5975
5976		cpu_buffer->head_page
5977			= list_entry(cpu_buffer->pages, struct buffer_page, list);
5978		cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
5979
5980		cpu_buffer->nr_pages = cpu_buffer->nr_pages_to_update;
5981		cpu_buffer->nr_pages_to_update = 0;
5982
5983		free_pages((unsigned long)cpu_buffer->free_page, old_order);
5984		cpu_buffer->free_page = NULL;
5985
5986		rb_head_page_activate(cpu_buffer);
5987
5988		rb_check_pages(cpu_buffer);
5989	}
5990
5991	atomic_dec(&buffer->record_disabled);
5992	mutex_unlock(&buffer->mutex);
5993
5994	return 0;
5995
5996error:
5997	buffer->subbuf_order = old_order;
5998	buffer->subbuf_size = old_size;
5999
6000	atomic_dec(&buffer->record_disabled);
6001	mutex_unlock(&buffer->mutex);
6002
6003	for_each_buffer_cpu(buffer, cpu) {
6004		cpu_buffer = buffer->buffers[cpu];
6005
6006		if (!cpu_buffer->nr_pages_to_update)
6007			continue;
6008
6009		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, list) {
6010			list_del_init(&bpage->list);
6011			free_buffer_page(bpage);
6012		}
6013	}
6014
6015	return err;
6016}
6017EXPORT_SYMBOL_GPL(ring_buffer_subbuf_order_set);
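
/*
 * For example (a sketch; note the call can fail with -ENOMEM, in
 * which case the old geometry is kept):
 *
 *	err = ring_buffer_subbuf_order_set(buffer, 2);
 *	if (err)
 *		return err;
 *
 * Afterwards each sub buffer spans four system pages.
 */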
6018
6019/*
6020 * We only allocate new buffers, never free them if the CPU goes down.
6021 * If we were to free the buffer, then the user would lose any trace that was in
6022 * the buffer.
6023 */
6024int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
6025{
6026	struct trace_buffer *buffer;
6027	long nr_pages_same;
6028	int cpu_i;
6029	unsigned long nr_pages;
6030
6031	buffer = container_of(node, struct trace_buffer, node);
6032	if (cpumask_test_cpu(cpu, buffer->cpumask))
6033		return 0;
6034
6035	nr_pages = 0;
6036	nr_pages_same = 1;
6037	/* check if all cpu sizes are same */
6038	for_each_buffer_cpu(buffer, cpu_i) {
6039		/* fill in the size from first enabled cpu */
6040		if (nr_pages == 0)
6041			nr_pages = buffer->buffers[cpu_i]->nr_pages;
6042		if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
6043			nr_pages_same = 0;
6044			break;
6045		}
6046	}
6047	/* allocate minimum pages, user can later expand it */
6048	if (!nr_pages_same)
6049		nr_pages = 2;
6050	buffer->buffers[cpu] =
6051		rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
6052	if (!buffer->buffers[cpu]) {
6053		WARN(1, "failed to allocate ring buffer on CPU %u\n",
6054		     cpu);
6055		return -ENOMEM;
6056	}
6057	smp_wmb();
6058	cpumask_set_cpu(cpu, buffer->cpumask);
6059	return 0;
6060}
6061
6062#ifdef CONFIG_RING_BUFFER_STARTUP_TEST
6063/*
6064 * This is a basic integrity check of the ring buffer.
6065 * Late in the boot cycle this test will run when configured in.
6066 * It will kick off a thread per CPU that will go into a loop
6067 * writing to the per cpu ring buffer various sizes of data.
6068 * Some of the data will be large items, some small.
6069 *
6070 * Another thread is created that goes into a spin, sending out
6071 * IPIs to the other CPUs to also write into the ring buffer.
6072 * This is to test the nesting ability of the buffer.
6073 *
6074 * Basic stats are recorded and reported. If something in the
6075 * ring buffer should happen that's not expected, a big warning
6076 * is displayed and all ring buffers are disabled.
6077 */
6078static struct task_struct *rb_threads[NR_CPUS] __initdata;
6079
6080struct rb_test_data {
6081	struct trace_buffer *buffer;
6082	unsigned long		events;
6083	unsigned long		bytes_written;
6084	unsigned long		bytes_alloc;
6085	unsigned long		bytes_dropped;
6086	unsigned long		events_nested;
6087	unsigned long		bytes_written_nested;
6088	unsigned long		bytes_alloc_nested;
6089	unsigned long		bytes_dropped_nested;
6090	int			min_size_nested;
6091	int			max_size_nested;
6092	int			max_size;
6093	int			min_size;
6094	int			cpu;
6095	int			cnt;
6096};
6097
6098static struct rb_test_data rb_data[NR_CPUS] __initdata;
6099
6100/* 1 meg per cpu */
6101#define RB_TEST_BUFFER_SIZE	1048576
6102
6103static char rb_string[] __initdata =
6104	"abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
6105	"?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
6106	"!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
6107
6108static bool rb_test_started __initdata;
6109
6110struct rb_item {
6111	int size;
6112	char str[];
6113};
6114
6115static __init int rb_write_something(struct rb_test_data *data, bool nested)
6116{
6117	struct ring_buffer_event *event;
6118	struct rb_item *item;
6119	bool started;
6120	int event_len;
6121	int size;
6122	int len;
6123	int cnt;
6124
6125	/* Have nested writes different than what is written */
6126	cnt = data->cnt + (nested ? 27 : 0);
6127
6128	/* Multiply cnt by ~e, to make some unique increment */
6129	size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
6130
6131	len = size + sizeof(struct rb_item);
6132
6133	started = rb_test_started;
6134	/* read rb_test_started before checking buffer enabled */
6135	smp_rmb();
6136
6137	event = ring_buffer_lock_reserve(data->buffer, len);
6138	if (!event) {
6139		/* Ignore dropped events before test starts. */
6140		if (started) {
6141			if (nested)
6142				data->bytes_dropped_nested += len;
6143			else
6144				data->bytes_dropped += len;
6145		}
6146		return len;
6147	}
6148
6149	event_len = ring_buffer_event_length(event);
6150
6151	if (RB_WARN_ON(data->buffer, event_len < len))
6152		goto out;
6153
6154	item = ring_buffer_event_data(event);
6155	item->size = size;
6156	memcpy(item->str, rb_string, size);
6157
6158	if (nested) {
6159		data->bytes_alloc_nested += event_len;
6160		data->bytes_written_nested += len;
6161		data->events_nested++;
6162		if (!data->min_size_nested || len < data->min_size_nested)
6163			data->min_size_nested = len;
6164		if (len > data->max_size_nested)
6165			data->max_size_nested = len;
6166	} else {
6167		data->bytes_alloc += event_len;
6168		data->bytes_written += len;
6169		data->events++;
6170		if (!data->min_size || len < data->min_size)
6171			data->min_size = len;
6172		if (len > data->max_size)
6173			data->max_size = len;
6174	}
6175
6176 out:
6177	ring_buffer_unlock_commit(data->buffer);
6178
6179	return 0;
6180}
6181
6182static __init int rb_test(void *arg)
6183{
6184	struct rb_test_data *data = arg;
6185
6186	while (!kthread_should_stop()) {
6187		rb_write_something(data, false);
6188		data->cnt++;
6189
6190		set_current_state(TASK_INTERRUPTIBLE);
6191		/* Now sleep between a min of 100-300us and a max of 1ms */
6192		usleep_range(((data->cnt % 3) + 1) * 100, 1000);
6193	}
6194
6195	return 0;
6196}
6197
6198static __init void rb_ipi(void *ignore)
6199{
6200	struct rb_test_data *data;
6201	int cpu = smp_processor_id();
6202
6203	data = &rb_data[cpu];
6204	rb_write_something(data, true);
6205}
6206
6207static __init int rb_hammer_test(void *arg)
6208{
6209	while (!kthread_should_stop()) {
6210
6211		/* Send an IPI to all cpus to write data! */
6212		smp_call_function(rb_ipi, NULL, 1);
6213		/* No sleep, but for non-preempt kernels, let others run */
6214		schedule();
6215	}
6216
6217	return 0;
6218}
6219
6220static __init int test_ringbuffer(void)
6221{
6222	struct task_struct *rb_hammer;
6223	struct trace_buffer *buffer;
6224	int cpu;
6225	int ret = 0;
6226
6227	if (security_locked_down(LOCKDOWN_TRACEFS)) {
6228		pr_warn("Lockdown is enabled, skipping ring buffer tests\n");
6229		return 0;
6230	}
6231
6232	pr_info("Running ring buffer tests...\n");
6233
6234	buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
6235	if (WARN_ON(!buffer))
6236		return 0;
6237
6238	/* Disable buffer so that threads can't write to it yet */
6239	ring_buffer_record_off(buffer);
6240
6241	for_each_online_cpu(cpu) {
6242		rb_data[cpu].buffer = buffer;
6243		rb_data[cpu].cpu = cpu;
6244		rb_data[cpu].cnt = cpu;
6245		rb_threads[cpu] = kthread_run_on_cpu(rb_test, &rb_data[cpu],
6246						     cpu, "rbtester/%u");
6247		if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
6248			pr_cont("FAILED\n");
6249			ret = PTR_ERR(rb_threads[cpu]);
6250			goto out_free;
6251		}
6252	}
6253
6254	/* Now create the rb hammer! */
6255	rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
6256	if (WARN_ON(IS_ERR(rb_hammer))) {
6257		pr_cont("FAILED\n");
6258		ret = PTR_ERR(rb_hammer);
6259		goto out_free;
6260	}
6261
6262	ring_buffer_record_on(buffer);
6263	/*
6264	 * Show buffer is enabled before setting rb_test_started.
6265	 * Yes there's a small race window where events could be
6266	 * dropped and the thread won't catch it. But when a ring
6267	 * buffer gets enabled, there will always be some kind of
6268	 * delay before other CPUs see it. Thus, we don't care about
6269	 * those dropped events. We care about events dropped after
6270	 * the threads see that the buffer is active.
6271	 */
6272	smp_wmb();
6273	rb_test_started = true;
6274
6275	set_current_state(TASK_INTERRUPTIBLE);
6276	/* Just run for 10 seconds */
6277	schedule_timeout(10 * HZ);
6278
6279	kthread_stop(rb_hammer);
6280
6281 out_free:
6282	for_each_online_cpu(cpu) {
6283		if (!rb_threads[cpu])
6284			break;
6285		kthread_stop(rb_threads[cpu]);
6286	}
6287	if (ret) {
6288		ring_buffer_free(buffer);
6289		return ret;
6290	}
6291
6292	/* Report! */
6293	pr_info("finished\n");
6294	for_each_online_cpu(cpu) {
6295		struct ring_buffer_event *event;
6296		struct rb_test_data *data = &rb_data[cpu];
6297		struct rb_item *item;
6298		unsigned long total_events;
6299		unsigned long total_dropped;
6300		unsigned long total_written;
6301		unsigned long total_alloc;
6302		unsigned long total_read = 0;
6303		unsigned long total_size = 0;
6304		unsigned long total_len = 0;
6305		unsigned long total_lost = 0;
6306		unsigned long lost;
6307		int big_event_size;
6308		int small_event_size;
6309
6310		ret = -1;
6311
6312		total_events = data->events + data->events_nested;
6313		total_written = data->bytes_written + data->bytes_written_nested;
6314		total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
6315		total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
6316
6317		big_event_size = data->max_size + data->max_size_nested;
6318		small_event_size = data->min_size + data->min_size_nested;
6319
6320		pr_info("CPU %d:\n", cpu);
6321		pr_info("              events:    %ld\n", total_events);
6322		pr_info("       dropped bytes:    %ld\n", total_dropped);
6323		pr_info("       alloced bytes:    %ld\n", total_alloc);
6324		pr_info("       written bytes:    %ld\n", total_written);
6325		pr_info("       biggest event:    %d\n", big_event_size);
6326		pr_info("      smallest event:    %d\n", small_event_size);
6327
6328		if (RB_WARN_ON(buffer, total_dropped))
6329			break;
6330
6331		ret = 0;
6332
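		/* Drain this CPU's buffer, verifying each event's payload */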
6333		while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
6334			total_lost += lost;
6335			item = ring_buffer_event_data(event);
6336			total_len += ring_buffer_event_length(event);
6337			total_size += item->size + sizeof(struct rb_item);
6338			if (memcmp(&item->str[0], rb_string, item->size) != 0) {
6339				pr_info("FAILED!\n");
6340				pr_info("buffer had: %.*s\n", item->size, item->str);
6341				pr_info("expected:   %.*s\n", item->size, rb_string);
6342				RB_WARN_ON(buffer, 1);
6343				ret = -1;
6344				break;
6345			}
6346			total_read++;
6347		}
6348		if (ret)
6349			break;
6350
6351		ret = -1;
6352
6353		pr_info("         read events:   %ld\n", total_read);
6354		pr_info("         lost events:   %ld\n", total_lost);
6355		pr_info("        total events:   %ld\n", total_lost + total_read);
6356		pr_info("  recorded len bytes:   %ld\n", total_len);
6357		pr_info(" recorded size bytes:   %ld\n", total_size);
6358		if (total_lost) {
6359			pr_info(" With dropped events, recorded len and size may not match\n"
6360				" the alloced and written totals above\n");
6361		} else {
6362			if (RB_WARN_ON(buffer, total_len != total_alloc ||
6363				       total_size != total_written))
6364				break;
6365		}
6366		if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
6367			break;
6368
6369		ret = 0;
6370	}
6371	if (!ret)
6372		pr_info("Ring buffer PASSED!\n");
6373
6374	ring_buffer_free(buffer);
6375	return 0;
6376}
6377
6378late_initcall(test_ringbuffer);
6379#endif /* CONFIG_RING_BUFFER_STARTUP_TEST */