   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Generic ring buffer
   4 *
   5 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
   6 */
   7#include <linux/trace_recursion.h>
   8#include <linux/trace_events.h>
   9#include <linux/ring_buffer.h>
  10#include <linux/trace_clock.h>
  11#include <linux/sched/clock.h>
  12#include <linux/trace_seq.h>
  13#include <linux/spinlock.h>
  14#include <linux/irq_work.h>
  15#include <linux/security.h>
  16#include <linux/uaccess.h>
  17#include <linux/hardirq.h>
  18#include <linux/kthread.h>	/* for self test */
  19#include <linux/module.h>
  20#include <linux/percpu.h>
  21#include <linux/mutex.h>
  22#include <linux/delay.h>
  23#include <linux/slab.h>
  24#include <linux/init.h>
  25#include <linux/hash.h>
  26#include <linux/list.h>
  27#include <linux/cpu.h>
  28#include <linux/oom.h>
  29
  30#include <asm/local.h>
  31
  32/*
  33 * The "absolute" timestamp in the buffer is only 59 bits.
  34 * If a clock has the 5 MSBs set, it needs to be saved and
  35 * reinserted.
  36 */
  37#define TS_MSB		(0xf8ULL << 56)
  38#define ABS_TS_MASK	(~TS_MSB)
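/*
 * Illustrative note (not part of the original file): a worked example of
 * the masking above. With a clock value such as ts = 0x0812345678abcdef,
 * the part that cannot fit in the 59 bit absolute timestamp is
 * (ts & TS_MSB) = 0x0800000000000000, and the part actually stored in the
 * buffer is (ts & ABS_TS_MASK) = 0x0012345678abcdef. rb_fix_abs_ts()
 * further down re-inserts the saved MSBs when the value is read back.
 */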
  39
  40static void update_pages_handler(struct work_struct *work);
  41
  42/*
   43 * The ring buffer header is special. We must manually keep it up to date.
  44 */
  45int ring_buffer_print_entry_header(struct trace_seq *s)
  46{
  47	trace_seq_puts(s, "# compressed entry header\n");
  48	trace_seq_puts(s, "\ttype_len    :    5 bits\n");
  49	trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
  50	trace_seq_puts(s, "\tarray       :   32 bits\n");
  51	trace_seq_putc(s, '\n');
  52	trace_seq_printf(s, "\tpadding     : type == %d\n",
  53			 RINGBUF_TYPE_PADDING);
  54	trace_seq_printf(s, "\ttime_extend : type == %d\n",
  55			 RINGBUF_TYPE_TIME_EXTEND);
  56	trace_seq_printf(s, "\ttime_stamp : type == %d\n",
  57			 RINGBUF_TYPE_TIME_STAMP);
  58	trace_seq_printf(s, "\tdata max type_len  == %d\n",
  59			 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
  60
  61	return !trace_seq_has_overflowed(s);
  62}
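/*
 * Illustrative note (not part of the original file): for a data event whose
 * payload fits in RB_MAX_SMALL_DATA, type_len holds the payload length in
 * 4-byte words and the payload starts at array[0]. For example, a 12-byte
 * payload written 100 clock counts after the previous event is encoded as
 * type_len = 3, time_delta = 100, for a total of 4 + 12 = 16 bytes. Larger
 * payloads use type_len = 0 and store the length in array[0], with the
 * payload starting at array[1] (see rb_event_data() below).
 */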
  63
  64/*
  65 * The ring buffer is made up of a list of pages. A separate list of pages is
  66 * allocated for each CPU. A writer may only write to a buffer that is
  67 * associated with the CPU it is currently executing on.  A reader may read
  68 * from any per cpu buffer.
  69 *
  70 * The reader is special. For each per cpu buffer, the reader has its own
  71 * reader page. When a reader has read the entire reader page, this reader
  72 * page is swapped with another page in the ring buffer.
  73 *
   74 * Now, as long as the writer is off the reader page, the reader can do
   75 * whatever it wants with that page. The writer will never write to that page
  76 * again (as long as it is out of the ring buffer).
  77 *
  78 * Here's some silly ASCII art.
  79 *
  80 *   +------+
  81 *   |reader|          RING BUFFER
  82 *   |page  |
  83 *   +------+        +---+   +---+   +---+
  84 *                   |   |-->|   |-->|   |
  85 *                   +---+   +---+   +---+
  86 *                     ^               |
  87 *                     |               |
  88 *                     +---------------+
  89 *
  90 *
  91 *   +------+
  92 *   |reader|          RING BUFFER
  93 *   |page  |------------------v
  94 *   +------+        +---+   +---+   +---+
  95 *                   |   |-->|   |-->|   |
  96 *                   +---+   +---+   +---+
  97 *                     ^               |
  98 *                     |               |
  99 *                     +---------------+
 100 *
 101 *
 102 *   +------+
 103 *   |reader|          RING BUFFER
 104 *   |page  |------------------v
 105 *   +------+        +---+   +---+   +---+
 106 *      ^            |   |-->|   |-->|   |
 107 *      |            +---+   +---+   +---+
 108 *      |                              |
 109 *      |                              |
 110 *      +------------------------------+
 111 *
 112 *
 113 *   +------+
 114 *   |buffer|          RING BUFFER
 115 *   |page  |------------------v
 116 *   +------+        +---+   +---+   +---+
 117 *      ^            |   |   |   |-->|   |
 118 *      |   New      +---+   +---+   +---+
 119 *      |  Reader------^               |
 120 *      |   page                       |
 121 *      +------------------------------+
 122 *
 123 *
 124 * After we make this swap, the reader can hand this page off to the splice
 125 * code and be done with it. It can even allocate a new page if it needs to
 126 * and swap that into the ring buffer.
 127 *
 128 * We will be using cmpxchg soon to make all this lockless.
 129 *
 130 */
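/*
 * Illustrative sketch (not part of the original file) of the swap shown in
 * the ASCII art above, omitting the locking, retry loops and flag handling
 * done by the real code in rb_get_reader_page():
 *
 *	reader_page->list.next = rb_list_head(head->list.next);
 *	reader_page->list.prev = head->list.prev;
 *	rb_set_list_to_head(&reader_page->list);
 *	if (rb_head_page_replace(head, reader_page))
 *		cpu_buffer->reader_page = head;	-- old head becomes the reader page
 */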
 131
 132/* Used for individual buffers (after the counter) */
 133#define RB_BUFFER_OFF		(1 << 20)
 134
 135#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
 136
 137#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
 138#define RB_ALIGNMENT		4U
 139#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 140#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */
 141
 142#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
 143# define RB_FORCE_8BYTE_ALIGNMENT	0
 144# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
 145#else
 146# define RB_FORCE_8BYTE_ALIGNMENT	1
 147# define RB_ARCH_ALIGNMENT		8U
 148#endif
 149
 150#define RB_ALIGN_DATA		__aligned(RB_ARCH_ALIGNMENT)
 151
 152/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
 153#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
 154
 155enum {
 156	RB_LEN_TIME_EXTEND = 8,
 157	RB_LEN_TIME_STAMP =  8,
 158};
 159
 160#define skip_time_extend(event) \
 161	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
 162
 163#define extended_time(event) \
 164	(event->type_len >= RINGBUF_TYPE_TIME_EXTEND)
 165
 166static inline int rb_null_event(struct ring_buffer_event *event)
 167{
 168	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
 169}
 170
 171static void rb_event_set_padding(struct ring_buffer_event *event)
 172{
 173	/* padding has a NULL time_delta */
 174	event->type_len = RINGBUF_TYPE_PADDING;
 175	event->time_delta = 0;
 176}
 177
 178static unsigned
 179rb_event_data_length(struct ring_buffer_event *event)
 180{
 181	unsigned length;
 182
 183	if (event->type_len)
 184		length = event->type_len * RB_ALIGNMENT;
 185	else
 186		length = event->array[0];
 187	return length + RB_EVNT_HDR_SIZE;
 188}
 189
 190/*
 191 * Return the length of the given event. Will return
 192 * the length of the time extend if the event is a
 193 * time extend.
 194 */
 195static inline unsigned
 196rb_event_length(struct ring_buffer_event *event)
 197{
 198	switch (event->type_len) {
 199	case RINGBUF_TYPE_PADDING:
 200		if (rb_null_event(event))
 201			/* undefined */
 202			return -1;
 203		return  event->array[0] + RB_EVNT_HDR_SIZE;
 204
 205	case RINGBUF_TYPE_TIME_EXTEND:
 206		return RB_LEN_TIME_EXTEND;
 207
 208	case RINGBUF_TYPE_TIME_STAMP:
 209		return RB_LEN_TIME_STAMP;
 210
 211	case RINGBUF_TYPE_DATA:
 212		return rb_event_data_length(event);
 213	default:
 214		WARN_ON_ONCE(1);
 215	}
 216	/* not hit */
 217	return 0;
 218}
 219
 220/*
 221 * Return total length of time extend and data,
 222 *   or just the event length for all other events.
 223 */
 224static inline unsigned
 225rb_event_ts_length(struct ring_buffer_event *event)
 226{
 227	unsigned len = 0;
 228
 229	if (extended_time(event)) {
 230		/* time extends include the data event after it */
 231		len = RB_LEN_TIME_EXTEND;
 232		event = skip_time_extend(event);
 233	}
 234	return len + rb_event_length(event);
 235}
 236
 237/**
 238 * ring_buffer_event_length - return the length of the event
 239 * @event: the event to get the length of
 240 *
 241 * Returns the size of the data load of a data event.
 242 * If the event is something other than a data event, it
 243 * returns the size of the event itself. With the exception
 244 * of a TIME EXTEND, where it still returns the size of the
 245 * data load of the data event after it.
 246 */
 247unsigned ring_buffer_event_length(struct ring_buffer_event *event)
 248{
 249	unsigned length;
 250
 251	if (extended_time(event))
 252		event = skip_time_extend(event);
 253
 254	length = rb_event_length(event);
 255	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 256		return length;
 257	length -= RB_EVNT_HDR_SIZE;
 258	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
  259		length -= sizeof(event->array[0]);
 260	return length;
 261}
 262EXPORT_SYMBOL_GPL(ring_buffer_event_length);
 263
 264/* inline for ring buffer fast paths */
 265static __always_inline void *
 266rb_event_data(struct ring_buffer_event *event)
 267{
 268	if (extended_time(event))
 269		event = skip_time_extend(event);
 270	WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
 271	/* If length is in len field, then array[0] has the data */
 272	if (event->type_len)
 273		return (void *)&event->array[0];
 274	/* Otherwise length is in array[0] and array[1] has the data */
 275	return (void *)&event->array[1];
 276}
 277
 278/**
 279 * ring_buffer_event_data - return the data of the event
 280 * @event: the event to get the data from
 281 */
 282void *ring_buffer_event_data(struct ring_buffer_event *event)
 283{
 284	return rb_event_data(event);
 285}
 286EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 287
 288#define for_each_buffer_cpu(buffer, cpu)		\
 289	for_each_cpu(cpu, buffer->cpumask)
 290
 291#define for_each_online_buffer_cpu(buffer, cpu)		\
 292	for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
 293
 294#define TS_SHIFT	27
 295#define TS_MASK		((1ULL << TS_SHIFT) - 1)
 296#define TS_DELTA_TEST	(~TS_MASK)
 297
 298static u64 rb_event_time_stamp(struct ring_buffer_event *event)
 299{
 300	u64 ts;
 301
 302	ts = event->array[0];
 303	ts <<= TS_SHIFT;
 304	ts += event->time_delta;
 305
 306	return ts;
 307}
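/*
 * Illustrative note (not part of the original file): a TIME_EXTEND or
 * TIME_STAMP event stores the upper bits of its value in array[0] and the
 * lower 27 bits in time_delta. For example, array[0] = 2 and time_delta = 5
 * reconstruct to (2 << 27) + 5 = 268435461.
 */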
 308
 309/* Flag when events were overwritten */
 310#define RB_MISSED_EVENTS	(1 << 31)
 311/* Missed count stored at end */
 312#define RB_MISSED_STORED	(1 << 30)
 313
 314struct buffer_data_page {
 315	u64		 time_stamp;	/* page time stamp */
 316	local_t		 commit;	/* write committed index */
 317	unsigned char	 data[] RB_ALIGN_DATA;	/* data of buffer page */
 318};
 319
 320/*
 321 * Note, the buffer_page list must be first. The buffer pages
 322 * are allocated in cache lines, which means that each buffer
 323 * page will be at the beginning of a cache line, and thus
 324 * the least significant bits will be zero. We use this to
 325 * add flags in the list struct pointers, to make the ring buffer
 326 * lockless.
 327 */
 328struct buffer_page {
 329	struct list_head list;		/* list of buffer pages */
 330	local_t		 write;		/* index for next write */
 331	unsigned	 read;		/* index for next read */
 332	local_t		 entries;	/* entries on this page */
 333	unsigned long	 real_end;	/* real end of data */
 334	struct buffer_data_page *page;	/* Actual data page */
 335};
 336
 337/*
 338 * The buffer page counters, write and entries, must be reset
 339 * atomically when crossing page boundaries. To synchronize this
 340 * update, two counters are inserted into the number. One is
 341 * the actual counter for the write position or count on the page.
 342 *
 343 * The other is a counter of updaters. Before an update happens
 344 * the update partition of the counter is incremented. This will
 345 * allow the updater to update the counter atomically.
 346 *
 347 * The counter is 20 bits, and the state data is 12.
 348 */
 349#define RB_WRITE_MASK		0xfffff
 350#define RB_WRITE_INTCNT		(1 << 20)
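/*
 * Illustrative note (not part of the original file): with the layout above,
 * a write field of 0x00300040 means the updater counter in the upper 12 bits
 * is 3, while the write index on the page, in the lower 20 bits
 * (write & RB_WRITE_MASK), is 0x40 = 64 bytes. rb_tail_page_update() below
 * adds RB_WRITE_INTCNT to bump the updater counter without disturbing the
 * write index.
 */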
 351
 352static void rb_init_page(struct buffer_data_page *bpage)
 353{
 354	local_set(&bpage->commit, 0);
 355}
 356
 357/*
 358 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 359 * this issue out.
 360 */
 361static void free_buffer_page(struct buffer_page *bpage)
 362{
 363	free_page((unsigned long)bpage->page);
 364	kfree(bpage);
 365}
 366
 367/*
 368 * We need to fit the time_stamp delta into 27 bits.
 369 */
 370static inline int test_time_stamp(u64 delta)
 371{
 372	if (delta & TS_DELTA_TEST)
 373		return 1;
 374	return 0;
 375}
 376
 377#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
 378
 379/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
 380#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
 381
 382int ring_buffer_print_page_header(struct trace_seq *s)
 383{
 384	struct buffer_data_page field;
 385
 386	trace_seq_printf(s, "\tfield: u64 timestamp;\t"
 387			 "offset:0;\tsize:%u;\tsigned:%u;\n",
 388			 (unsigned int)sizeof(field.time_stamp),
 389			 (unsigned int)is_signed_type(u64));
 390
 391	trace_seq_printf(s, "\tfield: local_t commit;\t"
 392			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
 393			 (unsigned int)offsetof(typeof(field), commit),
 394			 (unsigned int)sizeof(field.commit),
 395			 (unsigned int)is_signed_type(long));
 396
 397	trace_seq_printf(s, "\tfield: int overwrite;\t"
 398			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
 399			 (unsigned int)offsetof(typeof(field), commit),
 400			 1,
 401			 (unsigned int)is_signed_type(long));
 402
 403	trace_seq_printf(s, "\tfield: char data;\t"
 404			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
 405			 (unsigned int)offsetof(typeof(field), data),
 406			 (unsigned int)BUF_PAGE_SIZE,
 407			 (unsigned int)is_signed_type(char));
 408
 409	return !trace_seq_has_overflowed(s);
 410}
 411
 412struct rb_irq_work {
 413	struct irq_work			work;
 414	wait_queue_head_t		waiters;
 415	wait_queue_head_t		full_waiters;
 416	long				wait_index;
 417	bool				waiters_pending;
 418	bool				full_waiters_pending;
 419	bool				wakeup_full;
 420};
 421
 422/*
 423 * Structure to hold event state and handle nested events.
 424 */
 425struct rb_event_info {
 426	u64			ts;
 427	u64			delta;
 428	u64			before;
 429	u64			after;
 430	unsigned long		length;
 431	struct buffer_page	*tail_page;
 432	int			add_timestamp;
 433};
 434
 435/*
 436 * Used for the add_timestamp
 437 *  NONE
 438 *  EXTEND - wants a time extend
 439 *  ABSOLUTE - the buffer requests all events to have absolute time stamps
 440 *  FORCE - force a full time stamp.
 441 */
 442enum {
 443	RB_ADD_STAMP_NONE		= 0,
 444	RB_ADD_STAMP_EXTEND		= BIT(1),
 445	RB_ADD_STAMP_ABSOLUTE		= BIT(2),
 446	RB_ADD_STAMP_FORCE		= BIT(3)
 447};
 448/*
 449 * Used for which event context the event is in.
 450 *  TRANSITION = 0
 451 *  NMI     = 1
 452 *  IRQ     = 2
 453 *  SOFTIRQ = 3
 454 *  NORMAL  = 4
 455 *
 456 * See trace_recursive_lock() comment below for more details.
 457 */
 458enum {
 459	RB_CTX_TRANSITION,
 460	RB_CTX_NMI,
 461	RB_CTX_IRQ,
 462	RB_CTX_SOFTIRQ,
 463	RB_CTX_NORMAL,
 464	RB_CTX_MAX
 465};
 466
 467#if BITS_PER_LONG == 32
 468#define RB_TIME_32
 469#endif
 470
 471/* To test on 64 bit machines */
 472//#define RB_TIME_32
 473
 474#ifdef RB_TIME_32
 475
 476struct rb_time_struct {
 477	local_t		cnt;
 478	local_t		top;
 479	local_t		bottom;
 480	local_t		msb;
 481};
 482#else
 483#include <asm/local64.h>
 484struct rb_time_struct {
 485	local64_t	time;
 486};
 487#endif
 488typedef struct rb_time_struct rb_time_t;
 489
 490#define MAX_NEST	5
 491
 492/*
 493 * head_page == tail_page && head == tail then buffer is empty.
 494 */
 495struct ring_buffer_per_cpu {
 496	int				cpu;
 497	atomic_t			record_disabled;
 498	atomic_t			resize_disabled;
 499	struct trace_buffer	*buffer;
 500	raw_spinlock_t			reader_lock;	/* serialize readers */
 501	arch_spinlock_t			lock;
 502	struct lock_class_key		lock_key;
 503	struct buffer_data_page		*free_page;
 504	unsigned long			nr_pages;
 505	unsigned int			current_context;
 506	struct list_head		*pages;
 507	struct buffer_page		*head_page;	/* read from head */
 508	struct buffer_page		*tail_page;	/* write to tail */
 509	struct buffer_page		*commit_page;	/* committed pages */
 510	struct buffer_page		*reader_page;
 511	unsigned long			lost_events;
 512	unsigned long			last_overrun;
 513	unsigned long			nest;
 514	local_t				entries_bytes;
 515	local_t				entries;
 516	local_t				overrun;
 517	local_t				commit_overrun;
 518	local_t				dropped_events;
 519	local_t				committing;
 520	local_t				commits;
 521	local_t				pages_touched;
 522	local_t				pages_lost;
 523	local_t				pages_read;
 524	long				last_pages_touch;
 525	size_t				shortest_full;
 526	unsigned long			read;
 527	unsigned long			read_bytes;
 528	rb_time_t			write_stamp;
 529	rb_time_t			before_stamp;
 530	u64				event_stamp[MAX_NEST];
 531	u64				read_stamp;
 532	/* ring buffer pages to update, > 0 to add, < 0 to remove */
 533	long				nr_pages_to_update;
 534	struct list_head		new_pages; /* new pages to add */
 535	struct work_struct		update_pages_work;
 536	struct completion		update_done;
 537
 538	struct rb_irq_work		irq_work;
 539};
 540
 541struct trace_buffer {
 542	unsigned			flags;
 543	int				cpus;
 544	atomic_t			record_disabled;
 545	cpumask_var_t			cpumask;
 546
 547	struct lock_class_key		*reader_lock_key;
 548
 549	struct mutex			mutex;
 550
 551	struct ring_buffer_per_cpu	**buffers;
 552
 553	struct hlist_node		node;
 554	u64				(*clock)(void);
 555
 556	struct rb_irq_work		irq_work;
 557	bool				time_stamp_abs;
 558};
 559
 560struct ring_buffer_iter {
 561	struct ring_buffer_per_cpu	*cpu_buffer;
 562	unsigned long			head;
 563	unsigned long			next_event;
 564	struct buffer_page		*head_page;
 565	struct buffer_page		*cache_reader_page;
 566	unsigned long			cache_read;
 567	u64				read_stamp;
 568	u64				page_stamp;
 569	struct ring_buffer_event	*event;
 570	int				missed_events;
 571};
 572
 573#ifdef RB_TIME_32
 574
 575/*
 576 * On 32 bit machines, local64_t is very expensive. As the ring
 577 * buffer doesn't need all the features of a true 64 bit atomic,
 578 * on 32 bit, it uses these functions (64 still uses local64_t).
 579 *
 580 * For the ring buffer, 64 bit required operations for the time is
 581 * the following:
 582 *
  583 *  - A read may fail if it interrupted a modification of the time stamp.
 584 *      It will succeed if it did not interrupt another write even if
 585 *      the read itself is interrupted by a write.
 586 *      It returns whether it was successful or not.
 587 *
 588 *  - Writes always succeed and will overwrite other writes and writes
 589 *      that were done by events interrupting the current write.
 590 *
 591 *  - A write followed by a read of the same time stamp will always succeed,
 592 *      but may not contain the same value.
 593 *
 594 *  - A cmpxchg will fail if it interrupted another write or cmpxchg.
 595 *      Other than that, it acts like a normal cmpxchg.
 596 *
  597 * The 60 bit time stamp is broken up into a top and a bottom half of 30 bits
  598 *  each (bottom being the least significant 30 bits of the 60 bit time stamp).
  599 *
  600 * The two most significant bits of each half hold a 2 bit counter (0-3).
 601 * Each update will increment this counter by one.
 602 * When reading the top and bottom, if the two counter bits match then the
 603 *  top and bottom together make a valid 60 bit number.
 604 */
 605#define RB_TIME_SHIFT	30
 606#define RB_TIME_VAL_MASK ((1 << RB_TIME_SHIFT) - 1)
 607#define RB_TIME_MSB_SHIFT	 60
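/*
 * Illustrative note (not part of the original file): for a time stamp
 * ts = (3 << 30) | 5, rb_time_split() yields top = 3, bottom = 5, msb = 0.
 * If the update counter is currently 2, the stored words become
 * top = 3 | (2 << 30) = 0x80000003 and bottom = 5 | (2 << 30) = 0x80000005.
 * A reader that sees matching counter bits (2) in both halves recombines
 * them with rb_time_val() to recover the original 60 bit value.
 */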
 608
 609static inline int rb_time_cnt(unsigned long val)
 610{
 611	return (val >> RB_TIME_SHIFT) & 3;
 612}
 613
 614static inline u64 rb_time_val(unsigned long top, unsigned long bottom)
 615{
 616	u64 val;
 617
 618	val = top & RB_TIME_VAL_MASK;
 619	val <<= RB_TIME_SHIFT;
 620	val |= bottom & RB_TIME_VAL_MASK;
 621
 622	return val;
 623}
 624
 625static inline bool __rb_time_read(rb_time_t *t, u64 *ret, unsigned long *cnt)
 626{
 627	unsigned long top, bottom, msb;
 628	unsigned long c;
 629
 630	/*
 631	 * If the read is interrupted by a write, then the cnt will
 632	 * be different. Loop until both top and bottom have been read
 633	 * without interruption.
 634	 */
 635	do {
 636		c = local_read(&t->cnt);
 637		top = local_read(&t->top);
 638		bottom = local_read(&t->bottom);
 639		msb = local_read(&t->msb);
 640	} while (c != local_read(&t->cnt));
 641
 642	*cnt = rb_time_cnt(top);
 643
 644	/* If top and bottom counts don't match, this interrupted a write */
 645	if (*cnt != rb_time_cnt(bottom))
 646		return false;
 647
 648	/* The shift to msb will lose its cnt bits */
 649	*ret = rb_time_val(top, bottom) | ((u64)msb << RB_TIME_MSB_SHIFT);
 650	return true;
 651}
 652
 653static bool rb_time_read(rb_time_t *t, u64 *ret)
 654{
 655	unsigned long cnt;
 656
 657	return __rb_time_read(t, ret, &cnt);
 658}
 659
 660static inline unsigned long rb_time_val_cnt(unsigned long val, unsigned long cnt)
 661{
 662	return (val & RB_TIME_VAL_MASK) | ((cnt & 3) << RB_TIME_SHIFT);
 663}
 664
 665static inline void rb_time_split(u64 val, unsigned long *top, unsigned long *bottom,
 666				 unsigned long *msb)
 667{
 668	*top = (unsigned long)((val >> RB_TIME_SHIFT) & RB_TIME_VAL_MASK);
 669	*bottom = (unsigned long)(val & RB_TIME_VAL_MASK);
 670	*msb = (unsigned long)(val >> RB_TIME_MSB_SHIFT);
 671}
 672
 673static inline void rb_time_val_set(local_t *t, unsigned long val, unsigned long cnt)
 674{
 675	val = rb_time_val_cnt(val, cnt);
 676	local_set(t, val);
 677}
 678
 679static void rb_time_set(rb_time_t *t, u64 val)
 680{
 681	unsigned long cnt, top, bottom, msb;
 682
 683	rb_time_split(val, &top, &bottom, &msb);
 684
 685	/* Writes always succeed with a valid number even if it gets interrupted. */
 686	do {
 687		cnt = local_inc_return(&t->cnt);
 688		rb_time_val_set(&t->top, top, cnt);
 689		rb_time_val_set(&t->bottom, bottom, cnt);
 690		rb_time_val_set(&t->msb, val >> RB_TIME_MSB_SHIFT, cnt);
 691	} while (cnt != local_read(&t->cnt));
 692}
 693
 694static inline bool
 695rb_time_read_cmpxchg(local_t *l, unsigned long expect, unsigned long set)
 696{
 697	unsigned long ret;
 698
 699	ret = local_cmpxchg(l, expect, set);
 700	return ret == expect;
 701}
 702
 703static int rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
 704{
 705	unsigned long cnt, top, bottom, msb;
 706	unsigned long cnt2, top2, bottom2, msb2;
 707	u64 val;
 708
 709	/* The cmpxchg always fails if it interrupted an update */
  710	if (!__rb_time_read(t, &val, &cnt2))
  711		return false;
  712
  713	if (val != expect)
  714		return false;
  715
  716	cnt = local_read(&t->cnt);
  717	if ((cnt & 3) != cnt2)
  718		return false;
  719
  720	cnt2 = cnt + 1;
  721
  722	rb_time_split(val, &top, &bottom, &msb);
  723	top = rb_time_val_cnt(top, cnt);
  724	bottom = rb_time_val_cnt(bottom, cnt);
  725
  726	rb_time_split(set, &top2, &bottom2, &msb2);
  727	top2 = rb_time_val_cnt(top2, cnt2);
  728	bottom2 = rb_time_val_cnt(bottom2, cnt2);
 729
 730	if (!rb_time_read_cmpxchg(&t->cnt, cnt, cnt2))
 731		return false;
 732	if (!rb_time_read_cmpxchg(&t->msb, msb, msb2))
 733		return false;
 734	if (!rb_time_read_cmpxchg(&t->top, top, top2))
 735		return false;
 736	if (!rb_time_read_cmpxchg(&t->bottom, bottom, bottom2))
 737		return false;
 738	return true;
 739}
 740
 741#else /* 64 bits */
 742
 743/* local64_t always succeeds */
 744
 745static inline bool rb_time_read(rb_time_t *t, u64 *ret)
 746{
 747	*ret = local64_read(&t->time);
 748	return true;
 749}
 750static void rb_time_set(rb_time_t *t, u64 val)
 751{
 752	local64_set(&t->time, val);
 753}
 754
 755static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
 756{
 757	u64 val;
 758	val = local64_cmpxchg(&t->time, expect, set);
 759	return val == expect;
 760}
 761#endif
 762
 763/*
 764 * Enable this to make sure that the event passed to
 765 * ring_buffer_event_time_stamp() is not committed and also
 766 * is on the buffer that it passed in.
 767 */
 768//#define RB_VERIFY_EVENT
 769#ifdef RB_VERIFY_EVENT
 770static struct list_head *rb_list_head(struct list_head *list);
 771static void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
 772			 void *event)
 773{
 774	struct buffer_page *page = cpu_buffer->commit_page;
 775	struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page);
 776	struct list_head *next;
 777	long commit, write;
 778	unsigned long addr = (unsigned long)event;
 779	bool done = false;
 780	int stop = 0;
 781
 782	/* Make sure the event exists and is not committed yet */
 783	do {
 784		if (page == tail_page || WARN_ON_ONCE(stop++ > 100))
 785			done = true;
 786		commit = local_read(&page->page->commit);
 787		write = local_read(&page->write);
 788		if (addr >= (unsigned long)&page->page->data[commit] &&
 789		    addr < (unsigned long)&page->page->data[write])
 790			return;
 791
 792		next = rb_list_head(page->list.next);
 793		page = list_entry(next, struct buffer_page, list);
 794	} while (!done);
 795	WARN_ON_ONCE(1);
 796}
 797#else
 798static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
 799			 void *event)
 800{
 801}
 802#endif
 803
 804/*
 805 * The absolute time stamp drops the 5 MSBs and some clocks may
 806 * require them. The rb_fix_abs_ts() will take a previous full
 807 * time stamp, and add the 5 MSB of that time stamp on to the
 808 * saved absolute time stamp. Then they are compared in case of
 809 * the unlikely event that the latest time stamp incremented
 810 * the 5 MSB.
 811 */
 812static inline u64 rb_fix_abs_ts(u64 abs, u64 save_ts)
 813{
 814	if (save_ts & TS_MSB) {
 815		abs |= save_ts & TS_MSB;
 816		/* Check for overflow */
 817		if (unlikely(abs < save_ts))
 818			abs += 1ULL << 59;
 819	}
 820	return abs;
 821}
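/*
 * Illustrative note (not part of the original file): if the previously saved
 * full time stamp was save_ts = 0x0800000000001000 and the buffer holds the
 * truncated value abs = 0x2000, the result is 0x0800000000002000. If ORing
 * the MSBs back in produces a value smaller than save_ts, the lower 59 bits
 * must have wrapped, so 1 << 59 is added to account for the carry into the
 * MSBs.
 */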
 822
 823static inline u64 rb_time_stamp(struct trace_buffer *buffer);
 824
 825/**
 826 * ring_buffer_event_time_stamp - return the event's current time stamp
 827 * @buffer: The buffer that the event is on
 828 * @event: the event to get the time stamp of
 829 *
 830 * Note, this must be called after @event is reserved, and before it is
 831 * committed to the ring buffer. And must be called from the same
 832 * context where the event was reserved (normal, softirq, irq, etc).
 833 *
 834 * Returns the time stamp associated with the current event.
 835 * If the event has an extended time stamp, then that is used as
 836 * the time stamp to return.
 837 * In the highly unlikely case that the event was nested more than
  838 * the max nesting, then the write_stamp of the buffer is returned;
  839 * otherwise the current time is returned. Neither of the last two
  840 * cases should ever happen.
 841 */
 842u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
 843				 struct ring_buffer_event *event)
 844{
 845	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()];
 846	unsigned int nest;
 847	u64 ts;
 848
 849	/* If the event includes an absolute time, then just use that */
 850	if (event->type_len == RINGBUF_TYPE_TIME_STAMP) {
 851		ts = rb_event_time_stamp(event);
 852		return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp);
 853	}
 854
 855	nest = local_read(&cpu_buffer->committing);
 856	verify_event(cpu_buffer, event);
 857	if (WARN_ON_ONCE(!nest))
 858		goto fail;
 859
 860	/* Read the current saved nesting level time stamp */
 861	if (likely(--nest < MAX_NEST))
 862		return cpu_buffer->event_stamp[nest];
 863
 864	/* Shouldn't happen, warn if it does */
 865	WARN_ONCE(1, "nest (%d) greater than max", nest);
 866
 867 fail:
 868	/* Can only fail on 32 bit */
 869	if (!rb_time_read(&cpu_buffer->write_stamp, &ts))
 870		/* Screw it, just read the current time */
 871		ts = rb_time_stamp(cpu_buffer->buffer);
 872
 873	return ts;
 874}
 875
 876/**
 877 * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
 878 * @buffer: The ring_buffer to get the number of pages from
 879 * @cpu: The cpu of the ring_buffer to get the number of pages from
 880 *
 881 * Returns the number of pages used by a per_cpu buffer of the ring buffer.
 882 */
 883size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
 884{
 885	return buffer->buffers[cpu]->nr_pages;
 886}
 887
 888/**
 889 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
 890 * @buffer: The ring_buffer to get the number of pages from
 891 * @cpu: The cpu of the ring_buffer to get the number of pages from
 892 *
 893 * Returns the number of pages that have content in the ring buffer.
 894 */
 895size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
 896{
 897	size_t read;
 898	size_t lost;
 899	size_t cnt;
 900
 901	read = local_read(&buffer->buffers[cpu]->pages_read);
 902	lost = local_read(&buffer->buffers[cpu]->pages_lost);
 903	cnt = local_read(&buffer->buffers[cpu]->pages_touched);
 904
 905	if (WARN_ON_ONCE(cnt < lost))
 906		return 0;
 907
 908	cnt -= lost;
 909
 910	/* The reader can read an empty page, but not more than that */
 911	if (cnt < read) {
 912		WARN_ON_ONCE(read > cnt + 1);
 913		return 0;
 914	}
 915
 916	return cnt - read;
 917}
 918
 919static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full)
 920{
 921	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 922	size_t nr_pages;
 923	size_t dirty;
 924
 925	nr_pages = cpu_buffer->nr_pages;
 926	if (!nr_pages || !full)
 927		return true;
 928
 929	dirty = ring_buffer_nr_dirty_pages(buffer, cpu);
 930
 931	return (dirty * 100) > (full * nr_pages);
 932}
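/*
 * Illustrative note (not part of the original file): full_hit() checks
 * whether the percentage of dirty pages has reached the caller's @full
 * threshold. With nr_pages = 16 and full = 50, the condition
 * (dirty * 100) > (full * nr_pages) requires dirty * 100 > 800, i.e. at
 * least 9 dirty pages before a "50% full" waiter is considered satisfied.
 */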
 933
 934/*
 935 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 936 *
 937 * Schedules a delayed work to wake up any task that is blocked on the
 938 * ring buffer waiters queue.
 939 */
 940static void rb_wake_up_waiters(struct irq_work *work)
 941{
 942	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
 943
 944	wake_up_all(&rbwork->waiters);
 945	if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
 946		rbwork->wakeup_full = false;
 947		rbwork->full_waiters_pending = false;
 948		wake_up_all(&rbwork->full_waiters);
 949	}
 950}
 951
 952/**
 953 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
 954 * @buffer: The ring buffer to wake waiters on
 955 *
  956 * When a file that represents a ring buffer is closing,
  957 * it is prudent to wake up any waiters that are on it.
 958 */
 959void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
 960{
 961	struct ring_buffer_per_cpu *cpu_buffer;
 962	struct rb_irq_work *rbwork;
 963
 964	if (!buffer)
 965		return;
 966
 967	if (cpu == RING_BUFFER_ALL_CPUS) {
 968
 969		/* Wake up individual ones too. One level recursion */
 970		for_each_buffer_cpu(buffer, cpu)
 971			ring_buffer_wake_waiters(buffer, cpu);
 972
 973		rbwork = &buffer->irq_work;
 974	} else {
 975		if (WARN_ON_ONCE(!buffer->buffers))
 976			return;
 977		if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
 978			return;
 979
 980		cpu_buffer = buffer->buffers[cpu];
 981		/* The CPU buffer may not have been initialized yet */
 982		if (!cpu_buffer)
 983			return;
 984		rbwork = &cpu_buffer->irq_work;
 985	}
 986
 987	rbwork->wait_index++;
 988	/* make sure the waiters see the new index */
 989	smp_wmb();
 990
 991	rb_wake_up_waiters(&rbwork->work);
 992}
 993
 994/**
 995 * ring_buffer_wait - wait for input to the ring buffer
 996 * @buffer: buffer to wait on
 997 * @cpu: the cpu buffer to wait on
 998 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
 999 *
1000 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
1001 * as data is added to any of the @buffer's cpu buffers. Otherwise
1002 * it will wait for data to be added to a specific cpu buffer.
1003 */
1004int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
1005{
1006	struct ring_buffer_per_cpu *cpu_buffer;
1007	DEFINE_WAIT(wait);
1008	struct rb_irq_work *work;
1009	long wait_index;
1010	int ret = 0;
1011
1012	/*
1013	 * Depending on what the caller is waiting for, either any
1014	 * data in any cpu buffer, or a specific buffer, put the
1015	 * caller on the appropriate wait queue.
1016	 */
1017	if (cpu == RING_BUFFER_ALL_CPUS) {
1018		work = &buffer->irq_work;
1019		/* Full only makes sense on per cpu reads */
1020		full = 0;
1021	} else {
1022		if (!cpumask_test_cpu(cpu, buffer->cpumask))
1023			return -ENODEV;
1024		cpu_buffer = buffer->buffers[cpu];
1025		work = &cpu_buffer->irq_work;
1026	}
1027
1028	wait_index = READ_ONCE(work->wait_index);
1029
1030	while (true) {
1031		if (full)
1032			prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
1033		else
1034			prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
1035
1036		/*
1037		 * The events can happen in critical sections where
1038		 * checking a work queue can cause deadlocks.
1039		 * After adding a task to the queue, this flag is set
1040		 * only to notify events to try to wake up the queue
1041		 * using irq_work.
1042		 *
1043		 * We don't clear it even if the buffer is no longer
1044		 * empty. The flag only causes the next event to run
 1045		 * irq_work to do the work queue wake up. The worst
1046		 * that can happen if we race with !trace_empty() is that
1047		 * an event will cause an irq_work to try to wake up
1048		 * an empty queue.
1049		 *
1050		 * There's no reason to protect this flag either, as
1051		 * the work queue and irq_work logic will do the necessary
1052		 * synchronization for the wake ups. The only thing
1053		 * that is necessary is that the wake up happens after
1054		 * a task has been queued. It's OK for spurious wake ups.
1055		 */
1056		if (full)
1057			work->full_waiters_pending = true;
1058		else
1059			work->waiters_pending = true;
1060
1061		if (signal_pending(current)) {
1062			ret = -EINTR;
1063			break;
1064		}
1065
1066		if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
1067			break;
1068
1069		if (cpu != RING_BUFFER_ALL_CPUS &&
1070		    !ring_buffer_empty_cpu(buffer, cpu)) {
1071			unsigned long flags;
1072			bool pagebusy;
1073			bool done;
1074
1075			if (!full)
1076				break;
1077
1078			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1079			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
1080			done = !pagebusy && full_hit(buffer, cpu, full);
1081
1082			if (!cpu_buffer->shortest_full ||
1083			    cpu_buffer->shortest_full > full)
1084				cpu_buffer->shortest_full = full;
1085			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1086			if (done)
1087				break;
1088		}
1089
1090		schedule();
1091
1092		/* Make sure to see the new wait index */
1093		smp_rmb();
1094		if (wait_index != work->wait_index)
1095			break;
1096	}
1097
1098	if (full)
1099		finish_wait(&work->full_waiters, &wait);
1100	else
1101		finish_wait(&work->waiters, &wait);
1102
1103	return ret;
1104}
1105
1106/**
1107 * ring_buffer_poll_wait - poll on buffer input
1108 * @buffer: buffer to wait on
1109 * @cpu: the cpu buffer to wait on
1110 * @filp: the file descriptor
1111 * @poll_table: The poll descriptor
1112 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
1113 *
1114 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
1115 * as data is added to any of the @buffer's cpu buffers. Otherwise
1116 * it will wait for data to be added to a specific cpu buffer.
1117 *
1118 * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
1119 * zero otherwise.
1120 */
1121__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
1122			  struct file *filp, poll_table *poll_table, int full)
1123{
1124	struct ring_buffer_per_cpu *cpu_buffer;
1125	struct rb_irq_work *work;
1126
1127	if (cpu == RING_BUFFER_ALL_CPUS) {
1128		work = &buffer->irq_work;
1129		full = 0;
1130	} else {
1131		if (!cpumask_test_cpu(cpu, buffer->cpumask))
1132			return -EINVAL;
1133
1134		cpu_buffer = buffer->buffers[cpu];
1135		work = &cpu_buffer->irq_work;
1136	}
1137
1138	if (full) {
1139		poll_wait(filp, &work->full_waiters, poll_table);
1140		work->full_waiters_pending = true;
1141	} else {
1142		poll_wait(filp, &work->waiters, poll_table);
1143		work->waiters_pending = true;
1144	}
1145
1146	/*
1147	 * There's a tight race between setting the waiters_pending and
1148	 * checking if the ring buffer is empty.  Once the waiters_pending bit
1149	 * is set, the next event will wake the task up, but we can get stuck
1150	 * if there's only a single event in.
1151	 *
1152	 * FIXME: Ideally, we need a memory barrier on the writer side as well,
1153	 * but adding a memory barrier to all events will cause too much of a
1154	 * performance hit in the fast path.  We only need a memory barrier when
1155	 * the buffer goes from empty to having content.  But as this race is
1156	 * extremely small, and it's not a problem if another event comes in, we
1157	 * will fix it later.
1158	 */
1159	smp_mb();
1160
1161	if (full)
1162		return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0;
1163
1164	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
1165	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
1166		return EPOLLIN | EPOLLRDNORM;
1167	return 0;
1168}
1169
1170/* buffer may be either ring_buffer or ring_buffer_per_cpu */
1171#define RB_WARN_ON(b, cond)						\
1172	({								\
1173		int _____ret = unlikely(cond);				\
1174		if (_____ret) {						\
1175			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
1176				struct ring_buffer_per_cpu *__b =	\
1177					(void *)b;			\
1178				atomic_inc(&__b->buffer->record_disabled); \
1179			} else						\
1180				atomic_inc(&b->record_disabled);	\
1181			WARN_ON(1);					\
1182		}							\
1183		_____ret;						\
1184	})
1185
1186/* Up this if you want to test the TIME_EXTENTS and normalization */
1187#define DEBUG_SHIFT 0
1188
1189static inline u64 rb_time_stamp(struct trace_buffer *buffer)
1190{
1191	u64 ts;
1192
1193	/* Skip retpolines :-( */
1194	if (IS_ENABLED(CONFIG_RETPOLINE) && likely(buffer->clock == trace_clock_local))
1195		ts = trace_clock_local();
1196	else
1197		ts = buffer->clock();
1198
1199	/* shift to debug/test normalization and TIME_EXTENTS */
1200	return ts << DEBUG_SHIFT;
1201}
1202
1203u64 ring_buffer_time_stamp(struct trace_buffer *buffer)
1204{
1205	u64 time;
1206
1207	preempt_disable_notrace();
1208	time = rb_time_stamp(buffer);
1209	preempt_enable_notrace();
1210
1211	return time;
1212}
1213EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
1214
1215void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
1216				      int cpu, u64 *ts)
1217{
1218	/* Just stupid testing the normalize function and deltas */
1219	*ts >>= DEBUG_SHIFT;
1220}
1221EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
1222
1223/*
1224 * Making the ring buffer lockless makes things tricky.
1225 * Although writes only happen on the CPU that they are on,
1226 * and they only need to worry about interrupts. Reads can
1227 * happen on any CPU.
1228 *
1229 * The reader page is always off the ring buffer, but when the
1230 * reader finishes with a page, it needs to swap its page with
1231 * a new one from the buffer. The reader needs to take from
1232 * the head (writes go to the tail). But if a writer is in overwrite
1233 * mode and wraps, it must push the head page forward.
1234 *
1235 * Here lies the problem.
1236 *
1237 * The reader must be careful to replace only the head page, and
1238 * not another one. As described at the top of the file in the
1239 * ASCII art, the reader sets its old page to point to the next
1240 * page after head. It then sets the page after head to point to
1241 * the old reader page. But if the writer moves the head page
1242 * during this operation, the reader could end up with the tail.
1243 *
1244 * We use cmpxchg to help prevent this race. We also do something
1245 * special with the page before head. We set the LSB to 1.
1246 *
1247 * When the writer must push the page forward, it will clear the
1248 * bit that points to the head page, move the head, and then set
1249 * the bit that points to the new head page.
1250 *
1251 * We also don't want an interrupt coming in and moving the head
1252 * page on another writer. Thus we use the second LSB to catch
1253 * that too. Thus:
1254 *
1255 * head->list->prev->next        bit 1          bit 0
1256 *                              -------        -------
1257 * Normal page                     0              0
1258 * Points to head page             0              1
1259 * New head page                   1              0
1260 *
1261 * Note we can not trust the prev pointer of the head page, because:
1262 *
1263 * +----+       +-----+        +-----+
1264 * |    |------>|  T  |---X--->|  N  |
1265 * |    |<------|     |        |     |
1266 * +----+       +-----+        +-----+
1267 *   ^                           ^ |
1268 *   |          +-----+          | |
1269 *   +----------|  R  |----------+ |
1270 *              |     |<-----------+
1271 *              +-----+
1272 *
1273 * Key:  ---X-->  HEAD flag set in pointer
1274 *         T      Tail page
1275 *         R      Reader page
1276 *         N      Next page
1277 *
1278 * (see __rb_reserve_next() to see where this happens)
1279 *
1280 *  What the above shows is that the reader just swapped out
1281 *  the reader page with a page in the buffer, but before it
1282 *  could make the new header point back to the new page added
1283 *  it was preempted by a writer. The writer moved forward onto
1284 *  the new page added by the reader and is about to move forward
1285 *  again.
1286 *
1287 *  You can see, it is legitimate for the previous pointer of
1288 *  the head (or any page) not to point back to itself. But only
1289 *  temporarily.
1290 */
1291
1292#define RB_PAGE_NORMAL		0UL
1293#define RB_PAGE_HEAD		1UL
1294#define RB_PAGE_UPDATE		2UL
1295
1296
1297#define RB_FLAG_MASK		3UL
1298
1299/* PAGE_MOVED is not part of the mask */
1300#define RB_PAGE_MOVED		4UL
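/*
 * Illustrative note (not part of the original file): because buffer pages
 * are allocated cache-line aligned, the two LSBs of a list pointer are
 * always zero and can carry the flags above. If the page after the current
 * one is the head page and its struct sits at (a made up address)
 * 0xffff888012345600, the stored ->next value is 0xffff888012345601
 * (RB_PAGE_HEAD); while a writer is pushing the head forward it is
 * temporarily 0xffff888012345602 (RB_PAGE_UPDATE). rb_list_head() strips
 * the flag bits to recover the real pointer.
 */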
1301
1302/*
1303 * rb_list_head - remove any bit
1304 */
1305static struct list_head *rb_list_head(struct list_head *list)
1306{
1307	unsigned long val = (unsigned long)list;
1308
1309	return (struct list_head *)(val & ~RB_FLAG_MASK);
1310}
1311
1312/*
1313 * rb_is_head_page - test if the given page is the head page
1314 *
1315 * Because the reader may move the head_page pointer, we can
1316 * not trust what the head page is (it may be pointing to
1317 * the reader page). But if the next page is a header page,
1318 * its flags will be non zero.
1319 */
1320static inline int
1321rb_is_head_page(struct buffer_page *page, struct list_head *list)
1322{
1323	unsigned long val;
1324
1325	val = (unsigned long)list->next;
1326
1327	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
1328		return RB_PAGE_MOVED;
1329
1330	return val & RB_FLAG_MASK;
1331}
1332
1333/*
1334 * rb_is_reader_page
1335 *
1336 * The unique thing about the reader page, is that, if the
1337 * writer is ever on it, the previous pointer never points
1338 * back to the reader page.
1339 */
1340static bool rb_is_reader_page(struct buffer_page *page)
1341{
1342	struct list_head *list = page->list.prev;
1343
1344	return rb_list_head(list->next) != &page->list;
1345}
1346
1347/*
1348 * rb_set_list_to_head - set a list_head to be pointing to head.
1349 */
1350static void rb_set_list_to_head(struct list_head *list)
1351{
1352	unsigned long *ptr;
1353
1354	ptr = (unsigned long *)&list->next;
1355	*ptr |= RB_PAGE_HEAD;
1356	*ptr &= ~RB_PAGE_UPDATE;
1357}
1358
1359/*
1360 * rb_head_page_activate - sets up head page
1361 */
1362static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
1363{
1364	struct buffer_page *head;
1365
1366	head = cpu_buffer->head_page;
1367	if (!head)
1368		return;
1369
1370	/*
1371	 * Set the previous list pointer to have the HEAD flag.
1372	 */
1373	rb_set_list_to_head(head->list.prev);
1374}
1375
1376static void rb_list_head_clear(struct list_head *list)
1377{
1378	unsigned long *ptr = (unsigned long *)&list->next;
1379
1380	*ptr &= ~RB_FLAG_MASK;
1381}
1382
1383/*
1384 * rb_head_page_deactivate - clears head page ptr (for free list)
1385 */
1386static void
1387rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
1388{
1389	struct list_head *hd;
1390
1391	/* Go through the whole list and clear any pointers found. */
1392	rb_list_head_clear(cpu_buffer->pages);
1393
1394	list_for_each(hd, cpu_buffer->pages)
1395		rb_list_head_clear(hd);
1396}
1397
1398static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
1399			    struct buffer_page *head,
1400			    struct buffer_page *prev,
1401			    int old_flag, int new_flag)
1402{
1403	struct list_head *list;
1404	unsigned long val = (unsigned long)&head->list;
1405	unsigned long ret;
1406
1407	list = &prev->list;
1408
1409	val &= ~RB_FLAG_MASK;
1410
1411	ret = cmpxchg((unsigned long *)&list->next,
1412		      val | old_flag, val | new_flag);
1413
1414	/* check if the reader took the page */
1415	if ((ret & ~RB_FLAG_MASK) != val)
1416		return RB_PAGE_MOVED;
1417
1418	return ret & RB_FLAG_MASK;
1419}
1420
1421static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
1422				   struct buffer_page *head,
1423				   struct buffer_page *prev,
1424				   int old_flag)
1425{
1426	return rb_head_page_set(cpu_buffer, head, prev,
1427				old_flag, RB_PAGE_UPDATE);
1428}
1429
1430static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
1431				 struct buffer_page *head,
1432				 struct buffer_page *prev,
1433				 int old_flag)
1434{
1435	return rb_head_page_set(cpu_buffer, head, prev,
1436				old_flag, RB_PAGE_HEAD);
1437}
1438
1439static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
1440				   struct buffer_page *head,
1441				   struct buffer_page *prev,
1442				   int old_flag)
1443{
1444	return rb_head_page_set(cpu_buffer, head, prev,
1445				old_flag, RB_PAGE_NORMAL);
1446}
1447
1448static inline void rb_inc_page(struct buffer_page **bpage)
1449{
1450	struct list_head *p = rb_list_head((*bpage)->list.next);
1451
1452	*bpage = list_entry(p, struct buffer_page, list);
1453}
1454
1455static struct buffer_page *
1456rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
1457{
1458	struct buffer_page *head;
1459	struct buffer_page *page;
1460	struct list_head *list;
1461	int i;
1462
1463	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
1464		return NULL;
1465
1466	/* sanity check */
1467	list = cpu_buffer->pages;
1468	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
1469		return NULL;
1470
1471	page = head = cpu_buffer->head_page;
1472	/*
1473	 * It is possible that the writer moves the header behind
 1474	 * where we started, and we miss it in one loop.
1475	 * A second loop should grab the header, but we'll do
1476	 * three loops just because I'm paranoid.
1477	 */
1478	for (i = 0; i < 3; i++) {
1479		do {
1480			if (rb_is_head_page(page, page->list.prev)) {
1481				cpu_buffer->head_page = page;
1482				return page;
1483			}
1484			rb_inc_page(&page);
1485		} while (page != head);
1486	}
1487
1488	RB_WARN_ON(cpu_buffer, 1);
1489
1490	return NULL;
1491}
1492
1493static int rb_head_page_replace(struct buffer_page *old,
1494				struct buffer_page *new)
1495{
1496	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
1497	unsigned long val;
1498	unsigned long ret;
1499
1500	val = *ptr & ~RB_FLAG_MASK;
1501	val |= RB_PAGE_HEAD;
1502
1503	ret = cmpxchg(ptr, val, (unsigned long)&new->list);
1504
1505	return ret == val;
1506}
1507
1508/*
1509 * rb_tail_page_update - move the tail page forward
1510 */
1511static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
1512			       struct buffer_page *tail_page,
1513			       struct buffer_page *next_page)
1514{
1515	unsigned long old_entries;
1516	unsigned long old_write;
1517
1518	/*
1519	 * The tail page now needs to be moved forward.
1520	 *
1521	 * We need to reset the tail page, but without messing
1522	 * with possible erasing of data brought in by interrupts
1523	 * that have moved the tail page and are currently on it.
1524	 *
1525	 * We add a counter to the write field to denote this.
1526	 */
1527	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
1528	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
1529
1530	local_inc(&cpu_buffer->pages_touched);
1531	/*
1532	 * Just make sure we have seen our old_write and synchronize
1533	 * with any interrupts that come in.
1534	 */
1535	barrier();
1536
1537	/*
1538	 * If the tail page is still the same as what we think
1539	 * it is, then it is up to us to update the tail
1540	 * pointer.
1541	 */
1542	if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
1543		/* Zero the write counter */
1544		unsigned long val = old_write & ~RB_WRITE_MASK;
1545		unsigned long eval = old_entries & ~RB_WRITE_MASK;
1546
1547		/*
1548		 * This will only succeed if an interrupt did
1549		 * not come in and change it. In which case, we
1550		 * do not want to modify it.
1551		 *
1552		 * We add (void) to let the compiler know that we do not care
1553		 * about the return value of these functions. We use the
1554		 * cmpxchg to only update if an interrupt did not already
1555		 * do it for us. If the cmpxchg fails, we don't care.
1556		 */
1557		(void)local_cmpxchg(&next_page->write, old_write, val);
1558		(void)local_cmpxchg(&next_page->entries, old_entries, eval);
1559
1560		/*
 1561		 * No need to worry about races with clearing out the commit:
 1562		 * it can only increment when a commit takes place. But that
 1563		 * only happens in the outermost nested commit.
1564		 */
1565		local_set(&next_page->page->commit, 0);
1566
1567		/* Again, either we update tail_page or an interrupt does */
1568		(void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
1569	}
1570}
1571
1572static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
1573			  struct buffer_page *bpage)
1574{
1575	unsigned long val = (unsigned long)bpage;
1576
1577	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
1578		return 1;
1579
1580	return 0;
1581}
1582
1583/**
1584 * rb_check_list - make sure a pointer to a list has the last bits zero
1585 */
1586static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
1587			 struct list_head *list)
1588{
1589	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
1590		return 1;
1591	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
1592		return 1;
1593	return 0;
1594}
1595
1596/**
1597 * rb_check_pages - integrity check of buffer pages
1598 * @cpu_buffer: CPU buffer with pages to test
1599 *
1600 * As a safety measure we check to make sure the data pages have not
1601 * been corrupted.
1602 */
1603static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
1604{
1605	struct list_head *head = cpu_buffer->pages;
1606	struct buffer_page *bpage, *tmp;
1607
1608	/* Reset the head page if it exists */
1609	if (cpu_buffer->head_page)
1610		rb_set_head_page(cpu_buffer);
1611
1612	rb_head_page_deactivate(cpu_buffer);
1613
1614	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
1615		return -1;
1616	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
1617		return -1;
1618
1619	if (rb_check_list(cpu_buffer, head))
1620		return -1;
1621
1622	list_for_each_entry_safe(bpage, tmp, head, list) {
1623		if (RB_WARN_ON(cpu_buffer,
1624			       bpage->list.next->prev != &bpage->list))
1625			return -1;
1626		if (RB_WARN_ON(cpu_buffer,
1627			       bpage->list.prev->next != &bpage->list))
1628			return -1;
1629		if (rb_check_list(cpu_buffer, &bpage->list))
1630			return -1;
1631	}
1632
1633	rb_head_page_activate(cpu_buffer);
1634
1635	return 0;
1636}
1637
1638static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1639		long nr_pages, struct list_head *pages)
1640{
1641	struct buffer_page *bpage, *tmp;
1642	bool user_thread = current->mm != NULL;
1643	gfp_t mflags;
1644	long i;
1645
1646	/*
1647	 * Check if the available memory is there first.
1648	 * Note, si_mem_available() only gives us a rough estimate of available
1649	 * memory. It may not be accurate. But we don't care, we just want
1650	 * to prevent doing any allocation when it is obvious that it is
1651	 * not going to succeed.
1652	 */
1653	i = si_mem_available();
1654	if (i < nr_pages)
1655		return -ENOMEM;
1656
1657	/*
1658	 * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
1659	 * gracefully without invoking oom-killer and the system is not
1660	 * destabilized.
1661	 */
1662	mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL;
1663
1664	/*
 1665	 * If a user thread allocates too much while si_mem_available()
 1666	 * reports there's enough memory (even though there is not),
 1667	 * make sure the OOM killer kills this thread. This can happen
1668	 * even with RETRY_MAYFAIL because another task may be doing
1669	 * an allocation after this task has taken all memory.
1670	 * This is the task the OOM killer needs to take out during this
1671	 * loop, even if it was triggered by an allocation somewhere else.
1672	 */
1673	if (user_thread)
1674		set_current_oom_origin();
1675	for (i = 0; i < nr_pages; i++) {
1676		struct page *page;
1677
1678		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1679				    mflags, cpu_to_node(cpu_buffer->cpu));
1680		if (!bpage)
1681			goto free_pages;
1682
1683		rb_check_bpage(cpu_buffer, bpage);
1684
1685		list_add(&bpage->list, pages);
1686
1687		page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags, 0);
1688		if (!page)
1689			goto free_pages;
1690		bpage->page = page_address(page);
1691		rb_init_page(bpage->page);
1692
1693		if (user_thread && fatal_signal_pending(current))
1694			goto free_pages;
1695	}
1696	if (user_thread)
1697		clear_current_oom_origin();
1698
1699	return 0;
1700
1701free_pages:
1702	list_for_each_entry_safe(bpage, tmp, pages, list) {
1703		list_del_init(&bpage->list);
1704		free_buffer_page(bpage);
1705	}
1706	if (user_thread)
1707		clear_current_oom_origin();
1708
1709	return -ENOMEM;
1710}
1711
1712static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1713			     unsigned long nr_pages)
1714{
1715	LIST_HEAD(pages);
1716
1717	WARN_ON(!nr_pages);
1718
1719	if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages))
1720		return -ENOMEM;
1721
1722	/*
1723	 * The ring buffer page list is a circular list that does not
1724	 * start and end with a list head. All page list items point to
1725	 * other pages.
1726	 */
1727	cpu_buffer->pages = pages.next;
1728	list_del(&pages);
1729
1730	cpu_buffer->nr_pages = nr_pages;
1731
1732	rb_check_pages(cpu_buffer);
1733
1734	return 0;
1735}
1736
1737static struct ring_buffer_per_cpu *
1738rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
1739{
1740	struct ring_buffer_per_cpu *cpu_buffer;
1741	struct buffer_page *bpage;
1742	struct page *page;
1743	int ret;
1744
1745	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1746				  GFP_KERNEL, cpu_to_node(cpu));
1747	if (!cpu_buffer)
1748		return NULL;
1749
1750	cpu_buffer->cpu = cpu;
1751	cpu_buffer->buffer = buffer;
1752	raw_spin_lock_init(&cpu_buffer->reader_lock);
1753	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1754	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1755	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
1756	init_completion(&cpu_buffer->update_done);
1757	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
1758	init_waitqueue_head(&cpu_buffer->irq_work.waiters);
1759	init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);
1760
1761	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1762			    GFP_KERNEL, cpu_to_node(cpu));
1763	if (!bpage)
1764		goto fail_free_buffer;
1765
1766	rb_check_bpage(cpu_buffer, bpage);
1767
1768	cpu_buffer->reader_page = bpage;
1769	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
1770	if (!page)
1771		goto fail_free_reader;
1772	bpage->page = page_address(page);
1773	rb_init_page(bpage->page);
1774
1775	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1776	INIT_LIST_HEAD(&cpu_buffer->new_pages);
1777
1778	ret = rb_allocate_pages(cpu_buffer, nr_pages);
1779	if (ret < 0)
1780		goto fail_free_reader;
1781
1782	cpu_buffer->head_page
1783		= list_entry(cpu_buffer->pages, struct buffer_page, list);
1784	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1785
1786	rb_head_page_activate(cpu_buffer);
1787
1788	return cpu_buffer;
1789
1790 fail_free_reader:
1791	free_buffer_page(cpu_buffer->reader_page);
1792
1793 fail_free_buffer:
1794	kfree(cpu_buffer);
1795	return NULL;
1796}
1797
1798static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1799{
1800	struct list_head *head = cpu_buffer->pages;
1801	struct buffer_page *bpage, *tmp;
1802
1803	free_buffer_page(cpu_buffer->reader_page);
1804
1805	if (head) {
1806		rb_head_page_deactivate(cpu_buffer);
1807
1808		list_for_each_entry_safe(bpage, tmp, head, list) {
1809			list_del_init(&bpage->list);
1810			free_buffer_page(bpage);
1811		}
1812		bpage = list_entry(head, struct buffer_page, list);
1813		free_buffer_page(bpage);
1814	}
1815
1816	kfree(cpu_buffer);
1817}
1818
1819/**
1820 * __ring_buffer_alloc - allocate a new ring_buffer
1821 * @size: the size in bytes per cpu that is needed.
1822 * @flags: attributes to set for the ring buffer.
1823 * @key: ring buffer reader_lock_key.
1824 *
1825 * Currently the only flag that is available is the RB_FL_OVERWRITE
1826 * flag. This flag means that the buffer will overwrite old data
1827 * when the buffer wraps. If this flag is not set, the buffer will
1828 * drop data when the tail hits the head.
1829 */
1830struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1831					struct lock_class_key *key)
1832{
1833	struct trace_buffer *buffer;
1834	long nr_pages;
1835	int bsize;
1836	int cpu;
1837	int ret;
1838
1839	/* keep it in its own cache line */
1840	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1841			 GFP_KERNEL);
1842	if (!buffer)
1843		return NULL;
1844
1845	if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1846		goto fail_free_buffer;
1847
1848	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1849	buffer->flags = flags;
1850	buffer->clock = trace_clock_local;
1851	buffer->reader_lock_key = key;
1852
1853	init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
1854	init_waitqueue_head(&buffer->irq_work.waiters);
1855
1856	/* need at least two pages */
1857	if (nr_pages < 2)
1858		nr_pages = 2;
1859
1860	buffer->cpus = nr_cpu_ids;
1861
1862	bsize = sizeof(void *) * nr_cpu_ids;
1863	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1864				  GFP_KERNEL);
1865	if (!buffer->buffers)
1866		goto fail_free_cpumask;
1867
1868	cpu = raw_smp_processor_id();
1869	cpumask_set_cpu(cpu, buffer->cpumask);
1870	buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
1871	if (!buffer->buffers[cpu])
1872		goto fail_free_buffers;
1873
1874	ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
1875	if (ret < 0)
1876		goto fail_free_buffers;
1877
1878	mutex_init(&buffer->mutex);
1879
1880	return buffer;
1881
1882 fail_free_buffers:
1883	for_each_buffer_cpu(buffer, cpu) {
1884		if (buffer->buffers[cpu])
1885			rb_free_cpu_buffer(buffer->buffers[cpu]);
1886	}
1887	kfree(buffer->buffers);
1888
1889 fail_free_cpumask:
1890	free_cpumask_var(buffer->cpumask);
1891
1892 fail_free_buffer:
1893	kfree(buffer);
1894	return NULL;
1895}
1896EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
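
/*
 * Example (illustrative only, not part of the kernel sources): a minimal
 * sketch of allocating a per-CPU ring buffer of roughly 1 MB per CPU in
 * overwrite mode and freeing it again. ring_buffer_alloc() is the wrapper
 * macro around __ring_buffer_alloc() that supplies the lock class key.
 *
 *	struct trace_buffer *buf;
 *
 *	buf = ring_buffer_alloc(1024 * 1024, RB_FL_OVERWRITE);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	ring_buffer_free(buf);
 */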
1897
1898/**
1899 * ring_buffer_free - free a ring buffer.
1900 * @buffer: the buffer to free.
1901 */
1902void
1903ring_buffer_free(struct trace_buffer *buffer)
1904{
1905	int cpu;
1906
1907	cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
1908
1909	for_each_buffer_cpu(buffer, cpu)
1910		rb_free_cpu_buffer(buffer->buffers[cpu]);
1911
1912	kfree(buffer->buffers);
1913	free_cpumask_var(buffer->cpumask);
1914
1915	kfree(buffer);
1916}
1917EXPORT_SYMBOL_GPL(ring_buffer_free);
1918
1919void ring_buffer_set_clock(struct trace_buffer *buffer,
1920			   u64 (*clock)(void))
1921{
1922	buffer->clock = clock;
1923}
1924
1925void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs)
1926{
1927	buffer->time_stamp_abs = abs;
1928}
1929
1930bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer)
1931{
1932	return buffer->time_stamp_abs;
1933}
1934
1935static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1936
1937static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1938{
1939	return local_read(&bpage->entries) & RB_WRITE_MASK;
1940}
1941
1942static inline unsigned long rb_page_write(struct buffer_page *bpage)
1943{
1944	return local_read(&bpage->write) & RB_WRITE_MASK;
1945}
1946
1947static int
1948rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
1949{
1950	struct list_head *tail_page, *to_remove, *next_page;
1951	struct buffer_page *to_remove_page, *tmp_iter_page;
1952	struct buffer_page *last_page, *first_page;
1953	unsigned long nr_removed;
1954	unsigned long head_bit;
1955	int page_entries;
1956
1957	head_bit = 0;
1958
1959	raw_spin_lock_irq(&cpu_buffer->reader_lock);
1960	atomic_inc(&cpu_buffer->record_disabled);
1961	/*
1962	 * We don't race with the readers since we have acquired the reader
1963	 * lock. We also don't race with writers after disabling recording.
1964	 * This makes it easy to figure out the first and the last page to be
1965	 * removed from the list. We unlink all the pages in between including
1966	 * the first and last pages. This is done in a busy loop so that we
1967	 * lose the least number of traces.
1968	 * The pages are freed after we restart recording and unlock readers.
1969	 */
1970	tail_page = &cpu_buffer->tail_page->list;
1971
1972	/*
1973	 * The tail page might be on the reader page; in that case we remove
1974	 * the next page from the ring buffer.
1975	 */
1976	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1977		tail_page = rb_list_head(tail_page->next);
1978	to_remove = tail_page;
1979
1980	/* start of pages to remove */
1981	first_page = list_entry(rb_list_head(to_remove->next),
1982				struct buffer_page, list);
1983
1984	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
1985		to_remove = rb_list_head(to_remove)->next;
1986		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
1987	}
1988
1989	next_page = rb_list_head(to_remove)->next;
1990
1991	/*
1992	 * Now we remove all pages between tail_page and next_page.
1993	 * Make sure that the head_bit value is preserved for the
1994	 * next page.
1995	 */
1996	tail_page->next = (struct list_head *)((unsigned long)next_page |
1997						head_bit);
1998	next_page = rb_list_head(next_page);
1999	next_page->prev = tail_page;
2000
2001	/* make sure pages points to a valid page in the ring buffer */
2002	cpu_buffer->pages = next_page;
2003
2004	/* update head page */
2005	if (head_bit)
2006		cpu_buffer->head_page = list_entry(next_page,
2007						struct buffer_page, list);
2008
2009	/*
2010	 * change read pointer to make sure any read iterators reset
2011	 * themselves
2012	 */
2013	cpu_buffer->read = 0;
2014
2015	/* pages are removed, resume tracing and then free the pages */
2016	atomic_dec(&cpu_buffer->record_disabled);
2017	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
2018
2019	RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
2020
2021	/* last buffer page to remove */
2022	last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
2023				list);
2024	tmp_iter_page = first_page;
2025
2026	do {
2027		cond_resched();
2028
2029		to_remove_page = tmp_iter_page;
2030		rb_inc_page(&tmp_iter_page);
2031
2032		/* update the counters */
2033		page_entries = rb_page_entries(to_remove_page);
2034		if (page_entries) {
2035			/*
2036			 * If something was added to this page, it was full
2037			 * since it is not the tail page. So we deduct the
2038			 * bytes consumed in ring buffer from here.
2039			 * Increment overrun to account for the lost events.
2040			 */
2041			local_add(page_entries, &cpu_buffer->overrun);
2042			local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
2043			local_inc(&cpu_buffer->pages_lost);
2044		}
2045
2046		/*
2047		 * We have already removed references to this list item, just
2048		 * free up the buffer_page and its page
2049		 */
2050		free_buffer_page(to_remove_page);
2051		nr_removed--;
2052
2053	} while (to_remove_page != last_page);
2054
2055	RB_WARN_ON(cpu_buffer, nr_removed);
2056
2057	return nr_removed == 0;
2058}
2059
2060static int
2061rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
2062{
2063	struct list_head *pages = &cpu_buffer->new_pages;
2064	int retries, success;
2065	unsigned long flags;
2066
2067	/* Can be called at early boot up, where interrupts must not be enabled */
2068	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2069	/*
2070	 * We are holding the reader lock, so the reader page won't be swapped
2071	 * in the ring buffer. Now we are racing with the writer trying to
2072	 * move head page and the tail page.
2073	 * We are going to adapt the reader page update process where:
2074	 * 1. We first splice the start and end of list of new pages between
2075	 *    the head page and its previous page.
2076	 * 2. We cmpxchg the prev_page->next to point from head page to the
2077	 *    start of new pages list.
2078	 * 3. Finally, we update the head->prev to the end of new list.
2079	 *
2080	 * We will try this process 10 times, to make sure that we don't keep
2081	 * spinning.
2082	 */
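	/*
	 * Illustrative sketch of the splice (H = current head page, P = the
	 * page before it, F..L = first..last of the new pages):
	 *
	 *   before:  P -> H          after:  P -> F -> ... -> L -> H
	 *
	 * F->prev and L->next are prepared up front; only P->next is switched
	 * over with cmpxchg(), and H->prev is fixed up once that succeeds.
	 */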
2083	retries = 10;
2084	success = 0;
2085	while (retries--) {
2086		struct list_head *head_page, *prev_page, *r;
2087		struct list_head *last_page, *first_page;
2088		struct list_head *head_page_with_bit;
2089
2090		head_page = &rb_set_head_page(cpu_buffer)->list;
2091		if (!head_page)
2092			break;
2093		prev_page = head_page->prev;
2094
2095		first_page = pages->next;
2096		last_page  = pages->prev;
2097
2098		head_page_with_bit = (struct list_head *)
2099				     ((unsigned long)head_page | RB_PAGE_HEAD);
2100
2101		last_page->next = head_page_with_bit;
2102		first_page->prev = prev_page;
2103
2104		r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
2105
2106		if (r == head_page_with_bit) {
2107			/*
2108			 * yay, we replaced the page pointer to our new list,
2109			 * now, we just have to update to head page's prev
2110			 * pointer to point to end of list
2111			 */
2112			head_page->prev = last_page;
2113			success = 1;
2114			break;
2115		}
2116	}
2117
2118	if (success)
2119		INIT_LIST_HEAD(pages);
2120	/*
2121	 * If we weren't successful in adding in new pages, warn and stop
2122	 * tracing
2123	 */
2124	RB_WARN_ON(cpu_buffer, !success);
2125	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2126
2127	/* free pages if they weren't inserted */
2128	if (!success) {
2129		struct buffer_page *bpage, *tmp;
2130		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
2131					 list) {
2132			list_del_init(&bpage->list);
2133			free_buffer_page(bpage);
2134		}
2135	}
2136	return success;
2137}
2138
2139static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
2140{
2141	int success;
2142
2143	if (cpu_buffer->nr_pages_to_update > 0)
2144		success = rb_insert_pages(cpu_buffer);
2145	else
2146		success = rb_remove_pages(cpu_buffer,
2147					-cpu_buffer->nr_pages_to_update);
2148
2149	if (success)
2150		cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
2151}
2152
2153static void update_pages_handler(struct work_struct *work)
2154{
2155	struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
2156			struct ring_buffer_per_cpu, update_pages_work);
2157	rb_update_pages(cpu_buffer);
2158	complete(&cpu_buffer->update_done);
2159}
2160
2161/**
2162 * ring_buffer_resize - resize the ring buffer
2163 * @buffer: the buffer to resize.
2164 * @size: the new size.
2165 * @cpu_id: the cpu buffer to resize
2166 *
2167 * Minimum size is 2 * BUF_PAGE_SIZE.
2168 *
2169 * Returns 0 on success and < 0 on failure.
2170 */
2171int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
2172			int cpu_id)
2173{
2174	struct ring_buffer_per_cpu *cpu_buffer;
2175	unsigned long nr_pages;
2176	int cpu, err;
2177
2178	/*
2179	 * Always succeed at resizing a non-existent buffer:
2180	 */
2181	if (!buffer)
2182		return 0;
2183
2184	/* Make sure the requested buffer exists */
2185	if (cpu_id != RING_BUFFER_ALL_CPUS &&
2186	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
2187		return 0;
2188
2189	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
2190
2191	/* we need a minimum of two pages */
2192	if (nr_pages < 2)
2193		nr_pages = 2;
2194
2195	/* prevent another thread from changing buffer sizes */
2196	mutex_lock(&buffer->mutex);
2197
2198
2199	if (cpu_id == RING_BUFFER_ALL_CPUS) {
2200		/*
2201		 * Don't succeed if resizing is disabled, as a reader might be
2202		 * manipulating the ring buffer and is expecting a sane state while
2203		 * this is true.
2204		 */
2205		for_each_buffer_cpu(buffer, cpu) {
2206			cpu_buffer = buffer->buffers[cpu];
2207			if (atomic_read(&cpu_buffer->resize_disabled)) {
2208				err = -EBUSY;
2209				goto out_err_unlock;
2210			}
2211		}
2212
2213		/* calculate the pages to update */
2214		for_each_buffer_cpu(buffer, cpu) {
2215			cpu_buffer = buffer->buffers[cpu];
2216
2217			cpu_buffer->nr_pages_to_update = nr_pages -
2218							cpu_buffer->nr_pages;
2219			/*
2220			 * nothing more to do when removing pages or when there is no update
2221			 */
2222			if (cpu_buffer->nr_pages_to_update <= 0)
2223				continue;
2224			/*
2225			 * to add pages, make sure all new pages can be
2226			 * allocated without receiving ENOMEM
2227			 */
2228			INIT_LIST_HEAD(&cpu_buffer->new_pages);
2229			if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
2230						&cpu_buffer->new_pages)) {
2231				/* not enough memory for new pages */
2232				err = -ENOMEM;
2233				goto out_err;
2234			}
2235		}
2236
2237		cpus_read_lock();
2238		/*
2239		 * Fire off all the required work handlers
2240		 * We can't schedule on offline CPUs, but it's not necessary
2241		 * since we can change their buffer sizes without any race.
2242		 */
2243		for_each_buffer_cpu(buffer, cpu) {
2244			cpu_buffer = buffer->buffers[cpu];
2245			if (!cpu_buffer->nr_pages_to_update)
2246				continue;
2247
2248			/* Can't run something on an offline CPU. */
2249			if (!cpu_online(cpu)) {
2250				rb_update_pages(cpu_buffer);
2251				cpu_buffer->nr_pages_to_update = 0;
2252			} else {
2253				/* Run directly if possible. */
2254				migrate_disable();
2255				if (cpu != smp_processor_id()) {
2256					migrate_enable();
2257					schedule_work_on(cpu,
2258							 &cpu_buffer->update_pages_work);
2259				} else {
2260					update_pages_handler(&cpu_buffer->update_pages_work);
2261					migrate_enable();
2262				}
2263			}
2264		}
2265
2266		/* wait for all the updates to complete */
2267		for_each_buffer_cpu(buffer, cpu) {
2268			cpu_buffer = buffer->buffers[cpu];
2269			if (!cpu_buffer->nr_pages_to_update)
2270				continue;
2271
2272			if (cpu_online(cpu))
2273				wait_for_completion(&cpu_buffer->update_done);
2274			cpu_buffer->nr_pages_to_update = 0;
2275		}
2276
2277		cpus_read_unlock();
2278	} else {
2279		cpu_buffer = buffer->buffers[cpu_id];
2280
2281		if (nr_pages == cpu_buffer->nr_pages)
2282			goto out;
2283
2284		/*
2285		 * Don't succeed if resizing is disabled, as a reader might be
2286		 * manipulating the ring buffer and is expecting a sane state while
2287		 * this is true.
2288		 */
2289		if (atomic_read(&cpu_buffer->resize_disabled)) {
2290			err = -EBUSY;
2291			goto out_err_unlock;
2292		}
2293
2294		cpu_buffer->nr_pages_to_update = nr_pages -
2295						cpu_buffer->nr_pages;
2296
2297		INIT_LIST_HEAD(&cpu_buffer->new_pages);
2298		if (cpu_buffer->nr_pages_to_update > 0 &&
2299			__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
2300					    &cpu_buffer->new_pages)) {
2301			err = -ENOMEM;
2302			goto out_err;
2303		}
2304
2305		cpus_read_lock();
2306
2307		/* Can't run something on an offline CPU. */
2308		if (!cpu_online(cpu_id))
2309			rb_update_pages(cpu_buffer);
2310		else {
2311			/* Run directly if possible. */
2312			migrate_disable();
2313			if (cpu_id == smp_processor_id()) {
2314				rb_update_pages(cpu_buffer);
2315				migrate_enable();
2316			} else {
2317				migrate_enable();
2318				schedule_work_on(cpu_id,
2319						 &cpu_buffer->update_pages_work);
2320				wait_for_completion(&cpu_buffer->update_done);
2321			}
2322		}
2323
2324		cpu_buffer->nr_pages_to_update = 0;
2325		cpus_read_unlock();
2326	}
2327
2328 out:
2329	/*
2330	 * The ring buffer resize can happen with the ring buffer
2331	 * enabled, so that the update disturbs the tracing as little
2332	 * as possible. But if the buffer is disabled, we do not need
2333	 * to worry about that, and we can take the time to verify
2334	 * that the buffer is not corrupt.
2335	 */
2336	if (atomic_read(&buffer->record_disabled)) {
2337		atomic_inc(&buffer->record_disabled);
2338		/*
2339		 * Even though the buffer was disabled, we must make sure
2340		 * that it is truly disabled before calling rb_check_pages.
2341		 * There could have been a race between checking
2342		 * record_disabled and incrementing it.
2343		 */
2344		synchronize_rcu();
2345		for_each_buffer_cpu(buffer, cpu) {
2346			cpu_buffer = buffer->buffers[cpu];
2347			rb_check_pages(cpu_buffer);
2348		}
2349		atomic_dec(&buffer->record_disabled);
2350	}
2351
2352	mutex_unlock(&buffer->mutex);
2353	return 0;
2354
2355 out_err:
2356	for_each_buffer_cpu(buffer, cpu) {
2357		struct buffer_page *bpage, *tmp;
2358
2359		cpu_buffer = buffer->buffers[cpu];
2360		cpu_buffer->nr_pages_to_update = 0;
2361
2362		if (list_empty(&cpu_buffer->new_pages))
2363			continue;
2364
2365		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
2366					list) {
2367			list_del_init(&bpage->list);
2368			free_buffer_page(bpage);
2369		}
2370	}
2371 out_err_unlock:
2372	mutex_unlock(&buffer->mutex);
2373	return err;
2374}
2375EXPORT_SYMBOL_GPL(ring_buffer_resize);
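
/*
 * Example (illustrative): resizing every per-CPU buffer to roughly 512 KB.
 * The size is rounded up to whole buffer pages internally and a minimum of
 * two pages per CPU is enforced.
 *
 *	if (ring_buffer_resize(buf, 512 * 1024, RING_BUFFER_ALL_CPUS) < 0)
 *		pr_warn("ring buffer resize failed\n");
 */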
2376
2377void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val)
2378{
2379	mutex_lock(&buffer->mutex);
2380	if (val)
2381		buffer->flags |= RB_FL_OVERWRITE;
2382	else
2383		buffer->flags &= ~RB_FL_OVERWRITE;
2384	mutex_unlock(&buffer->mutex);
2385}
2386EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
2387
2388static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
2389{
2390	return bpage->page->data + index;
2391}
2392
2393static __always_inline struct ring_buffer_event *
2394rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
2395{
2396	return __rb_page_index(cpu_buffer->reader_page,
2397			       cpu_buffer->reader_page->read);
2398}
2399
2400static __always_inline unsigned rb_page_commit(struct buffer_page *bpage)
2401{
2402	return local_read(&bpage->page->commit);
2403}
2404
2405static struct ring_buffer_event *
2406rb_iter_head_event(struct ring_buffer_iter *iter)
2407{
2408	struct ring_buffer_event *event;
2409	struct buffer_page *iter_head_page = iter->head_page;
2410	unsigned long commit;
2411	unsigned length;
2412
2413	if (iter->head != iter->next_event)
2414		return iter->event;
2415
2416	/*
2417	 * When the writer goes across pages, it issues a cmpxchg which
2418	 * is a mb(), which will synchronize with the rmb here.
2419	 * (see rb_tail_page_update() and __rb_reserve_next())
2420	 */
2421	commit = rb_page_commit(iter_head_page);
2422	smp_rmb();
2423	event = __rb_page_index(iter_head_page, iter->head);
2424	length = rb_event_length(event);
2425
2426	/*
2427	 * READ_ONCE() doesn't work on functions and we don't want the
2428	 * compiler doing any crazy optimizations with length.
2429	 */
2430	barrier();
2431
2432	if ((iter->head + length) > commit || length > BUF_MAX_DATA_SIZE)
2433		/* Writer corrupted the read? */
2434		goto reset;
2435
2436	memcpy(iter->event, event, length);
2437	/*
2438	 * If the page stamp is still the same after this rmb() then the
2439	 * event was safely copied without the writer entering the page.
2440	 */
2441	smp_rmb();
2442
2443	/* Make sure the page didn't change since we read this */
2444	if (iter->page_stamp != iter_head_page->page->time_stamp ||
2445	    commit > rb_page_commit(iter_head_page))
2446		goto reset;
2447
2448	iter->next_event = iter->head + length;
2449	return iter->event;
2450 reset:
2451	/* Reset to the beginning */
2452	iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
2453	iter->head = 0;
2454	iter->next_event = 0;
2455	iter->missed_events = 1;
2456	return NULL;
2457}
2458
2459/* Size is determined by what has been committed */
2460static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
2461{
2462	return rb_page_commit(bpage);
2463}
2464
2465static __always_inline unsigned
2466rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
2467{
2468	return rb_page_commit(cpu_buffer->commit_page);
2469}
2470
2471static __always_inline unsigned
2472rb_event_index(struct ring_buffer_event *event)
2473{
2474	unsigned long addr = (unsigned long)event;
2475
2476	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
2477}
2478
2479static void rb_inc_iter(struct ring_buffer_iter *iter)
2480{
2481	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2482
2483	/*
2484	 * The iterator could be on the reader page (it starts there).
2485	 * But the head could have moved, since the reader was
2486	 * found. Check for this case and assign the iterator
2487	 * to the head page instead of next.
2488	 */
2489	if (iter->head_page == cpu_buffer->reader_page)
2490		iter->head_page = rb_set_head_page(cpu_buffer);
2491	else
2492		rb_inc_page(&iter->head_page);
2493
2494	iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
2495	iter->head = 0;
2496	iter->next_event = 0;
2497}
2498
2499/*
2500 * rb_handle_head_page - writer hit the head page
2501 *
2502 * Returns: +1 to retry page
2503 *           0 to continue
2504 *          -1 on error
2505 */
2506static int
2507rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
2508		    struct buffer_page *tail_page,
2509		    struct buffer_page *next_page)
2510{
2511	struct buffer_page *new_head;
2512	int entries;
2513	int type;
2514	int ret;
2515
2516	entries = rb_page_entries(next_page);
2517
2518	/*
2519	 * The hard part is here. We need to move the head
2520	 * forward, and protect against both readers on
2521	 * other CPUs and writers coming in via interrupts.
2522	 */
2523	type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
2524				       RB_PAGE_HEAD);
2525
2526	/*
2527	 * type can be one of four:
2528	 *  NORMAL - an interrupt already moved it for us
2529	 *  HEAD   - we are the first to get here.
2530	 *  UPDATE - we are the interrupt interrupting
2531	 *           a current move.
2532	 *  MOVED  - a reader on another CPU moved the next
2533	 *           pointer to its reader page. Give up
2534	 *           and try again.
2535	 */
2536
2537	switch (type) {
2538	case RB_PAGE_HEAD:
2539		/*
2540		 * We changed the head to UPDATE, thus
2541		 * it is our responsibility to update
2542		 * the counters.
2543		 */
2544		local_add(entries, &cpu_buffer->overrun);
2545		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
2546		local_inc(&cpu_buffer->pages_lost);
2547
2548		/*
2549		 * The entries will be zeroed out when we move the
2550		 * tail page.
2551		 */
2552
2553		/* still more to do */
2554		break;
2555
2556	case RB_PAGE_UPDATE:
2557		/*
2558		 * This is an interrupt that interrupted the
2559		 * previous update. Still more to do.
2560		 */
2561		break;
2562	case RB_PAGE_NORMAL:
2563		/*
2564		 * An interrupt came in before the update
2565		 * and processed this for us.
2566		 * Nothing left to do.
2567		 */
2568		return 1;
2569	case RB_PAGE_MOVED:
2570		/*
2571		 * The reader is on another CPU and just did
2572		 * a swap with our next_page.
2573		 * Try again.
2574		 */
2575		return 1;
2576	default:
2577		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
2578		return -1;
2579	}
2580
2581	/*
2582	 * Now that we are here, the old head pointer is
2583	 * set to UPDATE. This will keep the reader from
2584	 * swapping the head page with the reader page.
2585	 * The reader (on another CPU) will spin till
2586	 * we are finished.
2587	 *
2588	 * We just need to protect against interrupts
2589	 * doing the job. We will set the next pointer
2590	 * to HEAD. After that, we set the old pointer
2591	 * to NORMAL, but only if it was HEAD before.
2592	 * Otherwise we are an interrupt, and only
2593	 * want the outermost commit to reset it.
2594	 */
2595	new_head = next_page;
2596	rb_inc_page(&new_head);
2597
2598	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
2599				    RB_PAGE_NORMAL);
2600
2601	/*
2602	 * Valid returns are:
2603	 *  HEAD   - an interrupt came in and already set it.
2604	 *  NORMAL - One of two things:
2605	 *            1) We really set it.
2606	 *            2) A bunch of interrupts came in and moved
2607	 *               the page forward again.
2608	 */
2609	switch (ret) {
2610	case RB_PAGE_HEAD:
2611	case RB_PAGE_NORMAL:
2612		/* OK */
2613		break;
2614	default:
2615		RB_WARN_ON(cpu_buffer, 1);
2616		return -1;
2617	}
2618
2619	/*
2620	 * It is possible that an interrupt came in,
2621	 * set the head up, then more interrupts came in
2622	 * and moved it again. When we get back here,
2623	 * the page would have been set to NORMAL but we
2624	 * just set it back to HEAD.
2625	 *
2626	 * How do you detect this? Well, if that happened
2627	 * the tail page would have moved.
2628	 */
2629	if (ret == RB_PAGE_NORMAL) {
2630		struct buffer_page *buffer_tail_page;
2631
2632		buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
2633		/*
2634		 * If the tail had moved past next, then we need
2635		 * to reset the pointer.
2636		 */
2637		if (buffer_tail_page != tail_page &&
2638		    buffer_tail_page != next_page)
2639			rb_head_page_set_normal(cpu_buffer, new_head,
2640						next_page,
2641						RB_PAGE_HEAD);
2642	}
2643
2644	/*
2645	 * If this was the outermost commit (the one that
2646	 * changed the original pointer from HEAD to UPDATE),
2647	 * then it is up to us to reset it to NORMAL.
2648	 */
2649	if (type == RB_PAGE_HEAD) {
2650		ret = rb_head_page_set_normal(cpu_buffer, next_page,
2651					      tail_page,
2652					      RB_PAGE_UPDATE);
2653		if (RB_WARN_ON(cpu_buffer,
2654			       ret != RB_PAGE_UPDATE))
2655			return -1;
2656	}
2657
2658	return 0;
2659}
2660
2661static inline void
2662rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2663	      unsigned long tail, struct rb_event_info *info)
2664{
2665	struct buffer_page *tail_page = info->tail_page;
2666	struct ring_buffer_event *event;
2667	unsigned long length = info->length;
2668
2669	/*
2670	 * Only the event that crossed the page boundary
2671	 * must fill the old tail_page with padding.
2672	 */
2673	if (tail >= BUF_PAGE_SIZE) {
2674		/*
2675		 * If the page was filled, then we still need
2676		 * to update the real_end. Reset it to zero
2677		 * and the reader will ignore it.
2678		 */
2679		if (tail == BUF_PAGE_SIZE)
2680			tail_page->real_end = 0;
2681
2682		local_sub(length, &tail_page->write);
2683		return;
2684	}
2685
2686	event = __rb_page_index(tail_page, tail);
2687
2688	/* account for padding bytes */
2689	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2690
2691	/*
2692	 * Save the original length to the meta data.
2693	 * This will be used by the reader to add lost event
2694	 * counter.
2695	 */
2696	tail_page->real_end = tail;
2697
2698	/*
2699	 * If this event is bigger than the minimum size, then
2700	 * we need to be careful that we don't subtract the
2701	 * write counter enough to allow another writer to slip
2702	 * in on this page.
2703	 * We put in a discarded commit instead, to make sure
2704	 * that this space is not used again.
2705	 *
2706	 * If we are less than the minimum size, we don't need to
2707	 * worry about it.
2708	 */
2709	if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
2710		/* No room for any events */
2711
2712		/* Mark the rest of the page with padding */
2713		rb_event_set_padding(event);
2714
2715		/* Make sure the padding is visible before the write update */
2716		smp_wmb();
2717
2718		/* Set the write back to the previous setting */
2719		local_sub(length, &tail_page->write);
2720		return;
2721	}
2722
2723	/* Put in a discarded event */
2724	event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
2725	event->type_len = RINGBUF_TYPE_PADDING;
2726	/* time delta must be non zero */
2727	event->time_delta = 1;
2728
2729	/* Make sure the padding is visible before the tail_page->write update */
2730	smp_wmb();
2731
2732	/* Set write to end of buffer */
2733	length = (tail + length) - BUF_PAGE_SIZE;
2734	local_sub(length, &tail_page->write);
2735}
2736
2737static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
2738
2739/*
2740 * This is the slow path, force gcc not to inline it.
2741 */
2742static noinline struct ring_buffer_event *
2743rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2744	     unsigned long tail, struct rb_event_info *info)
2745{
2746	struct buffer_page *tail_page = info->tail_page;
2747	struct buffer_page *commit_page = cpu_buffer->commit_page;
2748	struct trace_buffer *buffer = cpu_buffer->buffer;
2749	struct buffer_page *next_page;
2750	int ret;
2751
2752	next_page = tail_page;
2753
2754	rb_inc_page(&next_page);
2755
2756	/*
2757	 * If for some reason, we had an interrupt storm that made
2758	 * it all the way around the buffer, bail, and warn
2759	 * about it.
2760	 */
2761	if (unlikely(next_page == commit_page)) {
2762		local_inc(&cpu_buffer->commit_overrun);
2763		goto out_reset;
2764	}
2765
2766	/*
2767	 * This is where the fun begins!
2768	 *
2769	 * We are fighting against races between a reader that
2770	 * could be on another CPU trying to swap its reader
2771	 * page with the buffer head.
2772	 *
2773	 * We are also fighting against interrupts coming in and
2774	 * moving the head or tail on us as well.
2775	 *
2776	 * If the next page is the head page then we have filled
2777	 * the buffer, unless the commit page is still on the
2778	 * reader page.
2779	 */
2780	if (rb_is_head_page(next_page, &tail_page->list)) {
2781
2782		/*
2783		 * If the commit is not on the reader page, then
2784		 * move the head page.
2785		 */
2786		if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2787			/*
2788			 * If we are not in overwrite mode,
2789			 * this is easy, just stop here.
2790			 */
2791			if (!(buffer->flags & RB_FL_OVERWRITE)) {
2792				local_inc(&cpu_buffer->dropped_events);
2793				goto out_reset;
2794			}
2795
2796			ret = rb_handle_head_page(cpu_buffer,
2797						  tail_page,
2798						  next_page);
2799			if (ret < 0)
2800				goto out_reset;
2801			if (ret)
2802				goto out_again;
2803		} else {
2804			/*
2805			 * We need to be careful here too. The
2806			 * commit page could still be on the reader
2807			 * page. We could have a small buffer, and
2808			 * have filled up the buffer with events
2809			 * from interrupts and such, and wrapped.
2810			 *
2811			 * Note, if the tail page is also on the
2812			 * reader_page, we let it move out.
2813			 */
2814			if (unlikely((cpu_buffer->commit_page !=
2815				      cpu_buffer->tail_page) &&
2816				     (cpu_buffer->commit_page ==
2817				      cpu_buffer->reader_page))) {
2818				local_inc(&cpu_buffer->commit_overrun);
2819				goto out_reset;
2820			}
2821		}
2822	}
2823
2824	rb_tail_page_update(cpu_buffer, tail_page, next_page);
2825
2826 out_again:
2827
2828	rb_reset_tail(cpu_buffer, tail, info);
2829
2830	/* Commit what we have for now. */
2831	rb_end_commit(cpu_buffer);
2832	/* rb_end_commit() decs committing */
2833	local_inc(&cpu_buffer->committing);
2834
2835	/* fail and let the caller try again */
2836	return ERR_PTR(-EAGAIN);
2837
2838 out_reset:
2839	/* reset write */
2840	rb_reset_tail(cpu_buffer, tail, info);
2841
2842	return NULL;
2843}
2844
2845/* Slow path */
2846static struct ring_buffer_event *
2847rb_add_time_stamp(struct ring_buffer_event *event, u64 delta, bool abs)
2848{
2849	if (abs)
2850		event->type_len = RINGBUF_TYPE_TIME_STAMP;
2851	else
2852		event->type_len = RINGBUF_TYPE_TIME_EXTEND;
2853
2854	/* Not the first event on the page, or not delta? */
2855	if (abs || rb_event_index(event)) {
2856		event->time_delta = delta & TS_MASK;
2857		event->array[0] = delta >> TS_SHIFT;
2858	} else {
2859		/* nope, just zero it */
2860		event->time_delta = 0;
2861		event->array[0] = 0;
2862	}
2863
2864	return skip_time_extend(event);
2865}
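
/*
 * Worked example (illustrative): with TS_SHIFT == 27, a delta that does
 * not fit in the 27-bit time_delta field, say 0x12345678, is stored as
 *
 *	event->time_delta = 0x12345678 & TS_MASK;	(low 27 bits)
 *	event->array[0]   = 0x12345678 >> TS_SHIFT;	(upper bits)
 *
 * and the reader reconstructs it as (array[0] << TS_SHIFT) + time_delta.
 */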
2866
2867#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2868static inline bool sched_clock_stable(void)
2869{
2870	return true;
2871}
2872#endif
2873
2874static void
2875rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2876		   struct rb_event_info *info)
2877{
2878	u64 write_stamp;
2879
2880	WARN_ONCE(1, "Delta way too big! %llu ts=%llu before=%llu after=%llu write stamp=%llu\n%s",
2881		  (unsigned long long)info->delta,
2882		  (unsigned long long)info->ts,
2883		  (unsigned long long)info->before,
2884		  (unsigned long long)info->after,
2885		  (unsigned long long)(rb_time_read(&cpu_buffer->write_stamp, &write_stamp) ? write_stamp : 0),
2886		  sched_clock_stable() ? "" :
2887		  "If you just came from a suspend/resume,\n"
2888		  "please switch to the trace global clock:\n"
2889		  "  echo global > /sys/kernel/debug/tracing/trace_clock\n"
2890		  "or add trace_clock=global to the kernel command line\n");
2891}
2892
2893static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2894				      struct ring_buffer_event **event,
2895				      struct rb_event_info *info,
2896				      u64 *delta,
2897				      unsigned int *length)
2898{
2899	bool abs = info->add_timestamp &
2900		(RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE);
2901
2902	if (unlikely(info->delta > (1ULL << 59))) {
2903		/*
2904		 * Some timers can use more than 59 bits, and when a timestamp
2905		 * is added to the buffer, it will lose those bits.
2906		 */
2907		if (abs && (info->ts & TS_MSB)) {
2908			info->delta &= ABS_TS_MASK;
2909
2910		/* did the clock go backwards */
2911		} else if (info->before == info->after && info->before > info->ts) {
2912			/* not interrupted */
2913			static int once;
2914
2915			/*
2916			 * This is possible with a recalibration of the TSC.
2917			 * Do not produce a call stack, but just report it.
2918			 */
2919			if (!once) {
2920				once++;
2921				pr_warn("Ring buffer clock went backwards: %llu -> %llu\n",
2922					info->before, info->ts);
2923			}
2924		} else
2925			rb_check_timestamp(cpu_buffer, info);
2926		if (!abs)
2927			info->delta = 0;
2928	}
2929	*event = rb_add_time_stamp(*event, info->delta, abs);
2930	*length -= RB_LEN_TIME_EXTEND;
2931	*delta = 0;
2932}
2933
2934/**
2935 * rb_update_event - update event type and data
2936 * @cpu_buffer: The per cpu buffer of the @event
2937 * @event: the event to update
2938 * @info: The info to update the @event with (contains length and delta)
2939 *
2940 * Update the type and data fields of the @event. The length
2941 * is the actual size that is written to the ring buffer,
2942 * and with this, we can determine what to place into the
2943 * data field.
2944 */
2945static void
2946rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
2947		struct ring_buffer_event *event,
2948		struct rb_event_info *info)
2949{
2950	unsigned length = info->length;
2951	u64 delta = info->delta;
2952	unsigned int nest = local_read(&cpu_buffer->committing) - 1;
2953
2954	if (!WARN_ON_ONCE(nest >= MAX_NEST))
2955		cpu_buffer->event_stamp[nest] = info->ts;
2956
2957	/*
2958	 * If we need to add a timestamp, then we
2959	 * add it to the start of the reserved space.
2960	 */
2961	if (unlikely(info->add_timestamp))
2962		rb_add_timestamp(cpu_buffer, &event, info, &delta, &length);
2963
2964	event->time_delta = delta;
2965	length -= RB_EVNT_HDR_SIZE;
2966	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
2967		event->type_len = 0;
2968		event->array[0] = length;
2969	} else
2970		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
2971}
2972
2973static unsigned rb_calculate_event_length(unsigned length)
2974{
2975	struct ring_buffer_event event; /* Used only for sizeof array */
2976
2977	/* zero length can cause confusion */
2978	if (!length)
2979		length++;
2980
2981	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
2982		length += sizeof(event.array[0]);
2983
2984	length += RB_EVNT_HDR_SIZE;
2985	length = ALIGN(length, RB_ARCH_ALIGNMENT);
2986
2987	/*
2988	 * In case the time delta is larger than the 27 bits for it
2989	 * in the header, we need to add a timestamp. If another
2990	 * event comes in when trying to discard this one to increase
2991	 * the length, then the timestamp will be added in the allocated
2992	 * space of this event. If length is bigger than the size needed
2993	 * for the TIME_EXTEND, then padding has to be used. The events
2994	 * length must be either RB_LEN_TIME_EXTEND, or greater than or equal
2995	 * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding.
2996	 * As length is a multiple of 4, we only need to worry if it
2997	 * is 12 (RB_LEN_TIME_EXTEND + 4).
2998	 */
2999	if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
3000		length += RB_ALIGNMENT;
3001
3002	return length;
3003}
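
/*
 * Worked example (illustrative), assuming RB_ALIGNMENT == 4,
 * RB_EVNT_HDR_SIZE == 4 and no forced 8-byte alignment: an 8 byte payload
 * becomes 8 + 4 = 12 bytes, which equals RB_LEN_TIME_EXTEND + RB_ALIGNMENT,
 * so another RB_ALIGNMENT bytes of padding are added and 16 bytes are
 * reserved in total.
 */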
3004
3005static u64 rb_time_delta(struct ring_buffer_event *event)
3006{
3007	switch (event->type_len) {
3008	case RINGBUF_TYPE_PADDING:
3009		return 0;
3010
3011	case RINGBUF_TYPE_TIME_EXTEND:
3012		return rb_event_time_stamp(event);
3013
3014	case RINGBUF_TYPE_TIME_STAMP:
3015		return 0;
3016
3017	case RINGBUF_TYPE_DATA:
3018		return event->time_delta;
3019	default:
3020		return 0;
3021	}
3022}
3023
3024static inline int
3025rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
3026		  struct ring_buffer_event *event)
3027{
3028	unsigned long new_index, old_index;
3029	struct buffer_page *bpage;
3030	unsigned long index;
3031	unsigned long addr;
3032	u64 write_stamp;
3033	u64 delta;
3034
3035	new_index = rb_event_index(event);
3036	old_index = new_index + rb_event_ts_length(event);
3037	addr = (unsigned long)event;
3038	addr &= PAGE_MASK;
3039
3040	bpage = READ_ONCE(cpu_buffer->tail_page);
3041
3042	delta = rb_time_delta(event);
3043
3044	if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp))
3045		return 0;
3046
3047	/* Make sure the write stamp is read before testing the location */
3048	barrier();
3049
3050	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
3051		unsigned long write_mask =
3052			local_read(&bpage->write) & ~RB_WRITE_MASK;
3053		unsigned long event_length = rb_event_length(event);
3054
3055		/* Something came in, can't discard */
3056		if (!rb_time_cmpxchg(&cpu_buffer->write_stamp,
3057				       write_stamp, write_stamp - delta))
3058			return 0;
3059
3060		/*
3061		 * It's possible that the event time delta is zero
3062		 * (has the same time stamp as the previous event)
3063		 * in which case write_stamp and before_stamp could
3064		 * be the same. In such a case, force before_stamp
3065		 * to be different from write_stamp. It doesn't
3066		 * matter what it is, as long as it's different.
3067		 */
3068		if (!delta)
3069			rb_time_set(&cpu_buffer->before_stamp, 0);
3070
3071		/*
3072		 * If an event were to come in now, it would see that the
3073		 * write_stamp and the before_stamp are different, and assume
3074		 * that this event just added itself before updating
3075		 * the write stamp. The interrupting event will fix the
3076		 * write stamp for us, and use the before stamp as its delta.
3077		 */
3078
3079		/*
3080		 * This is on the tail page. It is possible that
3081		 * a write could come in and move the tail page
3082		 * and write to the next page. That is fine
3083		 * because we just shorten what is on this page.
3084		 */
3085		old_index += write_mask;
3086		new_index += write_mask;
3087		index = local_cmpxchg(&bpage->write, old_index, new_index);
3088		if (index == old_index) {
3089			/* update counters */
3090			local_sub(event_length, &cpu_buffer->entries_bytes);
3091			return 1;
3092		}
3093	}
3094
3095	/* could not discard */
3096	return 0;
3097}
3098
3099static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
3100{
3101	local_inc(&cpu_buffer->committing);
3102	local_inc(&cpu_buffer->commits);
3103}
3104
3105static __always_inline void
3106rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
3107{
3108	unsigned long max_count;
3109
3110	/*
3111	 * We only race with interrupts and NMIs on this CPU.
3112	 * If we own the commit event, then we can commit
3113	 * all others that interrupted us, since the interruptions
3114	 * are in stack format (they finish before they come
3115	 * back to us). This allows us to do a simple loop to
3116	 * assign the commit to the tail.
3117	 */
3118 again:
3119	max_count = cpu_buffer->nr_pages * 100;
3120
3121	while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
3122		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
3123			return;
3124		if (RB_WARN_ON(cpu_buffer,
3125			       rb_is_reader_page(cpu_buffer->tail_page)))
3126			return;
3127		local_set(&cpu_buffer->commit_page->page->commit,
3128			  rb_page_write(cpu_buffer->commit_page));
3129		rb_inc_page(&cpu_buffer->commit_page);
3130		/* add barrier to keep gcc from optimizing too much */
3131		barrier();
3132	}
3133	while (rb_commit_index(cpu_buffer) !=
3134	       rb_page_write(cpu_buffer->commit_page)) {
3135
3136		local_set(&cpu_buffer->commit_page->page->commit,
3137			  rb_page_write(cpu_buffer->commit_page));
3138		RB_WARN_ON(cpu_buffer,
3139			   local_read(&cpu_buffer->commit_page->page->commit) &
3140			   ~RB_WRITE_MASK);
3141		barrier();
3142	}
3143
3144	/* again, keep gcc from optimizing */
3145	barrier();
3146
3147	/*
3148	 * If an interrupt came in just after the first while loop
3149	 * and pushed the tail page forward, we will be left with
3150	 * a dangling commit that will never go forward.
3151	 */
3152	if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
3153		goto again;
3154}
3155
3156static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
3157{
3158	unsigned long commits;
3159
3160	if (RB_WARN_ON(cpu_buffer,
3161		       !local_read(&cpu_buffer->committing)))
3162		return;
3163
3164 again:
3165	commits = local_read(&cpu_buffer->commits);
3166	/* synchronize with interrupts */
3167	barrier();
3168	if (local_read(&cpu_buffer->committing) == 1)
3169		rb_set_commit_to_write(cpu_buffer);
3170
3171	local_dec(&cpu_buffer->committing);
3172
3173	/* synchronize with interrupts */
3174	barrier();
3175
3176	/*
3177	 * Need to account for interrupts coming in between the
3178	 * updating of the commit page and the clearing of the
3179	 * committing counter.
3180	 */
3181	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
3182	    !local_read(&cpu_buffer->committing)) {
3183		local_inc(&cpu_buffer->committing);
3184		goto again;
3185	}
3186}
3187
3188static inline void rb_event_discard(struct ring_buffer_event *event)
3189{
3190	if (extended_time(event))
3191		event = skip_time_extend(event);
3192
3193	/* array[0] holds the actual length for the discarded event */
3194	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
3195	event->type_len = RINGBUF_TYPE_PADDING;
3196	/* time delta must be non zero */
3197	if (!event->time_delta)
3198		event->time_delta = 1;
3199}
3200
3201static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer)
3202{
3203	local_inc(&cpu_buffer->entries);
3204	rb_end_commit(cpu_buffer);
3205}
3206
3207static __always_inline void
3208rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
3209{
3210	if (buffer->irq_work.waiters_pending) {
3211		buffer->irq_work.waiters_pending = false;
3212		/* irq_work_queue() supplies its own memory barriers */
3213		irq_work_queue(&buffer->irq_work.work);
3214	}
3215
3216	if (cpu_buffer->irq_work.waiters_pending) {
3217		cpu_buffer->irq_work.waiters_pending = false;
3218		/* irq_work_queue() supplies its own memory barriers */
3219		irq_work_queue(&cpu_buffer->irq_work.work);
3220	}
3221
3222	if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched))
3223		return;
3224
3225	if (cpu_buffer->reader_page == cpu_buffer->commit_page)
3226		return;
3227
3228	if (!cpu_buffer->irq_work.full_waiters_pending)
3229		return;
3230
3231	cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
3232
3233	if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full))
3234		return;
3235
3236	cpu_buffer->irq_work.wakeup_full = true;
3237	cpu_buffer->irq_work.full_waiters_pending = false;
3238	/* irq_work_queue() supplies its own memory barriers */
3239	irq_work_queue(&cpu_buffer->irq_work.work);
3240}
3241
3242#ifdef CONFIG_RING_BUFFER_RECORD_RECURSION
3243# define do_ring_buffer_record_recursion()	\
3244	do_ftrace_record_recursion(_THIS_IP_, _RET_IP_)
3245#else
3246# define do_ring_buffer_record_recursion() do { } while (0)
3247#endif
3248
3249/*
3250 * The lock and unlock are done within a preempt disable section.
3251 * The current_context per_cpu variable can only be modified
3252 * by the current task between lock and unlock. But it can
3253 * be modified more than once via an interrupt. To pass this
3254 * information from the lock to the unlock without having to
3255 * access the 'in_interrupt()' functions again (which do show
3256 * a bit of overhead in something as critical as function tracing),
3257 * we use a bitmask trick.
3258 *
3259 *  bit 1 =  NMI context
3260 *  bit 2 =  IRQ context
3261 *  bit 3 =  SoftIRQ context
3262 *  bit 4 =  normal context.
3263 *
3264 * This works because this is the order of contexts that can
3265 * preempt other contexts. A SoftIRQ never preempts an IRQ
3266 * context.
3267 *
3268 * When the context is determined, the corresponding bit is
3269 * checked and set (if it was set, then a recursion of that context
3270 * happened).
3271 *
3272 * On unlock, we need to clear this bit. To do so, just subtract
3273 * 1 from the current_context and AND it to itself.
3274 *
3275 * (binary)
3276 *  101 - 1 = 100
3277 *  101 & 100 = 100 (clearing bit zero)
3278 *
3279 *  1010 - 1 = 1001
3280 *  1010 & 1001 = 1000 (clearing bit 1)
3281 *
3282 * The least significant bit can be cleared this way, and it
3283 * just so happens that it is the same bit corresponding to
3284 * the current context.
3285 *
3286 * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit
3287 * is set when a recursion is detected at the current context, and if
3288 * the TRANSITION bit is already set, it will fail the recursion.
3289 * This is needed because there's a lag between the changing of
3290 * interrupt context and updating the preempt count. In this case,
3291 * a false positive will be found. To handle this, one extra recursion
3292 * is allowed, and this is done by the TRANSITION bit. If the TRANSITION
3293 * bit is already set, then it is considered a recursion and the function
3294 * ends. Otherwise, the TRANSITION bit is set, and that bit is returned.
3295 *
3296 * On the trace_recursive_unlock(), the TRANSITION bit will be the first
3297 * to be cleared. Even if it wasn't the context that set it. That is,
3298 * if an interrupt comes in while NORMAL bit is set and the ring buffer
3299 * is called before preempt_count() is updated, since the check will
3300 * be on the NORMAL bit, the TRANSITION bit will then be set. If an
3301 * NMI then comes in, it will set the NMI bit, but when the NMI code
3302 * does the trace_recursive_unlock() it will clear the TRANSITION bit
3303 * and leave the NMI bit set. But this is fine, because the interrupt
3304 * code that set the TRANSITION bit will then clear the NMI bit when it
3305 * calls trace_recursive_unlock(). If another NMI comes in, it will
3306 * set the TRANSITION bit and continue.
3307 *
3308 * Note: The TRANSITION bit only handles a single transition between context.
3309 */
3310
3311static __always_inline int
3312trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
3313{
3314	unsigned int val = cpu_buffer->current_context;
3315	int bit = interrupt_context_level();
3316
3317	bit = RB_CTX_NORMAL - bit;
3318
3319	if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) {
3320		/*
3321		 * It is possible that this was called by transitioning
3322		 * between interrupt context, and preempt_count() has not
3323		 * been updated yet. In this case, use the TRANSITION bit.
3324		 */
3325		bit = RB_CTX_TRANSITION;
3326		if (val & (1 << (bit + cpu_buffer->nest))) {
3327			do_ring_buffer_record_recursion();
3328			return 1;
3329		}
3330	}
3331
3332	val |= (1 << (bit + cpu_buffer->nest));
3333	cpu_buffer->current_context = val;
3334
3335	return 0;
3336}
3337
3338static __always_inline void
3339trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
3340{
3341	cpu_buffer->current_context &=
3342		cpu_buffer->current_context - (1 << cpu_buffer->nest);
3343}
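
/*
 * Self-contained sketch of the bit trick above (illustrative, with
 * cpu_buffer->nest == 0 and hypothetical helper names):
 *
 *	static unsigned int ctx;
 *
 *	static int ctx_lock(int bit)
 *	{
 *		if (ctx & (1 << bit))
 *			return 1;	// recursion detected
 *		ctx |= 1 << bit;
 *		return 0;
 *	}
 *
 *	static void ctx_unlock(void)
 *	{
 *		ctx &= ctx - 1;		// clears only the lowest set bit
 *	}
 *
 * Because deeper contexts (NMI, IRQ) use lower bits and always finish
 * before returning, the lowest set bit is the most recently taken context.
 */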
3344
3345/* The recursive locking above uses 5 bits */
3346#define NESTED_BITS 5
3347
3348/**
3349 * ring_buffer_nest_start - Allow to trace while nested
3350 * @buffer: The ring buffer to modify
3351 *
3352 * The ring buffer has a safety mechanism to prevent recursion.
3353 * But there may be a case where a trace needs to be done while
3354 * tracing something else. In this case, calling this function
3355 * will allow this function to nest within a currently active
3356 * ring_buffer_lock_reserve().
3357 *
3358 * Call this function before calling another ring_buffer_lock_reserve() and
3359 * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
3360 */
3361void ring_buffer_nest_start(struct trace_buffer *buffer)
3362{
3363	struct ring_buffer_per_cpu *cpu_buffer;
3364	int cpu;
3365
3366	/* Enabled by ring_buffer_nest_end() */
3367	preempt_disable_notrace();
3368	cpu = raw_smp_processor_id();
3369	cpu_buffer = buffer->buffers[cpu];
3370	/* This is the shift value for the above recursive locking */
3371	cpu_buffer->nest += NESTED_BITS;
3372}
3373
3374/**
3375 * ring_buffer_nest_end - Allow to trace while nested
3376 * @buffer: The ring buffer to modify
3377 *
3378 * Must be called after ring_buffer_nest_start() and after the
3379 * ring_buffer_unlock_commit().
3380 */
3381void ring_buffer_nest_end(struct trace_buffer *buffer)
3382{
3383	struct ring_buffer_per_cpu *cpu_buffer;
3384	int cpu;
3385
3386	/* disabled by ring_buffer_nest_start() */
3387	cpu = raw_smp_processor_id();
3388	cpu_buffer = buffer->buffers[cpu];
3389	/* This is the shift value for the above recursive locking */
3390	cpu_buffer->nest -= NESTED_BITS;
3391	preempt_enable_notrace();
3392}
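
/*
 * Example (illustrative): tracing from code that already holds a
 * reservation on the same ring buffer. Without the nest_start()/nest_end()
 * pair the inner reserve would be rejected as a recursion.
 *
 *	ring_buffer_nest_start(buf);
 *	event = ring_buffer_lock_reserve(buf, sizeof(*entry));
 *	if (event) {
 *		...
 *		ring_buffer_unlock_commit(buf);
 *	}
 *	ring_buffer_nest_end(buf);
 */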
3393
3394/**
3395 * ring_buffer_unlock_commit - commit a reserved event
3396 * @buffer: The buffer to commit to
3398 *
3399 * This commits the data to the ring buffer, and releases any locks held.
3400 *
3401 * Must be paired with ring_buffer_lock_reserve.
3402 */
3403int ring_buffer_unlock_commit(struct trace_buffer *buffer)
3404{
3405	struct ring_buffer_per_cpu *cpu_buffer;
3406	int cpu = raw_smp_processor_id();
3407
3408	cpu_buffer = buffer->buffers[cpu];
3409
3410	rb_commit(cpu_buffer);
3411
3412	rb_wakeups(buffer, cpu_buffer);
3413
3414	trace_recursive_unlock(cpu_buffer);
3415
3416	preempt_enable_notrace();
3417
3418	return 0;
3419}
3420EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
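
/*
 * Example (illustrative): the usual reserve/write/commit pattern. The
 * struct my_entry payload type is hypothetical; preemption stays disabled
 * between the reserve and the commit, so nothing in between may sleep.
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = ring_buffer_lock_reserve(buf, sizeof(*entry));
 *	if (event) {
 *		entry = ring_buffer_event_data(event);
 *		entry->value = 42;
 *		ring_buffer_unlock_commit(buf);
 *	}
 */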
3421
3422/* Special value to validate all deltas on a page. */
3423#define CHECK_FULL_PAGE		1L
3424
3425#ifdef CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS
3426static void dump_buffer_page(struct buffer_data_page *bpage,
3427			     struct rb_event_info *info,
3428			     unsigned long tail)
3429{
3430	struct ring_buffer_event *event;
3431	u64 ts, delta;
3432	int e;
3433
3434	ts = bpage->time_stamp;
3435	pr_warn("  [%lld] PAGE TIME STAMP\n", ts);
3436
3437	for (e = 0; e < tail; e += rb_event_length(event)) {
3438
3439		event = (struct ring_buffer_event *)(bpage->data + e);
3440
3441		switch (event->type_len) {
3442
3443		case RINGBUF_TYPE_TIME_EXTEND:
3444			delta = rb_event_time_stamp(event);
3445			ts += delta;
3446			pr_warn("  [%lld] delta:%lld TIME EXTEND\n", ts, delta);
3447			break;
3448
3449		case RINGBUF_TYPE_TIME_STAMP:
3450			delta = rb_event_time_stamp(event);
3451			ts = rb_fix_abs_ts(delta, ts);
3452			pr_warn("  [%lld] absolute:%lld TIME STAMP\n", ts, delta);
3453			break;
3454
3455		case RINGBUF_TYPE_PADDING:
3456			ts += event->time_delta;
3457			pr_warn("  [%lld] delta:%d PADDING\n", ts, event->time_delta);
3458			break;
3459
3460		case RINGBUF_TYPE_DATA:
3461			ts += event->time_delta;
3462			pr_warn("  [%lld] delta:%d\n", ts, event->time_delta);
3463			break;
3464
3465		default:
3466			break;
3467		}
3468	}
3469}
3470
3471static DEFINE_PER_CPU(atomic_t, checking);
3472static atomic_t ts_dump;
3473
3474/*
3475 * Check if the current event time stamp matches the deltas on
3476 * the buffer page.
3477 */
3478static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
3479			 struct rb_event_info *info,
3480			 unsigned long tail)
3481{
3482	struct ring_buffer_event *event;
3483	struct buffer_data_page *bpage;
3484	u64 ts, delta;
3485	bool full = false;
3486	int e;
3487
3488	bpage = info->tail_page->page;
3489
3490	if (tail == CHECK_FULL_PAGE) {
3491		full = true;
3492		tail = local_read(&bpage->commit);
3493	} else if (info->add_timestamp &
3494		   (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)) {
3495		/* Ignore events with absolute time stamps */
3496		return;
3497	}
3498
3499	/*
3500	 * Do not check the first event (skip possible extends too).
3501	 * Also do not check if previous events have not been committed.
3502	 */
3503	if (tail <= 8 || tail > local_read(&bpage->commit))
3504		return;
3505
3506	/*
3507	 * If this interrupted another event's check, do not check this one.
3508	 */
3509	if (atomic_inc_return(this_cpu_ptr(&checking)) != 1)
3510		goto out;
3511
3512	ts = bpage->time_stamp;
3513
3514	for (e = 0; e < tail; e += rb_event_length(event)) {
3515
3516		event = (struct ring_buffer_event *)(bpage->data + e);
3517
3518		switch (event->type_len) {
3519
3520		case RINGBUF_TYPE_TIME_EXTEND:
3521			delta = rb_event_time_stamp(event);
3522			ts += delta;
3523			break;
3524
3525		case RINGBUF_TYPE_TIME_STAMP:
3526			delta = rb_event_time_stamp(event);
3527			ts = rb_fix_abs_ts(delta, ts);
3528			break;
3529
3530		case RINGBUF_TYPE_PADDING:
3531			if (event->time_delta == 1)
3532				break;
3533			fallthrough;
3534		case RINGBUF_TYPE_DATA:
3535			ts += event->time_delta;
3536			break;
3537
3538		default:
3539			RB_WARN_ON(cpu_buffer, 1);
3540		}
3541	}
3542	if ((full && ts > info->ts) ||
3543	    (!full && ts + info->delta != info->ts)) {
3544		/* If another report is happening, ignore this one */
3545		if (atomic_inc_return(&ts_dump) != 1) {
3546			atomic_dec(&ts_dump);
3547			goto out;
3548		}
3549		atomic_inc(&cpu_buffer->record_disabled);
3550		/* There are some cases during boot up where this can happen */
3551		WARN_ON_ONCE(system_state != SYSTEM_BOOTING);
3552		pr_warn("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%s\n",
3553			cpu_buffer->cpu,
3554			ts + info->delta, info->ts, info->delta,
3555			info->before, info->after,
3556			full ? " (full)" : "");
3557		dump_buffer_page(bpage, info, tail);
3558		atomic_dec(&ts_dump);
3559		/* Do not re-enable checking */
3560		return;
3561	}
3562out:
3563	atomic_dec(this_cpu_ptr(&checking));
3564}
3565#else
3566static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
3567			 struct rb_event_info *info,
3568			 unsigned long tail)
3569{
3570}
3571#endif /* CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS */
3572
3573static struct ring_buffer_event *
3574__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
3575		  struct rb_event_info *info)
3576{
3577	struct ring_buffer_event *event;
3578	struct buffer_page *tail_page;
3579	unsigned long tail, write, w;
3580	bool a_ok;
3581	bool b_ok;
3582
3583	/* Don't let the compiler play games with cpu_buffer->tail_page */
3584	tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
3585
3586 /*A*/	w = local_read(&tail_page->write) & RB_WRITE_MASK;
3587	barrier();
3588	b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
3589	a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
3590	barrier();
3591	info->ts = rb_time_stamp(cpu_buffer->buffer);
3592
3593	if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) {
3594		info->delta = info->ts;
3595	} else {
3596		/*
3597		 * If interrupting an event time update, we may need an
3598		 * absolute timestamp.
3599		 * Don't bother if this is the start of a new page (w == 0).
3600		 */
3601		if (unlikely(!a_ok || !b_ok || (info->before != info->after && w))) {
3602			info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND;
3603			info->length += RB_LEN_TIME_EXTEND;
3604		} else {
3605			info->delta = info->ts - info->after;
3606			if (unlikely(test_time_stamp(info->delta))) {
3607				info->add_timestamp |= RB_ADD_STAMP_EXTEND;
3608				info->length += RB_LEN_TIME_EXTEND;
3609			}
3610		}
3611	}
3612
3613 /*B*/	rb_time_set(&cpu_buffer->before_stamp, info->ts);
3614
3615 /*C*/	write = local_add_return(info->length, &tail_page->write);
3616
3617	/* set write to only the index of the write */
3618	write &= RB_WRITE_MASK;
3619
3620	tail = write - info->length;
3621
3622	/* See if we shot past the end of this buffer page */
3623	if (unlikely(write > BUF_PAGE_SIZE)) {
3624		/* before and after may now be different, fix it up */
3625		b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before);
3626		a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
3627		if (a_ok && b_ok && info->before != info->after)
3628			(void)rb_time_cmpxchg(&cpu_buffer->before_stamp,
3629					      info->before, info->after);
3630		if (a_ok && b_ok)
3631			check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
3632		return rb_move_tail(cpu_buffer, tail, info);
3633	}
3634
3635	if (likely(tail == w)) {
3636		u64 save_before;
3637		bool s_ok;
3638
3639		/* Nothing interrupted us between A and C */
3640 /*D*/		rb_time_set(&cpu_buffer->write_stamp, info->ts);
3641		barrier();
3642 /*E*/		s_ok = rb_time_read(&cpu_buffer->before_stamp, &save_before);
3643		RB_WARN_ON(cpu_buffer, !s_ok);
3644		if (likely(!(info->add_timestamp &
3645			     (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
3646			/* This did not interrupt any time update */
3647			info->delta = info->ts - info->after;
3648		else
3649			/* Just use full timestamp for interrupting event */
3650			info->delta = info->ts;
3651		barrier();
3652		check_buffer(cpu_buffer, info, tail);
3653		if (unlikely(info->ts != save_before)) {
3654			/* SLOW PATH - Interrupted between C and E */
3655
3656			a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
3657			RB_WARN_ON(cpu_buffer, !a_ok);
3658
3659			/* Write stamp must only go forward */
3660			if (save_before > info->after) {
3661				/*
3662				 * We do not care about the result, only that
3663				 * it gets updated atomically.
3664				 */
3665				(void)rb_time_cmpxchg(&cpu_buffer->write_stamp,
3666						      info->after, save_before);
3667			}
3668		}
3669	} else {
3670		u64 ts;
3671		/* SLOW PATH - Interrupted between A and C */
3672		a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after);
3673		/* Was interrupted before here, write_stamp must be valid */
3674		RB_WARN_ON(cpu_buffer, !a_ok);
3675		ts = rb_time_stamp(cpu_buffer->buffer);
3676		barrier();
3677 /*E*/		if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
3678		    info->after < ts &&
3679		    rb_time_cmpxchg(&cpu_buffer->write_stamp,
3680				    info->after, ts)) {
3681			/* Nothing came after this event between C and E */
3682			info->delta = ts - info->after;
3683		} else {
3684			/*
3685			 * Interrupted between C and E:
3686			 * Lost the previous event's time stamp. Just set the
3687			 * delta to zero, and this will be the same time as
3688			 * the event this event interrupted. And the events that
3689			 * came after this will still be correct (as they would
3690			 * have built their delta on the previous event).
3691			 */
3692			info->delta = 0;
3693		}
3694		info->ts = ts;
3695		info->add_timestamp &= ~RB_ADD_STAMP_FORCE;
3696	}
3697
3698	/*
3699	 * If this is the first commit on the page, then it has the same
3700	 * timestamp as the page itself.
3701	 */
3702	if (unlikely(!tail && !(info->add_timestamp &
3703				(RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
3704		info->delta = 0;
3705
3706	/* We reserved something on the buffer */
3707
3708	event = __rb_page_index(tail_page, tail);
3709	rb_update_event(cpu_buffer, event, info);
3710
3711	local_inc(&tail_page->entries);
3712
3713	/*
3714	 * If this is the first commit on the page, then update
3715	 * its timestamp.
3716	 */
3717	if (unlikely(!tail))
3718		tail_page->page->time_stamp = info->ts;
3719
3720	/* account for these added bytes */
3721	local_add(info->length, &cpu_buffer->entries_bytes);
3722
3723	return event;
3724}
3725
3726static __always_inline struct ring_buffer_event *
3727rb_reserve_next_event(struct trace_buffer *buffer,
3728		      struct ring_buffer_per_cpu *cpu_buffer,
3729		      unsigned long length)
3730{
3731	struct ring_buffer_event *event;
3732	struct rb_event_info info;
3733	int nr_loops = 0;
3734	int add_ts_default;
3735
3736	rb_start_commit(cpu_buffer);
3737	/* The commit page can not change after this */
3738
3739#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3740	/*
3741	 * Due to the ability to swap a cpu buffer out of a buffer,
3742	 * it is possible it was swapped before we committed
3743	 * (committing stops a swap). We check for that here, and
3744	 * if it happened, we have to fail the write.
3745	 */
3746	barrier();
3747	if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) {
3748		local_dec(&cpu_buffer->committing);
3749		local_dec(&cpu_buffer->commits);
3750		return NULL;
3751	}
3752#endif
3753
3754	info.length = rb_calculate_event_length(length);
3755
3756	if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) {
3757		add_ts_default = RB_ADD_STAMP_ABSOLUTE;
3758		info.length += RB_LEN_TIME_EXTEND;
3759	} else {
3760		add_ts_default = RB_ADD_STAMP_NONE;
3761	}
3762
3763 again:
3764	info.add_timestamp = add_ts_default;
3765	info.delta = 0;
3766
3767	/*
3768	 * We allow for interrupts to reenter here and do a trace.
3769	 * If one does, it will cause this original code to loop
3770	 * back here. Even with heavy interrupts happening, this
3771	 * should only happen a few times in a row. If this happens
3772	 * 1000 times in a row, there must be either an interrupt
3773	 * storm or we have something buggy.
3774	 * Bail!
3775	 */
3776	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
3777		goto out_fail;
3778
3779	event = __rb_reserve_next(cpu_buffer, &info);
3780
3781	if (unlikely(PTR_ERR(event) == -EAGAIN)) {
3782		if (info.add_timestamp & (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND))
3783			info.length -= RB_LEN_TIME_EXTEND;
3784		goto again;
3785	}
3786
3787	if (likely(event))
3788		return event;
3789 out_fail:
3790	rb_end_commit(cpu_buffer);
3791	return NULL;
3792}
3793
3794/**
3795 * ring_buffer_lock_reserve - reserve a part of the buffer
3796 * @buffer: the ring buffer to reserve from
3797 * @length: the length of the data to reserve (excluding event header)
3798 *
3799 * Returns a reserved event on the ring buffer to copy directly to.
3800 * The user of this interface will need to get the body to write into
3801 * and can use the ring_buffer_event_data() interface.
3802 *
3803 * The length is the length of the data needed, not the event length
3804 * which also includes the event header.
3805 *
3806 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
3807 * If NULL is returned, then nothing has been allocated or locked.
3808 */
3809struct ring_buffer_event *
3810ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length)
3811{
3812	struct ring_buffer_per_cpu *cpu_buffer;
3813	struct ring_buffer_event *event;
3814	int cpu;
3815
3816	/* If we are tracing schedule, we don't want to recurse */
3817	preempt_disable_notrace();
3818
3819	if (unlikely(atomic_read(&buffer->record_disabled)))
3820		goto out;
3821
3822	cpu = raw_smp_processor_id();
3823
3824	if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
3825		goto out;
3826
3827	cpu_buffer = buffer->buffers[cpu];
3828
3829	if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
3830		goto out;
3831
3832	if (unlikely(length > BUF_MAX_DATA_SIZE))
3833		goto out;
3834
3835	if (unlikely(trace_recursive_lock(cpu_buffer)))
3836		goto out;
3837
3838	event = rb_reserve_next_event(buffer, cpu_buffer, length);
3839	if (!event)
3840		goto out_unlock;
3841
3842	return event;
3843
3844 out_unlock:
3845	trace_recursive_unlock(cpu_buffer);
3846 out:
3847	preempt_enable_notrace();
3848	return NULL;
3849}
3850EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
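/*
 * A minimal usage sketch of the reserve/commit pairing described above.
 * The helper below is hypothetical and not part of this file; it assumes
 * a valid trace_buffer and the two-argument form of
 * ring_buffer_unlock_commit() that pairs with this reserve.
 */
static inline int example_reserve_and_commit(struct trace_buffer *buffer,
					     const void *src, unsigned long len)
{
	struct ring_buffer_event *event;
	void *body;

	/* Reserve room for "len" bytes of payload; the header is added for us */
	event = ring_buffer_lock_reserve(buffer, len);
	if (!event)
		return -EBUSY;

	/* Copy the payload into the reserved slot */
	body = ring_buffer_event_data(event);
	memcpy(body, src, len);

	/* The commit makes the event visible to readers */
	ring_buffer_unlock_commit(buffer, event);

	return 0;
}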
3851
3852/*
3853 * Decrement the entry count of the page that an event is on.
3854 * The event does not even need to exist, only the pointer
3855 * to the page it is on. This may only be called before the commit
3856 * takes place.
3857 */
3858static inline void
3859rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
3860		   struct ring_buffer_event *event)
3861{
3862	unsigned long addr = (unsigned long)event;
3863	struct buffer_page *bpage = cpu_buffer->commit_page;
3864	struct buffer_page *start;
3865
3866	addr &= PAGE_MASK;
3867
3868	/* Do the likely case first */
3869	if (likely(bpage->page == (void *)addr)) {
3870		local_dec(&bpage->entries);
3871		return;
3872	}
3873
3874	/*
3875	 * Because the commit page may be on the reader page, we
3876	 * start with the next page and check the end of the loop there.
3877	 */
3878	rb_inc_page(&bpage);
3879	start = bpage;
3880	do {
3881		if (bpage->page == (void *)addr) {
3882			local_dec(&bpage->entries);
3883			return;
3884		}
3885		rb_inc_page(&bpage);
3886	} while (bpage != start);
3887
3888	/* commit not part of this buffer?? */
3889	RB_WARN_ON(cpu_buffer, 1);
3890}
3891
3892/**
3893 * ring_buffer_discard_commit - discard an event that has not been committed
3894 * @buffer: the ring buffer
3895 * @event: non committed event to discard
3896 *
3897 * Sometimes an event that is in the ring buffer needs to be ignored.
3898 * This function lets the user discard an event in the ring buffer
3899 * and then that event will not be read later.
3900 *
3901 * This function only works if it is called before the item has been
3902 * committed. It will try to free the event from the ring buffer
3903 * if another event has not been added behind it.
3904 *
3905 * If another event has been added behind it, it will set the event
3906 * up as discarded, and perform the commit.
3907 *
3908 * If this function is called, do not call ring_buffer_unlock_commit on
3909 * the event.
3910 */
3911void ring_buffer_discard_commit(struct trace_buffer *buffer,
3912				struct ring_buffer_event *event)
3913{
3914	struct ring_buffer_per_cpu *cpu_buffer;
3915	int cpu;
3916
3917	/* The event is discarded regardless */
3918	rb_event_discard(event);
3919
3920	cpu = smp_processor_id();
3921	cpu_buffer = buffer->buffers[cpu];
3922
3923	/*
3924	 * This must only be called if the event has not been
3925	 * committed yet. Thus we can assume that preemption
3926	 * is still disabled.
3927	 */
3928	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
3929
3930	rb_decrement_entry(cpu_buffer, event);
3931	if (rb_try_to_discard(cpu_buffer, event))
3932		goto out;
3933
3934 out:
3935	rb_end_commit(cpu_buffer);
3936
3937	trace_recursive_unlock(cpu_buffer);
3938
3939	preempt_enable_notrace();
3940
3941}
3942EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
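/*
 * A minimal sketch of discarding a reserved event instead of committing
 * it, e.g. after a late filter decision. The helper and its filter
 * callback are hypothetical and not part of this file.
 */
static inline void example_filtered_write(struct trace_buffer *buffer,
					  const void *src, unsigned long len,
					  bool (*keep)(const void *data))
{
	struct ring_buffer_event *event;
	void *body;

	event = ring_buffer_lock_reserve(buffer, len);
	if (!event)
		return;

	body = ring_buffer_event_data(event);
	memcpy(body, src, len);

	if (keep(body)) {
		/* Keep the event: the normal commit path */
		ring_buffer_unlock_commit(buffer, event);
	} else {
		/* Drop it: do not call ring_buffer_unlock_commit() after this */
		ring_buffer_discard_commit(buffer, event);
	}
}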
3943
3944/**
3945 * ring_buffer_write - write data to the buffer without reserving
3946 * @buffer: The ring buffer to write to.
3947 * @length: The length of the data being written (excluding the event header)
3948 * @data: The data to write to the buffer.
3949 *
3950 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
3951 * one function. If you already have the data to write to the buffer, it
3952 * may be easier to simply call this function.
3953 *
3954 * Note, like ring_buffer_lock_reserve, the length is the length of the data
3955 * and not the length of the event which would hold the header.
3956 */
3957int ring_buffer_write(struct trace_buffer *buffer,
3958		      unsigned long length,
3959		      void *data)
3960{
3961	struct ring_buffer_per_cpu *cpu_buffer;
3962	struct ring_buffer_event *event;
3963	void *body;
3964	int ret = -EBUSY;
3965	int cpu;
3966
3967	preempt_disable_notrace();
3968
3969	if (atomic_read(&buffer->record_disabled))
3970		goto out;
3971
3972	cpu = raw_smp_processor_id();
3973
3974	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3975		goto out;
3976
3977	cpu_buffer = buffer->buffers[cpu];
3978
3979	if (atomic_read(&cpu_buffer->record_disabled))
3980		goto out;
3981
3982	if (length > BUF_MAX_DATA_SIZE)
3983		goto out;
3984
3985	if (unlikely(trace_recursive_lock(cpu_buffer)))
3986		goto out;
3987
3988	event = rb_reserve_next_event(buffer, cpu_buffer, length);
3989	if (!event)
3990		goto out_unlock;
3991
3992	body = rb_event_data(event);
3993
3994	memcpy(body, data, length);
3995
3996	rb_commit(cpu_buffer);
3997
3998	rb_wakeups(buffer, cpu_buffer);
3999
4000	ret = 0;
4001
4002 out_unlock:
4003	trace_recursive_unlock(cpu_buffer);
4004
4005 out:
4006	preempt_enable_notrace();
4007
4008	return ret;
4009}
4010EXPORT_SYMBOL_GPL(ring_buffer_write);
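/*
 * A minimal sketch showing ring_buffer_write() replacing the
 * reserve/commit pair when the payload already exists in memory.
 * The sample struct and helper are hypothetical and not part of this file.
 */
struct example_sample {
	u64	when;
	int	value;
};

static inline int example_one_shot_write(struct trace_buffer *buffer, int value)
{
	struct example_sample s = {
		.when	= sched_clock(),
		.value	= value,
	};

	/* Length is the payload size only; the event header is added internally */
	return ring_buffer_write(buffer, sizeof(s), &s);
}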
4011
4012static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
4013{
4014	struct buffer_page *reader = cpu_buffer->reader_page;
4015	struct buffer_page *head = rb_set_head_page(cpu_buffer);
4016	struct buffer_page *commit = cpu_buffer->commit_page;
4017
4018	/* In case of error, head will be NULL */
4019	if (unlikely(!head))
4020		return true;
4021
4022	/* Reader should exhaust content in reader page */
4023	if (reader->read != rb_page_commit(reader))
4024		return false;
4025
4026	/*
4027	 * If writers are committing on the reader page, knowing all
4028	 * committed content has been read, the ring buffer is empty.
4029	 */
4030	if (commit == reader)
4031		return true;
4032
4033	/*
4034	 * If writers are committing on a page other than reader page
4035	 * and head page, there should always be content to read.
4036	 */
4037	if (commit != head)
4038		return false;
4039
4040	/*
4041	 * Writers are committing on the head page; we just need
4042	 * to care about whether there is committed data, and the reader will
4043	 * swap the reader page with the head page when it wants to read data.
4044	 */
4045	return rb_page_commit(commit) == 0;
4046}
4047
4048/**
4049 * ring_buffer_record_disable - stop all writes into the buffer
4050 * @buffer: The ring buffer to stop writes to.
4051 *
4052 * This prevents all writes to the buffer. Any attempt to write
4053 * to the buffer after this will fail and return NULL.
4054 *
4055 * The caller should call synchronize_rcu() after this.
4056 */
4057void ring_buffer_record_disable(struct trace_buffer *buffer)
4058{
4059	atomic_inc(&buffer->record_disabled);
4060}
4061EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
4062
4063/**
4064 * ring_buffer_record_enable - enable writes to the buffer
4065 * @buffer: The ring buffer to enable writes
4066 *
4067 * Note, multiple disables will need the same number of enables
4068 * to truly enable the writing (much like preempt_disable).
4069 */
4070void ring_buffer_record_enable(struct trace_buffer *buffer)
4071{
4072	atomic_dec(&buffer->record_disabled);
4073}
4074EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
4075
4076/**
4077 * ring_buffer_record_off - stop all writes into the buffer
4078 * @buffer: The ring buffer to stop writes to.
4079 *
4080 * This prevents all writes to the buffer. Any attempt to write
4081 * to the buffer after this will fail and return NULL.
4082 *
4083 * This is different from ring_buffer_record_disable() as
4084 * it works like an on/off switch, whereas the disable() version
4085 * must be paired with an enable().
4086 */
4087void ring_buffer_record_off(struct trace_buffer *buffer)
4088{
4089	unsigned int rd;
4090	unsigned int new_rd;
4091
4092	do {
4093		rd = atomic_read(&buffer->record_disabled);
4094		new_rd = rd | RB_BUFFER_OFF;
4095	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
4096}
4097EXPORT_SYMBOL_GPL(ring_buffer_record_off);
4098
4099/**
4100 * ring_buffer_record_on - restart writes into the buffer
4101 * @buffer: The ring buffer to start writes to.
4102 *
4103 * This enables all writes to the buffer that was disabled by
4104 * ring_buffer_record_off().
4105 *
4106 * This is different from ring_buffer_record_enable() as
4107 * it works like an on/off switch, whereas the enable() version
4108 * must be paired with a disable().
4109 */
4110void ring_buffer_record_on(struct trace_buffer *buffer)
4111{
4112	unsigned int rd;
4113	unsigned int new_rd;
4114
4115	do {
4116		rd = atomic_read(&buffer->record_disabled);
4117		new_rd = rd & ~RB_BUFFER_OFF;
4118	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
4119}
4120EXPORT_SYMBOL_GPL(ring_buffer_record_on);
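/*
 * A minimal sketch contrasting the nesting disable()/enable() pair with
 * the on/off switch documented above. The helper is hypothetical and
 * not part of this file.
 */
static inline void example_pause_vs_switch(struct trace_buffer *buffer)
{
	/* Nesting: every disable must be balanced by an enable */
	ring_buffer_record_disable(buffer);
	/* (the kernel-doc above suggests synchronize_rcu() before relying on this) */
	ring_buffer_record_enable(buffer);

	/* Switch: a single off stays off until someone turns it back on */
	ring_buffer_record_off(buffer);
	if (!ring_buffer_record_is_on(buffer))
		pr_debug("writes are now rejected\n");
	ring_buffer_record_on(buffer);
}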
4121
4122/**
4123 * ring_buffer_record_is_on - return true if the ring buffer can write
4124 * @buffer: The ring buffer to see if write is enabled
4125 *
4126 * Returns true if the ring buffer is in a state that it accepts writes.
4127 */
4128bool ring_buffer_record_is_on(struct trace_buffer *buffer)
4129{
4130	return !atomic_read(&buffer->record_disabled);
4131}
4132
4133/**
4134 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
4135 * @buffer: The ring buffer to see if write is set enabled
4136 *
4137 * Returns true if the ring buffer is set writable by ring_buffer_record_on().
4138 * Note that this does NOT mean it is in a writable state.
4139 *
4140 * It may return true when the ring buffer has been disabled by
4141 * ring_buffer_record_disable(), as that is a temporary disabling of
4142 * the ring buffer.
4143 */
4144bool ring_buffer_record_is_set_on(struct trace_buffer *buffer)
4145{
4146	return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
4147}
4148
4149/**
4150 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
4151 * @buffer: The ring buffer to stop writes to.
4152 * @cpu: The CPU buffer to stop
4153 *
4154 * This prevents all writes to the buffer. Any attempt to write
4155 * to the buffer after this will fail and return NULL.
4156 *
4157 * The caller should call synchronize_rcu() after this.
4158 */
4159void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu)
4160{
4161	struct ring_buffer_per_cpu *cpu_buffer;
4162
4163	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4164		return;
4165
4166	cpu_buffer = buffer->buffers[cpu];
4167	atomic_inc(&cpu_buffer->record_disabled);
4168}
4169EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
4170
4171/**
4172 * ring_buffer_record_enable_cpu - enable writes to the buffer
4173 * @buffer: The ring buffer to enable writes
4174 * @cpu: The CPU to enable.
4175 *
4176 * Note, multiple disables will need the same number of enables
4177 * to truly enable the writing (much like preempt_disable).
4178 */
4179void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu)
4180{
4181	struct ring_buffer_per_cpu *cpu_buffer;
4182
4183	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4184		return;
4185
4186	cpu_buffer = buffer->buffers[cpu];
4187	atomic_dec(&cpu_buffer->record_disabled);
4188}
4189EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
4190
4191/*
4192 * The total entries in the ring buffer is the running counter
4193 * of entries entered into the ring buffer, minus the sum of
4194 * the entries read from the ring buffer and the number of
4195 * entries that were overwritten.
4196 */
4197static inline unsigned long
4198rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
4199{
4200	return local_read(&cpu_buffer->entries) -
4201		(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
4202}
4203
4204/**
4205 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
4206 * @buffer: The ring buffer
4207 * @cpu: The per CPU buffer to read from.
4208 */
4209u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
4210{
4211	unsigned long flags;
4212	struct ring_buffer_per_cpu *cpu_buffer;
4213	struct buffer_page *bpage;
4214	u64 ret = 0;
4215
4216	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4217		return 0;
4218
4219	cpu_buffer = buffer->buffers[cpu];
4220	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4221	/*
4222	 * If the tail is on the reader_page, the oldest time stamp is on
4223	 * the reader page.
4224	 */
4225	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
4226		bpage = cpu_buffer->reader_page;
4227	else
4228		bpage = rb_set_head_page(cpu_buffer);
4229	if (bpage)
4230		ret = bpage->page->time_stamp;
4231	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4232
4233	return ret;
4234}
4235EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
4236
4237/**
4238 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
4239 * @buffer: The ring buffer
4240 * @cpu: The per CPU buffer to read from.
4241 */
4242unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu)
4243{
4244	struct ring_buffer_per_cpu *cpu_buffer;
4245	unsigned long ret;
4246
4247	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4248		return 0;
4249
4250	cpu_buffer = buffer->buffers[cpu];
4251	ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
4252
4253	return ret;
4254}
4255EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
4256
4257/**
4258 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
4259 * @buffer: The ring buffer
4260 * @cpu: The per CPU buffer to get the entries from.
4261 */
4262unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu)
4263{
4264	struct ring_buffer_per_cpu *cpu_buffer;
4265
4266	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4267		return 0;
4268
4269	cpu_buffer = buffer->buffers[cpu];
4270
4271	return rb_num_of_entries(cpu_buffer);
4272}
4273EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
4274
4275/**
4276 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
4277 * buffer wrapping around (only if RB_FL_OVERWRITE is on).
4278 * @buffer: The ring buffer
4279 * @cpu: The per CPU buffer to get the number of overruns from
4280 */
4281unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu)
4282{
4283	struct ring_buffer_per_cpu *cpu_buffer;
4284	unsigned long ret;
4285
4286	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4287		return 0;
4288
4289	cpu_buffer = buffer->buffers[cpu];
4290	ret = local_read(&cpu_buffer->overrun);
4291
4292	return ret;
4293}
4294EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
4295
4296/**
4297 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
4298 * commits failing due to the buffer wrapping around while there are uncommitted
4299 * events, such as during an interrupt storm.
4300 * @buffer: The ring buffer
4301 * @cpu: The per CPU buffer to get the number of overruns from
4302 */
4303unsigned long
4304ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu)
4305{
4306	struct ring_buffer_per_cpu *cpu_buffer;
4307	unsigned long ret;
4308
4309	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4310		return 0;
4311
4312	cpu_buffer = buffer->buffers[cpu];
4313	ret = local_read(&cpu_buffer->commit_overrun);
4314
4315	return ret;
4316}
4317EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
4318
4319/**
4320 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
4321 * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
4322 * @buffer: The ring buffer
4323 * @cpu: The per CPU buffer to get the number of overruns from
4324 */
4325unsigned long
4326ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu)
4327{
4328	struct ring_buffer_per_cpu *cpu_buffer;
4329	unsigned long ret;
4330
4331	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4332		return 0;
4333
4334	cpu_buffer = buffer->buffers[cpu];
4335	ret = local_read(&cpu_buffer->dropped_events);
4336
4337	return ret;
4338}
4339EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
4340
4341/**
4342 * ring_buffer_read_events_cpu - get the number of events successfully read
4343 * @buffer: The ring buffer
4344 * @cpu: The per CPU buffer to get the number of events read
4345 */
4346unsigned long
4347ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu)
4348{
4349	struct ring_buffer_per_cpu *cpu_buffer;
4350
4351	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4352		return 0;
4353
4354	cpu_buffer = buffer->buffers[cpu];
4355	return cpu_buffer->read;
4356}
4357EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
4358
4359/**
4360 * ring_buffer_entries - get the number of entries in a buffer
4361 * @buffer: The ring buffer
4362 *
4363 * Returns the total number of entries in the ring buffer
4364 * (all CPU entries)
4365 */
4366unsigned long ring_buffer_entries(struct trace_buffer *buffer)
4367{
4368	struct ring_buffer_per_cpu *cpu_buffer;
4369	unsigned long entries = 0;
4370	int cpu;
4371
4372	/* if you care about this being correct, lock the buffer */
4373	for_each_buffer_cpu(buffer, cpu) {
4374		cpu_buffer = buffer->buffers[cpu];
4375		entries += rb_num_of_entries(cpu_buffer);
4376	}
4377
4378	return entries;
4379}
4380EXPORT_SYMBOL_GPL(ring_buffer_entries);
4381
4382/**
4383 * ring_buffer_overruns - get the number of overruns in buffer
4384 * @buffer: The ring buffer
4385 *
4386 * Returns the total number of overruns in the ring buffer
4387 * (all CPU entries)
4388 */
4389unsigned long ring_buffer_overruns(struct trace_buffer *buffer)
4390{
4391	struct ring_buffer_per_cpu *cpu_buffer;
4392	unsigned long overruns = 0;
4393	int cpu;
4394
4395	/* if you care about this being correct, lock the buffer */
4396	for_each_buffer_cpu(buffer, cpu) {
4397		cpu_buffer = buffer->buffers[cpu];
4398		overruns += local_read(&cpu_buffer->overrun);
4399	}
4400
4401	return overruns;
4402}
4403EXPORT_SYMBOL_GPL(ring_buffer_overruns);
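/*
 * A minimal sketch pulling the per-CPU statistics accessors above into
 * one debug line. The helper is hypothetical and not part of this file.
 */
static inline void example_dump_cpu_stats(struct trace_buffer *buffer, int cpu)
{
	pr_debug("cpu%d: entries=%lu read=%lu overrun=%lu dropped=%lu bytes=%lu oldest_ts=%llu\n",
		 cpu,
		 ring_buffer_entries_cpu(buffer, cpu),
		 ring_buffer_read_events_cpu(buffer, cpu),
		 ring_buffer_overrun_cpu(buffer, cpu),
		 ring_buffer_dropped_events_cpu(buffer, cpu),
		 ring_buffer_bytes_cpu(buffer, cpu),
		 (unsigned long long)ring_buffer_oldest_event_ts(buffer, cpu));
}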
4404
4405static void rb_iter_reset(struct ring_buffer_iter *iter)
4406{
4407	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4408
4409	/* Iterator usage is expected to have record disabled */
4410	iter->head_page = cpu_buffer->reader_page;
4411	iter->head = cpu_buffer->reader_page->read;
4412	iter->next_event = iter->head;
4413
4414	iter->cache_reader_page = iter->head_page;
4415	iter->cache_read = cpu_buffer->read;
4416
4417	if (iter->head) {
4418		iter->read_stamp = cpu_buffer->read_stamp;
4419		iter->page_stamp = cpu_buffer->reader_page->page->time_stamp;
4420	} else {
4421		iter->read_stamp = iter->head_page->page->time_stamp;
4422		iter->page_stamp = iter->read_stamp;
4423	}
4424}
4425
4426/**
4427 * ring_buffer_iter_reset - reset an iterator
4428 * @iter: The iterator to reset
4429 *
4430 * Resets the iterator, so that it will start from the beginning
4431 * again.
4432 */
4433void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
4434{
4435	struct ring_buffer_per_cpu *cpu_buffer;
4436	unsigned long flags;
4437
4438	if (!iter)
4439		return;
4440
4441	cpu_buffer = iter->cpu_buffer;
4442
4443	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4444	rb_iter_reset(iter);
4445	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4446}
4447EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
4448
4449/**
4450 * ring_buffer_iter_empty - check if an iterator has no more to read
4451 * @iter: The iterator to check
4452 */
4453int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
4454{
4455	struct ring_buffer_per_cpu *cpu_buffer;
4456	struct buffer_page *reader;
4457	struct buffer_page *head_page;
4458	struct buffer_page *commit_page;
4459	struct buffer_page *curr_commit_page;
4460	unsigned commit;
4461	u64 curr_commit_ts;
4462	u64 commit_ts;
4463
4464	cpu_buffer = iter->cpu_buffer;
4465	reader = cpu_buffer->reader_page;
4466	head_page = cpu_buffer->head_page;
4467	commit_page = cpu_buffer->commit_page;
4468	commit_ts = commit_page->page->time_stamp;
4469
4470	/*
4471	 * When the writer goes across pages, it issues a cmpxchg which
4472	 * is a mb(), which will synchronize with the rmb here.
4473	 * (see rb_tail_page_update())
4474	 */
4475	smp_rmb();
4476	commit = rb_page_commit(commit_page);
4477	/* We want to make sure that the commit page doesn't change */
4478	smp_rmb();
4479
4480	/* Make sure commit page didn't change */
4481	curr_commit_page = READ_ONCE(cpu_buffer->commit_page);
4482	curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp);
4483
4484	/* If the commit page changed, then there's more data */
4485	if (curr_commit_page != commit_page ||
4486	    curr_commit_ts != commit_ts)
4487		return 0;
4488
4489	/* Still racy, as it may return a false positive, but that's OK */
4490	return ((iter->head_page == commit_page && iter->head >= commit) ||
4491		(iter->head_page == reader && commit_page == head_page &&
4492		 head_page->read == commit &&
4493		 iter->head == rb_page_commit(cpu_buffer->reader_page)));
4494}
4495EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
4496
4497static void
4498rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
4499		     struct ring_buffer_event *event)
4500{
4501	u64 delta;
4502
4503	switch (event->type_len) {
4504	case RINGBUF_TYPE_PADDING:
4505		return;
4506
4507	case RINGBUF_TYPE_TIME_EXTEND:
4508		delta = rb_event_time_stamp(event);
4509		cpu_buffer->read_stamp += delta;
4510		return;
4511
4512	case RINGBUF_TYPE_TIME_STAMP:
4513		delta = rb_event_time_stamp(event);
4514		delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp);
4515		cpu_buffer->read_stamp = delta;
4516		return;
4517
4518	case RINGBUF_TYPE_DATA:
4519		cpu_buffer->read_stamp += event->time_delta;
4520		return;
4521
4522	default:
4523		RB_WARN_ON(cpu_buffer, 1);
4524	}
4525	return;
4526}
4527
4528static void
4529rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
4530			  struct ring_buffer_event *event)
4531{
4532	u64 delta;
4533
4534	switch (event->type_len) {
4535	case RINGBUF_TYPE_PADDING:
4536		return;
4537
4538	case RINGBUF_TYPE_TIME_EXTEND:
4539		delta = rb_event_time_stamp(event);
4540		iter->read_stamp += delta;
4541		return;
4542
4543	case RINGBUF_TYPE_TIME_STAMP:
4544		delta = rb_event_time_stamp(event);
4545		delta = rb_fix_abs_ts(delta, iter->read_stamp);
4546		iter->read_stamp = delta;
4547		return;
4548
4549	case RINGBUF_TYPE_DATA:
4550		iter->read_stamp += event->time_delta;
4551		return;
4552
4553	default:
4554		RB_WARN_ON(iter->cpu_buffer, 1);
4555	}
4556	return;
4557}
4558
4559static struct buffer_page *
4560rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
4561{
4562	struct buffer_page *reader = NULL;
4563	unsigned long overwrite;
4564	unsigned long flags;
4565	int nr_loops = 0;
4566	int ret;
4567
4568	local_irq_save(flags);
4569	arch_spin_lock(&cpu_buffer->lock);
4570
4571 again:
4572	/*
4573	 * This should normally only loop twice. But because the
4574	 * start of the reader inserts an empty page, it causes
4575	 * a case where we will loop three times. There should be no
4576	 * reason to loop four times (that I know of).
4577	 */
4578	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
4579		reader = NULL;
4580		goto out;
4581	}
4582
4583	reader = cpu_buffer->reader_page;
4584
4585	/* If there's more to read, return this page */
4586	if (cpu_buffer->reader_page->read < rb_page_size(reader))
4587		goto out;
4588
4589	/* Never should we have an index greater than the size */
4590	if (RB_WARN_ON(cpu_buffer,
4591		       cpu_buffer->reader_page->read > rb_page_size(reader)))
4592		goto out;
4593
4594	/* check if we caught up to the tail */
4595	reader = NULL;
4596	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
4597		goto out;
4598
4599	/* Don't bother swapping if the ring buffer is empty */
4600	if (rb_num_of_entries(cpu_buffer) == 0)
4601		goto out;
4602
4603	/*
4604	 * Reset the reader page to size zero.
4605	 */
4606	local_set(&cpu_buffer->reader_page->write, 0);
4607	local_set(&cpu_buffer->reader_page->entries, 0);
4608	local_set(&cpu_buffer->reader_page->page->commit, 0);
4609	cpu_buffer->reader_page->real_end = 0;
4610
4611 spin:
4612	/*
4613	 * Splice the empty reader page into the list around the head.
4614	 */
4615	reader = rb_set_head_page(cpu_buffer);
4616	if (!reader)
4617		goto out;
4618	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
4619	cpu_buffer->reader_page->list.prev = reader->list.prev;
4620
4621	/*
4622	 * cpu_buffer->pages just needs to point to the buffer, it
4623	 *  has no specific buffer page to point to. Let's move it out
4624	 *  of our way so we don't accidentally swap it.
4625	 */
4626	cpu_buffer->pages = reader->list.prev;
4627
4628	/* The reader page will be pointing to the new head */
4629	rb_set_list_to_head(&cpu_buffer->reader_page->list);
4630
4631	/*
4632	 * We want to make sure we read the overruns after we set up our
4633	 * pointers to the next object. The writer side does a
4634	 * cmpxchg to cross pages which acts as the mb on the writer
4635	 * side. Note, the reader will constantly fail the swap
4636	 * while the writer is updating the pointers, so this
4637	 * guarantees that the overwrite recorded here is the one we
4638	 * want to compare with the last_overrun.
4639	 */
4640	smp_mb();
4641	overwrite = local_read(&(cpu_buffer->overrun));
4642
4643	/*
4644	 * Here's the tricky part.
4645	 *
4646	 * We need to move the pointer past the header page.
4647	 * But we can only do that if a writer is not currently
4648	 * moving it. The page before the header page has the
4649	 * flag bit '1' set if it is pointing to the page we want,
4650	 * but if the writer is in the process of moving it
4651	 * then it will be '2' or already moved '0'.
4652	 */
4653
4654	ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
4655
4656	/*
4657	 * If we did not convert it, then we must try again.
4658	 */
4659	if (!ret)
4660		goto spin;
4661
4662	/*
4663	 * Yay! We succeeded in replacing the page.
4664	 *
4665	 * Now make the new head point back to the reader page.
4666	 */
4667	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
4668	rb_inc_page(&cpu_buffer->head_page);
4669
4670	local_inc(&cpu_buffer->pages_read);
4671
4672	/* Finally update the reader page to the new head */
4673	cpu_buffer->reader_page = reader;
4674	cpu_buffer->reader_page->read = 0;
4675
4676	if (overwrite != cpu_buffer->last_overrun) {
4677		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
4678		cpu_buffer->last_overrun = overwrite;
4679	}
4680
4681	goto again;
4682
4683 out:
4684	/* Update the read_stamp on the first event */
4685	if (reader && reader->read == 0)
4686		cpu_buffer->read_stamp = reader->page->time_stamp;
4687
4688	arch_spin_unlock(&cpu_buffer->lock);
4689	local_irq_restore(flags);
4690
4691	/*
4692	 * The writer has preemption disabled, so wait for it. But not forever;
4693	 * although, 1 second is pretty much "forever".
4694	 */
4695#define USECS_WAIT	1000000
4696	for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) {
4697		/* If the write is past the end of page, a writer is still updating it */
4698		if (likely(!reader || rb_page_write(reader) <= BUF_PAGE_SIZE))
4699			break;
4700
4701		udelay(1);
4702
4703		/* Get the latest version of the reader write value */
4704		smp_rmb();
4705	}
4706
4707	/* The writer is not moving forward? Something is wrong */
4708	if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT))
4709		reader = NULL;
4710
4711	/*
4712	 * Make sure we see any padding after the write update
4713	 * (see rb_reset_tail())
4714	 */
4715	smp_rmb();
4716
4717
4718	return reader;
4719}
4720
4721static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
4722{
4723	struct ring_buffer_event *event;
4724	struct buffer_page *reader;
4725	unsigned length;
4726
4727	reader = rb_get_reader_page(cpu_buffer);
4728
4729	/* This function should not be called when buffer is empty */
4730	if (RB_WARN_ON(cpu_buffer, !reader))
4731		return;
4732
4733	event = rb_reader_event(cpu_buffer);
4734
4735	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
4736		cpu_buffer->read++;
4737
4738	rb_update_read_stamp(cpu_buffer, event);
4739
4740	length = rb_event_length(event);
4741	cpu_buffer->reader_page->read += length;
4742}
4743
4744static void rb_advance_iter(struct ring_buffer_iter *iter)
4745{
4746	struct ring_buffer_per_cpu *cpu_buffer;
4747
4748	cpu_buffer = iter->cpu_buffer;
4749
4750	/* If head == next_event then we need to jump to the next event */
4751	if (iter->head == iter->next_event) {
4752		/* If the event gets overwritten again, there's nothing to do */
4753		if (rb_iter_head_event(iter) == NULL)
4754			return;
4755	}
4756
4757	iter->head = iter->next_event;
4758
4759	/*
4760	 * Check if we are at the end of the buffer.
4761	 */
4762	if (iter->next_event >= rb_page_size(iter->head_page)) {
4763		/* discarded commits can make the page empty */
4764		if (iter->head_page == cpu_buffer->commit_page)
4765			return;
4766		rb_inc_iter(iter);
4767		return;
4768	}
4769
4770	rb_update_iter_read_stamp(iter, iter->event);
4771}
4772
4773static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
4774{
4775	return cpu_buffer->lost_events;
4776}
4777
4778static struct ring_buffer_event *
4779rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
4780	       unsigned long *lost_events)
4781{
4782	struct ring_buffer_event *event;
4783	struct buffer_page *reader;
4784	int nr_loops = 0;
4785
4786	if (ts)
4787		*ts = 0;
4788 again:
4789	/*
4790	 * We repeat when a time extend is encountered.
4791	 * Since the time extend is always attached to a data event,
4792	 * we should never loop more than once.
4793	 * (We never hit the following condition more than twice).
4794	 */
4795	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
4796		return NULL;
4797
4798	reader = rb_get_reader_page(cpu_buffer);
4799	if (!reader)
4800		return NULL;
4801
4802	event = rb_reader_event(cpu_buffer);
4803
4804	switch (event->type_len) {
4805	case RINGBUF_TYPE_PADDING:
4806		if (rb_null_event(event))
4807			RB_WARN_ON(cpu_buffer, 1);
4808		/*
4809		 * Because the writer could be discarding every
4810		 * event it creates (which would probably be bad),
4811		 * if we were to go back to "again" then we may never
4812		 * catch up, and will trigger the warn on, or lock
4813		 * the box. Return the padding, and we will release
4814		 * the current locks, and try again.
4815		 */
4816		return event;
4817
4818	case RINGBUF_TYPE_TIME_EXTEND:
4819		/* Internal data, OK to advance */
4820		rb_advance_reader(cpu_buffer);
4821		goto again;
4822
4823	case RINGBUF_TYPE_TIME_STAMP:
4824		if (ts) {
4825			*ts = rb_event_time_stamp(event);
4826			*ts = rb_fix_abs_ts(*ts, reader->page->time_stamp);
4827			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4828							 cpu_buffer->cpu, ts);
4829		}
4830		/* Internal data, OK to advance */
4831		rb_advance_reader(cpu_buffer);
4832		goto again;
4833
4834	case RINGBUF_TYPE_DATA:
4835		if (ts && !(*ts)) {
4836			*ts = cpu_buffer->read_stamp + event->time_delta;
4837			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4838							 cpu_buffer->cpu, ts);
4839		}
4840		if (lost_events)
4841			*lost_events = rb_lost_events(cpu_buffer);
4842		return event;
4843
4844	default:
4845		RB_WARN_ON(cpu_buffer, 1);
4846	}
4847
4848	return NULL;
4849}
4850EXPORT_SYMBOL_GPL(ring_buffer_peek);
4851
4852static struct ring_buffer_event *
4853rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
4854{
4855	struct trace_buffer *buffer;
4856	struct ring_buffer_per_cpu *cpu_buffer;
4857	struct ring_buffer_event *event;
4858	int nr_loops = 0;
4859
4860	if (ts)
4861		*ts = 0;
4862
4863	cpu_buffer = iter->cpu_buffer;
4864	buffer = cpu_buffer->buffer;
4865
4866	/*
4867	 * Check if someone performed a consuming read to
4868	 * the buffer. A consuming read invalidates the iterator
4869	 * and we need to reset the iterator in this case.
4870	 */
4871	if (unlikely(iter->cache_read != cpu_buffer->read ||
4872		     iter->cache_reader_page != cpu_buffer->reader_page))
4873		rb_iter_reset(iter);
4874
4875 again:
4876	if (ring_buffer_iter_empty(iter))
4877		return NULL;
4878
4879	/*
4880	 * As the writer can mess with what the iterator is trying
4881	 * to read, just give up if we fail to get an event after
4882	 * three tries. The iterator is not as reliable when reading
4883	 * the ring buffer with an active write as the consumer is.
4884	 * Do not warn if three failures are reached.
4885	 */
4886	if (++nr_loops > 3)
4887		return NULL;
4888
4889	if (rb_per_cpu_empty(cpu_buffer))
4890		return NULL;
4891
4892	if (iter->head >= rb_page_size(iter->head_page)) {
4893		rb_inc_iter(iter);
4894		goto again;
4895	}
4896
4897	event = rb_iter_head_event(iter);
4898	if (!event)
4899		goto again;
4900
4901	switch (event->type_len) {
4902	case RINGBUF_TYPE_PADDING:
4903		if (rb_null_event(event)) {
4904			rb_inc_iter(iter);
4905			goto again;
4906		}
4907		rb_advance_iter(iter);
4908		return event;
4909
4910	case RINGBUF_TYPE_TIME_EXTEND:
4911		/* Internal data, OK to advance */
4912		rb_advance_iter(iter);
4913		goto again;
4914
4915	case RINGBUF_TYPE_TIME_STAMP:
4916		if (ts) {
4917			*ts = rb_event_time_stamp(event);
4918			*ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp);
4919			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4920							 cpu_buffer->cpu, ts);
4921		}
4922		/* Internal data, OK to advance */
4923		rb_advance_iter(iter);
4924		goto again;
4925
4926	case RINGBUF_TYPE_DATA:
4927		if (ts && !(*ts)) {
4928			*ts = iter->read_stamp + event->time_delta;
4929			ring_buffer_normalize_time_stamp(buffer,
4930							 cpu_buffer->cpu, ts);
4931		}
4932		return event;
4933
4934	default:
4935		RB_WARN_ON(cpu_buffer, 1);
4936	}
4937
4938	return NULL;
4939}
4940EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
4941
4942static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
4943{
4944	if (likely(!in_nmi())) {
4945		raw_spin_lock(&cpu_buffer->reader_lock);
4946		return true;
4947	}
4948
4949	/*
4950	 * If an NMI die dumps out the content of the ring buffer, the
4951	 * trylock must be used to prevent a deadlock if the NMI
4952	 * preempted a task that holds the ring buffer locks. If
4953	 * we get the lock then all is fine, if not, then continue
4954	 * to do the read, but this can corrupt the ring buffer,
4955	 * so it must be permanently disabled from future writes.
4956	 * Reading from NMI is a oneshot deal.
4957	 */
4958	if (raw_spin_trylock(&cpu_buffer->reader_lock))
4959		return true;
4960
4961	/* Continue without locking, but disable the ring buffer */
4962	atomic_inc(&cpu_buffer->record_disabled);
4963	return false;
4964}
4965
4966static inline void
4967rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
4968{
4969	if (likely(locked))
4970		raw_spin_unlock(&cpu_buffer->reader_lock);
4971	return;
4972}
4973
4974/**
4975 * ring_buffer_peek - peek at the next event to be read
4976 * @buffer: The ring buffer to read
4977 * @cpu: The cpu to peek at
4978 * @ts: The timestamp counter of this event.
4979 * @lost_events: a variable to store if events were lost (may be NULL)
4980 *
4981 * This will return the event that will be read next, but does
4982 * not consume the data.
4983 */
4984struct ring_buffer_event *
4985ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
4986		 unsigned long *lost_events)
4987{
4988	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4989	struct ring_buffer_event *event;
4990	unsigned long flags;
4991	bool dolock;
4992
4993	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4994		return NULL;
4995
4996 again:
4997	local_irq_save(flags);
4998	dolock = rb_reader_lock(cpu_buffer);
4999	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
5000	if (event && event->type_len == RINGBUF_TYPE_PADDING)
5001		rb_advance_reader(cpu_buffer);
5002	rb_reader_unlock(cpu_buffer, dolock);
5003	local_irq_restore(flags);
5004
5005	if (event && event->type_len == RINGBUF_TYPE_PADDING)
5006		goto again;
5007
5008	return event;
5009}
5010
5011/** ring_buffer_iter_dropped - report if there are dropped events
5012 * @iter: The ring buffer iterator
5013 *
5014 * Returns true if there were dropped events since the last peek.
5015 */
5016bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter)
5017{
5018	bool ret = iter->missed_events != 0;
5019
5020	iter->missed_events = 0;
5021	return ret;
5022}
5023EXPORT_SYMBOL_GPL(ring_buffer_iter_dropped);
5024
5025/**
5026 * ring_buffer_iter_peek - peek at the next event to be read
5027 * @iter: The ring buffer iterator
5028 * @ts: The timestamp counter of this event.
5029 *
5030 * This will return the event that will be read next, but does
5031 * not increment the iterator.
5032 */
5033struct ring_buffer_event *
5034ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
5035{
5036	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5037	struct ring_buffer_event *event;
5038	unsigned long flags;
5039
5040 again:
5041	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5042	event = rb_iter_peek(iter, ts);
5043	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5044
5045	if (event && event->type_len == RINGBUF_TYPE_PADDING)
5046		goto again;
5047
5048	return event;
5049}
5050
5051/**
5052 * ring_buffer_consume - return an event and consume it
5053 * @buffer: The ring buffer to get the next event from
5054 * @cpu: the cpu to read the buffer from
5055 * @ts: a variable to store the timestamp (may be NULL)
5056 * @lost_events: a variable to store if events were lost (may be NULL)
5057 *
5058 * Returns the next event in the ring buffer, and that event is consumed.
5059 * Meaning, that sequential reads will keep returning a different event,
5060 * and eventually empty the ring buffer if the producer is slower.
5061 */
5062struct ring_buffer_event *
5063ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
5064		    unsigned long *lost_events)
5065{
5066	struct ring_buffer_per_cpu *cpu_buffer;
5067	struct ring_buffer_event *event = NULL;
5068	unsigned long flags;
5069	bool dolock;
5070
5071 again:
5072	/* might be called in atomic */
5073	preempt_disable();
5074
5075	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5076		goto out;
5077
5078	cpu_buffer = buffer->buffers[cpu];
5079	local_irq_save(flags);
5080	dolock = rb_reader_lock(cpu_buffer);
5081
5082	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
5083	if (event) {
5084		cpu_buffer->lost_events = 0;
5085		rb_advance_reader(cpu_buffer);
5086	}
5087
5088	rb_reader_unlock(cpu_buffer, dolock);
5089	local_irq_restore(flags);
5090
5091 out:
5092	preempt_enable();
5093
5094	if (event && event->type_len == RINGBUF_TYPE_PADDING)
5095		goto again;
5096
5097	return event;
5098}
5099EXPORT_SYMBOL_GPL(ring_buffer_consume);
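/*
 * A minimal sketch of draining one CPU's buffer with consuming reads.
 * The handler callback and helper are hypothetical and not part of
 * this file.
 */
static inline void example_drain_cpu(struct trace_buffer *buffer, int cpu,
				     void (*handle)(void *data, u64 ts))
{
	struct ring_buffer_event *event;
	unsigned long lost = 0;
	u64 ts;

	/* Each call returns the next unread event and consumes it */
	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost)) != NULL) {
		if (lost)
			pr_debug("cpu%d: %lu events were overwritten\n", cpu, lost);
		handle(ring_buffer_event_data(event), ts);
	}
}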
5100
5101/**
5102 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
5103 * @buffer: The ring buffer to read from
5104 * @cpu: The cpu buffer to iterate over
5105 * @flags: gfp flags to use for memory allocation
5106 *
5107 * This performs the initial preparations necessary to iterate
5108 * through the buffer.  Memory is allocated, buffer recording
5109 * is disabled, and the iterator pointer is returned to the caller.
5110 *
5111 * Disabling buffer recording prevents the reading from being
5112 * corrupted. This is not a consuming read, so a producer is not
5113 * expected.
5114 *
5115 * After a sequence of ring_buffer_read_prepare calls, the user is
5116 * expected to make at least one call to ring_buffer_read_prepare_sync.
5117 * Afterwards, ring_buffer_read_start is invoked to get things going
5118 * for real.
5119 *
5120 * This overall must be paired with ring_buffer_read_finish.
5121 */
5122struct ring_buffer_iter *
5123ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
5124{
5125	struct ring_buffer_per_cpu *cpu_buffer;
5126	struct ring_buffer_iter *iter;
5127
5128	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5129		return NULL;
5130
5131	iter = kzalloc(sizeof(*iter), flags);
5132	if (!iter)
5133		return NULL;
5134
5135	iter->event = kmalloc(BUF_MAX_DATA_SIZE, flags);
5136	if (!iter->event) {
5137		kfree(iter);
5138		return NULL;
5139	}
5140
5141	cpu_buffer = buffer->buffers[cpu];
5142
5143	iter->cpu_buffer = cpu_buffer;
5144
5145	atomic_inc(&cpu_buffer->resize_disabled);
5146
5147	return iter;
5148}
5149EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
5150
5151/**
5152 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
5153 *
5154 * All previously invoked ring_buffer_read_prepare calls to prepare
5155 * iterators will be synchronized.  Afterwards, ring_buffer_read_start
5156 * calls on those iterators are allowed.
5157 */
5158void
5159ring_buffer_read_prepare_sync(void)
5160{
5161	synchronize_rcu();
5162}
5163EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
5164
5165/**
5166 * ring_buffer_read_start - start a non consuming read of the buffer
5167 * @iter: The iterator returned by ring_buffer_read_prepare
5168 *
5169 * This finalizes the startup of an iteration through the buffer.
5170 * The iterator comes from a call to ring_buffer_read_prepare and
5171 * an intervening ring_buffer_read_prepare_sync must have been
5172 * performed.
5173 *
5174 * Must be paired with ring_buffer_read_finish.
5175 */
5176void
5177ring_buffer_read_start(struct ring_buffer_iter *iter)
5178{
5179	struct ring_buffer_per_cpu *cpu_buffer;
5180	unsigned long flags;
5181
5182	if (!iter)
5183		return;
5184
5185	cpu_buffer = iter->cpu_buffer;
5186
5187	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5188	arch_spin_lock(&cpu_buffer->lock);
5189	rb_iter_reset(iter);
5190	arch_spin_unlock(&cpu_buffer->lock);
5191	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5192}
5193EXPORT_SYMBOL_GPL(ring_buffer_read_start);
5194
5195/**
5196 * ring_buffer_read_finish - finish reading the iterator of the buffer
5197 * @iter: The iterator retrieved by ring_buffer_read_prepare
5198 *
5199 * This re-enables the recording to the buffer, and frees the
5200 * iterator.
5201 */
5202void
5203ring_buffer_read_finish(struct ring_buffer_iter *iter)
5204{
5205	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5206	unsigned long flags;
5207
5208	/*
5209	 * Ring buffer is disabled from recording, here's a good place
5210	 * to check the integrity of the ring buffer.
5211	 * Must prevent readers from trying to read, as the check
5212	 * clears the HEAD page and readers require it.
5213	 */
5214	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5215	rb_check_pages(cpu_buffer);
5216	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5217
5218	atomic_dec(&cpu_buffer->resize_disabled);
5219	kfree(iter->event);
5220	kfree(iter);
5221}
5222EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
5223
5224/**
5225 * ring_buffer_iter_advance - advance the iterator to the next location
5226 * @iter: The ring buffer iterator
5227 *
5228 * Move the location of the iterator such that the next read will
5229 * be the next location of the iterator.
5230 */
5231 void ring_buffer_iter_advance(struct ring_buffer_iter *iter)
5232 {
5233	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5234	unsigned long flags;
5235
5236	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5237
5238	rb_advance_iter(iter);
5239
5240	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5241}
5242EXPORT_SYMBOL_GPL(ring_buffer_iter_advance);
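/*
 * A minimal sketch of the non-consuming iterator sequence spelled out
 * above: prepare, sync, start, walk, finish. The handler callback and
 * helper are hypothetical and not part of this file.
 */
static inline void example_walk_cpu(struct trace_buffer *buffer, int cpu,
				    void (*handle)(void *data, u64 ts))
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
	if (!iter)
		return;

	/* One sync call covers all iterators prepared so far */
	ring_buffer_read_prepare_sync();
	ring_buffer_read_start(iter);

	while ((event = ring_buffer_iter_peek(iter, &ts)) != NULL) {
		handle(ring_buffer_event_data(event), ts);
		ring_buffer_iter_advance(iter);
	}

	/* Re-enables resizing and frees the iterator */
	ring_buffer_read_finish(iter);
}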
5243
5244/**
5245 * ring_buffer_size - return the size of the ring buffer (in bytes)
5246 * @buffer: The ring buffer.
5247 * @cpu: The CPU to get ring buffer size from.
5248 */
5249unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
5250{
5251	/*
5252	 * Earlier, this method returned
5253	 *	BUF_PAGE_SIZE * buffer->nr_pages
5254	 * Since the nr_pages field is now removed, we have converted this to
5255	 * return the per cpu buffer value.
5256	 */
5257	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5258		return 0;
5259
5260	return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
5261}
5262EXPORT_SYMBOL_GPL(ring_buffer_size);
5263
5264static void
5265rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
5266{
5267	rb_head_page_deactivate(cpu_buffer);
5268
5269	cpu_buffer->head_page
5270		= list_entry(cpu_buffer->pages, struct buffer_page, list);
5271	local_set(&cpu_buffer->head_page->write, 0);
5272	local_set(&cpu_buffer->head_page->entries, 0);
5273	local_set(&cpu_buffer->head_page->page->commit, 0);
5274
5275	cpu_buffer->head_page->read = 0;
5276
5277	cpu_buffer->tail_page = cpu_buffer->head_page;
5278	cpu_buffer->commit_page = cpu_buffer->head_page;
5279
5280	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
5281	INIT_LIST_HEAD(&cpu_buffer->new_pages);
5282	local_set(&cpu_buffer->reader_page->write, 0);
5283	local_set(&cpu_buffer->reader_page->entries, 0);
5284	local_set(&cpu_buffer->reader_page->page->commit, 0);
5285	cpu_buffer->reader_page->read = 0;
5286
5287	local_set(&cpu_buffer->entries_bytes, 0);
5288	local_set(&cpu_buffer->overrun, 0);
5289	local_set(&cpu_buffer->commit_overrun, 0);
5290	local_set(&cpu_buffer->dropped_events, 0);
5291	local_set(&cpu_buffer->entries, 0);
5292	local_set(&cpu_buffer->committing, 0);
5293	local_set(&cpu_buffer->commits, 0);
5294	local_set(&cpu_buffer->pages_touched, 0);
5295	local_set(&cpu_buffer->pages_lost, 0);
5296	local_set(&cpu_buffer->pages_read, 0);
5297	cpu_buffer->last_pages_touch = 0;
5298	cpu_buffer->shortest_full = 0;
5299	cpu_buffer->read = 0;
5300	cpu_buffer->read_bytes = 0;
5301
5302	rb_time_set(&cpu_buffer->write_stamp, 0);
5303	rb_time_set(&cpu_buffer->before_stamp, 0);
5304
5305	memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp));
5306
5307	cpu_buffer->lost_events = 0;
5308	cpu_buffer->last_overrun = 0;
5309
5310	rb_head_page_activate(cpu_buffer);
5311}
5312
5313/* Must have disabled the cpu buffer then done a synchronize_rcu */
5314static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
5315{
5316	unsigned long flags;
5317
5318	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5319
5320	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
5321		goto out;
5322
5323	arch_spin_lock(&cpu_buffer->lock);
5324
5325	rb_reset_cpu(cpu_buffer);
5326
5327	arch_spin_unlock(&cpu_buffer->lock);
5328
5329 out:
5330	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5331}
5332
5333/**
5334 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
5335 * @buffer: The ring buffer to reset a per cpu buffer of
5336 * @cpu: The CPU buffer to be reset
5337 */
5338void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
5339{
5340	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5341
5342	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5343		return;
5344
5345	/* prevent another thread from changing buffer sizes */
5346	mutex_lock(&buffer->mutex);
5347
5348	atomic_inc(&cpu_buffer->resize_disabled);
5349	atomic_inc(&cpu_buffer->record_disabled);
5350
5351	/* Make sure all commits have finished */
5352	synchronize_rcu();
5353
5354	reset_disabled_cpu_buffer(cpu_buffer);
5355
5356	atomic_dec(&cpu_buffer->record_disabled);
5357	atomic_dec(&cpu_buffer->resize_disabled);
5358
5359	mutex_unlock(&buffer->mutex);
5360}
5361EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
5362
5363/**
5364 * ring_buffer_reset_online_cpus - reset all online per-CPU buffers of a ring buffer
5365 * @buffer: The ring buffer to reset the per cpu buffers of
5367 */
5368void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
5369{
5370	struct ring_buffer_per_cpu *cpu_buffer;
5371	int cpu;
5372
5373	/* prevent another thread from changing buffer sizes */
5374	mutex_lock(&buffer->mutex);
5375
5376	for_each_online_buffer_cpu(buffer, cpu) {
5377		cpu_buffer = buffer->buffers[cpu];
5378
5379		atomic_inc(&cpu_buffer->resize_disabled);
5380		atomic_inc(&cpu_buffer->record_disabled);
5381	}
5382
5383	/* Make sure all commits have finished */
5384	synchronize_rcu();
5385
5386	for_each_online_buffer_cpu(buffer, cpu) {
5387		cpu_buffer = buffer->buffers[cpu];
5388
5389		reset_disabled_cpu_buffer(cpu_buffer);
5390
5391		atomic_dec(&cpu_buffer->record_disabled);
5392		atomic_dec(&cpu_buffer->resize_disabled);
5393	}
5394
5395	mutex_unlock(&buffer->mutex);
5396}
5397
5398/**
5399 * ring_buffer_reset - reset a ring buffer
5400 * @buffer: The ring buffer to reset all cpu buffers
5401 */
5402void ring_buffer_reset(struct trace_buffer *buffer)
5403{
5404	struct ring_buffer_per_cpu *cpu_buffer;
5405	int cpu;
5406
5407	/* prevent another thread from changing buffer sizes */
5408	mutex_lock(&buffer->mutex);
5409
5410	for_each_buffer_cpu(buffer, cpu) {
5411		cpu_buffer = buffer->buffers[cpu];
5412
5413		atomic_inc(&cpu_buffer->resize_disabled);
5414		atomic_inc(&cpu_buffer->record_disabled);
5415	}
5416
5417	/* Make sure all commits have finished */
5418	synchronize_rcu();
5419
5420	for_each_buffer_cpu(buffer, cpu) {
5421		cpu_buffer = buffer->buffers[cpu];
5422
5423		reset_disabled_cpu_buffer(cpu_buffer);
5424
5425		atomic_dec(&cpu_buffer->record_disabled);
5426		atomic_dec(&cpu_buffer->resize_disabled);
5427	}
5428
5429	mutex_unlock(&buffer->mutex);
5430}
5431EXPORT_SYMBOL_GPL(ring_buffer_reset);
5432
5433/**
5434 * ring_buffer_empty - is the ring buffer empty?
5435 * @buffer: The ring buffer to test
5436 */
5437bool ring_buffer_empty(struct trace_buffer *buffer)
5438{
5439	struct ring_buffer_per_cpu *cpu_buffer;
5440	unsigned long flags;
5441	bool dolock;
5442	int cpu;
5443	int ret;
5444
5445	/* yes this is racy, but if you don't like the race, lock the buffer */
5446	for_each_buffer_cpu(buffer, cpu) {
5447		cpu_buffer = buffer->buffers[cpu];
5448		local_irq_save(flags);
5449		dolock = rb_reader_lock(cpu_buffer);
5450		ret = rb_per_cpu_empty(cpu_buffer);
5451		rb_reader_unlock(cpu_buffer, dolock);
5452		local_irq_restore(flags);
5453
5454		if (!ret)
5455			return false;
5456	}
5457
5458	return true;
5459}
5460EXPORT_SYMBOL_GPL(ring_buffer_empty);
5461
5462/**
5463 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
5464 * @buffer: The ring buffer
5465 * @cpu: The CPU buffer to test
5466 */
5467bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
5468{
5469	struct ring_buffer_per_cpu *cpu_buffer;
5470	unsigned long flags;
5471	bool dolock;
5472	int ret;
5473
5474	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5475		return true;
5476
5477	cpu_buffer = buffer->buffers[cpu];
5478	local_irq_save(flags);
5479	dolock = rb_reader_lock(cpu_buffer);
5480	ret = rb_per_cpu_empty(cpu_buffer);
5481	rb_reader_unlock(cpu_buffer, dolock);
5482	local_irq_restore(flags);
5483
5484	return ret;
5485}
5486EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
5487
5488#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
5489/**
5490 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
5491 * @buffer_a: One buffer to swap with
5492 * @buffer_b: The other buffer to swap with
5493 * @cpu: the CPU of the buffers to swap
5494 *
5495 * This function is useful for tracers that want to take a "snapshot"
5496 * of a CPU buffer and have another backup buffer lying around.
5497 * It is expected that the tracer handles the cpu buffer not being
5498 * used at the moment.
5499 */
5500int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
5501			 struct trace_buffer *buffer_b, int cpu)
5502{
5503	struct ring_buffer_per_cpu *cpu_buffer_a;
5504	struct ring_buffer_per_cpu *cpu_buffer_b;
5505	int ret = -EINVAL;
5506
5507	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
5508	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
5509		goto out;
5510
5511	cpu_buffer_a = buffer_a->buffers[cpu];
5512	cpu_buffer_b = buffer_b->buffers[cpu];
5513
5514	/* At least make sure the two buffers are somewhat the same */
5515	if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
5516		goto out;
5517
5518	ret = -EAGAIN;
5519
5520	if (atomic_read(&buffer_a->record_disabled))
5521		goto out;
5522
5523	if (atomic_read(&buffer_b->record_disabled))
5524		goto out;
5525
5526	if (atomic_read(&cpu_buffer_a->record_disabled))
5527		goto out;
5528
5529	if (atomic_read(&cpu_buffer_b->record_disabled))
5530		goto out;
5531
5532	/*
5533	 * We can't do a synchronize_rcu here because this
5534	 * function can be called in atomic context.
5535	 * Normally this will be called from the same CPU as cpu.
5536	 * If not it's up to the caller to protect this.
5537	 */
5538	atomic_inc(&cpu_buffer_a->record_disabled);
5539	atomic_inc(&cpu_buffer_b->record_disabled);
5540
5541	ret = -EBUSY;
5542	if (local_read(&cpu_buffer_a->committing))
5543		goto out_dec;
5544	if (local_read(&cpu_buffer_b->committing))
5545		goto out_dec;
5546
5547	buffer_a->buffers[cpu] = cpu_buffer_b;
5548	buffer_b->buffers[cpu] = cpu_buffer_a;
5549
5550	cpu_buffer_b->buffer = buffer_a;
5551	cpu_buffer_a->buffer = buffer_b;
5552
5553	ret = 0;
5554
5555out_dec:
5556	atomic_dec(&cpu_buffer_a->record_disabled);
5557	atomic_dec(&cpu_buffer_b->record_disabled);
5558out:
5559	return ret;
5560}
5561EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
5562#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
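/*
 * Editor's note: illustrative sketch, not part of the original file.
 * It shows the "snapshot" usage described in the kernel-doc above: a
 * tracer keeps a spare buffer of the same size and swaps one CPU's
 * buffer into it.  take_cpu_snapshot(), @main and @spare are made-up
 * names; ring_buffer_swap_cpu() is the real API.
 */
#if 0
static int take_cpu_snapshot(struct trace_buffer *main,
			     struct trace_buffer *spare, int cpu)
{
	int ret;

	/* Both buffers must have the same number of pages on @cpu */
	ret = ring_buffer_swap_cpu(main, spare, cpu);
	if (ret < 0)
		return ret;	/* -EINVAL, -EAGAIN or -EBUSY; try again later */

	/* @spare now holds the snapshot while @main keeps recording */
	return 0;
}
#endif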
5563
5564/**
5565 * ring_buffer_alloc_read_page - allocate a page to read from buffer
5566 * @buffer: the buffer to allocate for.
5567 * @cpu: the cpu buffer to allocate.
5568 *
5569 * This function is used in conjunction with ring_buffer_read_page.
5570 * When reading a full page from the ring buffer, these functions
5571 * can be used to speed up the process. The calling function should
5572 * allocate a few pages first with this function. Then when it
5573 * needs to get pages from the ring buffer, it passes the result
5574 * of this function into ring_buffer_read_page, which will swap
5575 * the page that was allocated, with the read page of the buffer.
5576 *
5577 * Returns:
5578 *  The page allocated, or ERR_PTR
5579 */
5580void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
5581{
5582	struct ring_buffer_per_cpu *cpu_buffer;
5583	struct buffer_data_page *bpage = NULL;
5584	unsigned long flags;
5585	struct page *page;
5586
5587	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5588		return ERR_PTR(-ENODEV);
5589
5590	cpu_buffer = buffer->buffers[cpu];
5591	local_irq_save(flags);
5592	arch_spin_lock(&cpu_buffer->lock);
5593
5594	if (cpu_buffer->free_page) {
5595		bpage = cpu_buffer->free_page;
5596		cpu_buffer->free_page = NULL;
5597	}
5598
5599	arch_spin_unlock(&cpu_buffer->lock);
5600	local_irq_restore(flags);
5601
5602	if (bpage)
5603		goto out;
5604
5605	page = alloc_pages_node(cpu_to_node(cpu),
5606				GFP_KERNEL | __GFP_NORETRY, 0);
5607	if (!page)
5608		return ERR_PTR(-ENOMEM);
5609
5610	bpage = page_address(page);
5611
5612 out:
5613	rb_init_page(bpage);
5614
5615	return bpage;
5616}
5617EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
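/*
 * Editor's note: illustrative sketch, not part of the original file.
 * The kernel-doc above suggests allocating a few read pages up front;
 * this shows that pattern.  NR_READ_PAGES, read_pages[] and
 * prealloc_read_pages() are made-up names; the
 * ring_buffer_*_read_page() calls are the real APIs from this file.
 */
#if 0
#define NR_READ_PAGES	4

static void *read_pages[NR_READ_PAGES];

static int prealloc_read_pages(struct trace_buffer *buffer, int cpu)
{
	int i;

	for (i = 0; i < NR_READ_PAGES; i++) {
		read_pages[i] = ring_buffer_alloc_read_page(buffer, cpu);
		if (IS_ERR(read_pages[i])) {
			int err = PTR_ERR(read_pages[i]);

			while (--i >= 0)
				ring_buffer_free_read_page(buffer, cpu,
							   read_pages[i]);
			return err;
		}
	}
	return 0;
}
#endif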
5618
5619/**
5620 * ring_buffer_free_read_page - free an allocated read page
5621 * @buffer: the buffer the page was allocated for
5622 * @cpu: the cpu buffer the page came from
5623 * @data: the page to free
5624 *
5625 * Free a page allocated from ring_buffer_alloc_read_page.
5626 */
5627void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data)
5628{
5629	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5630	struct buffer_data_page *bpage = data;
5631	struct page *page = virt_to_page(bpage);
5632	unsigned long flags;
5633
5634	/* If the page is still in use someplace else, we can't reuse it */
5635	if (page_ref_count(page) > 1)
5636		goto out;
5637
5638	local_irq_save(flags);
5639	arch_spin_lock(&cpu_buffer->lock);
5640
5641	if (!cpu_buffer->free_page) {
5642		cpu_buffer->free_page = bpage;
5643		bpage = NULL;
5644	}
5645
5646	arch_spin_unlock(&cpu_buffer->lock);
5647	local_irq_restore(flags);
5648
5649 out:
5650	free_page((unsigned long)bpage);
5651}
5652EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
5653
5654/**
5655 * ring_buffer_read_page - extract a page from the ring buffer
5656 * @buffer: buffer to extract from
5657 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
5658 * @len: amount to extract
5659 * @cpu: the cpu of the buffer to extract
5660 * @full: should the extraction only happen when the page is full.
5661 *
5662 * This function will pull out a page from the ring buffer and consume it.
5663 * @data_page must be the address of the variable that was returned
5664 * from ring_buffer_alloc_read_page. This is because the page might be used
5665 * to swap with a page in the ring buffer.
5666 *
5667 * for example:
5668 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
5669 *	if (IS_ERR(rpage))
5670 *		return PTR_ERR(rpage);
5671 *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
5672 *	if (ret >= 0)
5673 *		process_page(rpage, ret);
5674 *
5675 * When @full is set, the function will not return true unless
5676 * the writer is off the reader page.
5677 *
5678 * Note: it is up to the calling functions to handle sleeps and wakeups.
5679 *  The ring buffer can be used anywhere in the kernel and can not
5680 *  blindly call wake_up. The layer that uses the ring buffer must be
5681 *  responsible for that.
5682 *
5683 * Returns:
5684 *  >=0 if data has been transferred, returns the offset of consumed data.
5685 *  <0 if no data has been transferred.
5686 */
5687int ring_buffer_read_page(struct trace_buffer *buffer,
5688			  void **data_page, size_t len, int cpu, int full)
5689{
5690	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5691	struct ring_buffer_event *event;
5692	struct buffer_data_page *bpage;
5693	struct buffer_page *reader;
5694	unsigned long missed_events;
5695	unsigned long flags;
5696	unsigned int commit;
5697	unsigned int read;
5698	u64 save_timestamp;
5699	int ret = -1;
5700
5701	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5702		goto out;
5703
5704	/*
5705	 * If len is not big enough to hold the page header, then
5706	 * we can not copy anything.
5707	 */
5708	if (len <= BUF_PAGE_HDR_SIZE)
5709		goto out;
5710
5711	len -= BUF_PAGE_HDR_SIZE;
5712
5713	if (!data_page)
5714		goto out;
5715
5716	bpage = *data_page;
5717	if (!bpage)
5718		goto out;
5719
5720	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5721
5722	reader = rb_get_reader_page(cpu_buffer);
5723	if (!reader)
5724		goto out_unlock;
5725
5726	event = rb_reader_event(cpu_buffer);
5727
5728	read = reader->read;
5729	commit = rb_page_commit(reader);
5730
5731	/* Check if any events were dropped */
5732	missed_events = cpu_buffer->lost_events;
5733
5734	/*
5735	 * If this page has been partially read or
5736	 * if len is not big enough to read the rest of the page or
5737	 * a writer is still on the page, then
5738	 * we must copy the data from the page to the buffer.
5739	 * Otherwise, we can simply swap the page with the one passed in.
5740	 */
5741	if (read || (len < (commit - read)) ||
5742	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
5743		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
5744		unsigned int rpos = read;
5745		unsigned int pos = 0;
5746		unsigned int size;
5747
5748		/*
5749		 * If a full page is expected, this can still be returned
5750		 * if there's been a previous partial read and the
5751		 * rest of the page can be read and the commit page is off
5752		 * the reader page.
5753		 */
5754		if (full &&
5755		    (!read || (len < (commit - read)) ||
5756		     cpu_buffer->reader_page == cpu_buffer->commit_page))
5757			goto out_unlock;
5758
5759		if (len > (commit - read))
5760			len = (commit - read);
5761
5762		/* Always keep the time extend and data together */
5763		size = rb_event_ts_length(event);
5764
5765		if (len < size)
5766			goto out_unlock;
5767
5768		/* save the current timestamp, since the user will need it */
5769		save_timestamp = cpu_buffer->read_stamp;
5770
5771		/* Need to copy one event at a time */
5772		do {
5773			/* We need the size of one event, because
5774			 * rb_advance_reader only advances by one event,
5775			 * whereas rb_event_ts_length may include the size of
5776			 * one or two events.
5777			 * We have already ensured there's enough space if this
5778			 * is a time extend. */
5779			size = rb_event_length(event);
5780			memcpy(bpage->data + pos, rpage->data + rpos, size);
5781
5782			len -= size;
5783
5784			rb_advance_reader(cpu_buffer);
5785			rpos = reader->read;
5786			pos += size;
5787
5788			if (rpos >= commit)
5789				break;
5790
5791			event = rb_reader_event(cpu_buffer);
5792			/* Always keep the time extend and data together */
5793			size = rb_event_ts_length(event);
5794		} while (len >= size);
5795
5796		/* update bpage */
5797		local_set(&bpage->commit, pos);
5798		bpage->time_stamp = save_timestamp;
5799
5800		/* we copied everything to the beginning */
5801		read = 0;
5802	} else {
5803		/* update the entry counter */
5804		cpu_buffer->read += rb_page_entries(reader);
5805		cpu_buffer->read_bytes += BUF_PAGE_SIZE;
5806
5807		/* swap the pages */
5808		rb_init_page(bpage);
5809		bpage = reader->page;
5810		reader->page = *data_page;
5811		local_set(&reader->write, 0);
5812		local_set(&reader->entries, 0);
5813		reader->read = 0;
5814		*data_page = bpage;
5815
5816		/*
5817		 * Use the real_end for the data size,
5818		 * This gives us a chance to store the lost events
5819		 * on the page.
5820		 */
5821		if (reader->real_end)
5822			local_set(&bpage->commit, reader->real_end);
5823	}
5824	ret = read;
5825
5826	cpu_buffer->lost_events = 0;
5827
5828	commit = local_read(&bpage->commit);
5829	/*
5830	 * Set a flag in the commit field if we lost events
5831	 */
5832	if (missed_events) {
5833		/* If there is room at the end of the page to save the
5834		 * missed events, then record it there.
5835		 */
5836		if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
5837			memcpy(&bpage->data[commit], &missed_events,
5838			       sizeof(missed_events));
5839			local_add(RB_MISSED_STORED, &bpage->commit);
5840			commit += sizeof(missed_events);
5841		}
5842		local_add(RB_MISSED_EVENTS, &bpage->commit);
5843	}
5844
5845	/*
5846	 * This page may be off to user land. Zero it out here.
5847	 */
5848	if (commit < BUF_PAGE_SIZE)
5849		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
5850
5851 out_unlock:
5852	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5853
5854 out:
5855	return ret;
5856}
5857EXPORT_SYMBOL_GPL(ring_buffer_read_page);
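/*
 * Editor's note: illustrative sketch, not part of the original file.
 * It expands the usage example in the kernel-doc above into a small
 * drain loop: allocate one read page, repeatedly swap/copy data out of
 * the ring buffer, then hand the page back.  drain_cpu_buffer() and
 * process_page() are made-up names; the ring_buffer_*_read_page()
 * calls are the real APIs from this file.
 */
#if 0
static void drain_cpu_buffer(struct trace_buffer *buffer, int cpu)
{
	void *rpage;
	int ret;

	rpage = ring_buffer_alloc_read_page(buffer, cpu);
	if (IS_ERR(rpage))
		return;

	/* Keep pulling pages until the buffer has nothing left */
	while ((ret = ring_buffer_read_page(buffer, &rpage,
					    PAGE_SIZE, cpu, 0)) >= 0)
		process_page(rpage, ret);	/* ret = offset of consumed data */

	ring_buffer_free_read_page(buffer, cpu, rpage);
}
#endif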
5858
5859/*
5860 * We only allocate new buffers, never free them if the CPU goes down.
5861 * If we were to free the buffer, then the user would lose any trace that was in
5862 * the buffer.
5863 */
5864int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
5865{
5866	struct trace_buffer *buffer;
5867	long nr_pages_same;
5868	int cpu_i;
5869	unsigned long nr_pages;
5870
5871	buffer = container_of(node, struct trace_buffer, node);
5872	if (cpumask_test_cpu(cpu, buffer->cpumask))
5873		return 0;
5874
5875	nr_pages = 0;
5876	nr_pages_same = 1;
5877	/* check if all cpu sizes are same */
5878	for_each_buffer_cpu(buffer, cpu_i) {
5879		/* fill in the size from first enabled cpu */
5880		if (nr_pages == 0)
5881			nr_pages = buffer->buffers[cpu_i]->nr_pages;
5882		if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
5883			nr_pages_same = 0;
5884			break;
5885		}
5886	}
5887	/* allocate minimum pages, user can later expand it */
5888	if (!nr_pages_same)
5889		nr_pages = 2;
5890	buffer->buffers[cpu] =
5891		rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
5892	if (!buffer->buffers[cpu]) {
5893		WARN(1, "failed to allocate ring buffer on CPU %u\n",
5894		     cpu);
5895		return -ENOMEM;
5896	}
5897	smp_wmb();
5898	cpumask_set_cpu(cpu, buffer->cpumask);
5899	return 0;
5900}
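/*
 * Editor's note: illustrative sketch, not part of the original file.
 * trace_rb_cpu_prepare() is a CPU-hotplug "prepare" callback; the
 * tracing core registers it roughly like this (see kernel/trace/trace.c
 * for the real registration), and each ring buffer is then added as an
 * instance so the callback runs for it when a CPU comes online.  The
 * call sites below are a sketch, not a verbatim copy.
 */
#if 0
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:prepare",
				      trace_rb_cpu_prepare, NULL);

	/* later, for each ring buffer that should grow with new CPUs: */
	cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
#endif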
5901
5902#ifdef CONFIG_RING_BUFFER_STARTUP_TEST
5903/*
5904 * This is a basic integrity check of the ring buffer.
5905 * Late in the boot cycle this test will run when configured in.
5906 * It will kick off a thread per CPU that will go into a loop
5907 * writing to the per cpu ring buffer various sizes of data.
5908 * Some of the data will be large items, some small.
5909 *
5910 * Another thread is created that goes into a spin, sending out
5911 * IPIs to the other CPUs to also write into the ring buffer.
5912 * This is to test the nesting ability of the buffer.
5913 *
5914 * Basic stats are recorded and reported. If something in the
5915 * ring buffer should happen that's not expected, a big warning
5916 * is displayed and all ring buffers are disabled.
5917 */
5918static struct task_struct *rb_threads[NR_CPUS] __initdata;
5919
5920struct rb_test_data {
5921	struct trace_buffer *buffer;
5922	unsigned long		events;
5923	unsigned long		bytes_written;
5924	unsigned long		bytes_alloc;
5925	unsigned long		bytes_dropped;
5926	unsigned long		events_nested;
5927	unsigned long		bytes_written_nested;
5928	unsigned long		bytes_alloc_nested;
5929	unsigned long		bytes_dropped_nested;
5930	int			min_size_nested;
5931	int			max_size_nested;
5932	int			max_size;
5933	int			min_size;
5934	int			cpu;
5935	int			cnt;
5936};
5937
5938static struct rb_test_data rb_data[NR_CPUS] __initdata;
5939
5940/* 1 meg per cpu */
5941#define RB_TEST_BUFFER_SIZE	1048576
5942
5943static char rb_string[] __initdata =
5944	"abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
5945	"?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
5946	"!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
5947
5948static bool rb_test_started __initdata;
5949
5950struct rb_item {
5951	int size;
5952	char str[];
5953};
5954
5955static __init int rb_write_something(struct rb_test_data *data, bool nested)
5956{
5957	struct ring_buffer_event *event;
5958	struct rb_item *item;
5959	bool started;
5960	int event_len;
5961	int size;
5962	int len;
5963	int cnt;
5964
5965	/* Have nested writes different than what is written */
5966	cnt = data->cnt + (nested ? 27 : 0);
5967
5968	/* Multiply cnt by ~e, to make some unique increment */
5969	size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
5970
5971	len = size + sizeof(struct rb_item);
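	/*
	 * Editor's note (worked example, not in the original): 68/25 is
	 * roughly e (~2.72), so e.g. cnt == 10 gives size = 680/25 = 27
	 * bytes of payload (modulo the test string length), and len adds
	 * the struct rb_item header on top of that.
	 */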
5972
5973	started = rb_test_started;
5974	/* read rb_test_started before checking buffer enabled */
5975	smp_rmb();
5976
5977	event = ring_buffer_lock_reserve(data->buffer, len);
5978	if (!event) {
5979		/* Ignore dropped events before test starts. */
5980		if (started) {
5981			if (nested)
5982				data->bytes_dropped_nested += len;
5983			else
5984				data->bytes_dropped += len;
5985		}
5986		return len;
5987	}
5988
5989	event_len = ring_buffer_event_length(event);
5990
5991	if (RB_WARN_ON(data->buffer, event_len < len))
5992		goto out;
5993
5994	item = ring_buffer_event_data(event);
5995	item->size = size;
5996	memcpy(item->str, rb_string, size);
5997
5998	if (nested) {
5999		data->bytes_alloc_nested += event_len;
6000		data->bytes_written_nested += len;
6001		data->events_nested++;
6002		if (!data->min_size_nested || len < data->min_size_nested)
6003			data->min_size_nested = len;
6004		if (len > data->max_size_nested)
6005			data->max_size_nested = len;
6006	} else {
6007		data->bytes_alloc += event_len;
6008		data->bytes_written += len;
6009		data->events++;
6010		if (!data->min_size || len < data->min_size)
6011			data->min_size = len;
6012		if (len > data->max_size)
6013			data->max_size = len;
6014	}
6015
6016 out:
6017	ring_buffer_unlock_commit(data->buffer);
6018
6019	return 0;
6020}
6021
6022static __init int rb_test(void *arg)
6023{
6024	struct rb_test_data *data = arg;
6025
6026	while (!kthread_should_stop()) {
6027		rb_write_something(data, false);
6028		data->cnt++;
6029
6030		set_current_state(TASK_INTERRUPTIBLE);
6031		/* Now sleep between a min of 100-300us and a max of 1ms */
6032		usleep_range(((data->cnt % 3) + 1) * 100, 1000);
6033	}
6034
6035	return 0;
6036}
6037
6038static __init void rb_ipi(void *ignore)
6039{
6040	struct rb_test_data *data;
6041	int cpu = smp_processor_id();
6042
6043	data = &rb_data[cpu];
6044	rb_write_something(data, true);
6045}
6046
6047static __init int rb_hammer_test(void *arg)
6048{
6049	while (!kthread_should_stop()) {
6050
6051		/* Send an IPI to all cpus to write data! */
6052		smp_call_function(rb_ipi, NULL, 1);
6053		/* No sleep, but for non preempt, let others run */
6054		schedule();
6055	}
6056
6057	return 0;
6058}
6059
6060static __init int test_ringbuffer(void)
6061{
6062	struct task_struct *rb_hammer;
6063	struct trace_buffer *buffer;
6064	int cpu;
6065	int ret = 0;
6066
6067	if (security_locked_down(LOCKDOWN_TRACEFS)) {
6068		pr_warn("Lockdown is enabled, skipping ring buffer tests\n");
6069		return 0;
6070	}
6071
6072	pr_info("Running ring buffer tests...\n");
6073
6074	buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
6075	if (WARN_ON(!buffer))
6076		return 0;
6077
6078	/* Disable buffer so that threads can't write to it yet */
6079	ring_buffer_record_off(buffer);
6080
6081	for_each_online_cpu(cpu) {
6082		rb_data[cpu].buffer = buffer;
6083		rb_data[cpu].cpu = cpu;
6084		rb_data[cpu].cnt = cpu;
6085		rb_threads[cpu] = kthread_run_on_cpu(rb_test, &rb_data[cpu],
6086						     cpu, "rbtester/%u");
6087		if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
6088			pr_cont("FAILED\n");
6089			ret = PTR_ERR(rb_threads[cpu]);
6090			goto out_free;
6091		}
6092	}
6093
6094	/* Now create the rb hammer! */
6095	rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
6096	if (WARN_ON(IS_ERR(rb_hammer))) {
6097		pr_cont("FAILED\n");
6098		ret = PTR_ERR(rb_hammer);
6099		goto out_free;
6100	}
6101
6102	ring_buffer_record_on(buffer);
6103	/*
6104	 * Show buffer is enabled before setting rb_test_started.
6105	 * Yes there's a small race window where events could be
6106 * dropped and the thread won't catch it. But when a ring
6107	 * buffer gets enabled, there will always be some kind of
6108	 * delay before other CPUs see it. Thus, we don't care about
6109	 * those dropped events. We care about events dropped after
6110	 * the threads see that the buffer is active.
6111	 */
6112	smp_wmb();
6113	rb_test_started = true;
6114
6115	set_current_state(TASK_INTERRUPTIBLE);
6116	/* Just run for 10 seconds */;
6117	schedule_timeout(10 * HZ);
6118
6119	kthread_stop(rb_hammer);
6120
6121 out_free:
6122	for_each_online_cpu(cpu) {
6123		if (!rb_threads[cpu])
6124			break;
6125		kthread_stop(rb_threads[cpu]);
6126	}
6127	if (ret) {
6128		ring_buffer_free(buffer);
6129		return ret;
6130	}
6131
6132	/* Report! */
6133	pr_info("finished\n");
6134	for_each_online_cpu(cpu) {
6135		struct ring_buffer_event *event;
6136		struct rb_test_data *data = &rb_data[cpu];
6137		struct rb_item *item;
6138		unsigned long total_events;
6139		unsigned long total_dropped;
6140		unsigned long total_written;
6141		unsigned long total_alloc;
6142		unsigned long total_read = 0;
6143		unsigned long total_size = 0;
6144		unsigned long total_len = 0;
6145		unsigned long total_lost = 0;
6146		unsigned long lost;
6147		int big_event_size;
6148		int small_event_size;
6149
6150		ret = -1;
6151
6152		total_events = data->events + data->events_nested;
6153		total_written = data->bytes_written + data->bytes_written_nested;
6154		total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
6155		total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
6156
6157		big_event_size = data->max_size + data->max_size_nested;
6158		small_event_size = data->min_size + data->min_size_nested;
6159
6160		pr_info("CPU %d:\n", cpu);
6161		pr_info("              events:    %ld\n", total_events);
6162		pr_info("       dropped bytes:    %ld\n", total_dropped);
6163		pr_info("       alloced bytes:    %ld\n", total_alloc);
6164		pr_info("       written bytes:    %ld\n", total_written);
6165		pr_info("       biggest event:    %d\n", big_event_size);
6166		pr_info("      smallest event:    %d\n", small_event_size);
6167
6168		if (RB_WARN_ON(buffer, total_dropped))
6169			break;
6170
6171		ret = 0;
6172
6173		while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
6174			total_lost += lost;
6175			item = ring_buffer_event_data(event);
6176			total_len += ring_buffer_event_length(event);
6177			total_size += item->size + sizeof(struct rb_item);
6178			if (memcmp(&item->str[0], rb_string, item->size) != 0) {
6179				pr_info("FAILED!\n");
6180				pr_info("buffer had: %.*s\n", item->size, item->str);
6181				pr_info("expected:   %.*s\n", item->size, rb_string);
6182				RB_WARN_ON(buffer, 1);
6183				ret = -1;
6184				break;
6185			}
6186			total_read++;
6187		}
6188		if (ret)
6189			break;
6190
6191		ret = -1;
6192
6193		pr_info("         read events:   %ld\n", total_read);
6194		pr_info("         lost events:   %ld\n", total_lost);
6195		pr_info("        total events:   %ld\n", total_lost + total_read);
6196		pr_info("  recorded len bytes:   %ld\n", total_len);
6197		pr_info(" recorded size bytes:   %ld\n", total_size);
6198		if (total_lost) {
6199			pr_info(" With dropped events, record len and size may not match\n"
6200				" alloced and written from above\n");
6201		} else {
6202			if (RB_WARN_ON(buffer, total_len != total_alloc ||
6203				       total_size != total_written))
6204				break;
6205		}
6206		if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
6207			break;
6208
6209		ret = 0;
6210	}
6211	if (!ret)
6212		pr_info("Ring buffer PASSED!\n");
6213
6214	ring_buffer_free(buffer);
6215	return 0;
6216}
6217
6218late_initcall(test_ringbuffer);
6219#endif /* CONFIG_RING_BUFFER_STARTUP_TEST */
v4.6
 
   1/*
   2 * Generic ring buffer
   3 *
   4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
   5 */
 
   6#include <linux/trace_events.h>
   7#include <linux/ring_buffer.h>
   8#include <linux/trace_clock.h>
 
   9#include <linux/trace_seq.h>
  10#include <linux/spinlock.h>
  11#include <linux/irq_work.h>
 
  12#include <linux/uaccess.h>
  13#include <linux/hardirq.h>
  14#include <linux/kthread.h>	/* for self test */
  15#include <linux/kmemcheck.h>
  16#include <linux/module.h>
  17#include <linux/percpu.h>
  18#include <linux/mutex.h>
  19#include <linux/delay.h>
  20#include <linux/slab.h>
  21#include <linux/init.h>
  22#include <linux/hash.h>
  23#include <linux/list.h>
  24#include <linux/cpu.h>
 
  25
  26#include <asm/local.h>
  27
 
 
 
 
 
 
 
 
  28static void update_pages_handler(struct work_struct *work);
  29
  30/*
  31 * The ring buffer header is special. We must manually up keep it.
  32 */
  33int ring_buffer_print_entry_header(struct trace_seq *s)
  34{
  35	trace_seq_puts(s, "# compressed entry header\n");
  36	trace_seq_puts(s, "\ttype_len    :    5 bits\n");
  37	trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
  38	trace_seq_puts(s, "\tarray       :   32 bits\n");
  39	trace_seq_putc(s, '\n');
  40	trace_seq_printf(s, "\tpadding     : type == %d\n",
  41			 RINGBUF_TYPE_PADDING);
  42	trace_seq_printf(s, "\ttime_extend : type == %d\n",
  43			 RINGBUF_TYPE_TIME_EXTEND);
 
 
  44	trace_seq_printf(s, "\tdata max type_len  == %d\n",
  45			 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
  46
  47	return !trace_seq_has_overflowed(s);
  48}
  49
  50/*
  51 * The ring buffer is made up of a list of pages. A separate list of pages is
  52 * allocated for each CPU. A writer may only write to a buffer that is
  53 * associated with the CPU it is currently executing on.  A reader may read
  54 * from any per cpu buffer.
  55 *
  56 * The reader is special. For each per cpu buffer, the reader has its own
  57 * reader page. When a reader has read the entire reader page, this reader
  58 * page is swapped with another page in the ring buffer.
  59 *
  60 * Now, as long as the writer is off the reader page, the reader can do what
  61 * ever it wants with that page. The writer will never write to that page
  62 * again (as long as it is out of the ring buffer).
  63 *
  64 * Here's some silly ASCII art.
  65 *
  66 *   +------+
  67 *   |reader|          RING BUFFER
  68 *   |page  |
  69 *   +------+        +---+   +---+   +---+
  70 *                   |   |-->|   |-->|   |
  71 *                   +---+   +---+   +---+
  72 *                     ^               |
  73 *                     |               |
  74 *                     +---------------+
  75 *
  76 *
  77 *   +------+
  78 *   |reader|          RING BUFFER
  79 *   |page  |------------------v
  80 *   +------+        +---+   +---+   +---+
  81 *                   |   |-->|   |-->|   |
  82 *                   +---+   +---+   +---+
  83 *                     ^               |
  84 *                     |               |
  85 *                     +---------------+
  86 *
  87 *
  88 *   +------+
  89 *   |reader|          RING BUFFER
  90 *   |page  |------------------v
  91 *   +------+        +---+   +---+   +---+
  92 *      ^            |   |-->|   |-->|   |
  93 *      |            +---+   +---+   +---+
  94 *      |                              |
  95 *      |                              |
  96 *      +------------------------------+
  97 *
  98 *
  99 *   +------+
 100 *   |buffer|          RING BUFFER
 101 *   |page  |------------------v
 102 *   +------+        +---+   +---+   +---+
 103 *      ^            |   |   |   |-->|   |
 104 *      |   New      +---+   +---+   +---+
 105 *      |  Reader------^               |
 106 *      |   page                       |
 107 *      +------------------------------+
 108 *
 109 *
 110 * After we make this swap, the reader can hand this page off to the splice
 111 * code and be done with it. It can even allocate a new page if it needs to
 112 * and swap that into the ring buffer.
 113 *
 114 * We will be using cmpxchg soon to make all this lockless.
 115 *
 116 */
 117
 118/* Used for individual buffers (after the counter) */
 119#define RB_BUFFER_OFF		(1 << 20)
 120
 121#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
 122
 123#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
 124#define RB_ALIGNMENT		4U
 125#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 126#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */
 127
 128#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
 129# define RB_FORCE_8BYTE_ALIGNMENT	0
 130# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
 131#else
 132# define RB_FORCE_8BYTE_ALIGNMENT	1
 133# define RB_ARCH_ALIGNMENT		8U
 134#endif
 135
 136#define RB_ALIGN_DATA		__aligned(RB_ARCH_ALIGNMENT)
 137
 138/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
 139#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
 140
 141enum {
 142	RB_LEN_TIME_EXTEND = 8,
 143	RB_LEN_TIME_STAMP = 16,
 144};
 145
 146#define skip_time_extend(event) \
 147	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
 148
 
 
 
 149static inline int rb_null_event(struct ring_buffer_event *event)
 150{
 151	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
 152}
 153
 154static void rb_event_set_padding(struct ring_buffer_event *event)
 155{
 156	/* padding has a NULL time_delta */
 157	event->type_len = RINGBUF_TYPE_PADDING;
 158	event->time_delta = 0;
 159}
 160
 161static unsigned
 162rb_event_data_length(struct ring_buffer_event *event)
 163{
 164	unsigned length;
 165
 166	if (event->type_len)
 167		length = event->type_len * RB_ALIGNMENT;
 168	else
 169		length = event->array[0];
 170	return length + RB_EVNT_HDR_SIZE;
 171}
 172
 173/*
 174 * Return the length of the given event. Will return
 175 * the length of the time extend if the event is a
 176 * time extend.
 177 */
 178static inline unsigned
 179rb_event_length(struct ring_buffer_event *event)
 180{
 181	switch (event->type_len) {
 182	case RINGBUF_TYPE_PADDING:
 183		if (rb_null_event(event))
 184			/* undefined */
 185			return -1;
 186		return  event->array[0] + RB_EVNT_HDR_SIZE;
 187
 188	case RINGBUF_TYPE_TIME_EXTEND:
 189		return RB_LEN_TIME_EXTEND;
 190
 191	case RINGBUF_TYPE_TIME_STAMP:
 192		return RB_LEN_TIME_STAMP;
 193
 194	case RINGBUF_TYPE_DATA:
 195		return rb_event_data_length(event);
 196	default:
 197		BUG();
 198	}
 199	/* not hit */
 200	return 0;
 201}
 202
 203/*
 204 * Return total length of time extend and data,
 205 *   or just the event length for all other events.
 206 */
 207static inline unsigned
 208rb_event_ts_length(struct ring_buffer_event *event)
 209{
 210	unsigned len = 0;
 211
 212	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
 213		/* time extends include the data event after it */
 214		len = RB_LEN_TIME_EXTEND;
 215		event = skip_time_extend(event);
 216	}
 217	return len + rb_event_length(event);
 218}
 219
 220/**
 221 * ring_buffer_event_length - return the length of the event
 222 * @event: the event to get the length of
 223 *
 224 * Returns the size of the data load of a data event.
 225 * If the event is something other than a data event, it
 226 * returns the size of the event itself. With the exception
 227 * of a TIME EXTEND, where it still returns the size of the
 228 * data load of the data event after it.
 229 */
 230unsigned ring_buffer_event_length(struct ring_buffer_event *event)
 231{
 232	unsigned length;
 233
 234	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
 235		event = skip_time_extend(event);
 236
 237	length = rb_event_length(event);
 238	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 239		return length;
 240	length -= RB_EVNT_HDR_SIZE;
 241	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
 242                length -= sizeof(event->array[0]);
 243	return length;
 244}
 245EXPORT_SYMBOL_GPL(ring_buffer_event_length);
 246
 247/* inline for ring buffer fast paths */
 248static void *
 249rb_event_data(struct ring_buffer_event *event)
 250{
 251	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
 252		event = skip_time_extend(event);
 253	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
 254	/* If length is in len field, then array[0] has the data */
 255	if (event->type_len)
 256		return (void *)&event->array[0];
 257	/* Otherwise length is in array[0] and array[1] has the data */
 258	return (void *)&event->array[1];
 259}
 260
 261/**
 262 * ring_buffer_event_data - return the data of the event
 263 * @event: the event to get the data from
 264 */
 265void *ring_buffer_event_data(struct ring_buffer_event *event)
 266{
 267	return rb_event_data(event);
 268}
 269EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 270
 271#define for_each_buffer_cpu(buffer, cpu)		\
 272	for_each_cpu(cpu, buffer->cpumask)
 273
 
 
 
 274#define TS_SHIFT	27
 275#define TS_MASK		((1ULL << TS_SHIFT) - 1)
 276#define TS_DELTA_TEST	(~TS_MASK)
 277
 
 
 
 
 
 
 
 
 
 
 
 278/* Flag when events were overwritten */
 279#define RB_MISSED_EVENTS	(1 << 31)
 280/* Missed count stored at end */
 281#define RB_MISSED_STORED	(1 << 30)
 282
 283struct buffer_data_page {
 284	u64		 time_stamp;	/* page time stamp */
 285	local_t		 commit;	/* write committed index */
 286	unsigned char	 data[] RB_ALIGN_DATA;	/* data of buffer page */
 287};
 288
 289/*
 290 * Note, the buffer_page list must be first. The buffer pages
 291 * are allocated in cache lines, which means that each buffer
 292 * page will be at the beginning of a cache line, and thus
 293 * the least significant bits will be zero. We use this to
 294 * add flags in the list struct pointers, to make the ring buffer
 295 * lockless.
 296 */
 297struct buffer_page {
 298	struct list_head list;		/* list of buffer pages */
 299	local_t		 write;		/* index for next write */
 300	unsigned	 read;		/* index for next read */
 301	local_t		 entries;	/* entries on this page */
 302	unsigned long	 real_end;	/* real end of data */
 303	struct buffer_data_page *page;	/* Actual data page */
 304};
 305
 306/*
 307 * The buffer page counters, write and entries, must be reset
 308 * atomically when crossing page boundaries. To synchronize this
 309 * update, two counters are inserted into the number. One is
 310 * the actual counter for the write position or count on the page.
 311 *
 312 * The other is a counter of updaters. Before an update happens
 313 * the update partition of the counter is incremented. This will
 314 * allow the updater to update the counter atomically.
 315 *
 316 * The counter is 20 bits, and the state data is 12.
 317 */
 318#define RB_WRITE_MASK		0xfffff
 319#define RB_WRITE_INTCNT		(1 << 20)
 320
 321static void rb_init_page(struct buffer_data_page *bpage)
 322{
 323	local_set(&bpage->commit, 0);
 324}
 325
 326/**
 327 * ring_buffer_page_len - the size of data on the page.
 328 * @page: The page to read
 329 *
 330 * Returns the amount of data on the page, including buffer page header.
 331 */
 332size_t ring_buffer_page_len(void *page)
 333{
 334	return local_read(&((struct buffer_data_page *)page)->commit)
 335		+ BUF_PAGE_HDR_SIZE;
 336}
 337
 338/*
 339 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 340 * this issue out.
 341 */
 342static void free_buffer_page(struct buffer_page *bpage)
 343{
 344	free_page((unsigned long)bpage->page);
 345	kfree(bpage);
 346}
 347
 348/*
 349 * We need to fit the time_stamp delta into 27 bits.
 350 */
 351static inline int test_time_stamp(u64 delta)
 352{
 353	if (delta & TS_DELTA_TEST)
 354		return 1;
 355	return 0;
 356}
 357
 358#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
 359
 360/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
 361#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
 362
 363int ring_buffer_print_page_header(struct trace_seq *s)
 364{
 365	struct buffer_data_page field;
 366
 367	trace_seq_printf(s, "\tfield: u64 timestamp;\t"
 368			 "offset:0;\tsize:%u;\tsigned:%u;\n",
 369			 (unsigned int)sizeof(field.time_stamp),
 370			 (unsigned int)is_signed_type(u64));
 371
 372	trace_seq_printf(s, "\tfield: local_t commit;\t"
 373			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
 374			 (unsigned int)offsetof(typeof(field), commit),
 375			 (unsigned int)sizeof(field.commit),
 376			 (unsigned int)is_signed_type(long));
 377
 378	trace_seq_printf(s, "\tfield: int overwrite;\t"
 379			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
 380			 (unsigned int)offsetof(typeof(field), commit),
 381			 1,
 382			 (unsigned int)is_signed_type(long));
 383
 384	trace_seq_printf(s, "\tfield: char data;\t"
 385			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
 386			 (unsigned int)offsetof(typeof(field), data),
 387			 (unsigned int)BUF_PAGE_SIZE,
 388			 (unsigned int)is_signed_type(char));
 389
 390	return !trace_seq_has_overflowed(s);
 391}
 392
 393struct rb_irq_work {
 394	struct irq_work			work;
 395	wait_queue_head_t		waiters;
 396	wait_queue_head_t		full_waiters;
 
 397	bool				waiters_pending;
 398	bool				full_waiters_pending;
 399	bool				wakeup_full;
 400};
 401
 402/*
 403 * Structure to hold event state and handle nested events.
 404 */
 405struct rb_event_info {
 406	u64			ts;
 407	u64			delta;
 
 
 408	unsigned long		length;
 409	struct buffer_page	*tail_page;
 410	int			add_timestamp;
 411};
 412
 413/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 414 * Used for which event context the event is in.
 415 *  NMI     = 0
 416 *  IRQ     = 1
 417 *  SOFTIRQ = 2
 418 *  NORMAL  = 3
 
 419 *
 420 * See trace_recursive_lock() comment below for more details.
 421 */
 422enum {
 
 423	RB_CTX_NMI,
 424	RB_CTX_IRQ,
 425	RB_CTX_SOFTIRQ,
 426	RB_CTX_NORMAL,
 427	RB_CTX_MAX
 428};
 429
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 430/*
 431 * head_page == tail_page && head == tail then buffer is empty.
 432 */
 433struct ring_buffer_per_cpu {
 434	int				cpu;
 435	atomic_t			record_disabled;
 436	struct ring_buffer		*buffer;
 
 437	raw_spinlock_t			reader_lock;	/* serialize readers */
 438	arch_spinlock_t			lock;
 439	struct lock_class_key		lock_key;
 440	unsigned int			nr_pages;
 
 441	unsigned int			current_context;
 442	struct list_head		*pages;
 443	struct buffer_page		*head_page;	/* read from head */
 444	struct buffer_page		*tail_page;	/* write to tail */
 445	struct buffer_page		*commit_page;	/* committed pages */
 446	struct buffer_page		*reader_page;
 447	unsigned long			lost_events;
 448	unsigned long			last_overrun;
 
 449	local_t				entries_bytes;
 450	local_t				entries;
 451	local_t				overrun;
 452	local_t				commit_overrun;
 453	local_t				dropped_events;
 454	local_t				committing;
 455	local_t				commits;
 
 
 
 
 
 456	unsigned long			read;
 457	unsigned long			read_bytes;
 458	u64				write_stamp;
 
 
 459	u64				read_stamp;
 460	/* ring buffer pages to update, > 0 to add, < 0 to remove */
 461	int				nr_pages_to_update;
 462	struct list_head		new_pages; /* new pages to add */
 463	struct work_struct		update_pages_work;
 464	struct completion		update_done;
 465
 466	struct rb_irq_work		irq_work;
 467};
 468
 469struct ring_buffer {
 470	unsigned			flags;
 471	int				cpus;
 472	atomic_t			record_disabled;
 473	atomic_t			resize_disabled;
 474	cpumask_var_t			cpumask;
 475
 476	struct lock_class_key		*reader_lock_key;
 477
 478	struct mutex			mutex;
 479
 480	struct ring_buffer_per_cpu	**buffers;
 481
 482#ifdef CONFIG_HOTPLUG_CPU
 483	struct notifier_block		cpu_notify;
 484#endif
 485	u64				(*clock)(void);
 486
 487	struct rb_irq_work		irq_work;
 
 488};
 489
 490struct ring_buffer_iter {
 491	struct ring_buffer_per_cpu	*cpu_buffer;
 492	unsigned long			head;
 
 493	struct buffer_page		*head_page;
 494	struct buffer_page		*cache_reader_page;
 495	unsigned long			cache_read;
 496	u64				read_stamp;
 
 
 
 497};
 498
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 499/*
 500 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 501 *
 502 * Schedules a delayed work to wake up any task that is blocked on the
 503 * ring buffer waiters queue.
 504 */
 505static void rb_wake_up_waiters(struct irq_work *work)
 506{
 507	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
 508
 509	wake_up_all(&rbwork->waiters);
 510	if (rbwork->wakeup_full) {
 511		rbwork->wakeup_full = false;
 
 512		wake_up_all(&rbwork->full_waiters);
 513	}
 514}
 515
 516/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 517 * ring_buffer_wait - wait for input to the ring buffer
 518 * @buffer: buffer to wait on
 519 * @cpu: the cpu buffer to wait on
 520 * @full: wait until a full page is available, if @cpu != RING_BUFFER_ALL_CPUS
 521 *
 522 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 523 * as data is added to any of the @buffer's cpu buffers. Otherwise
 524 * it will wait for data to be added to a specific cpu buffer.
 525 */
 526int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
 527{
 528	struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
 529	DEFINE_WAIT(wait);
 530	struct rb_irq_work *work;
 
 531	int ret = 0;
 532
 533	/*
 534	 * Depending on what the caller is waiting for, either any
 535	 * data in any cpu buffer, or a specific buffer, put the
 536	 * caller on the appropriate wait queue.
 537	 */
 538	if (cpu == RING_BUFFER_ALL_CPUS) {
 539		work = &buffer->irq_work;
 540		/* Full only makes sense on per cpu reads */
 541		full = false;
 542	} else {
 543		if (!cpumask_test_cpu(cpu, buffer->cpumask))
 544			return -ENODEV;
 545		cpu_buffer = buffer->buffers[cpu];
 546		work = &cpu_buffer->irq_work;
 547	}
 548
 
 549
 550	while (true) {
 551		if (full)
 552			prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
 553		else
 554			prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
 555
 556		/*
 557		 * The events can happen in critical sections where
 558		 * checking a work queue can cause deadlocks.
 559		 * After adding a task to the queue, this flag is set
 560		 * only to notify events to try to wake up the queue
 561		 * using irq_work.
 562		 *
 563		 * We don't clear it even if the buffer is no longer
 564		 * empty. The flag only causes the next event to run
 565		 * irq_work to do the work queue wake up. The worse
 566		 * that can happen if we race with !trace_empty() is that
 567		 * an event will cause an irq_work to try to wake up
 568		 * an empty queue.
 569		 *
 570		 * There's no reason to protect this flag either, as
 571		 * the work queue and irq_work logic will do the necessary
 572		 * synchronization for the wake ups. The only thing
 573		 * that is necessary is that the wake up happens after
 574		 * a task has been queued. It's OK for spurious wake ups.
 575		 */
 576		if (full)
 577			work->full_waiters_pending = true;
 578		else
 579			work->waiters_pending = true;
 580
 581		if (signal_pending(current)) {
 582			ret = -EINTR;
 583			break;
 584		}
 585
 586		if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
 587			break;
 588
 589		if (cpu != RING_BUFFER_ALL_CPUS &&
 590		    !ring_buffer_empty_cpu(buffer, cpu)) {
 591			unsigned long flags;
 592			bool pagebusy;
 
 593
 594			if (!full)
 595				break;
 596
 597			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 598			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
 
 
 
 
 
 599			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 600
 601			if (!pagebusy)
 602				break;
 603		}
 604
 605		schedule();
 
 
 
 
 
 606	}
 607
 608	if (full)
 609		finish_wait(&work->full_waiters, &wait);
 610	else
 611		finish_wait(&work->waiters, &wait);
 612
 613	return ret;
 614}
 615
 616/**
 617 * ring_buffer_poll_wait - poll on buffer input
 618 * @buffer: buffer to wait on
 619 * @cpu: the cpu buffer to wait on
 620 * @filp: the file descriptor
 621 * @poll_table: The poll descriptor
 
 622 *
 623 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 624 * as data is added to any of the @buffer's cpu buffers. Otherwise
 625 * it will wait for data to be added to a specific cpu buffer.
 626 *
 627 * Returns POLLIN | POLLRDNORM if data exists in the buffers,
 628 * zero otherwise.
 629 */
 630int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
 631			  struct file *filp, poll_table *poll_table)
 632{
 633	struct ring_buffer_per_cpu *cpu_buffer;
 634	struct rb_irq_work *work;
 635
 636	if (cpu == RING_BUFFER_ALL_CPUS)
 637		work = &buffer->irq_work;
 638	else {
 
 639		if (!cpumask_test_cpu(cpu, buffer->cpumask))
 640			return -EINVAL;
 641
 642		cpu_buffer = buffer->buffers[cpu];
 643		work = &cpu_buffer->irq_work;
 644	}
 645
 646	poll_wait(filp, &work->waiters, poll_table);
 647	work->waiters_pending = true;
 
 
 
 
 
 
 648	/*
 649	 * There's a tight race between setting the waiters_pending and
 650	 * checking if the ring buffer is empty.  Once the waiters_pending bit
 651	 * is set, the next event will wake the task up, but we can get stuck
 652	 * if there's only a single event in.
 653	 *
 654	 * FIXME: Ideally, we need a memory barrier on the writer side as well,
 655	 * but adding a memory barrier to all events will cause too much of a
 656	 * performance hit in the fast path.  We only need a memory barrier when
 657	 * the buffer goes from empty to having content.  But as this race is
 658	 * extremely small, and it's not a problem if another event comes in, we
 659	 * will fix it later.
 660	 */
 661	smp_mb();
 662
 
 
 
 663	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
 664	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
 665		return POLLIN | POLLRDNORM;
 666	return 0;
 667}
 668
 669/* buffer may be either ring_buffer or ring_buffer_per_cpu */
 670#define RB_WARN_ON(b, cond)						\
 671	({								\
 672		int _____ret = unlikely(cond);				\
 673		if (_____ret) {						\
 674			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
 675				struct ring_buffer_per_cpu *__b =	\
 676					(void *)b;			\
 677				atomic_inc(&__b->buffer->record_disabled); \
 678			} else						\
 679				atomic_inc(&b->record_disabled);	\
 680			WARN_ON(1);					\
 681		}							\
 682		_____ret;						\
 683	})
 684
 685/* Up this if you want to test the TIME_EXTENTS and normalization */
 686#define DEBUG_SHIFT 0
 687
 688static inline u64 rb_time_stamp(struct ring_buffer *buffer)
 689{
 
 
 
 
 
 
 
 
 690	/* shift to debug/test normalization and TIME_EXTENTS */
 691	return buffer->clock() << DEBUG_SHIFT;
 692}
 693
 694u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
 695{
 696	u64 time;
 697
 698	preempt_disable_notrace();
 699	time = rb_time_stamp(buffer);
 700	preempt_enable_no_resched_notrace();
 701
 702	return time;
 703}
 704EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
 705
 706void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
 707				      int cpu, u64 *ts)
 708{
 709	/* Just stupid testing the normalize function and deltas */
 710	*ts >>= DEBUG_SHIFT;
 711}
 712EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
 713
 714/*
 715 * Making the ring buffer lockless makes things tricky.
 716 * Although writes only happen on the CPU that they are on,
 717 * and they only need to worry about interrupts. Reads can
 718 * happen on any CPU.
 719 *
 720 * The reader page is always off the ring buffer, but when the
 721 * reader finishes with a page, it needs to swap its page with
 722 * a new one from the buffer. The reader needs to take from
 723 * the head (writes go to the tail). But if a writer is in overwrite
 724 * mode and wraps, it must push the head page forward.
 725 *
 726 * Here lies the problem.
 727 *
 728 * The reader must be careful to replace only the head page, and
 729 * not another one. As described at the top of the file in the
 730 * ASCII art, the reader sets its old page to point to the next
 731 * page after head. It then sets the page after head to point to
 732 * the old reader page. But if the writer moves the head page
 733 * during this operation, the reader could end up with the tail.
 734 *
 735 * We use cmpxchg to help prevent this race. We also do something
 736 * special with the page before head. We set the LSB to 1.
 737 *
 738 * When the writer must push the page forward, it will clear the
 739 * bit that points to the head page, move the head, and then set
 740 * the bit that points to the new head page.
 741 *
 742 * We also don't want an interrupt coming in and moving the head
 743 * page on another writer. Thus we use the second LSB to catch
 744 * that too. Thus:
 745 *
 746 * head->list->prev->next        bit 1          bit 0
 747 *                              -------        -------
 748 * Normal page                     0              0
 749 * Points to head page             0              1
 750 * New head page                   1              0
 751 *
 752 * Note we can not trust the prev pointer of the head page, because:
 753 *
 754 * +----+       +-----+        +-----+
 755 * |    |------>|  T  |---X--->|  N  |
 756 * |    |<------|     |        |     |
 757 * +----+       +-----+        +-----+
 758 *   ^                           ^ |
 759 *   |          +-----+          | |
 760 *   +----------|  R  |----------+ |
 761 *              |     |<-----------+
 762 *              +-----+
 763 *
 764 * Key:  ---X-->  HEAD flag set in pointer
 765 *         T      Tail page
 766 *         R      Reader page
 767 *         N      Next page
 768 *
 769 * (see __rb_reserve_next() to see where this happens)
 770 *
 771 *  What the above shows is that the reader just swapped out
 772 *  the reader page with a page in the buffer, but before it
 773 *  could make the new header point back to the new page added
 774 *  it was preempted by a writer. The writer moved forward onto
 775 *  the new page added by the reader and is about to move forward
 776 *  again.
 777 *
 778 *  You can see, it is legitimate for the previous pointer of
 779 *  the head (or any page) not to point back to itself. But only
 780 *  temporarially.
 781 */
 782
 783#define RB_PAGE_NORMAL		0UL
 784#define RB_PAGE_HEAD		1UL
 785#define RB_PAGE_UPDATE		2UL
 786
 787
 788#define RB_FLAG_MASK		3UL
 789
 790/* PAGE_MOVED is not part of the mask */
 791#define RB_PAGE_MOVED		4UL
 792
 793/*
 794 * rb_list_head - remove any bit
 795 */
 796static struct list_head *rb_list_head(struct list_head *list)
 797{
 798	unsigned long val = (unsigned long)list;
 799
 800	return (struct list_head *)(val & ~RB_FLAG_MASK);
 801}
 802
 803/*
 804 * rb_is_head_page - test if the given page is the head page
 805 *
 806 * Because the reader may move the head_page pointer, we can
 807 * not trust what the head page is (it may be pointing to
 808 * the reader page). But if the next page is a header page,
 809 * its flags will be non zero.
 810 */
 811static inline int
 812rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
 813		struct buffer_page *page, struct list_head *list)
 814{
 815	unsigned long val;
 816
 817	val = (unsigned long)list->next;
 818
 819	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
 820		return RB_PAGE_MOVED;
 821
 822	return val & RB_FLAG_MASK;
 823}
 824
 825/*
 826 * rb_is_reader_page
 827 *
 828 * The unique thing about the reader page, is that, if the
 829 * writer is ever on it, the previous pointer never points
 830 * back to the reader page.
 831 */
 832static bool rb_is_reader_page(struct buffer_page *page)
 833{
 834	struct list_head *list = page->list.prev;
 835
 836	return rb_list_head(list->next) != &page->list;
 837}
 838
 839/*
 840 * rb_set_list_to_head - set a list_head to be pointing to head.
 841 */
 842static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
 843				struct list_head *list)
 844{
 845	unsigned long *ptr;
 846
 847	ptr = (unsigned long *)&list->next;
 848	*ptr |= RB_PAGE_HEAD;
 849	*ptr &= ~RB_PAGE_UPDATE;
 850}
 851
 852/*
 853 * rb_head_page_activate - sets up head page
 854 */
 855static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
 856{
 857	struct buffer_page *head;
 858
 859	head = cpu_buffer->head_page;
 860	if (!head)
 861		return;
 862
 863	/*
 864	 * Set the previous list pointer to have the HEAD flag.
 865	 */
 866	rb_set_list_to_head(cpu_buffer, head->list.prev);
 867}
 868
 869static void rb_list_head_clear(struct list_head *list)
 870{
 871	unsigned long *ptr = (unsigned long *)&list->next;
 872
 873	*ptr &= ~RB_FLAG_MASK;
 874}
 875
 876/*
 877 * rb_head_page_dactivate - clears head page ptr (for free list)
 878 */
 879static void
 880rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
 881{
 882	struct list_head *hd;
 883
 884	/* Go through the whole list and clear any pointers found. */
 885	rb_list_head_clear(cpu_buffer->pages);
 886
 887	list_for_each(hd, cpu_buffer->pages)
 888		rb_list_head_clear(hd);
 889}
 890
 891static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
 892			    struct buffer_page *head,
 893			    struct buffer_page *prev,
 894			    int old_flag, int new_flag)
 895{
 896	struct list_head *list;
 897	unsigned long val = (unsigned long)&head->list;
 898	unsigned long ret;
 899
 900	list = &prev->list;
 901
 902	val &= ~RB_FLAG_MASK;
 903
 904	ret = cmpxchg((unsigned long *)&list->next,
 905		      val | old_flag, val | new_flag);
 906
 907	/* check if the reader took the page */
 908	if ((ret & ~RB_FLAG_MASK) != val)
 909		return RB_PAGE_MOVED;
 910
 911	return ret & RB_FLAG_MASK;
 912}
 913
 914static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
 915				   struct buffer_page *head,
 916				   struct buffer_page *prev,
 917				   int old_flag)
 918{
 919	return rb_head_page_set(cpu_buffer, head, prev,
 920				old_flag, RB_PAGE_UPDATE);
 921}
 922
 923static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
 924				 struct buffer_page *head,
 925				 struct buffer_page *prev,
 926				 int old_flag)
 927{
 928	return rb_head_page_set(cpu_buffer, head, prev,
 929				old_flag, RB_PAGE_HEAD);
 930}
 931
 932static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
 933				   struct buffer_page *head,
 934				   struct buffer_page *prev,
 935				   int old_flag)
 936{
 937	return rb_head_page_set(cpu_buffer, head, prev,
 938				old_flag, RB_PAGE_NORMAL);
 939}
 940
 941static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
 942			       struct buffer_page **bpage)
 943{
 944	struct list_head *p = rb_list_head((*bpage)->list.next);
 945
 946	*bpage = list_entry(p, struct buffer_page, list);
 947}
 948
 949static struct buffer_page *
 950rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
 951{
 952	struct buffer_page *head;
 953	struct buffer_page *page;
 954	struct list_head *list;
 955	int i;
 956
 957	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
 958		return NULL;
 959
 960	/* sanity check */
 961	list = cpu_buffer->pages;
 962	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
 963		return NULL;
 964
 965	page = head = cpu_buffer->head_page;
 966	/*
 967	 * It is possible that the writer moves the header behind
 968	 * where we started, and we miss in one loop.
 969	 * A second loop should grab the header, but we'll do
 970	 * three loops just because I'm paranoid.
 971	 */
 972	for (i = 0; i < 3; i++) {
 973		do {
 974			if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
 975				cpu_buffer->head_page = page;
 976				return page;
 977			}
 978			rb_inc_page(cpu_buffer, &page);
 979		} while (page != head);
 980	}
 981
 982	RB_WARN_ON(cpu_buffer, 1);
 983
 984	return NULL;
 985}
 986
 987static int rb_head_page_replace(struct buffer_page *old,
 988				struct buffer_page *new)
 989{
 990	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
 991	unsigned long val;
 992	unsigned long ret;
 993
 994	val = *ptr & ~RB_FLAG_MASK;
 995	val |= RB_PAGE_HEAD;
 996
 997	ret = cmpxchg(ptr, val, (unsigned long)&new->list);
 998
 999	return ret == val;
1000}
1001
1002/*
1003 * rb_tail_page_update - move the tail page forward
1004 */
1005static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
1006			       struct buffer_page *tail_page,
1007			       struct buffer_page *next_page)
1008{
1009	unsigned long old_entries;
1010	unsigned long old_write;
1011
1012	/*
1013	 * The tail page now needs to be moved forward.
1014	 *
1015	 * We need to reset the tail page, but without messing
1016	 * with possible erasing of data brought in by interrupts
1017	 * that have moved the tail page and are currently on it.
1018	 *
1019	 * We add a counter to the write field to denote this.
1020	 */
1021	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
1022	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
1023
1024	/*
1025	 * Just make sure we have seen our old_write and synchronize
1026	 * with any interrupts that come in.
1027	 */
1028	barrier();
1029
1030	/*
1031	 * If the tail page is still the same as what we think
1032	 * it is, then it is up to us to update the tail
1033	 * pointer.
1034	 */
1035	if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
1036		/* Zero the write counter */
1037		unsigned long val = old_write & ~RB_WRITE_MASK;
1038		unsigned long eval = old_entries & ~RB_WRITE_MASK;
1039
1040		/*
1041		 * This will only succeed if an interrupt did
1042		 * not come in and change it. In which case, we
1043		 * do not want to modify it.
1044		 *
1045		 * We add (void) to let the compiler know that we do not care
1046		 * about the return value of these functions. We use the
1047		 * cmpxchg to only update if an interrupt did not already
1048		 * do it for us. If the cmpxchg fails, we don't care.
1049		 */
1050		(void)local_cmpxchg(&next_page->write, old_write, val);
1051		(void)local_cmpxchg(&next_page->entries, old_entries, eval);
1052
1053		/*
1054		 * No need to worry about races with clearing out the commit.
1055		 * It can only increment when a commit takes place. But that
1056		 * only happens in the outermost nested commit.
1057		 */
1058		local_set(&next_page->page->commit, 0);
1059
1060		/* Again, either we update tail_page or an interrupt does */
1061		(void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
1062	}
1063}
1064
1065static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
1066			  struct buffer_page *bpage)
1067{
1068	unsigned long val = (unsigned long)bpage;
1069
1070	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
1071		return 1;
1072
1073	return 0;
1074}
1075
1076/**
1077 * rb_check_list - make sure a pointer to a list has the last bits zero
1078 */
1079static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
1080			 struct list_head *list)
1081{
1082	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
1083		return 1;
1084	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
1085		return 1;
1086	return 0;
1087}
1088
1089/**
1090 * rb_check_pages - integrity check of buffer pages
1091 * @cpu_buffer: CPU buffer with pages to test
1092 *
1093 * As a safety measure we check to make sure the data pages have not
1094 * been corrupted.
1095 */
1096static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
1097{
1098	struct list_head *head = cpu_buffer->pages;
1099	struct buffer_page *bpage, *tmp;
1100
1101	/* Reset the head page if it exists */
1102	if (cpu_buffer->head_page)
1103		rb_set_head_page(cpu_buffer);
1104
1105	rb_head_page_deactivate(cpu_buffer);
1106
1107	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
1108		return -1;
1109	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
1110		return -1;
1111
1112	if (rb_check_list(cpu_buffer, head))
1113		return -1;
1114
1115	list_for_each_entry_safe(bpage, tmp, head, list) {
1116		if (RB_WARN_ON(cpu_buffer,
1117			       bpage->list.next->prev != &bpage->list))
1118			return -1;
1119		if (RB_WARN_ON(cpu_buffer,
1120			       bpage->list.prev->next != &bpage->list))
1121			return -1;
1122		if (rb_check_list(cpu_buffer, &bpage->list))
1123			return -1;
1124	}
1125
1126	rb_head_page_activate(cpu_buffer);
1127
1128	return 0;
1129}
1130
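    /*
     * Allocate nr_pages buffer_page descriptors plus their data pages on
     * the node of @cpu and link them onto @pages.  On failure, everything
     * allocated so far is freed and -ENOMEM is returned.
     */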
1131static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
1132{
1133	int i;
1134	struct buffer_page *bpage, *tmp;
1135
1136	for (i = 0; i < nr_pages; i++) {
1137		struct page *page;
1138		/*
1139		 * The __GFP_NORETRY flag makes sure that the allocation fails
1140		 * gracefully, without invoking the OOM killer, so the system
1141		 * is not destabilized.
1142		 */
1143		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1144				    GFP_KERNEL | __GFP_NORETRY,
1145				    cpu_to_node(cpu));
1146		if (!bpage)
1147			goto free_pages;
1148
1149		list_add(&bpage->list, pages);
1150
1151		page = alloc_pages_node(cpu_to_node(cpu),
1152					GFP_KERNEL | __GFP_NORETRY, 0);
1153		if (!page)
1154			goto free_pages;
1155		bpage->page = page_address(page);
1156		rb_init_page(bpage->page);
1157	}
1158
1159	return 0;
1160
1161free_pages:
1162	list_for_each_entry_safe(bpage, tmp, pages, list) {
1163		list_del_init(&bpage->list);
1164		free_buffer_page(bpage);
1165	}
1166
1167	return -ENOMEM;
1168}
1169
1170static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1171			     unsigned nr_pages)
1172{
1173	LIST_HEAD(pages);
1174
1175	WARN_ON(!nr_pages);
1176
1177	if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
1178		return -ENOMEM;
1179
1180	/*
1181	 * The ring buffer page list is a circular list that does not
1182	 * start and end with a list head. All page list items point to
1183	 * other pages.
1184	 */
1185	cpu_buffer->pages = pages.next;
1186	list_del(&pages);
1187
1188	cpu_buffer->nr_pages = nr_pages;
1189
1190	rb_check_pages(cpu_buffer);
1191
1192	return 0;
1193}
1194
1195static struct ring_buffer_per_cpu *
1196rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
1197{
1198	struct ring_buffer_per_cpu *cpu_buffer;
1199	struct buffer_page *bpage;
1200	struct page *page;
1201	int ret;
1202
1203	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1204				  GFP_KERNEL, cpu_to_node(cpu));
1205	if (!cpu_buffer)
1206		return NULL;
1207
1208	cpu_buffer->cpu = cpu;
1209	cpu_buffer->buffer = buffer;
1210	raw_spin_lock_init(&cpu_buffer->reader_lock);
1211	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1212	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1213	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
1214	init_completion(&cpu_buffer->update_done);
1215	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
1216	init_waitqueue_head(&cpu_buffer->irq_work.waiters);
1217	init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);
1218
1219	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1220			    GFP_KERNEL, cpu_to_node(cpu));
1221	if (!bpage)
1222		goto fail_free_buffer;
1223
1224	rb_check_bpage(cpu_buffer, bpage);
1225
1226	cpu_buffer->reader_page = bpage;
1227	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
1228	if (!page)
1229		goto fail_free_reader;
1230	bpage->page = page_address(page);
1231	rb_init_page(bpage->page);
1232
1233	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1234	INIT_LIST_HEAD(&cpu_buffer->new_pages);
1235
1236	ret = rb_allocate_pages(cpu_buffer, nr_pages);
1237	if (ret < 0)
1238		goto fail_free_reader;
1239
1240	cpu_buffer->head_page
1241		= list_entry(cpu_buffer->pages, struct buffer_page, list);
1242	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1243
1244	rb_head_page_activate(cpu_buffer);
1245
1246	return cpu_buffer;
1247
1248 fail_free_reader:
1249	free_buffer_page(cpu_buffer->reader_page);
1250
1251 fail_free_buffer:
1252	kfree(cpu_buffer);
1253	return NULL;
1254}
1255
1256static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1257{
1258	struct list_head *head = cpu_buffer->pages;
1259	struct buffer_page *bpage, *tmp;
1260
1261	free_buffer_page(cpu_buffer->reader_page);
1262
1263	rb_head_page_deactivate(cpu_buffer);
1264
1265	if (head) {
1266		list_for_each_entry_safe(bpage, tmp, head, list) {
1267			list_del_init(&bpage->list);
1268			free_buffer_page(bpage);
1269		}
1270		bpage = list_entry(head, struct buffer_page, list);
1271		free_buffer_page(bpage);
1272	}
1273
1274	kfree(cpu_buffer);
1275}
1276
1277#ifdef CONFIG_HOTPLUG_CPU
1278static int rb_cpu_notify(struct notifier_block *self,
1279			 unsigned long action, void *hcpu);
1280#endif
1281
1282/**
1283 * __ring_buffer_alloc - allocate a new ring_buffer
1284 * @size: the size in bytes per cpu that is needed.
1285 * @flags: attributes to set for the ring buffer.
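     * @key: ring buffer reader_lock_key.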
1286 *
1287 * Currently the only flag that is available is the RB_FL_OVERWRITE
1288 * flag. This flag means that the buffer will overwrite old data
1289 * when the buffer wraps. If this flag is not set, the buffer will
1290 * drop data when the tail hits the head.
1291 */
1292struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1293					struct lock_class_key *key)
1294{
1295	struct ring_buffer *buffer;
1296	int bsize;
1297	int cpu, nr_pages;
1298
1299	/* keep it in its own cache line */
1300	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1301			 GFP_KERNEL);
1302	if (!buffer)
1303		return NULL;
1304
1305	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1306		goto fail_free_buffer;
1307
1308	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1309	buffer->flags = flags;
1310	buffer->clock = trace_clock_local;
1311	buffer->reader_lock_key = key;
1312
1313	init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
1314	init_waitqueue_head(&buffer->irq_work.waiters);
1315
1316	/* need at least two pages */
1317	if (nr_pages < 2)
1318		nr_pages = 2;
1319
1320	/*
1321	 * In case of non-hotplug cpu, if the ring-buffer is allocated
1322	 * in early initcall, it will not be notified of secondary cpus.
1323	 * In that case, we need to allocate for all possible cpus.
1324	 */
1325#ifdef CONFIG_HOTPLUG_CPU
1326	cpu_notifier_register_begin();
1327	cpumask_copy(buffer->cpumask, cpu_online_mask);
1328#else
1329	cpumask_copy(buffer->cpumask, cpu_possible_mask);
1330#endif
1331	buffer->cpus = nr_cpu_ids;
1332
1333	bsize = sizeof(void *) * nr_cpu_ids;
1334	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1335				  GFP_KERNEL);
1336	if (!buffer->buffers)
1337		goto fail_free_cpumask;
1338
1339	for_each_buffer_cpu(buffer, cpu) {
1340		buffer->buffers[cpu] =
1341			rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
1342		if (!buffer->buffers[cpu])
1343			goto fail_free_buffers;
1344	}
1345
1346#ifdef CONFIG_HOTPLUG_CPU
1347	buffer->cpu_notify.notifier_call = rb_cpu_notify;
1348	buffer->cpu_notify.priority = 0;
1349	__register_cpu_notifier(&buffer->cpu_notify);
1350	cpu_notifier_register_done();
1351#endif
1352
1353	mutex_init(&buffer->mutex);
1354
1355	return buffer;
1356
1357 fail_free_buffers:
1358	for_each_buffer_cpu(buffer, cpu) {
1359		if (buffer->buffers[cpu])
1360			rb_free_cpu_buffer(buffer->buffers[cpu]);
1361	}
1362	kfree(buffer->buffers);
1363
1364 fail_free_cpumask:
1365	free_cpumask_var(buffer->cpumask);
1366#ifdef CONFIG_HOTPLUG_CPU
1367	cpu_notifier_register_done();
1368#endif
1369
1370 fail_free_buffer:
1371	kfree(buffer);
1372	return NULL;
1373}
1374EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
1375
1376/**
1377 * ring_buffer_free - free a ring buffer.
1378 * @buffer: the buffer to free.
1379 */
1380void
1381ring_buffer_free(struct ring_buffer *buffer)
1382{
1383	int cpu;
1384
1385#ifdef CONFIG_HOTPLUG_CPU
1386	cpu_notifier_register_begin();
1387	__unregister_cpu_notifier(&buffer->cpu_notify);
1388#endif
1389
1390	for_each_buffer_cpu(buffer, cpu)
1391		rb_free_cpu_buffer(buffer->buffers[cpu]);
1392
1393#ifdef CONFIG_HOTPLUG_CPU
1394	cpu_notifier_register_done();
1395#endif
1396
1397	kfree(buffer->buffers);
1398	free_cpumask_var(buffer->cpumask);
1399
1400	kfree(buffer);
1401}
1402EXPORT_SYMBOL_GPL(ring_buffer_free);
1403
1404void ring_buffer_set_clock(struct ring_buffer *buffer,
1405			   u64 (*clock)(void))
1406{
1407	buffer->clock = clock;
1408}
1409
1410static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1411
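    /*
     * The write and entries counters carry an interrupt nesting count in
     * their upper bits (see rb_tail_page_update()); mask it off to get
     * the real values.
     */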
1412static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1413{
1414	return local_read(&bpage->entries) & RB_WRITE_MASK;
1415}
1416
1417static inline unsigned long rb_page_write(struct buffer_page *bpage)
1418{
1419	return local_read(&bpage->write) & RB_WRITE_MASK;
1420}
1421
1422static int
1423rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
1424{
1425	struct list_head *tail_page, *to_remove, *next_page;
1426	struct buffer_page *to_remove_page, *tmp_iter_page;
1427	struct buffer_page *last_page, *first_page;
1428	unsigned int nr_removed;
1429	unsigned long head_bit;
1430	int page_entries;
1431
1432	head_bit = 0;
1433
1434	raw_spin_lock_irq(&cpu_buffer->reader_lock);
1435	atomic_inc(&cpu_buffer->record_disabled);
1436	/*
1437	 * We don't race with the readers since we have acquired the reader
1438	 * lock. We also don't race with writers after disabling recording.
1439	 * This makes it easy to figure out the first and the last page to be
1440	 * removed from the list. We unlink all the pages in between including
1441	 * the first and last pages. This is done in a busy loop so that we
1442	 * lose the least number of traces.
1443	 * The pages are freed after we restart recording and unlock readers.
1444	 */
1445	tail_page = &cpu_buffer->tail_page->list;
1446
1447	/*
1448	 * tail page might be on reader page, we remove the next page
1449	 * from the ring buffer
1450	 */
1451	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1452		tail_page = rb_list_head(tail_page->next);
1453	to_remove = tail_page;
1454
1455	/* start of pages to remove */
1456	first_page = list_entry(rb_list_head(to_remove->next),
1457				struct buffer_page, list);
1458
1459	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
1460		to_remove = rb_list_head(to_remove)->next;
1461		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
1462	}
1463
1464	next_page = rb_list_head(to_remove)->next;
1465
1466	/*
1467	 * Now we remove all pages between tail_page and next_page.
1468	 * Make sure that we have head_bit value preserved for the
1469	 * next page
1470	 */
1471	tail_page->next = (struct list_head *)((unsigned long)next_page |
1472						head_bit);
1473	next_page = rb_list_head(next_page);
1474	next_page->prev = tail_page;
1475
1476	/* make sure pages points to a valid page in the ring buffer */
1477	cpu_buffer->pages = next_page;
1478
1479	/* update head page */
1480	if (head_bit)
1481		cpu_buffer->head_page = list_entry(next_page,
1482						struct buffer_page, list);
1483
1484	/*
1485	 * change read pointer to make sure any read iterators reset
1486	 * themselves
1487	 */
1488	cpu_buffer->read = 0;
1489
1490	/* pages are removed, resume tracing and then free the pages */
1491	atomic_dec(&cpu_buffer->record_disabled);
1492	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1493
1494	RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
1495
1496	/* last buffer page to remove */
1497	last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
1498				list);
1499	tmp_iter_page = first_page;
1500
1501	do {
1502		to_remove_page = tmp_iter_page;
1503		rb_inc_page(cpu_buffer, &tmp_iter_page);
1504
1505		/* update the counters */
1506		page_entries = rb_page_entries(to_remove_page);
1507		if (page_entries) {
1508			/*
1509			 * If something was added to this page, it was full
1510			 * since it is not the tail page. So we deduct the
1511			 * bytes consumed in ring buffer from here.
1512			 * Increment overrun to account for the lost events.
1513			 */
1514			local_add(page_entries, &cpu_buffer->overrun);
1515			local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1516		}
1517
1518		/*
1519		 * We have already removed references to this list item, just
1520		 * free up the buffer_page and its page
1521		 */
1522		free_buffer_page(to_remove_page);
1523		nr_removed--;
1524
1525	} while (to_remove_page != last_page);
1526
1527	RB_WARN_ON(cpu_buffer, nr_removed);
1528
1529	return nr_removed == 0;
1530}
1531
1532static int
1533rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
1534{
1535	struct list_head *pages = &cpu_buffer->new_pages;
1536	int retries, success;
1537
1538	raw_spin_lock_irq(&cpu_buffer->reader_lock);
1539	/*
1540	 * We are holding the reader lock, so the reader page won't be swapped
1541	 * in the ring buffer. Now we are racing with the writer trying to
1542	 * move head page and the tail page.
1543	 * We are going to adapt the reader page update process where:
1544	 * 1. We first splice the start and end of list of new pages between
1545	 *    the head page and its previous page.
1546	 * 2. We cmpxchg the prev_page->next to point from head page to the
1547	 *    start of new pages list.
1548	 * 3. Finally, we update the head->prev to the end of new list.
1549	 *
1550	 * We will try this process 10 times, to make sure that we don't keep
1551	 * spinning.
1552	 */
1553	retries = 10;
1554	success = 0;
1555	while (retries--) {
1556		struct list_head *head_page, *prev_page, *r;
1557		struct list_head *last_page, *first_page;
1558		struct list_head *head_page_with_bit;
    		struct buffer_page *hpage;
1559
1560		hpage = rb_set_head_page(cpu_buffer);
1561		if (!hpage)
1562			break;
    		head_page = &hpage->list;
1563		prev_page = head_page->prev;
1564
1565		first_page = pages->next;
1566		last_page  = pages->prev;
1567
1568		head_page_with_bit = (struct list_head *)
1569				     ((unsigned long)head_page | RB_PAGE_HEAD);
1570
1571		last_page->next = head_page_with_bit;
1572		first_page->prev = prev_page;
1573
1574		r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
1575
1576		if (r == head_page_with_bit) {
1577			/*
1578			 * yay, we replaced the page pointer to our new list,
1579			 * now, we just have to update to head page's prev
1580			 * pointer to point to end of list
1581			 */
1582			head_page->prev = last_page;
1583			success = 1;
1584			break;
1585		}
1586	}
1587
1588	if (success)
1589		INIT_LIST_HEAD(pages);
1590	/*
1591	 * If we weren't successful in adding the new pages, warn and stop
1592	 * tracing.
1593	 */
1594	RB_WARN_ON(cpu_buffer, !success);
1595	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1596
1597	/* free pages if they weren't inserted */
1598	if (!success) {
1599		struct buffer_page *bpage, *tmp;
1600		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1601					 list) {
1602			list_del_init(&bpage->list);
1603			free_buffer_page(bpage);
1604		}
1605	}
1606	return success;
1607}
1608
1609static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
1610{
1611	int success;
1612
1613	if (cpu_buffer->nr_pages_to_update > 0)
1614		success = rb_insert_pages(cpu_buffer);
1615	else
1616		success = rb_remove_pages(cpu_buffer,
1617					-cpu_buffer->nr_pages_to_update);
1618
1619	if (success)
1620		cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
1621}
1622
1623static void update_pages_handler(struct work_struct *work)
1624{
1625	struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
1626			struct ring_buffer_per_cpu, update_pages_work);
1627	rb_update_pages(cpu_buffer);
1628	complete(&cpu_buffer->update_done);
1629}
1630
1631/**
1632 * ring_buffer_resize - resize the ring buffer
1633 * @buffer: the buffer to resize.
1634 * @size: the new size.
1635 * @cpu_id: the cpu buffer to resize
1636 *
1637 * Minimum size is 2 * BUF_PAGE_SIZE.
1638 *
1639 * Returns the (page-aligned) buffer size on success and < 0 on failure.
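     *
     * A usage sketch: to give every CPU roughly a one megabyte buffer
     * (the value is rounded up to whole pages):
     *
     *	ring_buffer_resize(buffer, 1 << 20, RING_BUFFER_ALL_CPUS);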
1640 */
1641int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1642			int cpu_id)
1643{
1644	struct ring_buffer_per_cpu *cpu_buffer;
1645	unsigned nr_pages;
1646	int cpu, err = 0;
1647
1648	/*
1649	 * Always succeed at resizing a non-existent buffer:
1650	 */
1651	if (!buffer)
1652		return size;
1653
1654	/* Make sure the requested buffer exists */
1655	if (cpu_id != RING_BUFFER_ALL_CPUS &&
1656	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
1657		return size;
1658
1659	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1660	size *= BUF_PAGE_SIZE;
1661
1662	/* we need a minimum of two pages */
1663	if (size < BUF_PAGE_SIZE * 2)
1664		size = BUF_PAGE_SIZE * 2;
1665
1666	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1667
1668	/*
1669	 * Don't succeed if resizing is disabled, as a reader might be
1670	 * manipulating the ring buffer and is expecting a sane state while
1671	 * this is true.
1672	 */
1673	if (atomic_read(&buffer->resize_disabled))
1674		return -EBUSY;
1675
1676	/* prevent another thread from changing buffer sizes */
1677	mutex_lock(&buffer->mutex);
1678
1679	if (cpu_id == RING_BUFFER_ALL_CPUS) {
1680		/* calculate the pages to update */
1681		for_each_buffer_cpu(buffer, cpu) {
1682			cpu_buffer = buffer->buffers[cpu];
1683
1684			cpu_buffer->nr_pages_to_update = nr_pages -
1685							cpu_buffer->nr_pages;
1686			/*
1687			 * nothing more to do for removing pages or no update
1688			 */
1689			if (cpu_buffer->nr_pages_to_update <= 0)
1690				continue;
1691			/*
1692			 * to add pages, make sure all new pages can be
1693			 * allocated without receiving ENOMEM
1694			 */
1695			INIT_LIST_HEAD(&cpu_buffer->new_pages);
1696			if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1697						&cpu_buffer->new_pages, cpu)) {
1698				/* not enough memory for new pages */
1699				err = -ENOMEM;
1700				goto out_err;
1701			}
1702		}
1703
1704		get_online_cpus();
1705		/*
1706		 * Fire off all the required work handlers
1707		 * We can't schedule on offline CPUs, but it's not necessary
1708		 * since we can change their buffer sizes without any race.
1709		 */
1710		for_each_buffer_cpu(buffer, cpu) {
1711			cpu_buffer = buffer->buffers[cpu];
1712			if (!cpu_buffer->nr_pages_to_update)
1713				continue;
1714
1715			/* Can't run something on an offline CPU. */
1716			if (!cpu_online(cpu)) {
1717				rb_update_pages(cpu_buffer);
1718				cpu_buffer->nr_pages_to_update = 0;
1719			} else {
1720				schedule_work_on(cpu,
1721						&cpu_buffer->update_pages_work);
1722			}
1723		}
1724
1725		/* wait for all the updates to complete */
1726		for_each_buffer_cpu(buffer, cpu) {
1727			cpu_buffer = buffer->buffers[cpu];
1728			if (!cpu_buffer->nr_pages_to_update)
1729				continue;
1730
1731			if (cpu_online(cpu))
1732				wait_for_completion(&cpu_buffer->update_done);
1733			cpu_buffer->nr_pages_to_update = 0;
1734		}
1735
1736		put_online_cpus();
1737	} else {
1738		/* Make sure this CPU has been initialized */
1739		if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
1740			goto out;
1741
1742		cpu_buffer = buffer->buffers[cpu_id];
1743
1744		if (nr_pages == cpu_buffer->nr_pages)
1745			goto out;
1746
1747		cpu_buffer->nr_pages_to_update = nr_pages -
1748						cpu_buffer->nr_pages;
1749
1750		INIT_LIST_HEAD(&cpu_buffer->new_pages);
1751		if (cpu_buffer->nr_pages_to_update > 0 &&
1752			__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1753					    &cpu_buffer->new_pages, cpu_id)) {
1754			err = -ENOMEM;
1755			goto out_err;
1756		}
1757
1758		get_online_cpus();
1759
1760		/* Can't run something on an offline CPU. */
1761		if (!cpu_online(cpu_id))
1762			rb_update_pages(cpu_buffer);
1763		else {
1764			schedule_work_on(cpu_id,
1765					 &cpu_buffer->update_pages_work);
1766			wait_for_completion(&cpu_buffer->update_done);
1767		}
1768
1769		cpu_buffer->nr_pages_to_update = 0;
1770		put_online_cpus();
1771	}
1772
1773 out:
1774	/*
1775	 * The ring buffer resize can happen with the ring buffer
1776	 * enabled, so that the update disturbs the tracing as little
1777	 * as possible. But if the buffer is disabled, we do not need
1778	 * to worry about that, and we can take the time to verify
1779	 * that the buffer is not corrupt.
1780	 */
1781	if (atomic_read(&buffer->record_disabled)) {
1782		atomic_inc(&buffer->record_disabled);
1783		/*
1784		 * Even though the buffer was disabled, we must make sure
1785		 * that it is truly disabled before calling rb_check_pages.
1786		 * There could have been a race between checking
1787		 * record_disable and incrementing it.
1788		 */
1789		synchronize_sched();
1790		for_each_buffer_cpu(buffer, cpu) {
1791			cpu_buffer = buffer->buffers[cpu];
1792			rb_check_pages(cpu_buffer);
1793		}
1794		atomic_dec(&buffer->record_disabled);
1795	}
1796
1797	mutex_unlock(&buffer->mutex);
1798	return size;
1799
1800 out_err:
1801	for_each_buffer_cpu(buffer, cpu) {
1802		struct buffer_page *bpage, *tmp;
1803
1804		cpu_buffer = buffer->buffers[cpu];
1805		cpu_buffer->nr_pages_to_update = 0;
1806
1807		if (list_empty(&cpu_buffer->new_pages))
1808			continue;
1809
1810		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1811					list) {
1812			list_del_init(&bpage->list);
1813			free_buffer_page(bpage);
1814		}
1815	}
1816	mutex_unlock(&buffer->mutex);
1817	return err;
1818}
1819EXPORT_SYMBOL_GPL(ring_buffer_resize);
1820
1821void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
1822{
1823	mutex_lock(&buffer->mutex);
1824	if (val)
1825		buffer->flags |= RB_FL_OVERWRITE;
1826	else
1827		buffer->flags &= ~RB_FL_OVERWRITE;
1828	mutex_unlock(&buffer->mutex);
1829}
1830EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
1831
1832static inline void *
1833__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
1834{
1835	return bpage->data + index;
1836}
1837
1838static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
1839{
1840	return bpage->page->data + index;
1841}
1842
1843static inline struct ring_buffer_event *
1844rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
1845{
1846	return __rb_page_index(cpu_buffer->reader_page,
1847			       cpu_buffer->reader_page->read);
1848}
1849
1850static inline struct ring_buffer_event *
1851rb_iter_head_event(struct ring_buffer_iter *iter)
1852{
1853	return __rb_page_index(iter->head_page, iter->head);
1854}
1855
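    /* Number of bytes committed to the page's data area. */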
1856static inline unsigned rb_page_commit(struct buffer_page *bpage)
1857{
1858	return local_read(&bpage->page->commit);
1859}
1860
1861/* Size is determined by what has been committed */
1862static inline unsigned rb_page_size(struct buffer_page *bpage)
1863{
1864	return rb_page_commit(bpage);
1865}
1866
1867static inline unsigned
1868rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
1869{
1870	return rb_page_commit(cpu_buffer->commit_page);
1871}
1872
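    /* Byte offset of @event within its buffer page's data area. */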
1873static inline unsigned
1874rb_event_index(struct ring_buffer_event *event)
1875{
1876	unsigned long addr = (unsigned long)event;
1877
1878	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
1879}
1880
1881static void rb_inc_iter(struct ring_buffer_iter *iter)
1882{
1883	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1884
1885	/*
1886	 * The iterator could be on the reader page (it starts there).
1887	 * But the head could have moved, since the reader was
1888	 * found. Check for this case and assign the iterator
1889	 * to the head page instead of next.
1890	 */
1891	if (iter->head_page == cpu_buffer->reader_page)
1892		iter->head_page = rb_set_head_page(cpu_buffer);
1893	else
1894		rb_inc_page(cpu_buffer, &iter->head_page);
1895
1896	iter->read_stamp = iter->head_page->page->time_stamp;
1897	iter->head = 0;
1898}
1899
1900/*
1901 * rb_handle_head_page - writer hit the head page
1902 *
1903 * Returns: +1 to retry page
1904 *           0 to continue
1905 *          -1 on error
1906 */
1907static int
1908rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
1909		    struct buffer_page *tail_page,
1910		    struct buffer_page *next_page)
1911{
1912	struct buffer_page *new_head;
1913	int entries;
1914	int type;
1915	int ret;
1916
1917	entries = rb_page_entries(next_page);
1918
1919	/*
1920	 * The hard part is here. We need to move the head
1921	 * forward, and protect against both readers on
1922	 * other CPUs and writers coming in via interrupts.
1923	 */
1924	type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
1925				       RB_PAGE_HEAD);
1926
1927	/*
1928	 * type can be one of four:
1929	 *  NORMAL - an interrupt already moved it for us
1930	 *  HEAD   - we are the first to get here.
1931	 *  UPDATE - we are the interrupt interrupting
1932	 *           a current move.
1933	 *  MOVED  - a reader on another CPU moved the next
1934	 *           pointer to its reader page. Give up
1935	 *           and try again.
1936	 */
1937
1938	switch (type) {
1939	case RB_PAGE_HEAD:
1940		/*
1941		 * We changed the head to UPDATE, thus
1942		 * it is our responsibility to update
1943		 * the counters.
1944		 */
1945		local_add(entries, &cpu_buffer->overrun);
1946		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1947
1948		/*
1949		 * The entries will be zeroed out when we move the
1950		 * tail page.
1951		 */
1952
1953		/* still more to do */
1954		break;
1955
1956	case RB_PAGE_UPDATE:
1957		/*
1958		 * This is an interrupt that interrupted the
1959		 * previous update. Still more to do.
1960		 */
1961		break;
1962	case RB_PAGE_NORMAL:
1963		/*
1964		 * An interrupt came in before the update
1965		 * and processed this for us.
1966		 * Nothing left to do.
1967		 */
1968		return 1;
1969	case RB_PAGE_MOVED:
1970		/*
1971		 * The reader is on another CPU and just did
1972		 * a swap with our next_page.
1973		 * Try again.
1974		 */
1975		return 1;
1976	default:
1977		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
1978		return -1;
1979	}
1980
1981	/*
1982	 * Now that we are here, the old head pointer is
1983	 * set to UPDATE. This will keep the reader from
1984	 * swapping the head page with the reader page.
1985	 * The reader (on another CPU) will spin till
1986	 * we are finished.
1987	 *
1988	 * We just need to protect against interrupts
1989	 * doing the job. We will set the next pointer
1990	 * to HEAD. After that, we set the old pointer
1991	 * to NORMAL, but only if it was HEAD before.
1992	 * Otherwise we are an interrupt, and only
1993	 * want the outermost commit to reset it.
1994	 */
1995	new_head = next_page;
1996	rb_inc_page(cpu_buffer, &new_head);
1997
1998	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
1999				    RB_PAGE_NORMAL);
2000
2001	/*
2002	 * Valid returns are:
2003	 *  HEAD   - an interrupt came in and already set it.
2004	 *  NORMAL - One of two things:
2005	 *            1) We really set it.
2006	 *            2) A bunch of interrupts came in and moved
2007	 *               the page forward again.
2008	 */
2009	switch (ret) {
2010	case RB_PAGE_HEAD:
2011	case RB_PAGE_NORMAL:
2012		/* OK */
2013		break;
2014	default:
2015		RB_WARN_ON(cpu_buffer, 1);
2016		return -1;
2017	}
2018
2019	/*
2020	 * It is possible that an interrupt came in,
2021	 * set the head up, then more interrupts came in
2022	 * and moved it again. When we get back here,
2023	 * the page would have been set to NORMAL but we
2024	 * just set it back to HEAD.
2025	 *
2026	 * How do you detect this? Well, if that happened
2027	 * the tail page would have moved.
2028	 */
2029	if (ret == RB_PAGE_NORMAL) {
2030		struct buffer_page *buffer_tail_page;
2031
2032		buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
2033		/*
2034		 * If the tail had moved past next, then we need
2035		 * to reset the pointer.
2036		 */
2037		if (buffer_tail_page != tail_page &&
2038		    buffer_tail_page != next_page)
2039			rb_head_page_set_normal(cpu_buffer, new_head,
2040						next_page,
2041						RB_PAGE_HEAD);
2042	}
2043
2044	/*
2045	 * If this was the outer most commit (the one that
2046	 * changed the original pointer from HEAD to UPDATE),
2047	 * then it is up to us to reset it to NORMAL.
2048	 */
2049	if (type == RB_PAGE_HEAD) {
2050		ret = rb_head_page_set_normal(cpu_buffer, next_page,
2051					      tail_page,
2052					      RB_PAGE_UPDATE);
2053		if (RB_WARN_ON(cpu_buffer,
2054			       ret != RB_PAGE_UPDATE))
2055			return -1;
2056	}
2057
2058	return 0;
2059}
2060
2061static inline void
2062rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2063	      unsigned long tail, struct rb_event_info *info)
2064{
2065	struct buffer_page *tail_page = info->tail_page;
2066	struct ring_buffer_event *event;
2067	unsigned long length = info->length;
2068
2069	/*
2070	 * Only the event that crossed the page boundary
2071	 * must fill the old tail_page with padding.
2072	 */
2073	if (tail >= BUF_PAGE_SIZE) {
2074		/*
2075		 * If the page was filled, then we still need
2076		 * to update the real_end. Reset it to zero
2077		 * and the reader will ignore it.
2078		 */
2079		if (tail == BUF_PAGE_SIZE)
2080			tail_page->real_end = 0;
2081
2082		local_sub(length, &tail_page->write);
2083		return;
2084	}
2085
2086	event = __rb_page_index(tail_page, tail);
2087	kmemcheck_annotate_bitfield(event, bitfield);
2088
2089	/* account for padding bytes */
2090	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2091
2092	/*
2093	 * Save the original length to the meta data.
2094	 * This will be used by the reader to add lost event
2095	 * counter.
2096	 */
2097	tail_page->real_end = tail;
2098
2099	/*
2100	 * If this event is bigger than the minimum size, then
2101	 * we need to be careful that we don't subtract the
2102	 * write counter enough to allow another writer to slip
2103	 * in on this page.
2104	 * We put in a discarded commit instead, to make sure
2105	 * that this space is not used again.
2106	 *
2107	 * If we are less than the minimum size, we don't need to
2108	 * worry about it.
2109	 */
2110	if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
2111		/* No room for any events */
2112
2113		/* Mark the rest of the page with padding */
2114		rb_event_set_padding(event);
2115
2116		/* Set the write back to the previous setting */
2117		local_sub(length, &tail_page->write);
2118		return;
2119	}
2120
2121	/* Put in a discarded event */
2122	event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
2123	event->type_len = RINGBUF_TYPE_PADDING;
2124	/* time delta must be non zero */
2125	event->time_delta = 1;
2126
2127	/* Set write to end of buffer */
2128	length = (tail + length) - BUF_PAGE_SIZE;
2129	local_sub(length, &tail_page->write);
2130}
2131
2132static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
2133
2134/*
2135 * This is the slow path, force gcc not to inline it.
2136 */
2137static noinline struct ring_buffer_event *
2138rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2139	     unsigned long tail, struct rb_event_info *info)
2140{
2141	struct buffer_page *tail_page = info->tail_page;
2142	struct buffer_page *commit_page = cpu_buffer->commit_page;
2143	struct ring_buffer *buffer = cpu_buffer->buffer;
2144	struct buffer_page *next_page;
2145	int ret;
2146
2147	next_page = tail_page;
2148
2149	rb_inc_page(cpu_buffer, &next_page);
2150
2151	/*
2152	 * If for some reason, we had an interrupt storm that made
2153	 * it all the way around the buffer, bail, and warn
2154	 * about it.
2155	 */
2156	if (unlikely(next_page == commit_page)) {
2157		local_inc(&cpu_buffer->commit_overrun);
2158		goto out_reset;
2159	}
2160
2161	/*
2162	 * This is where the fun begins!
2163	 *
2164	 * We are fighting against races between a reader that
2165	 * could be on another CPU trying to swap its reader
2166	 * page with the buffer head.
2167	 *
2168	 * We are also fighting against interrupts coming in and
2169	 * moving the head or tail on us as well.
2170	 *
2171	 * If the next page is the head page then we have filled
2172	 * the buffer, unless the commit page is still on the
2173	 * reader page.
2174	 */
2175	if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
2176
2177		/*
2178		 * If the commit is not on the reader page, then
2179		 * move the header page.
2180		 * move the head page.
2181		if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2182			/*
2183			 * If we are not in overwrite mode,
2184			 * this is easy, just stop here.
2185			 */
2186			if (!(buffer->flags & RB_FL_OVERWRITE)) {
2187				local_inc(&cpu_buffer->dropped_events);
2188				goto out_reset;
2189			}
2190
2191			ret = rb_handle_head_page(cpu_buffer,
2192						  tail_page,
2193						  next_page);
2194			if (ret < 0)
2195				goto out_reset;
2196			if (ret)
2197				goto out_again;
2198		} else {
2199			/*
2200			 * We need to be careful here too. The
2201			 * commit page could still be on the reader
2202			 * page. We could have a small buffer, and
2203			 * have filled up the buffer with events
2204			 * from interrupts and such, and wrapped.
2205			 *
2206			 * Note, if the tail page is also on the
2207			 * reader_page, we let it move out.
2208			 */
2209			if (unlikely((cpu_buffer->commit_page !=
2210				      cpu_buffer->tail_page) &&
2211				     (cpu_buffer->commit_page ==
2212				      cpu_buffer->reader_page))) {
2213				local_inc(&cpu_buffer->commit_overrun);
2214				goto out_reset;
2215			}
2216		}
2217	}
2218
2219	rb_tail_page_update(cpu_buffer, tail_page, next_page);
2220
2221 out_again:
2222
2223	rb_reset_tail(cpu_buffer, tail, info);
2224
2225	/* Commit what we have for now. */
2226	rb_end_commit(cpu_buffer);
2227	/* rb_end_commit() decs committing */
2228	local_inc(&cpu_buffer->committing);
2229
2230	/* fail and let the caller try again */
2231	return ERR_PTR(-EAGAIN);
2232
2233 out_reset:
2234	/* reset write */
2235	rb_reset_tail(cpu_buffer, tail, info);
2236
2237	return NULL;
2238}
2239
2240/* Slow path, do not inline */
2241static noinline struct ring_buffer_event *
2242rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
2243{
2244	event->type_len = RINGBUF_TYPE_TIME_EXTEND;
2245
2246	/* Not the first event on the page? */
2247	if (rb_event_index(event)) {
2248		event->time_delta = delta & TS_MASK;
2249		event->array[0] = delta >> TS_SHIFT;
2250	} else {
2251		/* nope, just zero it */
2252		event->time_delta = 0;
2253		event->array[0] = 0;
2254	}
2255
2256	return skip_time_extend(event);
2257}
2258
2259static inline bool rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
2260				     struct ring_buffer_event *event);
2261
2262/**
2263 * rb_update_event - update event type and data
2264 * @event: the event to update
2265 * @type: the type of event
2266 * @length: the size of the event field in the ring buffer
2267 *
2268 * Update the type and data fields of the event. The length
2269 * is the actual size that is written to the ring buffer,
2270 * and with this, we can determine what to place into the
2271 * data field.
2272 */
2273static void
2274rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
2275		struct ring_buffer_event *event,
2276		struct rb_event_info *info)
2277{
2278	unsigned length = info->length;
2279	u64 delta = info->delta;
2280
2281	/* Only a commit updates the timestamp */
2282	if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
2283		delta = 0;
2284
2285	/*
2286	 * If we need to add a timestamp, then we
2287	 * add it to the start of the reserved space.
2288	 */
2289	if (unlikely(info->add_timestamp)) {
2290		event = rb_add_time_stamp(event, delta);
2291		length -= RB_LEN_TIME_EXTEND;
2292		delta = 0;
2293	}
2294
2295	event->time_delta = delta;
2296	length -= RB_EVNT_HDR_SIZE;
2297	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
2298		event->type_len = 0;
2299		event->array[0] = length;
2300	} else
2301		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
2302}
2303
2304static unsigned rb_calculate_event_length(unsigned length)
2305{
2306	struct ring_buffer_event event; /* Used only for sizeof array */
2307
2308	/* zero length can cause confusions */
2309	if (!length)
2310		length++;
2311
2312	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
2313		length += sizeof(event.array[0]);
2314
2315	length += RB_EVNT_HDR_SIZE;
2316	length = ALIGN(length, RB_ARCH_ALIGNMENT);
2317
2318	/*
2319	 * In case the time delta is larger than the 27 bits for it
2320	 * in the header, we need to add a timestamp. If another
2321	 * event comes in when trying to discard this one to increase
2322	 * the length, then the timestamp will be added in the allocated
2323	 * space of this event. If length is bigger than the size needed
2324	 * for the TIME_EXTEND, then padding has to be used. The events
2325	 * length must be either RB_LEN_TIME_EXTEND, or greater than or equal
2326	 * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding.
2327	 * As length is a multiple of 4, we only need to worry if it
2328	 * is 12 (RB_LEN_TIME_EXTEND + 4).
2329	 */
2330	if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
2331		length += RB_ALIGNMENT;
2332
2333	return length;
2334}
2335
2336#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2337static inline bool sched_clock_stable(void)
2338{
2339	return true;
2340}
2341#endif
2342
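    /*
     * Try to reclaim the space used by @event by rewinding the tail
     * page's write index.  This only succeeds while the event is still
     * the last thing reserved on the tail page; returns 1 if the space
     * was reclaimed, 0 otherwise.
     */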
2343static inline int
2344rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2345		  struct ring_buffer_event *event)
2346{
2347	unsigned long new_index, old_index;
2348	struct buffer_page *bpage;
2349	unsigned long index;
2350	unsigned long addr;
2351
2352	new_index = rb_event_index(event);
2353	old_index = new_index + rb_event_ts_length(event);
2354	addr = (unsigned long)event;
2355	addr &= PAGE_MASK;
2356
2357	bpage = READ_ONCE(cpu_buffer->tail_page);
2358
2359	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2360		unsigned long write_mask =
2361			local_read(&bpage->write) & ~RB_WRITE_MASK;
2362		unsigned long event_length = rb_event_length(event);
2363		/*
2364		 * This is on the tail page. It is possible that
2365		 * a write could come in and move the tail page
2366		 * and write to the next page. That is fine
2367		 * because we just shorten what is on this page.
2368		 */
2369		old_index += write_mask;
2370		new_index += write_mask;
2371		index = local_cmpxchg(&bpage->write, old_index, new_index);
2372		if (index == old_index) {
2373			/* update counters */
2374			local_sub(event_length, &cpu_buffer->entries_bytes);
2375			return 1;
2376		}
2377	}
2378
2379	/* could not discard */
2380	return 0;
2381}
2382
2383static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2384{
2385	local_inc(&cpu_buffer->committing);
2386	local_inc(&cpu_buffer->commits);
2387}
2388
2389static void
2390rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
2391{
2392	unsigned long max_count;
2393
2394	/*
2395	 * We only race with interrupts and NMIs on this CPU.
2396	 * If we own the commit event, then we can commit
2397	 * all others that interrupted us, since the interruptions
2398	 * are in stack format (they finish before they come
2399	 * back to us). This allows us to do a simple loop to
2400	 * assign the commit to the tail.
2401	 */
2402 again:
2403	max_count = cpu_buffer->nr_pages * 100;
2404
2405	while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
2406		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
2407			return;
2408		if (RB_WARN_ON(cpu_buffer,
2409			       rb_is_reader_page(cpu_buffer->tail_page)))
2410			return;
2411		local_set(&cpu_buffer->commit_page->page->commit,
2412			  rb_page_write(cpu_buffer->commit_page));
2413		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
2414		/* Only update the write stamp if the page has an event */
2415		if (rb_page_write(cpu_buffer->commit_page))
2416			cpu_buffer->write_stamp =
2417				cpu_buffer->commit_page->page->time_stamp;
2418		/* add barrier to keep gcc from optimizing too much */
2419		barrier();
2420	}
2421	while (rb_commit_index(cpu_buffer) !=
2422	       rb_page_write(cpu_buffer->commit_page)) {
2423
2424		local_set(&cpu_buffer->commit_page->page->commit,
2425			  rb_page_write(cpu_buffer->commit_page));
2426		RB_WARN_ON(cpu_buffer,
2427			   local_read(&cpu_buffer->commit_page->page->commit) &
2428			   ~RB_WRITE_MASK);
2429		barrier();
2430	}
2431
2432	/* again, keep gcc from optimizing */
2433	barrier();
2434
2435	/*
2436	 * If an interrupt came in just after the first while loop
2437	 * and pushed the tail page forward, we will be left with
2438	 * a dangling commit that will never go forward.
2439	 */
2440	if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
2441		goto again;
2442}
2443
2444static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2445{
2446	unsigned long commits;
2447
2448	if (RB_WARN_ON(cpu_buffer,
2449		       !local_read(&cpu_buffer->committing)))
2450		return;
2451
2452 again:
2453	commits = local_read(&cpu_buffer->commits);
2454	/* synchronize with interrupts */
2455	barrier();
2456	if (local_read(&cpu_buffer->committing) == 1)
2457		rb_set_commit_to_write(cpu_buffer);
2458
2459	local_dec(&cpu_buffer->committing);
2460
2461	/* synchronize with interrupts */
2462	barrier();
2463
2464	/*
2465	 * Need to account for interrupts coming in between the
2466	 * updating of the commit page and the clearing of the
2467	 * committing counter.
2468	 */
2469	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2470	    !local_read(&cpu_buffer->committing)) {
2471		local_inc(&cpu_buffer->committing);
2472		goto again;
2473	}
2474}
2475
2476static inline void rb_event_discard(struct ring_buffer_event *event)
2477{
2478	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
2479		event = skip_time_extend(event);
2480
2481	/* array[0] holds the actual length for the discarded event */
2482	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2483	event->type_len = RINGBUF_TYPE_PADDING;
2484	/* time delta must be non zero */
2485	if (!event->time_delta)
2486		event->time_delta = 1;
2487}
2488
2489static inline bool
2490rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
2491		   struct ring_buffer_event *event)
2492{
2493	unsigned long addr = (unsigned long)event;
2494	unsigned long index;
2495
2496	index = rb_event_index(event);
2497	addr &= PAGE_MASK;
2498
2499	return cpu_buffer->commit_page->page == (void *)addr &&
2500		rb_commit_index(cpu_buffer) == index;
2501}
2502
2503static void
2504rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2505		      struct ring_buffer_event *event)
2506{
2507	u64 delta;
2508
2509	/*
2510	 * The event first in the commit queue updates the
2511	 * time stamp.
2512	 */
2513	if (rb_event_is_commit(cpu_buffer, event)) {
2514		/*
2515		 * A commit event that is first on a page
2516		 * updates the write timestamp with the page stamp
2517		 */
2518		if (!rb_event_index(event))
2519			cpu_buffer->write_stamp =
2520				cpu_buffer->commit_page->page->time_stamp;
2521		else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
2522			delta = event->array[0];
2523			delta <<= TS_SHIFT;
2524			delta += event->time_delta;
2525			cpu_buffer->write_stamp += delta;
2526		} else
2527			cpu_buffer->write_stamp += event->time_delta;
2528	}
2529}
2530
2531static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2532		      struct ring_buffer_event *event)
2533{
2534	local_inc(&cpu_buffer->entries);
2535	rb_update_write_stamp(cpu_buffer, event);
2536	rb_end_commit(cpu_buffer);
2537}
2538
2539static __always_inline void
2540rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
2541{
2542	bool pagebusy;
2543
2544	if (buffer->irq_work.waiters_pending) {
2545		buffer->irq_work.waiters_pending = false;
2546		/* irq_work_queue() supplies its own memory barriers */
2547		irq_work_queue(&buffer->irq_work.work);
2548	}
2549
2550	if (cpu_buffer->irq_work.waiters_pending) {
2551		cpu_buffer->irq_work.waiters_pending = false;
2552		/* irq_work_queue() supplies its own memory barriers */
2553		irq_work_queue(&cpu_buffer->irq_work.work);
2554	}
2555
2556	pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
2557
2558	if (!pagebusy && cpu_buffer->irq_work.full_waiters_pending) {
2559		cpu_buffer->irq_work.wakeup_full = true;
2560		cpu_buffer->irq_work.full_waiters_pending = false;
2561		/* irq_work_queue() supplies its own memory barriers */
2562		irq_work_queue(&cpu_buffer->irq_work.work);
2563	}
2564}
2565
2566/*
2567 * The lock and unlock are done within a preempt disable section.
2568 * The current_context per_cpu variable can only be modified
2569 * by the current task between lock and unlock. But it can
2570 * be modified more than once via an interrupt. To pass this
2571 * information from the lock to the unlock without having to
2572 * access the 'in_interrupt()' functions again (which do show
2573 * a bit of overhead in something as critical as function tracing),
2574 * we use a bitmask trick.
2575 *
2576 *  bit 0 =  NMI context
2577 *  bit 1 =  IRQ context
2578 *  bit 2 =  SoftIRQ context
2579 *  bit 3 =  normal context.
2580 *
2581 * This works because this is the order of contexts that can
2582 * preempt other contexts. A SoftIRQ never preempts an IRQ
2583 * context.
2584 *
2585 * When the context is determined, the corresponding bit is
2586 * checked and set (if it was set, then a recursion of that context
2587 * happened).
2588 *
2589 * On unlock, we need to clear this bit. To do so, just subtract
2590 * 1 from the current_context and AND it to itself.
2591 *
2592 * (binary)
2593 *  101 - 1 = 100
2594 *  101 & 100 = 100 (clearing bit zero)
2595 *
2596 *  1010 - 1 = 1001
2597 *  1010 & 1001 = 1000 (clearing bit 1)
2598 *
2599 * The least significant bit can be cleared this way, and it
2600 * just so happens that it is the same bit corresponding to
2601 * the current context.
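     *
     * For example, a normal-context write sets bit 3 (current_context
     * becomes 0b1000).  An IRQ that traces during that write also sets
     * bit 1 (0b1010); its unlock clears bit 1 again, while a recursive
     * write from within the same IRQ context would find bit 1 already
     * set and be rejected by trace_recursive_lock().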
2602 */
2603
2604static __always_inline int
2605trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
2606{
2607	unsigned int val = cpu_buffer->current_context;
2608	int bit;
2609
2610	if (in_interrupt()) {
2611		if (in_nmi())
2612			bit = RB_CTX_NMI;
2613		else if (in_irq())
2614			bit = RB_CTX_IRQ;
2615		else
2616			bit = RB_CTX_SOFTIRQ;
2617	} else
2618		bit = RB_CTX_NORMAL;
2619
2620	if (unlikely(val & (1 << bit)))
2621		return 1;
2622
2623	val |= (1 << bit);
2624	cpu_buffer->current_context = val;
2625
2626	return 0;
2627}
2628
2629static __always_inline void
2630trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
2631{
2632	cpu_buffer->current_context &= cpu_buffer->current_context - 1;
2633}
2634
2635/**
2636 * ring_buffer_unlock_commit - commit a reserved event
2637 * @buffer: The buffer to commit to
2638 * @event: The event pointer to commit.
2639 *
2640 * This commits the data to the ring buffer, and releases any locks held.
2641 *
2642 * Must be paired with ring_buffer_lock_reserve.
2643 */
2644int ring_buffer_unlock_commit(struct ring_buffer *buffer,
2645			      struct ring_buffer_event *event)
2646{
2647	struct ring_buffer_per_cpu *cpu_buffer;
2648	int cpu = raw_smp_processor_id();
2649
2650	cpu_buffer = buffer->buffers[cpu];
2651
2652	rb_commit(cpu_buffer, event);
2653
2654	rb_wakeups(buffer, cpu_buffer);
2655
2656	trace_recursive_unlock(cpu_buffer);
2657
2658	preempt_enable_notrace();
2659
2660	return 0;
2661}
2662EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
2663
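    /*
     * Called when the delta since the last write stamp is too large to
     * store directly in an event; complain (once) if the delta looks
     * absurd and request that a TIME_EXTEND event be prepended to the
     * data event.
     */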
2664static noinline void
2665rb_handle_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2666		    struct rb_event_info *info)
2667{
2668	WARN_ONCE(info->delta > (1ULL << 59),
2669		  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
2670		  (unsigned long long)info->delta,
2671		  (unsigned long long)info->ts,
2672		  (unsigned long long)cpu_buffer->write_stamp,
2673		  sched_clock_stable() ? "" :
2674		  "If you just came from a suspend/resume,\n"
2675		  "please switch to the trace global clock:\n"
2676		  "  echo global > /sys/kernel/debug/tracing/trace_clock\n");
2677	info->add_timestamp = 1;
2678}
2679
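    /*
     * Reserve info->length bytes on the tail page by atomically adding
     * to its write index.  If the reservation runs off the end of the
     * page, rb_move_tail() is called to advance to the next page (or to
     * fail the write).
     */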
2680static struct ring_buffer_event *
2681__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
2682		  struct rb_event_info *info)
2683{
2684	struct ring_buffer_event *event;
2685	struct buffer_page *tail_page;
2686	unsigned long tail, write;
2687
2688	/*
2689	 * If the time delta since the last event is too big to
2690	 * hold in the time field of the event, then we append a
2691	 * TIME EXTEND event ahead of the data event.
2692	 */
2693	if (unlikely(info->add_timestamp))
2694		info->length += RB_LEN_TIME_EXTEND;
2695
2696	/* Don't let the compiler play games with cpu_buffer->tail_page */
2697	tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
2698	write = local_add_return(info->length, &tail_page->write);
2699
2700	/* set write to only the index of the write */
2701	write &= RB_WRITE_MASK;
2702	tail = write - info->length;
2703
2704	/*
2705	 * If this is the first commit on the page, then it has the same
2706	 * timestamp as the page itself.
2707	 */
2708	if (!tail)
2709		info->delta = 0;
2710
2711	/* See if we shot past the end of this buffer page */
2712	if (unlikely(write > BUF_PAGE_SIZE))
2713		return rb_move_tail(cpu_buffer, tail, info);
2714
2715	/* We reserved something on the buffer */
2716
2717	event = __rb_page_index(tail_page, tail);
2718	kmemcheck_annotate_bitfield(event, bitfield);
2719	rb_update_event(cpu_buffer, event, info);
2720
2721	local_inc(&tail_page->entries);
2722
2723	/*
2724	 * If this is the first commit on the page, then update
2725	 * its timestamp.
2726	 */
2727	if (!tail)
2728		tail_page->page->time_stamp = info->ts;
2729
2730	/* account for these added bytes */
2731	local_add(info->length, &cpu_buffer->entries_bytes);
2732
2733	return event;
2734}
2735
2736static struct ring_buffer_event *
2737rb_reserve_next_event(struct ring_buffer *buffer,
2738		      struct ring_buffer_per_cpu *cpu_buffer,
2739		      unsigned long length)
2740{
2741	struct ring_buffer_event *event;
2742	struct rb_event_info info;
2743	int nr_loops = 0;
2744	u64 diff;
2745
2746	rb_start_commit(cpu_buffer);
2747
2748#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2749	/*
2750	 * Due to the ability to swap a cpu buffer from a buffer
2751	 * it is possible it was swapped before we committed.
2752	 * (committing stops a swap). We check for it here and
2753	 * if it happened, we have to fail the write.
2754	 */
2755	barrier();
2756	if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
2757		local_dec(&cpu_buffer->committing);
2758		local_dec(&cpu_buffer->commits);
2759		return NULL;
2760	}
2761#endif
2762
2763	info.length = rb_calculate_event_length(length);
2764 again:
2765	info.add_timestamp = 0;
2766	info.delta = 0;
2767
2768	/*
2769	 * We allow for interrupts to reenter here and do a trace.
2770	 * If one does, it will cause this original code to loop
2771	 * back here. Even with heavy interrupts happening, this
2772	 * should only happen a few times in a row. If this happens
2773	 * 1000 times in a row, there must be either an interrupt
2774	 * storm or we have something buggy.
2775	 * Bail!
2776	 */
2777	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2778		goto out_fail;
2779
2780	info.ts = rb_time_stamp(cpu_buffer->buffer);
2781	diff = info.ts - cpu_buffer->write_stamp;
2782
2783	/* make sure this diff is calculated here */
2784	barrier();
2785
2786	/* Did the write stamp get updated already? */
2787	if (likely(info.ts >= cpu_buffer->write_stamp)) {
2788		info.delta = diff;
2789		if (unlikely(test_time_stamp(info.delta)))
2790			rb_handle_timestamp(cpu_buffer, &info);
2791	}
2792
2793	event = __rb_reserve_next(cpu_buffer, &info);
2794
2795	if (unlikely(PTR_ERR(event) == -EAGAIN)) {
2796		if (info.add_timestamp)
2797			info.length -= RB_LEN_TIME_EXTEND;
2798		goto again;
2799	}
2800
2801	if (!event)
2802		goto out_fail;
2803
2804	return event;
2805
2806 out_fail:
2807	rb_end_commit(cpu_buffer);
2808	return NULL;
2809}
2810
2811/**
2812 * ring_buffer_lock_reserve - reserve a part of the buffer
2813 * @buffer: the ring buffer to reserve from
2814 * @length: the length of the data to reserve (excluding event header)
2815 *
2816 * Returns a reserved event on the ring buffer to copy directly to.
2817 * The user of this interface will need to get the body to write into
2818 * and can use the ring_buffer_event_data() interface.
2819 *
2820 * The length is the length of the data needed, not the event length
2821 * which also includes the event header.
2822 *
2823 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
2824 * If NULL is returned, then nothing has been allocated or locked.
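     *
     * A minimal usage sketch (struct my_entry is a hypothetical,
     * caller-defined payload):
     *
     *	event = ring_buffer_lock_reserve(buffer, sizeof(struct my_entry));
     *	if (event) {
     *		struct my_entry *entry = ring_buffer_event_data(event);
     *
     *		entry->value = 42;
     *		ring_buffer_unlock_commit(buffer, event);
     *	}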
2825 */
2826struct ring_buffer_event *
2827ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
2828{
2829	struct ring_buffer_per_cpu *cpu_buffer;
2830	struct ring_buffer_event *event;
2831	int cpu;
2832
2833	/* If we are tracing schedule, we don't want to recurse */
2834	preempt_disable_notrace();
2835
2836	if (unlikely(atomic_read(&buffer->record_disabled)))
2837		goto out;
2838
2839	cpu = raw_smp_processor_id();
2840
2841	if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
2842		goto out;
2843
2844	cpu_buffer = buffer->buffers[cpu];
2845
2846	if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
2847		goto out;
2848
2849	if (unlikely(length > BUF_MAX_DATA_SIZE))
2850		goto out;
2851
2852	if (unlikely(trace_recursive_lock(cpu_buffer)))
2853		goto out;
2854
2855	event = rb_reserve_next_event(buffer, cpu_buffer, length);
2856	if (!event)
2857		goto out_unlock;
2858
2859	return event;
2860
2861 out_unlock:
2862	trace_recursive_unlock(cpu_buffer);
2863 out:
2864	preempt_enable_notrace();
2865	return NULL;
2866}
2867EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
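/*
 * Illustrative sketch (not part of this file): the reserve/commit pairing
 * described in the kerneldoc above. The function name, the u32 payload and
 * the error handling are hypothetical; only the ring buffer calls are real.
 */
static int example_write_event(struct ring_buffer *buffer, u32 value)
{
	struct ring_buffer_event *event;
	u32 *body;

	/* Reserve room for the payload only; the event header is added for us */
	event = ring_buffer_lock_reserve(buffer, sizeof(*body));
	if (!event)
		return -EBUSY;	/* recording disabled, recursion, or no space */

	body = ring_buffer_event_data(event);
	*body = value;

	/* The reserve succeeded, so the commit must follow */
	ring_buffer_unlock_commit(buffer, event);
	return 0;
}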
2868
2869/*
2870 * Decrement the entries to the page that an event is on.
2871 * The event does not even need to exist, only the pointer
2872 * to the page it is on. This may only be called before the commit
2873 * takes place.
2874 */
2875static inline void
2876rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2877		   struct ring_buffer_event *event)
2878{
2879	unsigned long addr = (unsigned long)event;
2880	struct buffer_page *bpage = cpu_buffer->commit_page;
2881	struct buffer_page *start;
2882
2883	addr &= PAGE_MASK;
2884
2885	/* Do the likely case first */
2886	if (likely(bpage->page == (void *)addr)) {
2887		local_dec(&bpage->entries);
2888		return;
2889	}
2890
2891	/*
2892	 * Because the commit page may be on the reader page we
2893	 * start with the next page and check the end loop there.
2894	 */
2895	rb_inc_page(cpu_buffer, &bpage);
2896	start = bpage;
2897	do {
2898		if (bpage->page == (void *)addr) {
2899			local_dec(&bpage->entries);
2900			return;
2901		}
2902		rb_inc_page(cpu_buffer, &bpage);
2903	} while (bpage != start);
2904
2905	/* commit not part of this buffer?? */
2906	RB_WARN_ON(cpu_buffer, 1);
2907}
2908
2909/**
2910 * ring_buffer_commit_discard - discard an event that has not been committed
2911 * @buffer: the ring buffer
2912 * @event: non committed event to discard
2913 *
2914 * Sometimes an event that is in the ring buffer needs to be ignored.
2915 * This function lets the user discard an event in the ring buffer
2916 * and then that event will not be read later.
2917 *
2918 * This function only works if it is called before the item has been
2919 * committed. It will try to free the event from the ring buffer
2920 * if another event has not been added behind it.
2921 *
2922 * If another event has been added behind it, it will set the event
2923 * up as discarded, and perform the commit.
2924 *
2925 * If this function is called, do not call ring_buffer_unlock_commit on
2926 * the event.
2927 */
2928void ring_buffer_discard_commit(struct ring_buffer *buffer,
2929				struct ring_buffer_event *event)
2930{
2931	struct ring_buffer_per_cpu *cpu_buffer;
2932	int cpu;
2933
2934	/* The event is discarded regardless */
2935	rb_event_discard(event);
2936
2937	cpu = smp_processor_id();
2938	cpu_buffer = buffer->buffers[cpu];
2939
2940	/*
2941	 * This must only be called if the event has not been
2942	 * committed yet. Thus we can assume that preemption
2943	 * is still disabled.
2944	 */
2945	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
2946
2947	rb_decrement_entry(cpu_buffer, event);
2948	if (rb_try_to_discard(cpu_buffer, event))
2949		goto out;
2950
2951	/*
2952	 * The commit is still visible by the reader, so we
2953	 * must still update the timestamp.
2954	 */
2955	rb_update_write_stamp(cpu_buffer, event);
2956 out:
2957	rb_end_commit(cpu_buffer);
2958
2959	trace_recursive_unlock(cpu_buffer);
2960
2961	preempt_enable_notrace();
2962
2963}
2964EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
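/*
 * Illustrative sketch (not part of this file): discarding a reserved event
 * instead of committing it, for example when a post-reserve check decides
 * the data is unwanted. The filter condition is hypothetical.
 */
static void example_filtered_write(struct ring_buffer *buffer, u32 value)
{
	struct ring_buffer_event *event;
	u32 *body;

	event = ring_buffer_lock_reserve(buffer, sizeof(*body));
	if (!event)
		return;

	body = ring_buffer_event_data(event);
	*body = value;

	if (!value) {
		/* Frees the space if possible, otherwise marks it as padding */
		ring_buffer_discard_commit(buffer, event);
		return;
	}

	ring_buffer_unlock_commit(buffer, event);
}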
2965
2966/**
2967 * ring_buffer_write - write data to the buffer without reserving
2968 * @buffer: The ring buffer to write to.
2969 * @length: The length of the data being written (excluding the event header)
2970 * @data: The data to write to the buffer.
2971 *
2972 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
2973 * one function. If you already have the data to write to the buffer, it
2974 * may be easier to simply call this function.
2975 *
2976 * Note, like ring_buffer_lock_reserve, the length is the length of the data
2977 * and not the length of the event which would hold the header.
2978 */
2979int ring_buffer_write(struct ring_buffer *buffer,
2980		      unsigned long length,
2981		      void *data)
2982{
2983	struct ring_buffer_per_cpu *cpu_buffer;
2984	struct ring_buffer_event *event;
2985	void *body;
2986	int ret = -EBUSY;
2987	int cpu;
2988
2989	preempt_disable_notrace();
2990
2991	if (atomic_read(&buffer->record_disabled))
2992		goto out;
2993
2994	cpu = raw_smp_processor_id();
2995
2996	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2997		goto out;
2998
2999	cpu_buffer = buffer->buffers[cpu];
3000
3001	if (atomic_read(&cpu_buffer->record_disabled))
3002		goto out;
3003
3004	if (length > BUF_MAX_DATA_SIZE)
3005		goto out;
3006
3007	if (unlikely(trace_recursive_lock(cpu_buffer)))
3008		goto out;
3009
3010	event = rb_reserve_next_event(buffer, cpu_buffer, length);
3011	if (!event)
3012		goto out_unlock;
3013
3014	body = rb_event_data(event);
3015
3016	memcpy(body, data, length);
3017
3018	rb_commit(cpu_buffer, event);
3019
3020	rb_wakeups(buffer, cpu_buffer);
3021
3022	ret = 0;
3023
3024 out_unlock:
3025	trace_recursive_unlock(cpu_buffer);
3026
3027 out:
3028	preempt_enable_notrace();
3029
3030	return ret;
3031}
3032EXPORT_SYMBOL_GPL(ring_buffer_write);
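/*
 * Illustrative sketch (not part of this file): when the data already exists
 * in a local buffer, ring_buffer_write() replaces the reserve/commit pair
 * shown earlier. The sample structure and values are hypothetical.
 */
static int example_oneshot_write(struct ring_buffer *buffer)
{
	struct { u64 ts; u32 id; } sample = { .ts = 0, .id = 42 };

	/* Returns 0 on success, -EBUSY if the event could not be written */
	return ring_buffer_write(buffer, sizeof(sample), &sample);
}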
3033
3034static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
3035{
3036	struct buffer_page *reader = cpu_buffer->reader_page;
3037	struct buffer_page *head = rb_set_head_page(cpu_buffer);
3038	struct buffer_page *commit = cpu_buffer->commit_page;
3039
3040	/* In case of error, head will be NULL */
3041	if (unlikely(!head))
3042		return true;
3043
3044	return reader->read == rb_page_commit(reader) &&
3045		(commit == reader ||
3046		 (commit == head &&
3047		  head->read == rb_page_commit(commit)));
3048}
3049
3050/**
3051 * ring_buffer_record_disable - stop all writes into the buffer
3052 * @buffer: The ring buffer to stop writes to.
3053 *
3054 * This prevents all writes to the buffer. Any attempt to write
3055 * to the buffer after this will fail and return NULL.
3056 *
3057 * The caller should call synchronize_sched() after this.
3058 */
3059void ring_buffer_record_disable(struct ring_buffer *buffer)
3060{
3061	atomic_inc(&buffer->record_disabled);
3062}
3063EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
3064
3065/**
3066 * ring_buffer_record_enable - enable writes to the buffer
3067 * @buffer: The ring buffer to enable writes
3068 *
3069 * Note, multiple disables will need the same number of enables
3070 * to truly enable the writing (much like preempt_disable).
3071 */
3072void ring_buffer_record_enable(struct ring_buffer *buffer)
3073{
3074	atomic_dec(&buffer->record_disabled);
3075}
3076EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
3077
3078/**
3079 * ring_buffer_record_off - stop all writes into the buffer
3080 * @buffer: The ring buffer to stop writes to.
3081 *
3082 * This prevents all writes to the buffer. Any attempt to write
3083 * to the buffer after this will fail and return NULL.
3084 *
3085 * This is different than ring_buffer_record_disable() as
3086 * it works like an on/off switch, whereas the disable() version
3087 * must be paired with an enable().
3088 */
3089void ring_buffer_record_off(struct ring_buffer *buffer)
3090{
3091	unsigned int rd;
3092	unsigned int new_rd;
3093
3094	do {
3095		rd = atomic_read(&buffer->record_disabled);
3096		new_rd = rd | RB_BUFFER_OFF;
3097	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3098}
3099EXPORT_SYMBOL_GPL(ring_buffer_record_off);
3100
3101/**
3102 * ring_buffer_record_on - restart writes into the buffer
3103 * @buffer: The ring buffer to start writes to.
3104 *
3105 * This enables all writes to the buffer that was disabled by
3106 * ring_buffer_record_off().
3107 *
3108 * This is different than ring_buffer_record_enable() as
3109 * it works like an on/off switch, whereas the enable() version
3110 * must be paired with a disable().
3111 */
3112void ring_buffer_record_on(struct ring_buffer *buffer)
3113{
3114	unsigned int rd;
3115	unsigned int new_rd;
3116
3117	do {
3118		rd = atomic_read(&buffer->record_disabled);
3119		new_rd = rd & ~RB_BUFFER_OFF;
3120	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3121}
3122EXPORT_SYMBOL_GPL(ring_buffer_record_on);
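/*
 * Illustrative sketch (not part of this file): the difference between the
 * counting disable/enable interface and the on/off switch. The call
 * sequence is hypothetical and only demonstrates the nesting semantics.
 */
static void example_record_toggling(struct ring_buffer *buffer)
{
	/* Counting interface: two disables need two enables */
	ring_buffer_record_disable(buffer);
	ring_buffer_record_disable(buffer);
	ring_buffer_record_enable(buffer);	/* still disabled */
	ring_buffer_record_enable(buffer);	/* writes allowed again */

	/* Switch interface: a single on undoes any number of offs */
	ring_buffer_record_off(buffer);
	ring_buffer_record_off(buffer);
	ring_buffer_record_on(buffer);		/* writes allowed again */
}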
3123
3124/**
3125 * ring_buffer_record_is_on - return true if the ring buffer can write
3126 * @buffer: The ring buffer to see if write is enabled
3127 *
3128 * Returns true if the ring buffer is in a state that it accepts writes.
3129 */
3130int ring_buffer_record_is_on(struct ring_buffer *buffer)
3131{
3132	return !atomic_read(&buffer->record_disabled);
3133}
3134
3135/**
3136 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
3137 * @buffer: The ring buffer to stop writes to.
3138 * @cpu: The CPU buffer to stop
3139 *
3140 * This prevents all writes to the buffer. Any attempt to write
3141 * to the buffer after this will fail and return NULL.
3142 *
3143 * The caller should call synchronize_sched() after this.
3144 */
3145void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
3146{
3147	struct ring_buffer_per_cpu *cpu_buffer;
3148
3149	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3150		return;
3151
3152	cpu_buffer = buffer->buffers[cpu];
3153	atomic_inc(&cpu_buffer->record_disabled);
3154}
3155EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
3156
3157/**
3158 * ring_buffer_record_enable_cpu - enable writes to the buffer
3159 * @buffer: The ring buffer to enable writes
3160 * @cpu: The CPU to enable.
3161 *
3162 * Note, multiple disables will need the same number of enables
3163 * to truly enable the writing (much like preempt_disable).
3164 */
3165void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
3166{
3167	struct ring_buffer_per_cpu *cpu_buffer;
3168
3169	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3170		return;
3171
3172	cpu_buffer = buffer->buffers[cpu];
3173	atomic_dec(&cpu_buffer->record_disabled);
3174}
3175EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
3176
3177/*
3178 * The total number of entries in the ring buffer is the running counter
3179 * of entries entered into the ring buffer, minus the sum of
3180 * the entries read from the ring buffer and the number of
3181 * entries that were overwritten.
3182 */
3183static inline unsigned long
3184rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
3185{
3186	return local_read(&cpu_buffer->entries) -
3187		(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
3188}
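/*
 * Worked example of the formula above (numbers are made up): with 100
 * entries written, 10 overwritten (overrun) and 30 already read, the
 * function reports 100 - (10 + 30) = 60 entries still in the buffer.
 */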
3189
3190/**
3191 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
3192 * @buffer: The ring buffer
3193 * @cpu: The per CPU buffer to read from.
3194 */
3195u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
3196{
3197	unsigned long flags;
3198	struct ring_buffer_per_cpu *cpu_buffer;
3199	struct buffer_page *bpage;
3200	u64 ret = 0;
3201
3202	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3203		return 0;
3204
3205	cpu_buffer = buffer->buffers[cpu];
3206	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3207	/*
3208	 * if the tail is on reader_page, oldest time stamp is on the reader
3209	 * page
3210	 */
3211	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
3212		bpage = cpu_buffer->reader_page;
3213	else
3214		bpage = rb_set_head_page(cpu_buffer);
3215	if (bpage)
3216		ret = bpage->page->time_stamp;
3217	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3218
3219	return ret;
3220}
3221EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
3222
3223/**
3224 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
3225 * @buffer: The ring buffer
3226 * @cpu: The per CPU buffer to read from.
3227 */
3228unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
3229{
3230	struct ring_buffer_per_cpu *cpu_buffer;
3231	unsigned long ret;
3232
3233	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3234		return 0;
3235
3236	cpu_buffer = buffer->buffers[cpu];
3237	ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
3238
3239	return ret;
3240}
3241EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
3242
3243/**
3244 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
3245 * @buffer: The ring buffer
3246 * @cpu: The per CPU buffer to get the entries from.
3247 */
3248unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
3249{
3250	struct ring_buffer_per_cpu *cpu_buffer;
3251
3252	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3253		return 0;
3254
3255	cpu_buffer = buffer->buffers[cpu];
3256
3257	return rb_num_of_entries(cpu_buffer);
3258}
3259EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
3260
3261/**
3262 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
3263 * buffer wrapping around (only if RB_FL_OVERWRITE is on).
3264 * @buffer: The ring buffer
3265 * @cpu: The per CPU buffer to get the number of overruns from
3266 */
3267unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
3268{
3269	struct ring_buffer_per_cpu *cpu_buffer;
3270	unsigned long ret;
3271
3272	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3273		return 0;
3274
3275	cpu_buffer = buffer->buffers[cpu];
3276	ret = local_read(&cpu_buffer->overrun);
3277
3278	return ret;
3279}
3280EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
3281
3282/**
3283 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
3284 * commits failing due to the buffer wrapping around while there are uncommitted
3285 * events, such as during an interrupt storm.
3286 * @buffer: The ring buffer
3287 * @cpu: The per CPU buffer to get the number of overruns from
3288 */
3289unsigned long
3290ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
3291{
3292	struct ring_buffer_per_cpu *cpu_buffer;
3293	unsigned long ret;
3294
3295	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3296		return 0;
3297
3298	cpu_buffer = buffer->buffers[cpu];
3299	ret = local_read(&cpu_buffer->commit_overrun);
3300
3301	return ret;
3302}
3303EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
3304
3305/**
3306 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
3307 * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
3308 * @buffer: The ring buffer
3309 * @cpu: The per CPU buffer to get the number of overruns from
3310 */
3311unsigned long
3312ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
3313{
3314	struct ring_buffer_per_cpu *cpu_buffer;
3315	unsigned long ret;
3316
3317	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3318		return 0;
3319
3320	cpu_buffer = buffer->buffers[cpu];
3321	ret = local_read(&cpu_buffer->dropped_events);
3322
3323	return ret;
3324}
3325EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
3326
3327/**
3328 * ring_buffer_read_events_cpu - get the number of events successfully read
3329 * @buffer: The ring buffer
3330 * @cpu: The per CPU buffer to get the number of events read
3331 */
3332unsigned long
3333ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
3334{
3335	struct ring_buffer_per_cpu *cpu_buffer;
3336
3337	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3338		return 0;
3339
3340	cpu_buffer = buffer->buffers[cpu];
3341	return cpu_buffer->read;
3342}
3343EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
3344
3345/**
3346 * ring_buffer_entries - get the number of entries in a buffer
3347 * @buffer: The ring buffer
3348 *
3349 * Returns the total number of entries in the ring buffer
3350 * (all CPU entries)
3351 */
3352unsigned long ring_buffer_entries(struct ring_buffer *buffer)
3353{
3354	struct ring_buffer_per_cpu *cpu_buffer;
3355	unsigned long entries = 0;
3356	int cpu;
3357
3358	/* if you care about this being correct, lock the buffer */
3359	for_each_buffer_cpu(buffer, cpu) {
3360		cpu_buffer = buffer->buffers[cpu];
3361		entries += rb_num_of_entries(cpu_buffer);
3362	}
3363
3364	return entries;
3365}
3366EXPORT_SYMBOL_GPL(ring_buffer_entries);
3367
3368/**
3369 * ring_buffer_overruns - get the number of overruns in buffer
3370 * @buffer: The ring buffer
3371 *
3372 * Returns the total number of overruns in the ring buffer
3373 * (all CPU entries)
3374 */
3375unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
3376{
3377	struct ring_buffer_per_cpu *cpu_buffer;
3378	unsigned long overruns = 0;
3379	int cpu;
3380
3381	/* if you care about this being correct, lock the buffer */
3382	for_each_buffer_cpu(buffer, cpu) {
3383		cpu_buffer = buffer->buffers[cpu];
3384		overruns += local_read(&cpu_buffer->overrun);
3385	}
3386
3387	return overruns;
3388}
3389EXPORT_SYMBOL_GPL(ring_buffer_overruns);
3390
3391static void rb_iter_reset(struct ring_buffer_iter *iter)
3392{
3393	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3394
3395	/* Iterator usage is expected to have record disabled */
3396	iter->head_page = cpu_buffer->reader_page;
3397	iter->head = cpu_buffer->reader_page->read;
 
3398
3399	iter->cache_reader_page = iter->head_page;
3400	iter->cache_read = cpu_buffer->read;
3401
3402	if (iter->head)
3403		iter->read_stamp = cpu_buffer->read_stamp;
3404	else
3405		iter->read_stamp = iter->head_page->page->time_stamp;
3406}
3407
3408/**
3409 * ring_buffer_iter_reset - reset an iterator
3410 * @iter: The iterator to reset
3411 *
3412 * Resets the iterator, so that it will start from the beginning
3413 * again.
3414 */
3415void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
3416{
3417	struct ring_buffer_per_cpu *cpu_buffer;
3418	unsigned long flags;
3419
3420	if (!iter)
3421		return;
3422
3423	cpu_buffer = iter->cpu_buffer;
3424
3425	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3426	rb_iter_reset(iter);
3427	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3428}
3429EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
3430
3431/**
3432 * ring_buffer_iter_empty - check if an iterator has no more to read
3433 * @iter: The iterator to check
3434 */
3435int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
3436{
3437	struct ring_buffer_per_cpu *cpu_buffer;
3438
3439	cpu_buffer = iter->cpu_buffer;
3440
3441	return iter->head_page == cpu_buffer->commit_page &&
3442		iter->head == rb_commit_index(cpu_buffer);
3443}
3444EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
3445
3446static void
3447rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
3448		     struct ring_buffer_event *event)
3449{
3450	u64 delta;
3451
3452	switch (event->type_len) {
3453	case RINGBUF_TYPE_PADDING:
3454		return;
3455
3456	case RINGBUF_TYPE_TIME_EXTEND:
3457		delta = event->array[0];
3458		delta <<= TS_SHIFT;
3459		delta += event->time_delta;
3460		cpu_buffer->read_stamp += delta;
3461		return;
3462
3463	case RINGBUF_TYPE_TIME_STAMP:
3464		/* FIXME: not implemented */
3465		return;
3466
3467	case RINGBUF_TYPE_DATA:
3468		cpu_buffer->read_stamp += event->time_delta;
3469		return;
3470
3471	default:
3472		BUG();
3473	}
3474	return;
3475}
3476
3477static void
3478rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
3479			  struct ring_buffer_event *event)
3480{
3481	u64 delta;
3482
3483	switch (event->type_len) {
3484	case RINGBUF_TYPE_PADDING:
3485		return;
3486
3487	case RINGBUF_TYPE_TIME_EXTEND:
3488		delta = event->array[0];
3489		delta <<= TS_SHIFT;
3490		delta += event->time_delta;
3491		iter->read_stamp += delta;
3492		return;
3493
3494	case RINGBUF_TYPE_TIME_STAMP:
3495		/* FIXME: not implemented */
3496		return;
3497
3498	case RINGBUF_TYPE_DATA:
3499		iter->read_stamp += event->time_delta;
3500		return;
3501
3502	default:
3503		BUG();
3504	}
3505	return;
3506}
3507
3508static struct buffer_page *
3509rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3510{
3511	struct buffer_page *reader = NULL;
3512	unsigned long overwrite;
3513	unsigned long flags;
3514	int nr_loops = 0;
3515	int ret;
3516
3517	local_irq_save(flags);
3518	arch_spin_lock(&cpu_buffer->lock);
3519
3520 again:
3521	/*
3522	 * This should normally only loop twice. But because the
3523	 * start of the reader inserts an empty page, it causes
3524	 * a case where we will loop three times. There should be no
3525	 * reason to loop four times (that I know of).
3526	 */
3527	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
3528		reader = NULL;
3529		goto out;
3530	}
3531
3532	reader = cpu_buffer->reader_page;
3533
3534	/* If there's more to read, return this page */
3535	if (cpu_buffer->reader_page->read < rb_page_size(reader))
3536		goto out;
3537
3538	/* Never should we have an index greater than the size */
3539	if (RB_WARN_ON(cpu_buffer,
3540		       cpu_buffer->reader_page->read > rb_page_size(reader)))
3541		goto out;
3542
3543	/* check if we caught up to the tail */
3544	reader = NULL;
3545	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
3546		goto out;
3547
3548	/* Don't bother swapping if the ring buffer is empty */
3549	if (rb_num_of_entries(cpu_buffer) == 0)
3550		goto out;
3551
3552	/*
3553	 * Reset the reader page to size zero.
3554	 */
3555	local_set(&cpu_buffer->reader_page->write, 0);
3556	local_set(&cpu_buffer->reader_page->entries, 0);
3557	local_set(&cpu_buffer->reader_page->page->commit, 0);
3558	cpu_buffer->reader_page->real_end = 0;
3559
3560 spin:
3561	/*
3562	 * Splice the empty reader page into the list around the head.
3563	 */
3564	reader = rb_set_head_page(cpu_buffer);
3565	if (!reader)
3566		goto out;
3567	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
3568	cpu_buffer->reader_page->list.prev = reader->list.prev;
3569
3570	/*
3571	 * cpu_buffer->pages just needs to point to the buffer, it
3572	 *  has no specific buffer page to point to. Let's move it out
3573	 *  of our way so we don't accidentally swap it.
3574	 */
3575	cpu_buffer->pages = reader->list.prev;
3576
3577	/* The reader page will be pointing to the new head */
3578	rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
3579
3580	/*
3581	 * We want to make sure we read the overruns after we set up our
3582	 * pointers to the next object. The writer side does a
3583	 * cmpxchg to cross pages which acts as the mb on the writer
3584	 * side. Note, the reader will constantly fail the swap
3585	 * while the writer is updating the pointers, so this
3586	 * guarantees that the overwrite recorded here is the one we
3587	 * want to compare with the last_overrun.
3588	 */
3589	smp_mb();
3590	overwrite = local_read(&(cpu_buffer->overrun));
3591
3592	/*
3593	 * Here's the tricky part.
3594	 *
3595	 * We need to move the pointer past the header page.
3596	 * But we can only do that if a writer is not currently
3597	 * moving it. The page before the header page has the
3598	 * flag bit '1' set if it is pointing to the page we want,
3599	 * but if the writer is in the process of moving it
3600	 * then it will be '2', or '0' if it has already moved.
3601	 */
3602
3603	ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
3604
3605	/*
3606	 * If we did not convert it, then we must try again.
3607	 */
3608	if (!ret)
3609		goto spin;
3610
3611	/*
3612	 * Yeah! We succeeded in replacing the page.
3613	 *
3614	 * Now make the new head point back to the reader page.
3615	 */
3616	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
3617	rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
3618
3619	/* Finally update the reader page to the new head */
3620	cpu_buffer->reader_page = reader;
3621	cpu_buffer->reader_page->read = 0;
3622
3623	if (overwrite != cpu_buffer->last_overrun) {
3624		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
3625		cpu_buffer->last_overrun = overwrite;
3626	}
3627
3628	goto again;
3629
3630 out:
3631	/* Update the read_stamp on the first event */
3632	if (reader && reader->read == 0)
3633		cpu_buffer->read_stamp = reader->page->time_stamp;
3634
3635	arch_spin_unlock(&cpu_buffer->lock);
3636	local_irq_restore(flags);
3637
3638	return reader;
3639}
3640
3641static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
3642{
3643	struct ring_buffer_event *event;
3644	struct buffer_page *reader;
3645	unsigned length;
3646
3647	reader = rb_get_reader_page(cpu_buffer);
3648
3649	/* This function should not be called when buffer is empty */
3650	if (RB_WARN_ON(cpu_buffer, !reader))
3651		return;
3652
3653	event = rb_reader_event(cpu_buffer);
3654
3655	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
3656		cpu_buffer->read++;
3657
3658	rb_update_read_stamp(cpu_buffer, event);
3659
3660	length = rb_event_length(event);
3661	cpu_buffer->reader_page->read += length;
3662}
3663
3664static void rb_advance_iter(struct ring_buffer_iter *iter)
3665{
3666	struct ring_buffer_per_cpu *cpu_buffer;
3667	struct ring_buffer_event *event;
3668	unsigned length;
3669
3670	cpu_buffer = iter->cpu_buffer;
3671
3672	/*
3673	 * Check if we are at the end of the buffer.
3674	 */
3675	if (iter->head >= rb_page_size(iter->head_page)) {
3676		/* discarded commits can make the page empty */
3677		if (iter->head_page == cpu_buffer->commit_page)
3678			return;
3679		rb_inc_iter(iter);
3680		return;
3681	}
3682
3683	event = rb_iter_head_event(iter);
3684
3685	length = rb_event_length(event);
3686
3687	/*
3688	 * This should not be called to advance the header if we are
3689	 * at the tail of the buffer.
3690	 */
3691	if (RB_WARN_ON(cpu_buffer,
3692		       (iter->head_page == cpu_buffer->commit_page) &&
3693		       (iter->head + length > rb_commit_index(cpu_buffer))))
3694		return;
3695
3696	rb_update_iter_read_stamp(iter, event);
3697
3698	iter->head += length;
3699
3700	/* check for end of page padding */
3701	if ((iter->head >= rb_page_size(iter->head_page)) &&
3702	    (iter->head_page != cpu_buffer->commit_page))
3703		rb_inc_iter(iter);
3704}
3705
3706static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
3707{
3708	return cpu_buffer->lost_events;
3709}
3710
3711static struct ring_buffer_event *
3712rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
3713	       unsigned long *lost_events)
3714{
3715	struct ring_buffer_event *event;
3716	struct buffer_page *reader;
3717	int nr_loops = 0;
3718
3719 again:
3720	/*
3721	 * We repeat when a time extend is encountered.
3722	 * Since the time extend is always attached to a data event,
3723	 * we should never loop more than once.
3724	 * (We never hit the following condition more than twice).
3725	 */
3726	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3727		return NULL;
3728
3729	reader = rb_get_reader_page(cpu_buffer);
3730	if (!reader)
3731		return NULL;
3732
3733	event = rb_reader_event(cpu_buffer);
3734
3735	switch (event->type_len) {
3736	case RINGBUF_TYPE_PADDING:
3737		if (rb_null_event(event))
3738			RB_WARN_ON(cpu_buffer, 1);
3739		/*
3740		 * Because the writer could be discarding every
3741		 * event it creates (which would probably be bad),
3742		 * if we were to go back to "again" then we may never
3743		 * catch up and will trigger the warn-on, or lock up
3744		 * the box. Return the padding, and we will release
3745		 * the current locks and try again.
3746		 */
3747		return event;
3748
3749	case RINGBUF_TYPE_TIME_EXTEND:
3750		/* Internal data, OK to advance */
3751		rb_advance_reader(cpu_buffer);
3752		goto again;
3753
3754	case RINGBUF_TYPE_TIME_STAMP:
3755		/* FIXME: not implemented */
3756		rb_advance_reader(cpu_buffer);
3757		goto again;
3758
3759	case RINGBUF_TYPE_DATA:
3760		if (ts) {
3761			*ts = cpu_buffer->read_stamp + event->time_delta;
3762			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3763							 cpu_buffer->cpu, ts);
3764		}
3765		if (lost_events)
3766			*lost_events = rb_lost_events(cpu_buffer);
3767		return event;
3768
3769	default:
3770		BUG();
3771	}
3772
3773	return NULL;
3774}
3775EXPORT_SYMBOL_GPL(ring_buffer_peek);
3776
3777static struct ring_buffer_event *
3778rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3779{
3780	struct ring_buffer *buffer;
3781	struct ring_buffer_per_cpu *cpu_buffer;
3782	struct ring_buffer_event *event;
3783	int nr_loops = 0;
3784
3785	cpu_buffer = iter->cpu_buffer;
3786	buffer = cpu_buffer->buffer;
3787
3788	/*
3789	 * Check if someone performed a consuming read of
3790	 * the buffer. A consuming read invalidates the iterator
3791	 * and we need to reset the iterator in this case.
3792	 */
3793	if (unlikely(iter->cache_read != cpu_buffer->read ||
3794		     iter->cache_reader_page != cpu_buffer->reader_page))
3795		rb_iter_reset(iter);
3796
3797 again:
3798	if (ring_buffer_iter_empty(iter))
3799		return NULL;
3800
3801	/*
3802	 * We repeat when a time extend is encountered or we hit
3803	 * the end of the page. Since the time extend is always attached
3804	 * to a data event, we should never loop more than three times.
3805	 * Once for going to next page, once on time extend, and
3806	 * finally once to get the event.
3807	 * (We never hit the following condition more than thrice).
3808	 */
3809	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3))
3810		return NULL;
3811
3812	if (rb_per_cpu_empty(cpu_buffer))
3813		return NULL;
3814
3815	if (iter->head >= rb_page_size(iter->head_page)) {
3816		rb_inc_iter(iter);
3817		goto again;
3818	}
3819
3820	event = rb_iter_head_event(iter);
3821
3822	switch (event->type_len) {
3823	case RINGBUF_TYPE_PADDING:
3824		if (rb_null_event(event)) {
3825			rb_inc_iter(iter);
3826			goto again;
3827		}
3828		rb_advance_iter(iter);
3829		return event;
3830
3831	case RINGBUF_TYPE_TIME_EXTEND:
3832		/* Internal data, OK to advance */
3833		rb_advance_iter(iter);
3834		goto again;
3835
3836	case RINGBUF_TYPE_TIME_STAMP:
3837		/* FIXME: not implemented */
3838		rb_advance_iter(iter);
3839		goto again;
3840
3841	case RINGBUF_TYPE_DATA:
3842		if (ts) {
3843			*ts = iter->read_stamp + event->time_delta;
3844			ring_buffer_normalize_time_stamp(buffer,
3845							 cpu_buffer->cpu, ts);
3846		}
3847		return event;
3848
3849	default:
3850		BUG();
3851	}
3852
3853	return NULL;
3854}
3855EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
3856
3857static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
3858{
3859	if (likely(!in_nmi())) {
3860		raw_spin_lock(&cpu_buffer->reader_lock);
3861		return true;
3862	}
3863
3864	/*
3865	 * If an NMI die dumps out the content of the ring buffer,
3866	 * trylock must be used to prevent a deadlock if the NMI
3867	 * preempted a task that holds the ring buffer locks. If
3868	 * we get the lock then all is fine, if not, then continue
3869	 * to do the read, but this can corrupt the ring buffer,
3870	 * so it must be permanently disabled from future writes.
3871	 * Reading from NMI is a one-shot deal.
3872	 */
3873	if (raw_spin_trylock(&cpu_buffer->reader_lock))
3874		return true;
3875
3876	/* Continue without locking, but disable the ring buffer */
3877	atomic_inc(&cpu_buffer->record_disabled);
3878	return false;
3879}
3880
3881static inline void
3882rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
3883{
3884	if (likely(locked))
3885		raw_spin_unlock(&cpu_buffer->reader_lock);
3886	return;
3887}
3888
3889/**
3890 * ring_buffer_peek - peek at the next event to be read
3891 * @buffer: The ring buffer to read
3892 * @cpu: The cpu to peek at
3893 * @ts: The timestamp counter of this event.
3894 * @lost_events: a variable to store if events were lost (may be NULL)
3895 *
3896 * This will return the event that will be read next, but does
3897 * not consume the data.
3898 */
3899struct ring_buffer_event *
3900ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
3901		 unsigned long *lost_events)
3902{
3903	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3904	struct ring_buffer_event *event;
3905	unsigned long flags;
3906	bool dolock;
3907
3908	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3909		return NULL;
3910
3911 again:
3912	local_irq_save(flags);
3913	dolock = rb_reader_lock(cpu_buffer);
3914	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3915	if (event && event->type_len == RINGBUF_TYPE_PADDING)
3916		rb_advance_reader(cpu_buffer);
3917	rb_reader_unlock(cpu_buffer, dolock);
3918	local_irq_restore(flags);
3919
3920	if (event && event->type_len == RINGBUF_TYPE_PADDING)
3921		goto again;
3922
3923	return event;
3924}
3925
3926/**
3927 * ring_buffer_iter_peek - peek at the next event to be read
3928 * @iter: The ring buffer iterator
3929 * @ts: The timestamp counter of this event.
3930 *
3931 * This will return the event that will be read next, but does
3932 * not increment the iterator.
3933 */
3934struct ring_buffer_event *
3935ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3936{
3937	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3938	struct ring_buffer_event *event;
3939	unsigned long flags;
3940
3941 again:
3942	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3943	event = rb_iter_peek(iter, ts);
3944	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3945
3946	if (event && event->type_len == RINGBUF_TYPE_PADDING)
3947		goto again;
3948
3949	return event;
3950}
3951
3952/**
3953 * ring_buffer_consume - return an event and consume it
3954 * @buffer: The ring buffer to get the next event from
3955 * @cpu: the cpu to read the buffer from
3956 * @ts: a variable to store the timestamp (may be NULL)
3957 * @lost_events: a variable to store if events were lost (may be NULL)
3958 *
3959 * Returns the next event in the ring buffer, and that event is consumed.
3960 * Meaning that sequential reads will keep returning a different event,
3961 * and eventually empty the ring buffer if the producer is slower.
3962 */
3963struct ring_buffer_event *
3964ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
3965		    unsigned long *lost_events)
3966{
3967	struct ring_buffer_per_cpu *cpu_buffer;
3968	struct ring_buffer_event *event = NULL;
3969	unsigned long flags;
3970	bool dolock;
3971
3972 again:
3973	/* might be called in atomic */
3974	preempt_disable();
3975
3976	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3977		goto out;
3978
3979	cpu_buffer = buffer->buffers[cpu];
3980	local_irq_save(flags);
3981	dolock = rb_reader_lock(cpu_buffer);
3982
3983	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3984	if (event) {
3985		cpu_buffer->lost_events = 0;
3986		rb_advance_reader(cpu_buffer);
3987	}
3988
3989	rb_reader_unlock(cpu_buffer, dolock);
3990	local_irq_restore(flags);
3991
3992 out:
3993	preempt_enable();
3994
3995	if (event && event->type_len == RINGBUF_TYPE_PADDING)
3996		goto again;
3997
3998	return event;
3999}
4000EXPORT_SYMBOL_GPL(ring_buffer_consume);
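/*
 * Illustrative sketch (not part of this file): draining one CPU's buffer
 * with the consuming read interface. What is done with the payload is
 * hypothetical; the consume loop and the lost-events bookkeeping are the
 * point being shown.
 */
static void example_drain_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	unsigned long lost = 0;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
		void *data = ring_buffer_event_data(event);
		unsigned int len = ring_buffer_event_length(event);

		/* process data/len here; ts is the event timestamp and
		 * lost counts events dropped before this one */
		(void)data;
		(void)len;
	}
}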
4001
4002/**
4003 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
4004 * @buffer: The ring buffer to read from
4005 * @cpu: The cpu buffer to iterate over
 
4006 *
4007 * This performs the initial preparations necessary to iterate
4008 * through the buffer.  Memory is allocated, buffer recording
4009 * is disabled, and the iterator pointer is returned to the caller.
4010 *
4011 * Disabling buffer recording prevents the reading from being
4012 * corrupted. This is not a consuming read, so a producer is not
4013 * expected.
4014 *
4015 * After a sequence of ring_buffer_read_prepare calls, the user is
4016 * expected to make at least one call to ring_buffer_read_prepare_sync.
4017 * Afterwards, ring_buffer_read_start is invoked to get things going
4018 * for real.
4019 *
4020 * This overall must be paired with ring_buffer_read_finish.
4021 */
4022struct ring_buffer_iter *
4023ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
4024{
4025	struct ring_buffer_per_cpu *cpu_buffer;
4026	struct ring_buffer_iter *iter;
4027
4028	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4029		return NULL;
4030
4031	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
4032	if (!iter)
4033		return NULL;
4034
4035	cpu_buffer = buffer->buffers[cpu];
4036
4037	iter->cpu_buffer = cpu_buffer;
4038
4039	atomic_inc(&buffer->resize_disabled);
4040	atomic_inc(&cpu_buffer->record_disabled);
4041
4042	return iter;
4043}
4044EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
4045
4046/**
4047 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
4048 *
4049 * All previously invoked ring_buffer_read_prepare calls to prepare
4050 * iterators will be synchronized.  Afterwards, ring_buffer_read_start
4051 * calls on those iterators are allowed.
4052 */
4053void
4054ring_buffer_read_prepare_sync(void)
4055{
4056	synchronize_sched();
4057}
4058EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
4059
4060/**
4061 * ring_buffer_read_start - start a non consuming read of the buffer
4062 * @iter: The iterator returned by ring_buffer_read_prepare
4063 *
4064 * This finalizes the startup of an iteration through the buffer.
4065 * The iterator comes from a call to ring_buffer_read_prepare and
4066 * an intervening ring_buffer_read_prepare_sync must have been
4067 * performed.
4068 *
4069 * Must be paired with ring_buffer_read_finish.
4070 */
4071void
4072ring_buffer_read_start(struct ring_buffer_iter *iter)
4073{
4074	struct ring_buffer_per_cpu *cpu_buffer;
4075	unsigned long flags;
4076
4077	if (!iter)
4078		return;
4079
4080	cpu_buffer = iter->cpu_buffer;
4081
4082	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4083	arch_spin_lock(&cpu_buffer->lock);
4084	rb_iter_reset(iter);
4085	arch_spin_unlock(&cpu_buffer->lock);
4086	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4087}
4088EXPORT_SYMBOL_GPL(ring_buffer_read_start);
4089
4090/**
4091 * ring_buffer_read_finish - finish reading the iterator of the buffer
4092 * @iter: The iterator retrieved by ring_buffer_read_prepare
4093 *
4094 * This re-enables the recording to the buffer, and frees the
4095 * iterator.
4096 */
4097void
4098ring_buffer_read_finish(struct ring_buffer_iter *iter)
4099{
4100	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4101	unsigned long flags;
4102
4103	/*
4104	 * Ring buffer is disabled from recording, here's a good place
4105	 * to check the integrity of the ring buffer.
4106	 * Must prevent readers from trying to read, as the check
4107	 * clears the HEAD page and readers require it.
4108	 */
4109	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4110	rb_check_pages(cpu_buffer);
4111	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4112
4113	atomic_dec(&cpu_buffer->record_disabled);
4114	atomic_dec(&cpu_buffer->buffer->resize_disabled);
4115	kfree(iter);
4116}
4117EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
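/*
 * Illustrative sketch (not part of this file): the full non-consuming read
 * sequence documented above -- prepare, synchronize, start, iterate and
 * finish. The event handling is left hypothetical.
 */
static void example_iterate_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_prepare(buffer, cpu);
	if (!iter)
		return;

	ring_buffer_read_prepare_sync();
	ring_buffer_read_start(iter);

	/* walk every event without removing anything from the buffer */
	while ((event = ring_buffer_read(iter, &ts)))
		;

	ring_buffer_read_finish(iter);
}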
4118
4119/**
4120 * ring_buffer_read - read the next item in the ring buffer by the iterator
4121 * @iter: The ring buffer iterator
4122 * @ts: The time stamp of the event read.
4123 *
4124 * This reads the next event in the ring buffer and increments the iterator.
 
4125 */
4126struct ring_buffer_event *
4127ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
4128{
4129	struct ring_buffer_event *event;
4130	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4131	unsigned long flags;
4132
4133	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4134 again:
4135	event = rb_iter_peek(iter, ts);
4136	if (!event)
4137		goto out;
4138
4139	if (event->type_len == RINGBUF_TYPE_PADDING)
4140		goto again;
4141
4142	rb_advance_iter(iter);
4143 out:
4144	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4145
4146	return event;
4147}
4148EXPORT_SYMBOL_GPL(ring_buffer_read);
4149
4150/**
4151 * ring_buffer_size - return the size of the ring buffer (in bytes)
4152 * @buffer: The ring buffer.
 * @cpu: The CPU to get ring buffer size from.
4153 */
4154unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
4155{
4156	/*
4157	 * Earlier, this method returned
4158	 *	BUF_PAGE_SIZE * buffer->nr_pages
4159	 * Since the nr_pages field is now removed, we have converted this to
4160	 * return the per cpu buffer value.
4161	 */
4162	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4163		return 0;
4164
4165	return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
4166}
4167EXPORT_SYMBOL_GPL(ring_buffer_size);
4168
4169static void
4170rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
4171{
4172	rb_head_page_deactivate(cpu_buffer);
4173
4174	cpu_buffer->head_page
4175		= list_entry(cpu_buffer->pages, struct buffer_page, list);
4176	local_set(&cpu_buffer->head_page->write, 0);
4177	local_set(&cpu_buffer->head_page->entries, 0);
4178	local_set(&cpu_buffer->head_page->page->commit, 0);
4179
4180	cpu_buffer->head_page->read = 0;
4181
4182	cpu_buffer->tail_page = cpu_buffer->head_page;
4183	cpu_buffer->commit_page = cpu_buffer->head_page;
4184
4185	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
4186	INIT_LIST_HEAD(&cpu_buffer->new_pages);
4187	local_set(&cpu_buffer->reader_page->write, 0);
4188	local_set(&cpu_buffer->reader_page->entries, 0);
4189	local_set(&cpu_buffer->reader_page->page->commit, 0);
4190	cpu_buffer->reader_page->read = 0;
4191
4192	local_set(&cpu_buffer->entries_bytes, 0);
4193	local_set(&cpu_buffer->overrun, 0);
4194	local_set(&cpu_buffer->commit_overrun, 0);
4195	local_set(&cpu_buffer->dropped_events, 0);
4196	local_set(&cpu_buffer->entries, 0);
4197	local_set(&cpu_buffer->committing, 0);
4198	local_set(&cpu_buffer->commits, 0);
4199	cpu_buffer->read = 0;
4200	cpu_buffer->read_bytes = 0;
4201
4202	cpu_buffer->write_stamp = 0;
4203	cpu_buffer->read_stamp = 0;
4204
4205	cpu_buffer->lost_events = 0;
4206	cpu_buffer->last_overrun = 0;
4207
4208	rb_head_page_activate(cpu_buffer);
4209}
4210
4211/**
4212 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
4213 * @buffer: The ring buffer to reset a per cpu buffer of
4214 * @cpu: The CPU buffer to be reset
4215 */
4216void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
4217{
4218	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4219	unsigned long flags;
4220
4221	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4222		return;
4223
4224	atomic_inc(&buffer->resize_disabled);
4225	atomic_inc(&cpu_buffer->record_disabled);
4226
4227	/* Make sure all commits have finished */
4228	synchronize_sched();
4229
4230	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4231
4232	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
4233		goto out;
 
4234
4235	arch_spin_lock(&cpu_buffer->lock);
 
4236
4237	rb_reset_cpu(cpu_buffer);
 
4238
4239	arch_spin_unlock(&cpu_buffer->lock);
4240
4241 out:
4242	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
4243
4244	atomic_dec(&cpu_buffer->record_disabled);
4245	atomic_dec(&buffer->resize_disabled);
4246}
4247EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
4248
4249/**
4250 * ring_buffer_reset - reset a ring buffer
4251 * @buffer: The ring buffer to reset all cpu buffers
4252 */
4253void ring_buffer_reset(struct ring_buffer *buffer)
4254{
 
4255	int cpu;
4256
4257	for_each_buffer_cpu(buffer, cpu)
4258		ring_buffer_reset_cpu(buffer, cpu);
4259}
4260EXPORT_SYMBOL_GPL(ring_buffer_reset);
4261
4262/**
4263 * ring_buffer_empty - is the ring buffer empty?
4264 * @buffer: The ring buffer to test
4265 */
4266bool ring_buffer_empty(struct ring_buffer *buffer)
4267{
4268	struct ring_buffer_per_cpu *cpu_buffer;
4269	unsigned long flags;
4270	bool dolock;
4271	int cpu;
4272	int ret;
4273
4274	/* yes this is racy, but if you don't like the race, lock the buffer */
4275	for_each_buffer_cpu(buffer, cpu) {
4276		cpu_buffer = buffer->buffers[cpu];
4277		local_irq_save(flags);
4278		dolock = rb_reader_lock(cpu_buffer);
4279		ret = rb_per_cpu_empty(cpu_buffer);
4280		rb_reader_unlock(cpu_buffer, dolock);
4281		local_irq_restore(flags);
4282
4283		if (!ret)
4284			return false;
4285	}
4286
4287	return true;
4288}
4289EXPORT_SYMBOL_GPL(ring_buffer_empty);
4290
4291/**
4292 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
4293 * @buffer: The ring buffer
4294 * @cpu: The CPU buffer to test
4295 */
4296bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
4297{
4298	struct ring_buffer_per_cpu *cpu_buffer;
4299	unsigned long flags;
4300	bool dolock;
4301	int ret;
4302
4303	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4304		return true;
4305
4306	cpu_buffer = buffer->buffers[cpu];
4307	local_irq_save(flags);
4308	dolock = rb_reader_lock(cpu_buffer);
4309	ret = rb_per_cpu_empty(cpu_buffer);
4310	rb_reader_unlock(cpu_buffer, dolock);
4311	local_irq_restore(flags);
4312
4313	return ret;
4314}
4315EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
4316
4317#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4318/**
4319 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
4320 * @buffer_a: One buffer to swap with
4321 * @buffer_b: The other buffer to swap with
 * @cpu: the CPU buffer to swap
4322 *
4323 * This function is useful for tracers that want to take a "snapshot"
4324 * of a CPU buffer and has another back up buffer lying around.
4325 * it is expected that the tracer handles the cpu buffer not being
4326 * used at the moment.
4327 */
4328int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
4329			 struct ring_buffer *buffer_b, int cpu)
4330{
4331	struct ring_buffer_per_cpu *cpu_buffer_a;
4332	struct ring_buffer_per_cpu *cpu_buffer_b;
4333	int ret = -EINVAL;
4334
4335	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
4336	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
4337		goto out;
4338
4339	cpu_buffer_a = buffer_a->buffers[cpu];
4340	cpu_buffer_b = buffer_b->buffers[cpu];
4341
4342	/* At least make sure the two buffers are somewhat the same */
4343	if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
4344		goto out;
4345
4346	ret = -EAGAIN;
4347
4348	if (atomic_read(&buffer_a->record_disabled))
4349		goto out;
4350
4351	if (atomic_read(&buffer_b->record_disabled))
4352		goto out;
4353
4354	if (atomic_read(&cpu_buffer_a->record_disabled))
4355		goto out;
4356
4357	if (atomic_read(&cpu_buffer_b->record_disabled))
4358		goto out;
4359
4360	/*
4361	 * We can't do a synchronize_sched here because this
4362	 * function can be called in atomic context.
4363	 * Normally this will be called from the same CPU as cpu.
4364	 * If not it's up to the caller to protect this.
4365	 */
4366	atomic_inc(&cpu_buffer_a->record_disabled);
4367	atomic_inc(&cpu_buffer_b->record_disabled);
4368
4369	ret = -EBUSY;
4370	if (local_read(&cpu_buffer_a->committing))
4371		goto out_dec;
4372	if (local_read(&cpu_buffer_b->committing))
4373		goto out_dec;
4374
4375	buffer_a->buffers[cpu] = cpu_buffer_b;
4376	buffer_b->buffers[cpu] = cpu_buffer_a;
4377
4378	cpu_buffer_b->buffer = buffer_a;
4379	cpu_buffer_a->buffer = buffer_b;
4380
4381	ret = 0;
4382
4383out_dec:
4384	atomic_dec(&cpu_buffer_a->record_disabled);
4385	atomic_dec(&cpu_buffer_b->record_disabled);
4386out:
4387	return ret;
4388}
4389EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
4390#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
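#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
/*
 * Illustrative sketch (not part of this file): taking a per-CPU "snapshot"
 * by swapping the live buffer with a spare one, as described in the
 * kerneldoc above. Both buffers are hypothetical and are assumed to have
 * been allocated with the same number of pages.
 */
static int example_snapshot_cpu(struct ring_buffer *live,
				struct ring_buffer *spare, int cpu)
{
	int ret;

	/* On success the spare buffer now holds the old contents of @cpu */
	ret = ring_buffer_swap_cpu(live, spare, cpu);
	if (ret)
		return ret;	/* -EINVAL, -EAGAIN or -EBUSY */

	/* read the snapshot from @spare with the consuming or iterator API */
	return 0;
}
#endif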
4391
4392/**
4393 * ring_buffer_alloc_read_page - allocate a page to read from buffer
4394 * @buffer: the buffer to allocate for.
4395 * @cpu: the cpu buffer to allocate.
4396 *
4397 * This function is used in conjunction with ring_buffer_read_page.
4398 * When reading a full page from the ring buffer, these functions
4399 * can be used to speed up the process. The calling function should
4400 * allocate a few pages first with this function. Then when it
4401 * needs to get pages from the ring buffer, it passes the result
4402 * of this function into ring_buffer_read_page, which will swap
4403 * the page that was allocated, with the read page of the buffer.
4404 *
4405 * Returns:
4406 *  The page allocated, or NULL on error.
4407 */
4408void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
4409{
4410	struct buffer_data_page *bpage;
4411	struct page *page;
4412
4413	page = alloc_pages_node(cpu_to_node(cpu),
4414				GFP_KERNEL | __GFP_NORETRY, 0);
4415	if (!page)
4416		return NULL;
4417
4418	bpage = page_address(page);
4419
 
4420	rb_init_page(bpage);
4421
4422	return bpage;
4423}
4424EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
4425
4426/**
4427 * ring_buffer_free_read_page - free an allocated read page
4428 * @buffer: the buffer the page was allocated for
 
4429 * @data: the page to free
4430 *
4431 * Free a page allocated from ring_buffer_alloc_read_page.
4432 */
4433void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
4434{
4435	free_page((unsigned long)data);
4436}
4437EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
4438
4439/**
4440 * ring_buffer_read_page - extract a page from the ring buffer
4441 * @buffer: buffer to extract from
4442 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
4443 * @len: amount to extract
4444 * @cpu: the cpu of the buffer to extract
4445 * @full: should the extraction only happen when the page is full.
4446 *
4447 * This function will pull out a page from the ring buffer and consume it.
4448 * @data_page must be the address of the variable that was returned
4449 * from ring_buffer_alloc_read_page. This is because the page might be used
4450 * to swap with a page in the ring buffer.
4451 *
4452 * for example:
4453 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
4454 *	if (!rpage)
4455 *		return error;
4456 *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
4457 *	if (ret >= 0)
4458 *		process_page(rpage, ret);
4459 *
4460 * When @full is set, the function will not return true unless
4461 * the writer is off the reader page.
4462 *
4463 * Note: it is up to the calling functions to handle sleeps and wakeups.
4464 *  The ring buffer can be used anywhere in the kernel and can not
4465 *  blindly call wake_up. The layer that uses the ring buffer must be
4466 *  responsible for that.
4467 *
4468 * Returns:
4469 *  >=0 if data has been transferred, returns the offset of consumed data.
4470 *  <0 if no data has been transferred.
4471 */
4472int ring_buffer_read_page(struct ring_buffer *buffer,
4473			  void **data_page, size_t len, int cpu, int full)
4474{
4475	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4476	struct ring_buffer_event *event;
4477	struct buffer_data_page *bpage;
4478	struct buffer_page *reader;
4479	unsigned long missed_events;
4480	unsigned long flags;
4481	unsigned int commit;
4482	unsigned int read;
4483	u64 save_timestamp;
4484	int ret = -1;
4485
4486	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4487		goto out;
4488
4489	/*
4490	 * If len is not big enough to hold the page header, then
4491	 * we can not copy anything.
4492	 */
4493	if (len <= BUF_PAGE_HDR_SIZE)
4494		goto out;
4495
4496	len -= BUF_PAGE_HDR_SIZE;
4497
4498	if (!data_page)
4499		goto out;
4500
4501	bpage = *data_page;
4502	if (!bpage)
4503		goto out;
4504
4505	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4506
4507	reader = rb_get_reader_page(cpu_buffer);
4508	if (!reader)
4509		goto out_unlock;
4510
4511	event = rb_reader_event(cpu_buffer);
4512
4513	read = reader->read;
4514	commit = rb_page_commit(reader);
4515
4516	/* Check if any events were dropped */
4517	missed_events = cpu_buffer->lost_events;
4518
4519	/*
4520	 * If this page has been partially read or
4521	 * if len is not big enough to read the rest of the page or
4522	 * a writer is still on the page, then
4523	 * we must copy the data from the page to the buffer.
4524	 * Otherwise, we can simply swap the page with the one passed in.
4525	 */
4526	if (read || (len < (commit - read)) ||
4527	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
4528		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
4529		unsigned int rpos = read;
4530		unsigned int pos = 0;
4531		unsigned int size;
4532
4533		if (full)
4534			goto out_unlock;
4535
4536		if (len > (commit - read))
4537			len = (commit - read);
4538
4539		/* Always keep the time extend and data together */
4540		size = rb_event_ts_length(event);
4541
4542		if (len < size)
4543			goto out_unlock;
4544
4545		/* save the current timestamp, since the user will need it */
4546		save_timestamp = cpu_buffer->read_stamp;
4547
4548		/* Need to copy one event at a time */
4549		do {
4550			/* We need the size of one event, because
4551			 * rb_advance_reader only advances by one event,
4552			 * whereas rb_event_ts_length may include the size of
4553			 * one or two events.
4554			 * We have already ensured there's enough space if this
4555			 * is a time extend. */
4556			size = rb_event_length(event);
4557			memcpy(bpage->data + pos, rpage->data + rpos, size);
4558
4559			len -= size;
4560
4561			rb_advance_reader(cpu_buffer);
4562			rpos = reader->read;
4563			pos += size;
4564
4565			if (rpos >= commit)
4566				break;
4567
4568			event = rb_reader_event(cpu_buffer);
4569			/* Always keep the time extend and data together */
4570			size = rb_event_ts_length(event);
4571		} while (len >= size);
4572
4573		/* update bpage */
4574		local_set(&bpage->commit, pos);
4575		bpage->time_stamp = save_timestamp;
4576
4577		/* we copied everything to the beginning */
4578		read = 0;
4579	} else {
4580		/* update the entry counter */
4581		cpu_buffer->read += rb_page_entries(reader);
4582		cpu_buffer->read_bytes += BUF_PAGE_SIZE;
4583
4584		/* swap the pages */
4585		rb_init_page(bpage);
4586		bpage = reader->page;
4587		reader->page = *data_page;
4588		local_set(&reader->write, 0);
4589		local_set(&reader->entries, 0);
4590		reader->read = 0;
4591		*data_page = bpage;
4592
4593		/*
4594		 * Use the real_end for the data size.
4595		 * This gives us a chance to store the lost events
4596		 * on the page.
4597		 */
4598		if (reader->real_end)
4599			local_set(&bpage->commit, reader->real_end);
4600	}
4601	ret = read;
4602
4603	cpu_buffer->lost_events = 0;
4604
4605	commit = local_read(&bpage->commit);
4606	/*
4607	 * Set a flag in the commit field if we lost events
4608	 */
4609	if (missed_events) {
4610		/* If there is room at the end of the page to save the
4611		 * missed events, then record it there.
4612		 */
4613		if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
4614			memcpy(&bpage->data[commit], &missed_events,
4615			       sizeof(missed_events));
4616			local_add(RB_MISSED_STORED, &bpage->commit);
4617			commit += sizeof(missed_events);
4618		}
4619		local_add(RB_MISSED_EVENTS, &bpage->commit);
4620	}
4621
4622	/*
4623	 * This page may be off to user land. Zero it out here.
4624	 */
4625	if (commit < BUF_PAGE_SIZE)
4626		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
4627
4628 out_unlock:
4629	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4630
4631 out:
4632	return ret;
4633}
4634EXPORT_SYMBOL_GPL(ring_buffer_read_page);
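/*
 * Illustrative sketch (not part of this file): the allocate/read/free cycle
 * described in the kerneldoc above, pulling whole pages out of one CPU's
 * buffer. What the caller does with each page is left hypothetical.
 */
static void example_read_pages(struct ring_buffer *buffer, int cpu)
{
	void *rpage;
	int ret;

	rpage = ring_buffer_alloc_read_page(buffer, cpu);
	if (!rpage)
		return;

	do {
		/* a full page is not required, so pass 0 for @full */
		ret = ring_buffer_read_page(buffer, &rpage, PAGE_SIZE, cpu, 0);
		/* on success, ret is the page offset where the data begins */
	} while (ret >= 0);

	ring_buffer_free_read_page(buffer, rpage);
}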
4635
4636#ifdef CONFIG_HOTPLUG_CPU
4637static int rb_cpu_notify(struct notifier_block *self,
4638			 unsigned long action, void *hcpu)
4639{
4640	struct ring_buffer *buffer =
4641		container_of(self, struct ring_buffer, cpu_notify);
4642	long cpu = (long)hcpu;
4643	int cpu_i, nr_pages_same;
4644	unsigned int nr_pages;
4645
4646	switch (action) {
4647	case CPU_UP_PREPARE:
4648	case CPU_UP_PREPARE_FROZEN:
4649		if (cpumask_test_cpu(cpu, buffer->cpumask))
4650			return NOTIFY_OK;
4651
4652		nr_pages = 0;
4653		nr_pages_same = 1;
4654		/* check if all cpu sizes are same */
4655		for_each_buffer_cpu(buffer, cpu_i) {
4656			/* fill in the size from first enabled cpu */
4657			if (nr_pages == 0)
4658				nr_pages = buffer->buffers[cpu_i]->nr_pages;
4659			if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
4660				nr_pages_same = 0;
4661				break;
4662			}
4663		}
4664		/* allocate minimum pages, user can later expand it */
4665		if (!nr_pages_same)
4666			nr_pages = 2;
4667		buffer->buffers[cpu] =
4668			rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
4669		if (!buffer->buffers[cpu]) {
4670			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
4671			     cpu);
4672			return NOTIFY_OK;
4673		}
4674		smp_wmb();
4675		cpumask_set_cpu(cpu, buffer->cpumask);
4676		break;
4677	case CPU_DOWN_PREPARE:
4678	case CPU_DOWN_PREPARE_FROZEN:
4679		/*
4680		 * Do nothing.
4681		 *  If we were to free the buffer, then the user would
4682		 *  lose any trace that was in the buffer.
4683		 */
4684		break;
4685	default:
4686		break;
4687	}
4688	return NOTIFY_OK;
4689}
4690#endif
4691
4692#ifdef CONFIG_RING_BUFFER_STARTUP_TEST
4693/*
4694 * This is a basic integrity check of the ring buffer.
4695 * Late in the boot cycle this test will run when configured in.
4696 * It will kick off a thread per CPU that will go into a loop
4697 * writing to the per cpu ring buffer various sizes of data.
4698 * Some of the data will be large items, some small.
4699 *
4700 * Another thread is created that goes into a spin, sending out
4701 * IPIs to the other CPUs to also write into the ring buffer.
4702 * This is to test the nesting ability of the buffer.
4703 *
4704 * Basic stats are recorded and reported. If something unexpected
4705 * happens in the ring buffer, a big warning is displayed and all
4706 * ring buffers are disabled.
4707 */
4708static struct task_struct *rb_threads[NR_CPUS] __initdata;
4709
4710struct rb_test_data {
4711	struct ring_buffer	*buffer;
4712	unsigned long		events;
4713	unsigned long		bytes_written;
4714	unsigned long		bytes_alloc;
4715	unsigned long		bytes_dropped;
4716	unsigned long		events_nested;
4717	unsigned long		bytes_written_nested;
4718	unsigned long		bytes_alloc_nested;
4719	unsigned long		bytes_dropped_nested;
4720	int			min_size_nested;
4721	int			max_size_nested;
4722	int			max_size;
4723	int			min_size;
4724	int			cpu;
4725	int			cnt;
4726};
4727
4728static struct rb_test_data rb_data[NR_CPUS] __initdata;
4729
4730/* 1 meg per cpu */
4731#define RB_TEST_BUFFER_SIZE	1048576
4732
4733static char rb_string[] __initdata =
4734	"abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
4735	"?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
4736	"!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
4737
4738static bool rb_test_started __initdata;
4739
4740struct rb_item {
4741	int size;
4742	char str[];
4743};
4744
4745static __init int rb_write_something(struct rb_test_data *data, bool nested)
4746{
4747	struct ring_buffer_event *event;
4748	struct rb_item *item;
4749	bool started;
4750	int event_len;
4751	int size;
4752	int len;
4753	int cnt;
4754
4755	/* Have nested writes different than what is written */
4756	cnt = data->cnt + (nested ? 27 : 0);
4757
4758	/* Multiply cnt by ~e, to make some unique increment */
4759	size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
4760
4761	len = size + sizeof(struct rb_item);
4762
4763	started = rb_test_started;
4764	/* read rb_test_started before checking buffer enabled */
4765	smp_rmb();
4766
4767	event = ring_buffer_lock_reserve(data->buffer, len);
4768	if (!event) {
4769		/* Ignore dropped events before test starts. */
4770		if (started) {
4771			if (nested)
4772				data->bytes_dropped_nested += len;
4773			else
4774				data->bytes_dropped += len;
4775		}
4776		return len;
4777	}
4778
4779	event_len = ring_buffer_event_length(event);
4780
4781	if (RB_WARN_ON(data->buffer, event_len < len))
4782		goto out;
4783
4784	item = ring_buffer_event_data(event);
4785	item->size = size;
4786	memcpy(item->str, rb_string, size);
4787
4788	if (nested) {
4789		data->bytes_alloc_nested += event_len;
4790		data->bytes_written_nested += len;
4791		data->events_nested++;
4792		if (!data->min_size_nested || len < data->min_size_nested)
4793			data->min_size_nested = len;
4794		if (len > data->max_size_nested)
4795			data->max_size_nested = len;
4796	} else {
4797		data->bytes_alloc += event_len;
4798		data->bytes_written += len;
4799		data->events++;
4800		if (!data->min_size || len < data->min_size)
4801			data->min_size = len;
4802		if (len > data->max_size)
4803			data->max_size = len;
4804	}
4805
4806 out:
4807	ring_buffer_unlock_commit(data->buffer, event);
4808
4809	return 0;
4810}
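/*
 * Illustrative sketch, not part of this file: the smp_wmb()/smp_rmb()
 * pairing used by this test.  test_ringbuffer() (below) enables the
 * buffer, issues smp_wmb(), then sets rb_test_started;
 * rb_write_something() (above) loads rb_test_started, issues smp_rmb(),
 * then tries to reserve, so a failed reserve seen after the flag is a
 * genuine drop.  The names below are hypothetical; a store-release /
 * load-acquire pair would be the more modern way to write the same thing.
 */
static int  example_payload;
static bool example_ready;

static void __maybe_unused example_publish(int value)
{
	example_payload = value;	/* make the data visible ...        */
	smp_wmb();			/* ... strictly before the flag ... */
	example_ready = true;		/* ... that the consumer tests      */
}

static int __maybe_unused example_consume(void)
{
	if (!example_ready)
		return -1;		/* not published yet                */
	smp_rmb();			/* pairs with smp_wmb() above       */
	return example_payload;		/* guaranteed to see the new value  */
}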
4811
4812static __init int rb_test(void *arg)
4813{
4814	struct rb_test_data *data = arg;
4815
4816	while (!kthread_should_stop()) {
4817		rb_write_something(data, false);
4818		data->cnt++;
4819
4820		set_current_state(TASK_INTERRUPTIBLE);
4821		/* Now sleep between a min of 100-300us and a max of 1ms */
4822		usleep_range(((data->cnt % 3) + 1) * 100, 1000);
4823	}
4824
4825	return 0;
4826}
4827
4828static __init void rb_ipi(void *ignore)
4829{
4830	struct rb_test_data *data;
4831	int cpu = smp_processor_id();
4832
4833	data = &rb_data[cpu];
4834	rb_write_something(data, true);
4835}
4836
4837static __init int rb_hammer_test(void *arg)
4838{
4839	while (!kthread_should_stop()) {
4840
4841		/* Send an IPI to all cpus to write data! */
4842		smp_call_function(rb_ipi, NULL, 1);
4843		/* No sleep, but for non preempt, let others run */
4844		schedule();
4845	}
4846
4847	return 0;
4848}
4849
4850static __init int test_ringbuffer(void)
4851{
4852	struct task_struct *rb_hammer;
4853	struct ring_buffer *buffer;
4854	int cpu;
4855	int ret = 0;
4856
4857	pr_info("Running ring buffer tests...\n");
4858
4859	buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
4860	if (WARN_ON(!buffer))
4861		return 0;
4862
4863	/* Disable buffer so that threads can't write to it yet */
4864	ring_buffer_record_off(buffer);
4865
4866	for_each_online_cpu(cpu) {
4867		rb_data[cpu].buffer = buffer;
4868		rb_data[cpu].cpu = cpu;
4869		rb_data[cpu].cnt = cpu;
4870		rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
4871						 "rbtester/%d", cpu);
4872		if (WARN_ON(!rb_threads[cpu])) {
4873			pr_cont("FAILED\n");
4874			ret = -1;
4875			goto out_free;
4876		}
4877
4878		kthread_bind(rb_threads[cpu], cpu);
4879		wake_up_process(rb_threads[cpu]);
4880	}
4881
4882	/* Now create the rb hammer! */
4883	rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
4884	if (WARN_ON(!rb_hammer)) {
4885		pr_cont("FAILED\n");
4886		ret = -1;
4887		goto out_free;
4888	}
4889
4890	ring_buffer_record_on(buffer);
4891	/*
4892	 * Show buffer is enabled before setting rb_test_started.
4893	 * Yes there's a small race window where events could be
4894	 * dropped and the thread won't catch it. But when a ring
4895	 * buffer gets enabled, there will always be some kind of
4896	 * delay before other CPUs see it. Thus, we don't care about
4897	 * those dropped events. We care about events dropped after
4898	 * the threads see that the buffer is active.
4899	 */
4900	smp_wmb();
4901	rb_test_started = true;
4902
4903	set_current_state(TASK_INTERRUPTIBLE);
4904	/* Just run for 10 seconds */
4905	schedule_timeout(10 * HZ);
4906
4907	kthread_stop(rb_hammer);
4908
4909 out_free:
4910	for_each_online_cpu(cpu) {
4911		if (!rb_threads[cpu])
4912			break;
4913		kthread_stop(rb_threads[cpu]);
4914	}
4915	if (ret) {
4916		ring_buffer_free(buffer);
4917		return ret;
4918	}
4919
4920	/* Report! */
4921	pr_info("finished\n");
4922	for_each_online_cpu(cpu) {
4923		struct ring_buffer_event *event;
4924		struct rb_test_data *data = &rb_data[cpu];
4925		struct rb_item *item;
4926		unsigned long total_events;
4927		unsigned long total_dropped;
4928		unsigned long total_written;
4929		unsigned long total_alloc;
4930		unsigned long total_read = 0;
4931		unsigned long total_size = 0;
4932		unsigned long total_len = 0;
4933		unsigned long total_lost = 0;
4934		unsigned long lost;
4935		int big_event_size;
4936		int small_event_size;
4937
4938		ret = -1;
4939
4940		total_events = data->events + data->events_nested;
4941		total_written = data->bytes_written + data->bytes_written_nested;
4942		total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
4943		total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
4944
4945		big_event_size = data->max_size + data->max_size_nested;
4946		small_event_size = data->min_size + data->min_size_nested;
4947
4948		pr_info("CPU %d:\n", cpu);
4949		pr_info("              events:    %ld\n", total_events);
4950		pr_info("       dropped bytes:    %ld\n", total_dropped);
4951		pr_info("       alloced bytes:    %ld\n", total_alloc);
4952		pr_info("       written bytes:    %ld\n", total_written);
4953		pr_info("       biggest event:    %d\n", big_event_size);
4954		pr_info("      smallest event:    %d\n", small_event_size);
4955
4956		if (RB_WARN_ON(buffer, total_dropped))
4957			break;
4958
4959		ret = 0;
4960
4961		while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
4962			total_lost += lost;
4963			item = ring_buffer_event_data(event);
4964			total_len += ring_buffer_event_length(event);
4965			total_size += item->size + sizeof(struct rb_item);
4966			if (memcmp(&item->str[0], rb_string, item->size) != 0) {
4967				pr_info("FAILED!\n");
4968				pr_info("buffer had: %.*s\n", item->size, item->str);
4969				pr_info("expected:   %.*s\n", item->size, rb_string);
4970				RB_WARN_ON(buffer, 1);
4971				ret = -1;
4972				break;
4973			}
4974			total_read++;
4975		}
4976		if (ret)
4977			break;
4978
4979		ret = -1;
4980
4981		pr_info("         read events:   %ld\n", total_read);
4982		pr_info("         lost events:   %ld\n", total_lost);
4983		pr_info("        total events:   %ld\n", total_lost + total_read);
4984		pr_info("  recorded len bytes:   %ld\n", total_len);
4985		pr_info(" recorded size bytes:   %ld\n", total_size);
4986		if (total_lost)
4987			pr_info(" With dropped events, record len and size may not match\n"
4988				" alloced and written from above\n");
4989		if (!total_lost) {
4990			if (RB_WARN_ON(buffer, total_len != total_alloc ||
4991				       total_size != total_written))
4992				break;
4993		}
4994		if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
4995			break;
4996
4997		ret = 0;
4998	}
4999	if (!ret)
5000		pr_info("Ring buffer PASSED!\n");
5001
5002	ring_buffer_free(buffer);
5003	return 0;
5004}
5005
5006late_initcall(test_ringbuffer);
5007#endif /* CONFIG_RING_BUFFER_STARTUP_TEST */