   1/*
   2 * Generic ring buffer
   3 *
   4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
   5 */
   6#include <linux/trace_events.h>
   7#include <linux/ring_buffer.h>
   8#include <linux/trace_clock.h>
   9#include <linux/trace_seq.h>
  10#include <linux/spinlock.h>
  11#include <linux/irq_work.h>
  12#include <linux/uaccess.h>
  13#include <linux/hardirq.h>
  14#include <linux/kthread.h>	/* for self test */
  15#include <linux/kmemcheck.h>
  16#include <linux/module.h>
  17#include <linux/percpu.h>
  18#include <linux/mutex.h>
  19#include <linux/delay.h>
  20#include <linux/slab.h>
  21#include <linux/init.h>
  22#include <linux/hash.h>
  23#include <linux/list.h>
  24#include <linux/cpu.h>
  25
  26#include <asm/local.h>
  27
  28static void update_pages_handler(struct work_struct *work);
  29
  30/*
   31 * The ring buffer header is special. We must manually keep it up to date.
  32 */
  33int ring_buffer_print_entry_header(struct trace_seq *s)
  34{
  35	trace_seq_puts(s, "# compressed entry header\n");
  36	trace_seq_puts(s, "\ttype_len    :    5 bits\n");
  37	trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
  38	trace_seq_puts(s, "\tarray       :   32 bits\n");
  39	trace_seq_putc(s, '\n');
  40	trace_seq_printf(s, "\tpadding     : type == %d\n",
  41			 RINGBUF_TYPE_PADDING);
  42	trace_seq_printf(s, "\ttime_extend : type == %d\n",
  43			 RINGBUF_TYPE_TIME_EXTEND);
  44	trace_seq_printf(s, "\tdata max type_len  == %d\n",
  45			 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
  46
  47	return !trace_seq_has_overflowed(s);
  48}
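
/*
 * Worked example (editor's note, not part of the original file): how the
 * compressed header printed above is used for two hypothetical data events,
 * following rb_event_data_length() and rb_event_data() further down.
 *
 *   12-byte payload:  fits in the 5-bit type_len field, so
 *                     type_len = 12 / RB_ALIGNMENT = 3 and the payload
 *                     starts at array[0].
 *
 *   200-byte payload: too large for type_len, so type_len = 0, the total
 *                     data length (length word included) is stored in
 *                     array[0], and the payload starts at array[1].
 */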
  49
  50/*
  51 * The ring buffer is made up of a list of pages. A separate list of pages is
  52 * allocated for each CPU. A writer may only write to a buffer that is
  53 * associated with the CPU it is currently executing on.  A reader may read
  54 * from any per cpu buffer.
  55 *
  56 * The reader is special. For each per cpu buffer, the reader has its own
  57 * reader page. When a reader has read the entire reader page, this reader
  58 * page is swapped with another page in the ring buffer.
  59 *
   60 * Now, as long as the writer is off the reader page, the reader can do
   61 * whatever it wants with that page. The writer will never write to that page
  62 * again (as long as it is out of the ring buffer).
  63 *
  64 * Here's some silly ASCII art.
  65 *
  66 *   +------+
  67 *   |reader|          RING BUFFER
  68 *   |page  |
  69 *   +------+        +---+   +---+   +---+
  70 *                   |   |-->|   |-->|   |
  71 *                   +---+   +---+   +---+
  72 *                     ^               |
  73 *                     |               |
  74 *                     +---------------+
  75 *
  76 *
  77 *   +------+
  78 *   |reader|          RING BUFFER
  79 *   |page  |------------------v
  80 *   +------+        +---+   +---+   +---+
  81 *                   |   |-->|   |-->|   |
  82 *                   +---+   +---+   +---+
  83 *                     ^               |
  84 *                     |               |
  85 *                     +---------------+
  86 *
  87 *
  88 *   +------+
  89 *   |reader|          RING BUFFER
  90 *   |page  |------------------v
  91 *   +------+        +---+   +---+   +---+
  92 *      ^            |   |-->|   |-->|   |
  93 *      |            +---+   +---+   +---+
  94 *      |                              |
  95 *      |                              |
  96 *      +------------------------------+
  97 *
  98 *
  99 *   +------+
 100 *   |buffer|          RING BUFFER
 101 *   |page  |------------------v
 102 *   +------+        +---+   +---+   +---+
 103 *      ^            |   |   |   |-->|   |
 104 *      |   New      +---+   +---+   +---+
 105 *      |  Reader------^               |
 106 *      |   page                       |
 107 *      +------------------------------+
 108 *
 109 *
 110 * After we make this swap, the reader can hand this page off to the splice
 111 * code and be done with it. It can even allocate a new page if it needs to
 112 * and swap that into the ring buffer.
 113 *
 114 * We will be using cmpxchg soon to make all this lockless.
 115 *
 116 */
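
/*
 * Editor's sketch (assumed usage, not part of the original file): the
 * reader-page swap described above is hidden behind the exported consume
 * API. A minimal consumer loop could look like the following; "buffer",
 * "cpu" and process_sample() are placeholders for this example.
 *
 *	u64 ts;
 *	unsigned long lost_events;
 *	struct ring_buffer_event *event;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost_events))) {
 *		void *data = ring_buffer_event_data(event);
 *		unsigned len = ring_buffer_event_length(event);
 *
 *		process_sample(data, len, ts);	-- hypothetical helper
 *	}
 */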
 117
 118/* Used for individual buffers (after the counter) */
 119#define RB_BUFFER_OFF		(1 << 20)
 120
 121#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
 122
 123#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
 124#define RB_ALIGNMENT		4U
 125#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 126#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */
 127
 128#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
 129# define RB_FORCE_8BYTE_ALIGNMENT	0
 130# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
 131#else
 132# define RB_FORCE_8BYTE_ALIGNMENT	1
 133# define RB_ARCH_ALIGNMENT		8U
 134#endif
 135
 136#define RB_ALIGN_DATA		__aligned(RB_ARCH_ALIGNMENT)
 137
 138/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
 139#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
 140
 141enum {
 142	RB_LEN_TIME_EXTEND = 8,
 143	RB_LEN_TIME_STAMP = 16,
 144};
 145
 146#define skip_time_extend(event) \
 147	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
 148
 149static inline int rb_null_event(struct ring_buffer_event *event)
 150{
 151	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
 152}
 153
 154static void rb_event_set_padding(struct ring_buffer_event *event)
 155{
 156	/* padding has a NULL time_delta */
 157	event->type_len = RINGBUF_TYPE_PADDING;
 158	event->time_delta = 0;
 159}
 160
 161static unsigned
 162rb_event_data_length(struct ring_buffer_event *event)
 163{
 164	unsigned length;
 165
 166	if (event->type_len)
 167		length = event->type_len * RB_ALIGNMENT;
 168	else
 169		length = event->array[0];
 170	return length + RB_EVNT_HDR_SIZE;
 171}
 172
 173/*
 174 * Return the length of the given event. Will return
 175 * the length of the time extend if the event is a
 176 * time extend.
 177 */
 178static inline unsigned
 179rb_event_length(struct ring_buffer_event *event)
 180{
 181	switch (event->type_len) {
 182	case RINGBUF_TYPE_PADDING:
 183		if (rb_null_event(event))
 184			/* undefined */
 185			return -1;
 186		return  event->array[0] + RB_EVNT_HDR_SIZE;
 187
 188	case RINGBUF_TYPE_TIME_EXTEND:
 189		return RB_LEN_TIME_EXTEND;
 190
 191	case RINGBUF_TYPE_TIME_STAMP:
 192		return RB_LEN_TIME_STAMP;
 193
 194	case RINGBUF_TYPE_DATA:
 195		return rb_event_data_length(event);
 196	default:
 197		BUG();
 198	}
 199	/* not hit */
 200	return 0;
 201}
 202
 203/*
 204 * Return total length of time extend and data,
 205 *   or just the event length for all other events.
 206 */
 207static inline unsigned
 208rb_event_ts_length(struct ring_buffer_event *event)
 209{
 210	unsigned len = 0;
 211
 212	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
 213		/* time extends include the data event after it */
 214		len = RB_LEN_TIME_EXTEND;
 215		event = skip_time_extend(event);
 216	}
 217	return len + rb_event_length(event);
 218}
 219
 220/**
 221 * ring_buffer_event_length - return the length of the event
 222 * @event: the event to get the length of
 223 *
 224 * Returns the size of the data load of a data event.
 225 * If the event is something other than a data event, it
 226 * returns the size of the event itself. With the exception
 227 * of a TIME EXTEND, where it still returns the size of the
 228 * data load of the data event after it.
 229 */
 230unsigned ring_buffer_event_length(struct ring_buffer_event *event)
 231{
 232	unsigned length;
 233
 234	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
 235		event = skip_time_extend(event);
 236
 237	length = rb_event_length(event);
 238	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 239		return length;
 240	length -= RB_EVNT_HDR_SIZE;
 241	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
  242		length -= sizeof(event->array[0]);
 243	return length;
 244}
 245EXPORT_SYMBOL_GPL(ring_buffer_event_length);
 246
 247/* inline for ring buffer fast paths */
 248static void *
 249rb_event_data(struct ring_buffer_event *event)
 250{
 251	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
 252		event = skip_time_extend(event);
 253	BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
 254	/* If length is in len field, then array[0] has the data */
 255	if (event->type_len)
 256		return (void *)&event->array[0];
 257	/* Otherwise length is in array[0] and array[1] has the data */
 258	return (void *)&event->array[1];
 259}
 260
 261/**
 262 * ring_buffer_event_data - return the data of the event
 263 * @event: the event to get the data from
 264 */
 265void *ring_buffer_event_data(struct ring_buffer_event *event)
 266{
 267	return rb_event_data(event);
 268}
 269EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 270
 271#define for_each_buffer_cpu(buffer, cpu)		\
 272	for_each_cpu(cpu, buffer->cpumask)
 273
 274#define TS_SHIFT	27
 275#define TS_MASK		((1ULL << TS_SHIFT) - 1)
 276#define TS_DELTA_TEST	(~TS_MASK)
 277
 278/* Flag when events were overwritten */
 279#define RB_MISSED_EVENTS	(1 << 31)
 280/* Missed count stored at end */
 281#define RB_MISSED_STORED	(1 << 30)
 282
 283struct buffer_data_page {
 284	u64		 time_stamp;	/* page time stamp */
 285	local_t		 commit;	/* write committed index */
 286	unsigned char	 data[] RB_ALIGN_DATA;	/* data of buffer page */
 287};
 288
 289/*
 290 * Note, the buffer_page list must be first. The buffer pages
 291 * are allocated in cache lines, which means that each buffer
 292 * page will be at the beginning of a cache line, and thus
 293 * the least significant bits will be zero. We use this to
 294 * add flags in the list struct pointers, to make the ring buffer
 295 * lockless.
 296 */
 297struct buffer_page {
 298	struct list_head list;		/* list of buffer pages */
 299	local_t		 write;		/* index for next write */
 300	unsigned	 read;		/* index for next read */
 301	local_t		 entries;	/* entries on this page */
 302	unsigned long	 real_end;	/* real end of data */
 303	struct buffer_data_page *page;	/* Actual data page */
 304};
 305
 306/*
 307 * The buffer page counters, write and entries, must be reset
 308 * atomically when crossing page boundaries. To synchronize this
 309 * update, two counters are inserted into the number. One is
 310 * the actual counter for the write position or count on the page.
 311 *
 312 * The other is a counter of updaters. Before an update happens
 313 * the update partition of the counter is incremented. This will
 314 * allow the updater to update the counter atomically.
 315 *
 316 * The counter is 20 bits, and the state data is 12.
 317 */
 318#define RB_WRITE_MASK		0xfffff
 319#define RB_WRITE_INTCNT		(1 << 20)
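
/*
 * Worked example (editor's note): bits 0-19 of bpage->write hold the write
 * index and the bits above them count in-flight updaters. A raw value of
 * 0x300014 therefore decodes as:
 *
 *	write index   = 0x300014 & RB_WRITE_MASK = 0x14  (20 bytes)
 *	updater count = 0x300014 >> 20           = 3
 *
 * rb_page_write() below applies exactly this mask, and rb_tail_page_update()
 * bumps the updater count by adding RB_WRITE_INTCNT.
 */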
 320
 321static void rb_init_page(struct buffer_data_page *bpage)
 322{
 323	local_set(&bpage->commit, 0);
 324}
 325
 326/**
 327 * ring_buffer_page_len - the size of data on the page.
 328 * @page: The page to read
 329 *
 330 * Returns the amount of data on the page, including buffer page header.
 331 */
 332size_t ring_buffer_page_len(void *page)
 333{
 334	return local_read(&((struct buffer_data_page *)page)->commit)
 335		+ BUF_PAGE_HDR_SIZE;
 336}
 337
 338/*
 339 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 340 * this issue out.
 341 */
 342static void free_buffer_page(struct buffer_page *bpage)
 343{
 344	free_page((unsigned long)bpage->page);
 345	kfree(bpage);
 346}
 347
 348/*
 349 * We need to fit the time_stamp delta into 27 bits.
 350 */
 351static inline int test_time_stamp(u64 delta)
 352{
 353	if (delta & TS_DELTA_TEST)
 354		return 1;
 355	return 0;
 356}
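
/*
 * Editor's note, for scale: with a nanosecond-resolution trace clock the
 * 27-bit delta overflows after 2^27 ns, i.e. after roughly 134 ms without
 * an event, at which point a RINGBUF_TYPE_TIME_EXTEND event must be
 * emitted to carry the larger delta.
 */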
 357
 358#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
 359
  360/* Max payload is BUF_PAGE_SIZE - header (8 bytes) */
 361#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
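
/*
 * Editor's note, for concreteness: on a typical 64-bit build with 4 KiB
 * pages, BUF_PAGE_HDR_SIZE is 16 bytes (8-byte time_stamp plus 8-byte
 * commit), so BUF_PAGE_SIZE is 4096 - 16 = 4080 bytes and
 * BUF_MAX_DATA_SIZE is 4080 - 8 = 4072 bytes. The exact values depend on
 * PAGE_SIZE and sizeof(local_t) for the architecture.
 */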
 362
 363int ring_buffer_print_page_header(struct trace_seq *s)
 364{
 365	struct buffer_data_page field;
 366
 367	trace_seq_printf(s, "\tfield: u64 timestamp;\t"
 368			 "offset:0;\tsize:%u;\tsigned:%u;\n",
 369			 (unsigned int)sizeof(field.time_stamp),
 370			 (unsigned int)is_signed_type(u64));
 371
 372	trace_seq_printf(s, "\tfield: local_t commit;\t"
 373			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
 374			 (unsigned int)offsetof(typeof(field), commit),
 375			 (unsigned int)sizeof(field.commit),
 376			 (unsigned int)is_signed_type(long));
 377
 378	trace_seq_printf(s, "\tfield: int overwrite;\t"
 379			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
 380			 (unsigned int)offsetof(typeof(field), commit),
 381			 1,
 382			 (unsigned int)is_signed_type(long));
 383
 384	trace_seq_printf(s, "\tfield: char data;\t"
 385			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
 386			 (unsigned int)offsetof(typeof(field), data),
 387			 (unsigned int)BUF_PAGE_SIZE,
 388			 (unsigned int)is_signed_type(char));
 389
 390	return !trace_seq_has_overflowed(s);
 391}
 392
 393struct rb_irq_work {
 394	struct irq_work			work;
 395	wait_queue_head_t		waiters;
 396	wait_queue_head_t		full_waiters;
 397	bool				waiters_pending;
 398	bool				full_waiters_pending;
 399	bool				wakeup_full;
 400};
 401
 402/*
 403 * Structure to hold event state and handle nested events.
 404 */
 405struct rb_event_info {
 406	u64			ts;
 407	u64			delta;
 408	unsigned long		length;
 409	struct buffer_page	*tail_page;
 410	int			add_timestamp;
 411};
 412
 413/*
 414 * Used for which event context the event is in.
 415 *  NMI     = 0
 416 *  IRQ     = 1
 417 *  SOFTIRQ = 2
 418 *  NORMAL  = 3
 419 *
 420 * See trace_recursive_lock() comment below for more details.
 421 */
 422enum {
 423	RB_CTX_NMI,
 424	RB_CTX_IRQ,
 425	RB_CTX_SOFTIRQ,
 426	RB_CTX_NORMAL,
 427	RB_CTX_MAX
 428};
 429
 430/*
 431 * head_page == tail_page && head == tail then buffer is empty.
 432 */
 433struct ring_buffer_per_cpu {
 434	int				cpu;
 435	atomic_t			record_disabled;
 436	struct ring_buffer		*buffer;
 437	raw_spinlock_t			reader_lock;	/* serialize readers */
 438	arch_spinlock_t			lock;
 439	struct lock_class_key		lock_key;
 440	unsigned int			nr_pages;
 441	unsigned int			current_context;
 442	struct list_head		*pages;
 443	struct buffer_page		*head_page;	/* read from head */
 444	struct buffer_page		*tail_page;	/* write to tail */
 445	struct buffer_page		*commit_page;	/* committed pages */
 446	struct buffer_page		*reader_page;
 447	unsigned long			lost_events;
 448	unsigned long			last_overrun;
 449	local_t				entries_bytes;
 450	local_t				entries;
 451	local_t				overrun;
 452	local_t				commit_overrun;
 453	local_t				dropped_events;
 454	local_t				committing;
 455	local_t				commits;
 456	unsigned long			read;
 457	unsigned long			read_bytes;
 458	u64				write_stamp;
 459	u64				read_stamp;
 460	/* ring buffer pages to update, > 0 to add, < 0 to remove */
 461	int				nr_pages_to_update;
 462	struct list_head		new_pages; /* new pages to add */
 463	struct work_struct		update_pages_work;
 464	struct completion		update_done;
 465
 466	struct rb_irq_work		irq_work;
 467};
 468
 469struct ring_buffer {
 470	unsigned			flags;
 471	int				cpus;
 472	atomic_t			record_disabled;
 473	atomic_t			resize_disabled;
 474	cpumask_var_t			cpumask;
 475
 476	struct lock_class_key		*reader_lock_key;
 477
 478	struct mutex			mutex;
 479
 480	struct ring_buffer_per_cpu	**buffers;
 481
 482#ifdef CONFIG_HOTPLUG_CPU
 483	struct notifier_block		cpu_notify;
 484#endif
 485	u64				(*clock)(void);
 486
 487	struct rb_irq_work		irq_work;
 488};
 489
 490struct ring_buffer_iter {
 491	struct ring_buffer_per_cpu	*cpu_buffer;
 492	unsigned long			head;
 493	struct buffer_page		*head_page;
 494	struct buffer_page		*cache_reader_page;
 495	unsigned long			cache_read;
 496	u64				read_stamp;
 497};
 498
 499/*
 500 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 501 *
 502 * Schedules a delayed work to wake up any task that is blocked on the
 503 * ring buffer waiters queue.
 504 */
 505static void rb_wake_up_waiters(struct irq_work *work)
 506{
 507	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
 508
 509	wake_up_all(&rbwork->waiters);
 510	if (rbwork->wakeup_full) {
 511		rbwork->wakeup_full = false;
 512		wake_up_all(&rbwork->full_waiters);
 513	}
 514}
 515
 516/**
 517 * ring_buffer_wait - wait for input to the ring buffer
 518 * @buffer: buffer to wait on
 519 * @cpu: the cpu buffer to wait on
 520 * @full: wait until a full page is available, if @cpu != RING_BUFFER_ALL_CPUS
 521 *
 522 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 523 * as data is added to any of the @buffer's cpu buffers. Otherwise
 524 * it will wait for data to be added to a specific cpu buffer.
 525 */
 526int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
 527{
 528	struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
 529	DEFINE_WAIT(wait);
 530	struct rb_irq_work *work;
 531	int ret = 0;
 532
 533	/*
 534	 * Depending on what the caller is waiting for, either any
 535	 * data in any cpu buffer, or a specific buffer, put the
 536	 * caller on the appropriate wait queue.
 537	 */
 538	if (cpu == RING_BUFFER_ALL_CPUS) {
 539		work = &buffer->irq_work;
 540		/* Full only makes sense on per cpu reads */
 541		full = false;
 542	} else {
 543		if (!cpumask_test_cpu(cpu, buffer->cpumask))
 544			return -ENODEV;
 545		cpu_buffer = buffer->buffers[cpu];
 546		work = &cpu_buffer->irq_work;
 547	}
 548
 549
 550	while (true) {
 551		if (full)
 552			prepare_to_wait(&work->full_waiters, &wait, TASK_INTERRUPTIBLE);
 553		else
 554			prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
 555
 556		/*
 557		 * The events can happen in critical sections where
 558		 * checking a work queue can cause deadlocks.
 559		 * After adding a task to the queue, this flag is set
 560		 * only to notify events to try to wake up the queue
 561		 * using irq_work.
 562		 *
 563		 * We don't clear it even if the buffer is no longer
 564		 * empty. The flag only causes the next event to run
  565		 * irq_work to do the work queue wake up. The worst
 566		 * that can happen if we race with !trace_empty() is that
 567		 * an event will cause an irq_work to try to wake up
 568		 * an empty queue.
 569		 *
 570		 * There's no reason to protect this flag either, as
 571		 * the work queue and irq_work logic will do the necessary
 572		 * synchronization for the wake ups. The only thing
 573		 * that is necessary is that the wake up happens after
 574		 * a task has been queued. It's OK for spurious wake ups.
 575		 */
 576		if (full)
 577			work->full_waiters_pending = true;
 578		else
 579			work->waiters_pending = true;
 580
 581		if (signal_pending(current)) {
 582			ret = -EINTR;
 583			break;
 584		}
 585
 586		if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
 587			break;
 588
 589		if (cpu != RING_BUFFER_ALL_CPUS &&
 590		    !ring_buffer_empty_cpu(buffer, cpu)) {
 591			unsigned long flags;
 592			bool pagebusy;
 593
 594			if (!full)
 595				break;
 596
 597			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 598			pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
 599			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 600
 601			if (!pagebusy)
 602				break;
 603		}
 604
 605		schedule();
 606	}
 607
 608	if (full)
 609		finish_wait(&work->full_waiters, &wait);
 610	else
 611		finish_wait(&work->waiters, &wait);
 612
 613	return ret;
 614}
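
/*
 * Editor's sketch (assumed usage): a blocking reader thread pairing
 * ring_buffer_wait() with a consume loop. "buffer", "cpu" and drain_cpu()
 * are placeholders for this example.
 *
 *	for (;;) {
 *		int ret = ring_buffer_wait(buffer, cpu, false);
 *
 *		if (ret == -EINTR)	-- interrupted by a signal
 *			break;
 *		drain_cpu(buffer, cpu);	-- e.g. a ring_buffer_consume() loop
 *	}
 */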
 615
 616/**
 617 * ring_buffer_poll_wait - poll on buffer input
 618 * @buffer: buffer to wait on
 619 * @cpu: the cpu buffer to wait on
 620 * @filp: the file descriptor
 621 * @poll_table: The poll descriptor
 622 *
 623 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 624 * as data is added to any of the @buffer's cpu buffers. Otherwise
 625 * it will wait for data to be added to a specific cpu buffer.
 626 *
 627 * Returns POLLIN | POLLRDNORM if data exists in the buffers,
 628 * zero otherwise.
 629 */
 630int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
 631			  struct file *filp, poll_table *poll_table)
 632{
 633	struct ring_buffer_per_cpu *cpu_buffer;
 634	struct rb_irq_work *work;
 635
 636	if (cpu == RING_BUFFER_ALL_CPUS)
 637		work = &buffer->irq_work;
 638	else {
 639		if (!cpumask_test_cpu(cpu, buffer->cpumask))
 640			return -EINVAL;
 641
 642		cpu_buffer = buffer->buffers[cpu];
 643		work = &cpu_buffer->irq_work;
 644	}
 645
 646	poll_wait(filp, &work->waiters, poll_table);
 647	work->waiters_pending = true;
 648	/*
 649	 * There's a tight race between setting the waiters_pending and
 650	 * checking if the ring buffer is empty.  Once the waiters_pending bit
 651	 * is set, the next event will wake the task up, but we can get stuck
 652	 * if there's only a single event in.
 653	 *
 654	 * FIXME: Ideally, we need a memory barrier on the writer side as well,
 655	 * but adding a memory barrier to all events will cause too much of a
 656	 * performance hit in the fast path.  We only need a memory barrier when
 657	 * the buffer goes from empty to having content.  But as this race is
 658	 * extremely small, and it's not a problem if another event comes in, we
 659	 * will fix it later.
 660	 */
 661	smp_mb();
 662
 663	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
 664	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
 665		return POLLIN | POLLRDNORM;
 666	return 0;
 667}
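
/*
 * Editor's sketch (assumed usage): wiring the helper above into a file's
 * .poll operation for a valid cpu. "my_buffer" and "my_cpu" stand in for
 * per-file state.
 *
 *	static unsigned int my_poll(struct file *filp, poll_table *wait)
 *	{
 *		return ring_buffer_poll_wait(my_buffer, my_cpu, filp, wait);
 *	}
 */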
 668
 669/* buffer may be either ring_buffer or ring_buffer_per_cpu */
 670#define RB_WARN_ON(b, cond)						\
 671	({								\
 672		int _____ret = unlikely(cond);				\
 673		if (_____ret) {						\
 674			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
 675				struct ring_buffer_per_cpu *__b =	\
 676					(void *)b;			\
 677				atomic_inc(&__b->buffer->record_disabled); \
 678			} else						\
 679				atomic_inc(&b->record_disabled);	\
 680			WARN_ON(1);					\
 681		}							\
 682		_____ret;						\
 683	})
 684
 685/* Up this if you want to test the TIME_EXTENTS and normalization */
 686#define DEBUG_SHIFT 0
 687
 688static inline u64 rb_time_stamp(struct ring_buffer *buffer)
 689{
 690	/* shift to debug/test normalization and TIME_EXTENTS */
 691	return buffer->clock() << DEBUG_SHIFT;
 692}
 693
 694u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
 695{
 696	u64 time;
 697
 698	preempt_disable_notrace();
 699	time = rb_time_stamp(buffer);
 700	preempt_enable_no_resched_notrace();
 701
 702	return time;
 703}
 704EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
 705
 706void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
 707				      int cpu, u64 *ts)
 708{
 709	/* Just stupid testing the normalize function and deltas */
 710	*ts >>= DEBUG_SHIFT;
 711}
 712EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
 713
 714/*
 715 * Making the ring buffer lockless makes things tricky.
 716 * Although writes only happen on the CPU that they are on,
 717 * and they only need to worry about interrupts. Reads can
 718 * happen on any CPU.
 719 *
 720 * The reader page is always off the ring buffer, but when the
 721 * reader finishes with a page, it needs to swap its page with
 722 * a new one from the buffer. The reader needs to take from
 723 * the head (writes go to the tail). But if a writer is in overwrite
 724 * mode and wraps, it must push the head page forward.
 725 *
 726 * Here lies the problem.
 727 *
 728 * The reader must be careful to replace only the head page, and
 729 * not another one. As described at the top of the file in the
 730 * ASCII art, the reader sets its old page to point to the next
 731 * page after head. It then sets the page after head to point to
 732 * the old reader page. But if the writer moves the head page
 733 * during this operation, the reader could end up with the tail.
 734 *
 735 * We use cmpxchg to help prevent this race. We also do something
 736 * special with the page before head. We set the LSB to 1.
 737 *
 738 * When the writer must push the page forward, it will clear the
 739 * bit that points to the head page, move the head, and then set
 740 * the bit that points to the new head page.
 741 *
 742 * We also don't want an interrupt coming in and moving the head
 743 * page on another writer. Thus we use the second LSB to catch
 744 * that too. Thus:
 745 *
 746 * head->list->prev->next        bit 1          bit 0
 747 *                              -------        -------
 748 * Normal page                     0              0
 749 * Points to head page             0              1
 750 * New head page                   1              0
 751 *
 752 * Note we can not trust the prev pointer of the head page, because:
 753 *
 754 * +----+       +-----+        +-----+
 755 * |    |------>|  T  |---X--->|  N  |
 756 * |    |<------|     |        |     |
 757 * +----+       +-----+        +-----+
 758 *   ^                           ^ |
 759 *   |          +-----+          | |
 760 *   +----------|  R  |----------+ |
 761 *              |     |<-----------+
 762 *              +-----+
 763 *
 764 * Key:  ---X-->  HEAD flag set in pointer
 765 *         T      Tail page
 766 *         R      Reader page
 767 *         N      Next page
 768 *
 769 * (see __rb_reserve_next() to see where this happens)
 770 *
 771 *  What the above shows is that the reader just swapped out
 772 *  the reader page with a page in the buffer, but before it
 773 *  could make the new header point back to the new page added
 774 *  it was preempted by a writer. The writer moved forward onto
 775 *  the new page added by the reader and is about to move forward
 776 *  again.
 777 *
 778 *  You can see, it is legitimate for the previous pointer of
 779 *  the head (or any page) not to point back to itself. But only
  780 *  temporarily.
 781 */
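
/*
 * Worked example (editor's note): buffer pages are allocated cache-line
 * aligned, so the two low bits of a struct list_head pointer are free to
 * carry the flags defined below. If &head->list were 0xffff880012345680
 * (an address made up for illustration), prev->next stored as:
 *
 *	0xffff880012345680  ->  normal pointer       (flags == 0)
 *	0xffff880012345681  ->  points to head page  (RB_PAGE_HEAD)
 *	0xffff880012345682  ->  new head in progress (RB_PAGE_UPDATE)
 *
 * rb_list_head() strips the flag bits back off and rb_is_head_page()
 * reads them out.
 */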
 782
 783#define RB_PAGE_NORMAL		0UL
 784#define RB_PAGE_HEAD		1UL
 785#define RB_PAGE_UPDATE		2UL
 786
 787
 788#define RB_FLAG_MASK		3UL
 789
 790/* PAGE_MOVED is not part of the mask */
 791#define RB_PAGE_MOVED		4UL
 792
 793/*
 794 * rb_list_head - remove any bit
 795 */
 796static struct list_head *rb_list_head(struct list_head *list)
 797{
 798	unsigned long val = (unsigned long)list;
 799
 800	return (struct list_head *)(val & ~RB_FLAG_MASK);
 801}
 802
 803/*
 804 * rb_is_head_page - test if the given page is the head page
 805 *
 806 * Because the reader may move the head_page pointer, we can
 807 * not trust what the head page is (it may be pointing to
 808 * the reader page). But if the next page is a header page,
 809 * its flags will be non zero.
 810 */
 811static inline int
 812rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
 813		struct buffer_page *page, struct list_head *list)
 814{
 815	unsigned long val;
 816
 817	val = (unsigned long)list->next;
 818
 819	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
 820		return RB_PAGE_MOVED;
 821
 822	return val & RB_FLAG_MASK;
 823}
 824
 825/*
 826 * rb_is_reader_page
 827 *
 828 * The unique thing about the reader page, is that, if the
 829 * writer is ever on it, the previous pointer never points
 830 * back to the reader page.
 831 */
 832static bool rb_is_reader_page(struct buffer_page *page)
 833{
 834	struct list_head *list = page->list.prev;
 835
 836	return rb_list_head(list->next) != &page->list;
 837}
 838
 839/*
 840 * rb_set_list_to_head - set a list_head to be pointing to head.
 841 */
 842static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
 843				struct list_head *list)
 844{
 845	unsigned long *ptr;
 846
 847	ptr = (unsigned long *)&list->next;
 848	*ptr |= RB_PAGE_HEAD;
 849	*ptr &= ~RB_PAGE_UPDATE;
 850}
 851
 852/*
 853 * rb_head_page_activate - sets up head page
 854 */
 855static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
 856{
 857	struct buffer_page *head;
 858
 859	head = cpu_buffer->head_page;
 860	if (!head)
 861		return;
 862
 863	/*
 864	 * Set the previous list pointer to have the HEAD flag.
 865	 */
 866	rb_set_list_to_head(cpu_buffer, head->list.prev);
 867}
 868
 869static void rb_list_head_clear(struct list_head *list)
 870{
 871	unsigned long *ptr = (unsigned long *)&list->next;
 872
 873	*ptr &= ~RB_FLAG_MASK;
 874}
 875
 876/*
  877 * rb_head_page_deactivate - clears head page ptr (for free list)
 878 */
 879static void
 880rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
 881{
 882	struct list_head *hd;
 883
 884	/* Go through the whole list and clear any pointers found. */
 885	rb_list_head_clear(cpu_buffer->pages);
 886
 887	list_for_each(hd, cpu_buffer->pages)
 888		rb_list_head_clear(hd);
 889}
 890
 891static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
 892			    struct buffer_page *head,
 893			    struct buffer_page *prev,
 894			    int old_flag, int new_flag)
 895{
 896	struct list_head *list;
 897	unsigned long val = (unsigned long)&head->list;
 898	unsigned long ret;
 899
 900	list = &prev->list;
 901
 902	val &= ~RB_FLAG_MASK;
 903
 904	ret = cmpxchg((unsigned long *)&list->next,
 905		      val | old_flag, val | new_flag);
 906
 907	/* check if the reader took the page */
 908	if ((ret & ~RB_FLAG_MASK) != val)
 909		return RB_PAGE_MOVED;
 910
 911	return ret & RB_FLAG_MASK;
 912}
 913
 914static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
 915				   struct buffer_page *head,
 916				   struct buffer_page *prev,
 917				   int old_flag)
 918{
 919	return rb_head_page_set(cpu_buffer, head, prev,
 920				old_flag, RB_PAGE_UPDATE);
 921}
 922
 923static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
 924				 struct buffer_page *head,
 925				 struct buffer_page *prev,
 926				 int old_flag)
 927{
 928	return rb_head_page_set(cpu_buffer, head, prev,
 929				old_flag, RB_PAGE_HEAD);
 930}
 931
 932static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
 933				   struct buffer_page *head,
 934				   struct buffer_page *prev,
 935				   int old_flag)
 936{
 937	return rb_head_page_set(cpu_buffer, head, prev,
 938				old_flag, RB_PAGE_NORMAL);
 939}
 940
 941static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
 942			       struct buffer_page **bpage)
 943{
 944	struct list_head *p = rb_list_head((*bpage)->list.next);
 945
 946	*bpage = list_entry(p, struct buffer_page, list);
 947}
 948
 949static struct buffer_page *
 950rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
 951{
 952	struct buffer_page *head;
 953	struct buffer_page *page;
 954	struct list_head *list;
 955	int i;
 956
 957	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
 958		return NULL;
 959
 960	/* sanity check */
 961	list = cpu_buffer->pages;
 962	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
 963		return NULL;
 964
 965	page = head = cpu_buffer->head_page;
 966	/*
  967	 * It is possible that the writer moves the head page behind
  968	 * where we started, and we miss it in one loop.
  969	 * A second loop should grab the head page, but we'll do
 970	 * three loops just because I'm paranoid.
 971	 */
 972	for (i = 0; i < 3; i++) {
 973		do {
 974			if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
 975				cpu_buffer->head_page = page;
 976				return page;
 977			}
 978			rb_inc_page(cpu_buffer, &page);
 979		} while (page != head);
 980	}
 981
 982	RB_WARN_ON(cpu_buffer, 1);
 983
 984	return NULL;
 985}
 986
 987static int rb_head_page_replace(struct buffer_page *old,
 988				struct buffer_page *new)
 989{
 990	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
 991	unsigned long val;
 992	unsigned long ret;
 993
 994	val = *ptr & ~RB_FLAG_MASK;
 995	val |= RB_PAGE_HEAD;
 996
 997	ret = cmpxchg(ptr, val, (unsigned long)&new->list);
 998
 999	return ret == val;
1000}
1001
1002/*
1003 * rb_tail_page_update - move the tail page forward
1004 */
1005static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
1006			       struct buffer_page *tail_page,
1007			       struct buffer_page *next_page)
1008{
1009	unsigned long old_entries;
1010	unsigned long old_write;
1011
1012	/*
1013	 * The tail page now needs to be moved forward.
1014	 *
1015	 * We need to reset the tail page, but without messing
1016	 * with possible erasing of data brought in by interrupts
1017	 * that have moved the tail page and are currently on it.
1018	 *
1019	 * We add a counter to the write field to denote this.
1020	 */
1021	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
1022	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
1023
1024	/*
1025	 * Just make sure we have seen our old_write and synchronize
1026	 * with any interrupts that come in.
1027	 */
1028	barrier();
1029
1030	/*
1031	 * If the tail page is still the same as what we think
1032	 * it is, then it is up to us to update the tail
1033	 * pointer.
1034	 */
1035	if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
1036		/* Zero the write counter */
1037		unsigned long val = old_write & ~RB_WRITE_MASK;
1038		unsigned long eval = old_entries & ~RB_WRITE_MASK;
1039
1040		/*
1041		 * This will only succeed if an interrupt did
1042		 * not come in and change it. In which case, we
1043		 * do not want to modify it.
1044		 *
1045		 * We add (void) to let the compiler know that we do not care
1046		 * about the return value of these functions. We use the
1047		 * cmpxchg to only update if an interrupt did not already
1048		 * do it for us. If the cmpxchg fails, we don't care.
1049		 */
1050		(void)local_cmpxchg(&next_page->write, old_write, val);
1051		(void)local_cmpxchg(&next_page->entries, old_entries, eval);
1052
1053		/*
1054		 * No need to worry about races with clearing out the commit.
1055		 * it only can increment when a commit takes place. But that
 1056		 * only happens in the outermost nested commit.
1057		 */
1058		local_set(&next_page->page->commit, 0);
1059
1060		/* Again, either we update tail_page or an interrupt does */
1061		(void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
1062	}
1063}
1064
1065static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
1066			  struct buffer_page *bpage)
1067{
1068	unsigned long val = (unsigned long)bpage;
1069
1070	if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
1071		return 1;
1072
1073	return 0;
1074}
1075
1076/**
1077 * rb_check_list - make sure a pointer to a list has the last bits zero
1078 */
1079static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
1080			 struct list_head *list)
1081{
1082	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
1083		return 1;
1084	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
1085		return 1;
1086	return 0;
1087}
1088
1089/**
1090 * rb_check_pages - integrity check of buffer pages
1091 * @cpu_buffer: CPU buffer with pages to test
1092 *
1093 * As a safety measure we check to make sure the data pages have not
1094 * been corrupted.
1095 */
1096static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
1097{
1098	struct list_head *head = cpu_buffer->pages;
1099	struct buffer_page *bpage, *tmp;
1100
1101	/* Reset the head page if it exists */
1102	if (cpu_buffer->head_page)
1103		rb_set_head_page(cpu_buffer);
1104
1105	rb_head_page_deactivate(cpu_buffer);
1106
1107	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
1108		return -1;
1109	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
1110		return -1;
1111
1112	if (rb_check_list(cpu_buffer, head))
1113		return -1;
1114
1115	list_for_each_entry_safe(bpage, tmp, head, list) {
1116		if (RB_WARN_ON(cpu_buffer,
1117			       bpage->list.next->prev != &bpage->list))
1118			return -1;
1119		if (RB_WARN_ON(cpu_buffer,
1120			       bpage->list.prev->next != &bpage->list))
1121			return -1;
1122		if (rb_check_list(cpu_buffer, &bpage->list))
1123			return -1;
1124	}
1125
1126	rb_head_page_activate(cpu_buffer);
1127
1128	return 0;
1129}
1130
1131static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
1132{
1133	int i;
1134	struct buffer_page *bpage, *tmp;
1135
1136	for (i = 0; i < nr_pages; i++) {
1137		struct page *page;
1138		/*
1139		 * __GFP_NORETRY flag makes sure that the allocation fails
1140		 * gracefully without invoking oom-killer and the system is
1141		 * not destabilized.
1142		 */
1143		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1144				    GFP_KERNEL | __GFP_NORETRY,
1145				    cpu_to_node(cpu));
1146		if (!bpage)
1147			goto free_pages;
1148
1149		list_add(&bpage->list, pages);
1150
1151		page = alloc_pages_node(cpu_to_node(cpu),
1152					GFP_KERNEL | __GFP_NORETRY, 0);
1153		if (!page)
1154			goto free_pages;
1155		bpage->page = page_address(page);
1156		rb_init_page(bpage->page);
1157	}
1158
1159	return 0;
1160
1161free_pages:
1162	list_for_each_entry_safe(bpage, tmp, pages, list) {
1163		list_del_init(&bpage->list);
1164		free_buffer_page(bpage);
1165	}
1166
1167	return -ENOMEM;
1168}
1169
1170static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1171			     unsigned nr_pages)
1172{
1173	LIST_HEAD(pages);
1174
1175	WARN_ON(!nr_pages);
1176
1177	if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
1178		return -ENOMEM;
1179
1180	/*
1181	 * The ring buffer page list is a circular list that does not
1182	 * start and end with a list head. All page list items point to
1183	 * other pages.
1184	 */
1185	cpu_buffer->pages = pages.next;
1186	list_del(&pages);
1187
1188	cpu_buffer->nr_pages = nr_pages;
1189
1190	rb_check_pages(cpu_buffer);
1191
1192	return 0;
1193}
1194
1195static struct ring_buffer_per_cpu *
1196rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
1197{
1198	struct ring_buffer_per_cpu *cpu_buffer;
1199	struct buffer_page *bpage;
1200	struct page *page;
1201	int ret;
1202
1203	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1204				  GFP_KERNEL, cpu_to_node(cpu));
1205	if (!cpu_buffer)
1206		return NULL;
1207
1208	cpu_buffer->cpu = cpu;
1209	cpu_buffer->buffer = buffer;
1210	raw_spin_lock_init(&cpu_buffer->reader_lock);
1211	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1212	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1213	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
1214	init_completion(&cpu_buffer->update_done);
1215	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
1216	init_waitqueue_head(&cpu_buffer->irq_work.waiters);
1217	init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);
1218
1219	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1220			    GFP_KERNEL, cpu_to_node(cpu));
1221	if (!bpage)
1222		goto fail_free_buffer;
1223
1224	rb_check_bpage(cpu_buffer, bpage);
1225
1226	cpu_buffer->reader_page = bpage;
1227	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
1228	if (!page)
1229		goto fail_free_reader;
1230	bpage->page = page_address(page);
1231	rb_init_page(bpage->page);
1232
1233	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1234	INIT_LIST_HEAD(&cpu_buffer->new_pages);
1235
1236	ret = rb_allocate_pages(cpu_buffer, nr_pages);
1237	if (ret < 0)
1238		goto fail_free_reader;
1239
1240	cpu_buffer->head_page
1241		= list_entry(cpu_buffer->pages, struct buffer_page, list);
1242	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1243
1244	rb_head_page_activate(cpu_buffer);
1245
1246	return cpu_buffer;
1247
1248 fail_free_reader:
1249	free_buffer_page(cpu_buffer->reader_page);
1250
1251 fail_free_buffer:
1252	kfree(cpu_buffer);
1253	return NULL;
1254}
1255
1256static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1257{
1258	struct list_head *head = cpu_buffer->pages;
1259	struct buffer_page *bpage, *tmp;
1260
1261	free_buffer_page(cpu_buffer->reader_page);
1262
1263	rb_head_page_deactivate(cpu_buffer);
1264
1265	if (head) {
1266		list_for_each_entry_safe(bpage, tmp, head, list) {
1267			list_del_init(&bpage->list);
1268			free_buffer_page(bpage);
1269		}
1270		bpage = list_entry(head, struct buffer_page, list);
1271		free_buffer_page(bpage);
1272	}
1273
1274	kfree(cpu_buffer);
1275}
1276
1277#ifdef CONFIG_HOTPLUG_CPU
1278static int rb_cpu_notify(struct notifier_block *self,
1279			 unsigned long action, void *hcpu);
1280#endif
1281
1282/**
1283 * __ring_buffer_alloc - allocate a new ring_buffer
1284 * @size: the size in bytes per cpu that is needed.
1285 * @flags: attributes to set for the ring buffer.
1286 *
1287 * Currently the only flag that is available is the RB_FL_OVERWRITE
1288 * flag. This flag means that the buffer will overwrite old data
1289 * when the buffer wraps. If this flag is not set, the buffer will
1290 * drop data when the tail hits the head.
1291 */
1292struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1293					struct lock_class_key *key)
1294{
1295	struct ring_buffer *buffer;
1296	int bsize;
1297	int cpu, nr_pages;
1298
1299	/* keep it in its own cache line */
1300	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1301			 GFP_KERNEL);
1302	if (!buffer)
1303		return NULL;
1304
1305	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1306		goto fail_free_buffer;
1307
1308	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1309	buffer->flags = flags;
1310	buffer->clock = trace_clock_local;
1311	buffer->reader_lock_key = key;
1312
1313	init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
1314	init_waitqueue_head(&buffer->irq_work.waiters);
1315
1316	/* need at least two pages */
1317	if (nr_pages < 2)
1318		nr_pages = 2;
1319
1320	/*
1321	 * In case of non-hotplug cpu, if the ring-buffer is allocated
1322	 * in early initcall, it will not be notified of secondary cpus.
 1323	 * In that case, we need to allocate for all possible cpus.
1324	 */
1325#ifdef CONFIG_HOTPLUG_CPU
1326	cpu_notifier_register_begin();
1327	cpumask_copy(buffer->cpumask, cpu_online_mask);
1328#else
1329	cpumask_copy(buffer->cpumask, cpu_possible_mask);
1330#endif
1331	buffer->cpus = nr_cpu_ids;
1332
1333	bsize = sizeof(void *) * nr_cpu_ids;
1334	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1335				  GFP_KERNEL);
1336	if (!buffer->buffers)
1337		goto fail_free_cpumask;
1338
1339	for_each_buffer_cpu(buffer, cpu) {
1340		buffer->buffers[cpu] =
1341			rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
1342		if (!buffer->buffers[cpu])
1343			goto fail_free_buffers;
1344	}
1345
1346#ifdef CONFIG_HOTPLUG_CPU
1347	buffer->cpu_notify.notifier_call = rb_cpu_notify;
1348	buffer->cpu_notify.priority = 0;
1349	__register_cpu_notifier(&buffer->cpu_notify);
1350	cpu_notifier_register_done();
1351#endif
1352
1353	mutex_init(&buffer->mutex);
1354
1355	return buffer;
1356
1357 fail_free_buffers:
1358	for_each_buffer_cpu(buffer, cpu) {
1359		if (buffer->buffers[cpu])
1360			rb_free_cpu_buffer(buffer->buffers[cpu]);
1361	}
1362	kfree(buffer->buffers);
1363
1364 fail_free_cpumask:
1365	free_cpumask_var(buffer->cpumask);
1366#ifdef CONFIG_HOTPLUG_CPU
1367	cpu_notifier_register_done();
1368#endif
1369
1370 fail_free_buffer:
1371	kfree(buffer);
1372	return NULL;
1373}
1374EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
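
/*
 * Editor's sketch (assumed usage): allocating a buffer and committing one
 * event through the exported reserve/commit API. Error handling is
 * trimmed and "my_payload" is a placeholder.
 *
 *	struct ring_buffer *rb;
 *	struct ring_buffer_event *event;
 *
 *	rb = ring_buffer_alloc(64 * 1024, RB_FL_OVERWRITE);
 *	if (!rb)
 *		return -ENOMEM;
 *
 *	event = ring_buffer_lock_reserve(rb, sizeof(my_payload));
 *	if (event) {
 *		memcpy(ring_buffer_event_data(event), &my_payload,
 *		       sizeof(my_payload));
 *		ring_buffer_unlock_commit(rb, event);
 *	}
 *
 *	ring_buffer_free(rb);
 */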
1375
1376/**
1377 * ring_buffer_free - free a ring buffer.
1378 * @buffer: the buffer to free.
1379 */
1380void
1381ring_buffer_free(struct ring_buffer *buffer)
1382{
1383	int cpu;
1384
1385#ifdef CONFIG_HOTPLUG_CPU
1386	cpu_notifier_register_begin();
1387	__unregister_cpu_notifier(&buffer->cpu_notify);
1388#endif
1389
1390	for_each_buffer_cpu(buffer, cpu)
1391		rb_free_cpu_buffer(buffer->buffers[cpu]);
1392
1393#ifdef CONFIG_HOTPLUG_CPU
1394	cpu_notifier_register_done();
1395#endif
1396
1397	kfree(buffer->buffers);
1398	free_cpumask_var(buffer->cpumask);
1399
1400	kfree(buffer);
1401}
1402EXPORT_SYMBOL_GPL(ring_buffer_free);
1403
1404void ring_buffer_set_clock(struct ring_buffer *buffer,
1405			   u64 (*clock)(void))
1406{
1407	buffer->clock = clock;
1408}
1409
1410static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1411
1412static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1413{
1414	return local_read(&bpage->entries) & RB_WRITE_MASK;
1415}
1416
1417static inline unsigned long rb_page_write(struct buffer_page *bpage)
1418{
1419	return local_read(&bpage->write) & RB_WRITE_MASK;
1420}
1421
1422static int
1423rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
1424{
1425	struct list_head *tail_page, *to_remove, *next_page;
1426	struct buffer_page *to_remove_page, *tmp_iter_page;
1427	struct buffer_page *last_page, *first_page;
1428	unsigned int nr_removed;
1429	unsigned long head_bit;
1430	int page_entries;
1431
1432	head_bit = 0;
1433
1434	raw_spin_lock_irq(&cpu_buffer->reader_lock);
1435	atomic_inc(&cpu_buffer->record_disabled);
1436	/*
1437	 * We don't race with the readers since we have acquired the reader
1438	 * lock. We also don't race with writers after disabling recording.
1439	 * This makes it easy to figure out the first and the last page to be
1440	 * removed from the list. We unlink all the pages in between including
1441	 * the first and last pages. This is done in a busy loop so that we
1442	 * lose the least number of traces.
1443	 * The pages are freed after we restart recording and unlock readers.
1444	 */
1445	tail_page = &cpu_buffer->tail_page->list;
1446
1447	/*
1448	 * tail page might be on reader page, we remove the next page
1449	 * from the ring buffer
1450	 */
1451	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1452		tail_page = rb_list_head(tail_page->next);
1453	to_remove = tail_page;
1454
1455	/* start of pages to remove */
1456	first_page = list_entry(rb_list_head(to_remove->next),
1457				struct buffer_page, list);
1458
1459	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
1460		to_remove = rb_list_head(to_remove)->next;
1461		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
1462	}
1463
1464	next_page = rb_list_head(to_remove)->next;
1465
1466	/*
1467	 * Now we remove all pages between tail_page and next_page.
1468	 * Make sure that we have head_bit value preserved for the
1469	 * next page
1470	 */
1471	tail_page->next = (struct list_head *)((unsigned long)next_page |
1472						head_bit);
1473	next_page = rb_list_head(next_page);
1474	next_page->prev = tail_page;
1475
1476	/* make sure pages points to a valid page in the ring buffer */
1477	cpu_buffer->pages = next_page;
1478
1479	/* update head page */
1480	if (head_bit)
1481		cpu_buffer->head_page = list_entry(next_page,
1482						struct buffer_page, list);
1483
1484	/*
1485	 * change read pointer to make sure any read iterators reset
1486	 * themselves
1487	 */
1488	cpu_buffer->read = 0;
1489
1490	/* pages are removed, resume tracing and then free the pages */
1491	atomic_dec(&cpu_buffer->record_disabled);
1492	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1493
1494	RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
1495
1496	/* last buffer page to remove */
1497	last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
1498				list);
1499	tmp_iter_page = first_page;
1500
1501	do {
1502		to_remove_page = tmp_iter_page;
1503		rb_inc_page(cpu_buffer, &tmp_iter_page);
1504
1505		/* update the counters */
1506		page_entries = rb_page_entries(to_remove_page);
1507		if (page_entries) {
1508			/*
1509			 * If something was added to this page, it was full
1510			 * since it is not the tail page. So we deduct the
1511			 * bytes consumed in ring buffer from here.
1512			 * Increment overrun to account for the lost events.
1513			 */
1514			local_add(page_entries, &cpu_buffer->overrun);
1515			local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1516		}
1517
1518		/*
1519		 * We have already removed references to this list item, just
1520		 * free up the buffer_page and its page
1521		 */
1522		free_buffer_page(to_remove_page);
1523		nr_removed--;
1524
1525	} while (to_remove_page != last_page);
1526
1527	RB_WARN_ON(cpu_buffer, nr_removed);
1528
1529	return nr_removed == 0;
1530}
1531
1532static int
1533rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
1534{
1535	struct list_head *pages = &cpu_buffer->new_pages;
1536	int retries, success;
1537
1538	raw_spin_lock_irq(&cpu_buffer->reader_lock);
1539	/*
1540	 * We are holding the reader lock, so the reader page won't be swapped
1541	 * in the ring buffer. Now we are racing with the writer trying to
1542	 * move head page and the tail page.
1543	 * We are going to adapt the reader page update process where:
1544	 * 1. We first splice the start and end of list of new pages between
1545	 *    the head page and its previous page.
1546	 * 2. We cmpxchg the prev_page->next to point from head page to the
1547	 *    start of new pages list.
1548	 * 3. Finally, we update the head->prev to the end of new list.
1549	 *
1550	 * We will try this process 10 times, to make sure that we don't keep
1551	 * spinning.
1552	 */
1553	retries = 10;
1554	success = 0;
1555	while (retries--) {
1556		struct list_head *head_page, *prev_page, *r;
1557		struct list_head *last_page, *first_page;
1558		struct list_head *head_page_with_bit;
1559
1560		head_page = &rb_set_head_page(cpu_buffer)->list;
1561		if (!head_page)
1562			break;
1563		prev_page = head_page->prev;
1564
1565		first_page = pages->next;
1566		last_page  = pages->prev;
1567
1568		head_page_with_bit = (struct list_head *)
1569				     ((unsigned long)head_page | RB_PAGE_HEAD);
1570
1571		last_page->next = head_page_with_bit;
1572		first_page->prev = prev_page;
1573
1574		r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
1575
1576		if (r == head_page_with_bit) {
1577			/*
1578			 * yay, we replaced the page pointer to our new list,
1579			 * now, we just have to update to head page's prev
1580			 * pointer to point to end of list
1581			 */
1582			head_page->prev = last_page;
1583			success = 1;
1584			break;
1585		}
1586	}
1587
1588	if (success)
1589		INIT_LIST_HEAD(pages);
1590	/*
1591	 * If we weren't successful in adding in new pages, warn and stop
1592	 * tracing
1593	 */
1594	RB_WARN_ON(cpu_buffer, !success);
1595	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1596
1597	/* free pages if they weren't inserted */
1598	if (!success) {
1599		struct buffer_page *bpage, *tmp;
1600		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1601					 list) {
1602			list_del_init(&bpage->list);
1603			free_buffer_page(bpage);
1604		}
1605	}
1606	return success;
1607}
1608
1609static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
1610{
1611	int success;
1612
1613	if (cpu_buffer->nr_pages_to_update > 0)
1614		success = rb_insert_pages(cpu_buffer);
1615	else
1616		success = rb_remove_pages(cpu_buffer,
1617					-cpu_buffer->nr_pages_to_update);
1618
1619	if (success)
1620		cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
1621}
1622
1623static void update_pages_handler(struct work_struct *work)
1624{
1625	struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
1626			struct ring_buffer_per_cpu, update_pages_work);
1627	rb_update_pages(cpu_buffer);
1628	complete(&cpu_buffer->update_done);
1629}
1630
1631/**
1632 * ring_buffer_resize - resize the ring buffer
1633 * @buffer: the buffer to resize.
1634 * @size: the new size.
1635 * @cpu_id: the cpu buffer to resize
1636 *
1637 * Minimum size is 2 * BUF_PAGE_SIZE.
1638 *
 1639 * Returns the new (page-rounded) size on success and < 0 on failure.
1640 */
1641int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1642			int cpu_id)
1643{
1644	struct ring_buffer_per_cpu *cpu_buffer;
1645	unsigned nr_pages;
1646	int cpu, err = 0;
1647
1648	/*
1649	 * Always succeed at resizing a non-existent buffer:
1650	 */
1651	if (!buffer)
1652		return size;
1653
1654	/* Make sure the requested buffer exists */
1655	if (cpu_id != RING_BUFFER_ALL_CPUS &&
1656	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
1657		return size;
1658
1659	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1660	size *= BUF_PAGE_SIZE;
1661
1662	/* we need a minimum of two pages */
1663	if (size < BUF_PAGE_SIZE * 2)
1664		size = BUF_PAGE_SIZE * 2;
1665
1666	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1667
1668	/*
1669	 * Don't succeed if resizing is disabled, as a reader might be
1670	 * manipulating the ring buffer and is expecting a sane state while
1671	 * this is true.
1672	 */
1673	if (atomic_read(&buffer->resize_disabled))
1674		return -EBUSY;
1675
1676	/* prevent another thread from changing buffer sizes */
1677	mutex_lock(&buffer->mutex);
1678
1679	if (cpu_id == RING_BUFFER_ALL_CPUS) {
1680		/* calculate the pages to update */
1681		for_each_buffer_cpu(buffer, cpu) {
1682			cpu_buffer = buffer->buffers[cpu];
1683
1684			cpu_buffer->nr_pages_to_update = nr_pages -
1685							cpu_buffer->nr_pages;
1686			/*
1687			 * nothing more to do for removing pages or no update
1688			 */
1689			if (cpu_buffer->nr_pages_to_update <= 0)
1690				continue;
1691			/*
1692			 * to add pages, make sure all new pages can be
1693			 * allocated without receiving ENOMEM
1694			 */
1695			INIT_LIST_HEAD(&cpu_buffer->new_pages);
1696			if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1697						&cpu_buffer->new_pages, cpu)) {
1698				/* not enough memory for new pages */
1699				err = -ENOMEM;
1700				goto out_err;
1701			}
1702		}
1703
1704		get_online_cpus();
1705		/*
1706		 * Fire off all the required work handlers
1707		 * We can't schedule on offline CPUs, but it's not necessary
1708		 * since we can change their buffer sizes without any race.
1709		 */
1710		for_each_buffer_cpu(buffer, cpu) {
1711			cpu_buffer = buffer->buffers[cpu];
1712			if (!cpu_buffer->nr_pages_to_update)
1713				continue;
1714
1715			/* Can't run something on an offline CPU. */
1716			if (!cpu_online(cpu)) {
1717				rb_update_pages(cpu_buffer);
1718				cpu_buffer->nr_pages_to_update = 0;
1719			} else {
1720				schedule_work_on(cpu,
1721						&cpu_buffer->update_pages_work);
1722			}
1723		}
1724
1725		/* wait for all the updates to complete */
1726		for_each_buffer_cpu(buffer, cpu) {
1727			cpu_buffer = buffer->buffers[cpu];
1728			if (!cpu_buffer->nr_pages_to_update)
1729				continue;
1730
1731			if (cpu_online(cpu))
1732				wait_for_completion(&cpu_buffer->update_done);
1733			cpu_buffer->nr_pages_to_update = 0;
1734		}
1735
1736		put_online_cpus();
1737	} else {
 1738		/* Make sure this CPU has been initialized */
1739		if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
1740			goto out;
1741
1742		cpu_buffer = buffer->buffers[cpu_id];
1743
1744		if (nr_pages == cpu_buffer->nr_pages)
1745			goto out;
1746
1747		cpu_buffer->nr_pages_to_update = nr_pages -
1748						cpu_buffer->nr_pages;
1749
1750		INIT_LIST_HEAD(&cpu_buffer->new_pages);
1751		if (cpu_buffer->nr_pages_to_update > 0 &&
1752			__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1753					    &cpu_buffer->new_pages, cpu_id)) {
1754			err = -ENOMEM;
1755			goto out_err;
1756		}
1757
1758		get_online_cpus();
1759
1760		/* Can't run something on an offline CPU. */
1761		if (!cpu_online(cpu_id))
1762			rb_update_pages(cpu_buffer);
1763		else {
1764			schedule_work_on(cpu_id,
1765					 &cpu_buffer->update_pages_work);
1766			wait_for_completion(&cpu_buffer->update_done);
1767		}
1768
1769		cpu_buffer->nr_pages_to_update = 0;
1770		put_online_cpus();
1771	}
1772
1773 out:
1774	/*
1775	 * The ring buffer resize can happen with the ring buffer
1776	 * enabled, so that the update disturbs the tracing as little
1777	 * as possible. But if the buffer is disabled, we do not need
1778	 * to worry about that, and we can take the time to verify
1779	 * that the buffer is not corrupt.
1780	 */
1781	if (atomic_read(&buffer->record_disabled)) {
1782		atomic_inc(&buffer->record_disabled);
1783		/*
1784		 * Even though the buffer was disabled, we must make sure
1785		 * that it is truly disabled before calling rb_check_pages.
1786		 * There could have been a race between checking
1787		 * record_disabled and incrementing it.
1788		 */
1789		synchronize_sched();
1790		for_each_buffer_cpu(buffer, cpu) {
1791			cpu_buffer = buffer->buffers[cpu];
1792			rb_check_pages(cpu_buffer);
1793		}
1794		atomic_dec(&buffer->record_disabled);
1795	}
1796
 
1797	mutex_unlock(&buffer->mutex);
1798	return size;
1799
1800 out_err:
1801	for_each_buffer_cpu(buffer, cpu) {
1802		struct buffer_page *bpage, *tmp;
1803
1804		cpu_buffer = buffer->buffers[cpu];
1805		cpu_buffer->nr_pages_to_update = 0;
1806
1807		if (list_empty(&cpu_buffer->new_pages))
1808			continue;
1809
1810		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1811					list) {
1812			list_del_init(&bpage->list);
1813			free_buffer_page(bpage);
1814		}
1815	}
1816	mutex_unlock(&buffer->mutex);
1817	return err;
1818}
1819EXPORT_SYMBOL_GPL(ring_buffer_resize);
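
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * resize every per-cpu buffer to roughly one megabyte.  The size is given
 * in bytes; on success the size actually set is returned, on failure a
 * negative errno such as -EBUSY (resizing currently disabled) or -ENOMEM.
 *
 *	int ret;
 *
 *	ret = ring_buffer_resize(buffer, 1024 * 1024, RING_BUFFER_ALL_CPUS);
 *	if (ret < 0)
 *		pr_warn("ring buffer resize failed: %d\n", ret);
 */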
1820
1821void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
1822{
1823	mutex_lock(&buffer->mutex);
1824	if (val)
1825		buffer->flags |= RB_FL_OVERWRITE;
1826	else
1827		buffer->flags &= ~RB_FL_OVERWRITE;
1828	mutex_unlock(&buffer->mutex);
1829}
1830EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
1831
1832static inline void *
1833__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
1834{
1835	return bpage->data + index;
1836}
1837
1838static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
1839{
1840	return bpage->page->data + index;
1841}
1842
1843static inline struct ring_buffer_event *
1844rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
1845{
1846	return __rb_page_index(cpu_buffer->reader_page,
1847			       cpu_buffer->reader_page->read);
1848}
1849
1850static inline struct ring_buffer_event *
1851rb_iter_head_event(struct ring_buffer_iter *iter)
1852{
1853	return __rb_page_index(iter->head_page, iter->head);
1854}
1855
1856static inline unsigned rb_page_commit(struct buffer_page *bpage)
1857{
1858	return local_read(&bpage->page->commit);
1859}
1860
1861/* Size is determined by what has been committed */
1862static inline unsigned rb_page_size(struct buffer_page *bpage)
1863{
1864	return rb_page_commit(bpage);
1865}
1866
1867static inline unsigned
1868rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
1869{
1870	return rb_page_commit(cpu_buffer->commit_page);
1871}
1872
1873static inline unsigned
1874rb_event_index(struct ring_buffer_event *event)
1875{
1876	unsigned long addr = (unsigned long)event;
1877
1878	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
1879}
1880
1881static void rb_inc_iter(struct ring_buffer_iter *iter)
1882{
1883	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1884
1885	/*
1886	 * The iterator could be on the reader page (it starts there).
1887	 * But the head could have moved, since the reader was
1888	 * found. Check for this case and assign the iterator
1889	 * to the head page instead of next.
1890	 */
1891	if (iter->head_page == cpu_buffer->reader_page)
1892		iter->head_page = rb_set_head_page(cpu_buffer);
1893	else
1894		rb_inc_page(cpu_buffer, &iter->head_page);
1895
1896	iter->read_stamp = iter->head_page->page->time_stamp;
1897	iter->head = 0;
 
1898}
1899
1900/*
1901 * rb_handle_head_page - writer hit the head page
1902 *
1903 * Returns: +1 to retry page
1904 *           0 to continue
1905 *          -1 on error
1906 */
1907static int
1908rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
1909		    struct buffer_page *tail_page,
1910		    struct buffer_page *next_page)
1911{
1912	struct buffer_page *new_head;
1913	int entries;
1914	int type;
1915	int ret;
1916
1917	entries = rb_page_entries(next_page);
1918
1919	/*
1920	 * The hard part is here. We need to move the head
1921	 * forward, and protect against both readers on
1922	 * other CPUs and writers coming in via interrupts.
1923	 */
1924	type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
1925				       RB_PAGE_HEAD);
1926
1927	/*
1928	 * type can be one of four:
1929	 *  NORMAL - an interrupt already moved it for us
1930	 *  HEAD   - we are the first to get here.
1931	 *  UPDATE - we are the interrupt interrupting
1932	 *           a current move.
1933	 *  MOVED  - a reader on another CPU moved the next
1934	 *           pointer to its reader page. Give up
1935	 *           and try again.
1936	 */
1937
1938	switch (type) {
1939	case RB_PAGE_HEAD:
1940		/*
1941		 * We changed the head to UPDATE, thus
1942		 * it is our responsibility to update
1943		 * the counters.
1944		 */
1945		local_add(entries, &cpu_buffer->overrun);
1946		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
 
1947
1948		/*
1949		 * The entries will be zeroed out when we move the
1950		 * tail page.
1951		 */
1952
1953		/* still more to do */
1954		break;
1955
1956	case RB_PAGE_UPDATE:
1957		/*
1958		 * This is an interrupt that interrupted the
1959		 * previous update. Still more to do.
1960		 */
1961		break;
1962	case RB_PAGE_NORMAL:
1963		/*
1964		 * An interrupt came in before the update
1965		 * and processed this for us.
1966		 * Nothing left to do.
1967		 */
1968		return 1;
1969	case RB_PAGE_MOVED:
1970		/*
1971		 * The reader is on another CPU and just did
1972		 * a swap with our next_page.
1973		 * Try again.
1974		 */
1975		return 1;
1976	default:
1977		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
1978		return -1;
1979	}
1980
1981	/*
1982	 * Now that we are here, the old head pointer is
1983	 * set to UPDATE. This will keep the reader from
1984	 * swapping the head page with the reader page.
1985	 * The reader (on another CPU) will spin till
1986	 * we are finished.
1987	 *
1988	 * We just need to protect against interrupts
1989	 * doing the job. We will set the next pointer
1990	 * to HEAD. After that, we set the old pointer
1991	 * to NORMAL, but only if it was HEAD before;
1992	 * otherwise we are an interrupt, and only
1993	 * want the outermost commit to reset it.
1994	 */
1995	new_head = next_page;
1996	rb_inc_page(cpu_buffer, &new_head);
1997
1998	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
1999				    RB_PAGE_NORMAL);
2000
2001	/*
2002	 * Valid returns are:
2003	 *  HEAD   - an interrupt came in and already set it.
2004	 *  NORMAL - One of two things:
2005	 *            1) We really set it.
2006	 *            2) A bunch of interrupts came in and moved
2007	 *               the page forward again.
2008	 */
2009	switch (ret) {
2010	case RB_PAGE_HEAD:
2011	case RB_PAGE_NORMAL:
2012		/* OK */
2013		break;
2014	default:
2015		RB_WARN_ON(cpu_buffer, 1);
2016		return -1;
2017	}
2018
2019	/*
2020	 * It is possible that an interrupt came in,
2021	 * set the head up, then more interrupts came in
2022	 * and moved it again. When we get back here,
2023	 * the page would have been set to NORMAL but we
2024	 * just set it back to HEAD.
2025	 *
2026	 * How do you detect this? Well, if that happened
2027	 * the tail page would have moved.
2028	 */
2029	if (ret == RB_PAGE_NORMAL) {
2030		struct buffer_page *buffer_tail_page;
2031
2032		buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
2033		/*
2034		 * If the tail had moved past next, then we need
2035		 * to reset the pointer.
2036		 */
2037		if (buffer_tail_page != tail_page &&
2038		    buffer_tail_page != next_page)
2039			rb_head_page_set_normal(cpu_buffer, new_head,
2040						next_page,
2041						RB_PAGE_HEAD);
2042	}
2043
2044	/*
2045	 * If this was the outermost commit (the one that
2046	 * changed the original pointer from HEAD to UPDATE),
2047	 * then it is up to us to reset it to NORMAL.
2048	 */
2049	if (type == RB_PAGE_HEAD) {
2050		ret = rb_head_page_set_normal(cpu_buffer, next_page,
2051					      tail_page,
2052					      RB_PAGE_UPDATE);
2053		if (RB_WARN_ON(cpu_buffer,
2054			       ret != RB_PAGE_UPDATE))
2055			return -1;
2056	}
2057
2058	return 0;
2059}
2060
2061static inline void
2062rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2063	      unsigned long tail, struct rb_event_info *info)
2064{
 
2065	struct buffer_page *tail_page = info->tail_page;
2066	struct ring_buffer_event *event;
2067	unsigned long length = info->length;
2068
2069	/*
2070	 * Only the event that crossed the page boundary
2071	 * must fill the old tail_page with padding.
2072	 */
2073	if (tail >= BUF_PAGE_SIZE) {
2074		/*
2075		 * If the page was filled, then we still need
2076		 * to update the real_end. Reset it to zero
2077		 * and the reader will ignore it.
2078		 */
2079		if (tail == BUF_PAGE_SIZE)
2080			tail_page->real_end = 0;
2081
2082		local_sub(length, &tail_page->write);
2083		return;
2084	}
2085
2086	event = __rb_page_index(tail_page, tail);
2087	kmemcheck_annotate_bitfield(event, bitfield);
2088
2089	/* account for padding bytes */
2090	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2091
2092	/*
2093	 * Save the original length to the meta data.
2094	 * This will be used by the reader to add lost event
2095	 * counter.
2096	 */
2097	tail_page->real_end = tail;
2098
2099	/*
2100	 * If this event is bigger than the minimum size, then
2101	 * we need to be careful that we don't subtract the
2102	 * write counter enough to allow another writer to slip
2103	 * in on this page.
2104	 * We put in a discarded commit instead, to make sure
2105	 * that this space is not used again.
 
2106	 *
2107	 * If we are less than the minimum size, we don't need to
2108	 * worry about it.
2109	 */
2110	if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
2111		/* No room for any events */
2112
2113		/* Mark the rest of the page with padding */
2114		rb_event_set_padding(event);
2115
2116		/* Set the write back to the previous setting */
2117		local_sub(length, &tail_page->write);
2118		return;
2119	}
2120
2121	/* Put in a discarded event */
2122	event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
2123	event->type_len = RINGBUF_TYPE_PADDING;
2124	/* time delta must be non zero */
2125	event->time_delta = 1;
2126
2127	/* Set write to end of buffer */
2128	length = (tail + length) - BUF_PAGE_SIZE;
2129	local_sub(length, &tail_page->write);
2130}
2131
2132static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
2133
2134/*
2135 * This is the slow path, force gcc not to inline it.
2136 */
2137static noinline struct ring_buffer_event *
2138rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2139	     unsigned long tail, struct rb_event_info *info)
2140{
2141	struct buffer_page *tail_page = info->tail_page;
2142	struct buffer_page *commit_page = cpu_buffer->commit_page;
2143	struct ring_buffer *buffer = cpu_buffer->buffer;
2144	struct buffer_page *next_page;
2145	int ret;
2146
2147	next_page = tail_page;
2148
2149	rb_inc_page(cpu_buffer, &next_page);
2150
2151	/*
2152	 * If for some reason, we had an interrupt storm that made
2153	 * it all the way around the buffer, bail, and warn
2154	 * about it.
2155	 */
2156	if (unlikely(next_page == commit_page)) {
2157		local_inc(&cpu_buffer->commit_overrun);
2158		goto out_reset;
2159	}
2160
2161	/*
2162	 * This is where the fun begins!
2163	 *
2164	 * We are fighting against races between a reader that
2165	 * could be on another CPU trying to swap its reader
2166	 * page with the buffer head.
2167	 *
2168	 * We are also fighting against interrupts coming in and
2169	 * moving the head or tail on us as well.
2170	 *
2171	 * If the next page is the head page then we have filled
2172	 * the buffer, unless the commit page is still on the
2173	 * reader page.
2174	 */
2175	if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
2176
2177		/*
2178		 * If the commit is not on the reader page, then
2179		 * move the header page.
2180		 */
2181		if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2182			/*
2183			 * If we are not in overwrite mode,
2184			 * this is easy, just stop here.
2185			 */
2186			if (!(buffer->flags & RB_FL_OVERWRITE)) {
2187				local_inc(&cpu_buffer->dropped_events);
2188				goto out_reset;
2189			}
2190
2191			ret = rb_handle_head_page(cpu_buffer,
2192						  tail_page,
2193						  next_page);
2194			if (ret < 0)
2195				goto out_reset;
2196			if (ret)
2197				goto out_again;
2198		} else {
2199			/*
2200			 * We need to be careful here too. The
2201			 * commit page could still be on the reader
2202			 * page. We could have a small buffer, and
2203			 * have filled up the buffer with events
2204			 * from interrupts and such, and wrapped.
2205			 *
2206			 * Note, if the tail page is also on the
2207			 * reader_page, we let it move out.
2208			 */
2209			if (unlikely((cpu_buffer->commit_page !=
2210				      cpu_buffer->tail_page) &&
2211				     (cpu_buffer->commit_page ==
2212				      cpu_buffer->reader_page))) {
2213				local_inc(&cpu_buffer->commit_overrun);
2214				goto out_reset;
2215			}
2216		}
2217	}
2218
2219	rb_tail_page_update(cpu_buffer, tail_page, next_page);
2220
2221 out_again:
2222
2223	rb_reset_tail(cpu_buffer, tail, info);
2224
2225	/* Commit what we have for now. */
2226	rb_end_commit(cpu_buffer);
2227	/* rb_end_commit() decs committing */
2228	local_inc(&cpu_buffer->committing);
2229
2230	/* fail and let the caller try again */
2231	return ERR_PTR(-EAGAIN);
2232
2233 out_reset:
2234	/* reset write */
2235	rb_reset_tail(cpu_buffer, tail, info);
2236
2237	return NULL;
2238}
2239
2240/* Slow path, do not inline */
2241static noinline struct ring_buffer_event *
2242rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
 
2243{
2244	event->type_len = RINGBUF_TYPE_TIME_EXTEND;
2245
2246	/* Not the first event on the page? */
2247	if (rb_event_index(event)) {
2248		event->time_delta = delta & TS_MASK;
2249		event->array[0] = delta >> TS_SHIFT;
2250	} else {
2251		/* nope, just zero it */
2252		event->time_delta = 0;
2253		event->array[0] = 0;
2254	}
2255
2256	return skip_time_extend(event);
2257}
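
/*
 * Worked example (illustrative only): with TS_SHIFT being 27, the time
 * extend event splits a large delta into a low 27-bit part and a high
 * part, e.g. for delta = (3 << 27) + 5 the event carries time_delta = 5
 * and array[0] = 3.  The reader reassembles it the same way, as in
 * rb_update_read_stamp() further down:
 *
 *	delta = event->array[0];
 *	delta <<= TS_SHIFT;
 *	delta += event->time_delta;
 */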
2258
2259static inline bool rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
2260				     struct ring_buffer_event *event);
2261
2262/**
2263 * rb_update_event - update event type and data
 
2264 * @event: the event to update
2265 * @type: the type of event
2266 * @length: the size of the event field in the ring buffer
2267 *
2268 * Update the type and data fields of the event. The length
2269 * is the actual size that is written to the ring buffer,
2270 * and with this, we can determine what to place into the
2271 * data field.
2272 */
2273static void
2274rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
2275		struct ring_buffer_event *event,
2276		struct rb_event_info *info)
2277{
2278	unsigned length = info->length;
2279	u64 delta = info->delta;
 
2280
2281	/* Only a commit updates the timestamp */
2282	if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
2283		delta = 0;
2284
2285	/*
2286	 * If we need to add a timestamp, then we
2287	 * add it to the start of the reserved space.
2288	 */
2289	if (unlikely(info->add_timestamp)) {
2290		event = rb_add_time_stamp(event, delta);
2291		length -= RB_LEN_TIME_EXTEND;
2292		delta = 0;
2293	}
2294
2295	event->time_delta = delta;
2296	length -= RB_EVNT_HDR_SIZE;
2297	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
2298		event->type_len = 0;
2299		event->array[0] = length;
2300	} else
2301		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
2302}
2303
2304static unsigned rb_calculate_event_length(unsigned length)
2305{
2306	struct ring_buffer_event event; /* Used only for sizeof array */
2307
2308	/* zero length can cause confusions */
2309	if (!length)
2310		length++;
2311
2312	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
2313		length += sizeof(event.array[0]);
2314
2315	length += RB_EVNT_HDR_SIZE;
2316	length = ALIGN(length, RB_ARCH_ALIGNMENT);
2317
2318	/*
2319	 * In case the time delta is larger than the 27 bits for it
2320	 * in the header, we need to add a timestamp. If another
2321	 * event comes in when trying to discard this one to increase
2322	 * the length, then the timestamp will be added in the allocated
2323	 * space of this event. If length is bigger than the size needed
2324	 * for the TIME_EXTEND, then padding has to be used. The event's
2325	 * length must be either RB_LEN_TIME_EXTEND, or greater than or equal
2326	 * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding.
2327	 * As length is a multiple of 4, we only need to worry if it
2328	 * is 12 (RB_LEN_TIME_EXTEND + 4).
2329	 */
2330	if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
2331		length += RB_ALIGNMENT;
2332
2333	return length;
2334}
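
/*
 * Worked example (illustrative, assuming a configuration without forced
 * 8-byte alignment, i.e. RB_ALIGNMENT == 4 and RB_EVNT_HDR_SIZE == 4):
 * a request for 6 bytes of data becomes 6 + 4 = 10, which is aligned up
 * to 12.  That is exactly RB_LEN_TIME_EXTEND + RB_ALIGNMENT, so it is
 * bumped to 16, leaving room for the padding rule described above if the
 * event is later discarded.
 */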
2335
2336#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2337static inline bool sched_clock_stable(void)
2338{
2339	return true;
2340}
2341#endif
2342
2343static inline int
2344rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2345		  struct ring_buffer_event *event)
2346{
2347	unsigned long new_index, old_index;
2348	struct buffer_page *bpage;
2349	unsigned long index;
2350	unsigned long addr;
2351
2352	new_index = rb_event_index(event);
2353	old_index = new_index + rb_event_ts_length(event);
2354	addr = (unsigned long)event;
2355	addr &= PAGE_MASK;
2356
2357	bpage = READ_ONCE(cpu_buffer->tail_page);
2358
2359	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2360		unsigned long write_mask =
2361			local_read(&bpage->write) & ~RB_WRITE_MASK;
2362		unsigned long event_length = rb_event_length(event);
2363		/*
2364		 * This is on the tail page. It is possible that
2365		 * a write could come in and move the tail page
2366		 * and write to the next page. That is fine
2367		 * because we just shorten what is on this page.
2368		 */
2369		old_index += write_mask;
2370		new_index += write_mask;
2371		index = local_cmpxchg(&bpage->write, old_index, new_index);
2372		if (index == old_index) {
 
2373			/* update counters */
2374			local_sub(event_length, &cpu_buffer->entries_bytes);
2375			return 1;
2376		}
2377	}
2378
2379	/* could not discard */
2380	return 0;
2381}
2382
2383static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2384{
2385	local_inc(&cpu_buffer->committing);
2386	local_inc(&cpu_buffer->commits);
2387}
2388
2389static void
2390rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
2391{
2392	unsigned long max_count;
2393
2394	/*
2395	 * We only race with interrupts and NMIs on this CPU.
2396	 * If we own the commit event, then we can commit
2397	 * all others that interrupted us, since the interruptions
2398	 * are in stack format (they finish before they come
2399	 * back to us). This allows us to do a simple loop to
2400	 * assign the commit to the tail.
2401	 */
2402 again:
2403	max_count = cpu_buffer->nr_pages * 100;
2404
2405	while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
2406		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
2407			return;
2408		if (RB_WARN_ON(cpu_buffer,
2409			       rb_is_reader_page(cpu_buffer->tail_page)))
2410			return;
2411		local_set(&cpu_buffer->commit_page->page->commit,
2412			  rb_page_write(cpu_buffer->commit_page));
2413		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
2414		/* Only update the write stamp if the page has an event */
2415		if (rb_page_write(cpu_buffer->commit_page))
2416			cpu_buffer->write_stamp =
2417				cpu_buffer->commit_page->page->time_stamp;
2418		/* add barrier to keep gcc from optimizing too much */
2419		barrier();
2420	}
2421	while (rb_commit_index(cpu_buffer) !=
2422	       rb_page_write(cpu_buffer->commit_page)) {
2423
2424		local_set(&cpu_buffer->commit_page->page->commit,
2425			  rb_page_write(cpu_buffer->commit_page));
2426		RB_WARN_ON(cpu_buffer,
2427			   local_read(&cpu_buffer->commit_page->page->commit) &
2428			   ~RB_WRITE_MASK);
2429		barrier();
2430	}
2431
2432	/* again, keep gcc from optimizing */
2433	barrier();
2434
2435	/*
2436	 * If an interrupt came in just after the first while loop
2437	 * and pushed the tail page forward, we will be left with
2438	 * a dangling commit that will never go forward.
2439	 */
2440	if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
2441		goto again;
2442}
2443
2444static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2445{
2446	unsigned long commits;
2447
2448	if (RB_WARN_ON(cpu_buffer,
2449		       !local_read(&cpu_buffer->committing)))
2450		return;
2451
2452 again:
2453	commits = local_read(&cpu_buffer->commits);
2454	/* synchronize with interrupts */
2455	barrier();
2456	if (local_read(&cpu_buffer->committing) == 1)
2457		rb_set_commit_to_write(cpu_buffer);
2458
2459	local_dec(&cpu_buffer->committing);
2460
2461	/* synchronize with interrupts */
2462	barrier();
2463
2464	/*
2465	 * Need to account for interrupts coming in between the
2466	 * updating of the commit page and the clearing of the
2467	 * committing counter.
2468	 */
2469	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2470	    !local_read(&cpu_buffer->committing)) {
2471		local_inc(&cpu_buffer->committing);
2472		goto again;
2473	}
2474}
2475
2476static inline void rb_event_discard(struct ring_buffer_event *event)
2477{
2478	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
2479		event = skip_time_extend(event);
2480
2481	/* array[0] holds the actual length for the discarded event */
2482	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2483	event->type_len = RINGBUF_TYPE_PADDING;
2484	/* time delta must be non zero */
2485	if (!event->time_delta)
2486		event->time_delta = 1;
2487}
2488
2489static inline bool
2490rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
2491		   struct ring_buffer_event *event)
2492{
2493	unsigned long addr = (unsigned long)event;
2494	unsigned long index;
2495
2496	index = rb_event_index(event);
2497	addr &= PAGE_MASK;
2498
2499	return cpu_buffer->commit_page->page == (void *)addr &&
2500		rb_commit_index(cpu_buffer) == index;
2501}
2502
2503static void
2504rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2505		      struct ring_buffer_event *event)
2506{
2507	u64 delta;
2508
2509	/*
2510	 * The first event in the commit queue updates the
2511	 * time stamp.
2512	 */
2513	if (rb_event_is_commit(cpu_buffer, event)) {
2514		/*
2515		 * A commit event that is first on a page
2516		 * updates the write timestamp with the page stamp
2517		 */
2518		if (!rb_event_index(event))
2519			cpu_buffer->write_stamp =
2520				cpu_buffer->commit_page->page->time_stamp;
2521		else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
2522			delta = event->array[0];
2523			delta <<= TS_SHIFT;
2524			delta += event->time_delta;
2525			cpu_buffer->write_stamp += delta;
2526		} else
2527			cpu_buffer->write_stamp += event->time_delta;
2528	}
2529}
2530
2531static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2532		      struct ring_buffer_event *event)
2533{
2534	local_inc(&cpu_buffer->entries);
2535	rb_update_write_stamp(cpu_buffer, event);
2536	rb_end_commit(cpu_buffer);
2537}
2538
2539static __always_inline void
2540rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
2541{
2542	bool pagebusy;
2543
2544	if (buffer->irq_work.waiters_pending) {
2545		buffer->irq_work.waiters_pending = false;
2546		/* irq_work_queue() supplies its own memory barriers */
2547		irq_work_queue(&buffer->irq_work.work);
2548	}
2549
2550	if (cpu_buffer->irq_work.waiters_pending) {
2551		cpu_buffer->irq_work.waiters_pending = false;
2552		/* irq_work_queue() supplies its own memory barriers */
2553		irq_work_queue(&cpu_buffer->irq_work.work);
2554	}
2555
2556	pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
 
2557
2558	if (!pagebusy && cpu_buffer->irq_work.full_waiters_pending) {
2559		cpu_buffer->irq_work.wakeup_full = true;
2560		cpu_buffer->irq_work.full_waiters_pending = false;
2561		/* irq_work_queue() supplies it's own memory barriers */
2562		irq_work_queue(&cpu_buffer->irq_work.work);
2563	}
2564}
2565
2566/*
2567 * The lock and unlock are done within a preempt disable section.
2568 * The current_context per_cpu variable can only be modified
2569 * by the current task between lock and unlock. But it can
2570 * be modified more than once via an interrupt. To pass this
2571 * information from the lock to the unlock without having to
2572 * access the 'in_interrupt()' functions again (which do show
2573 * a bit of overhead in something as critical as function tracing),
2574 * we use a bitmask trick.
2575 *
2576 *  bit 0 =  NMI context
2577 *  bit 1 =  IRQ context
2578 *  bit 2 =  SoftIRQ context
2579 *  bit 3 =  normal context.
2580 *
2581 * This works because this is the order of contexts that can
2582 * preempt other contexts. A SoftIRQ never preempts an IRQ
2583 * context.
2584 *
2585 * When the context is determined, the corresponding bit is
2586 * checked and set (if it was set, then a recursion of that context
2587 * happened).
2588 *
2589 * On unlock, we need to clear this bit. To do so, just subtract
2590 * 1 from the current_context and AND it to itself.
2591 *
2592 * (binary)
2593 *  101 - 1 = 100
2594 *  101 & 100 = 100 (clearing bit zero)
2595 *
2596 *  1010 - 1 = 1001
2597 *  1010 & 1001 = 1000 (clearing bit 1)
2598 *
2599 * The least significant bit can be cleared this way, and it
2600 * just so happens that it is the same bit corresponding to
2601 * the current context.
2602 */
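
/*
 * Illustrative sketch (not part of the ring buffer itself): how the
 * "subtract one and AND" unlock clears exactly the bit of the context
 * that is unlocking, assuming contexts nest as described above.
 *
 *	val |= (1 << RB_CTX_NORMAL);	(task context locks:       1000)
 *	val |= (1 << RB_CTX_IRQ);	(an IRQ interrupts, locks: 1010)
 *	val &= val - 1;			(the IRQ unlocks:          1000)
 *	val &= val - 1;			(the task unlocks:         0000)
 */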
2603
2604static __always_inline int
2605trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
2606{
2607	unsigned int val = cpu_buffer->current_context;
2608	int bit;
2609
2610	if (in_interrupt()) {
2611		if (in_nmi())
2612			bit = RB_CTX_NMI;
2613		else if (in_irq())
2614			bit = RB_CTX_IRQ;
2615		else
2616			bit = RB_CTX_SOFTIRQ;
2617	} else
2618		bit = RB_CTX_NORMAL;
2619
2620	if (unlikely(val & (1 << bit)))
2621		return 1;
2622
2623	val |= (1 << bit);
2624	cpu_buffer->current_context = val;
2625
2626	return 0;
2627}
2628
2629static __always_inline void
2630trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
2631{
2632	cpu_buffer->current_context &= cpu_buffer->current_context - 1;
2633}
2634
2635/**
2636 * ring_buffer_unlock_commit - commit a reserved event
2637 * @buffer: The buffer to commit to
2638 * @event: The event pointer to commit.
2639 *
2640 * This commits the data to the ring buffer, and releases any locks held.
2641 *
2642 * Must be paired with ring_buffer_lock_reserve.
2643 */
2644int ring_buffer_unlock_commit(struct ring_buffer *buffer,
2645			      struct ring_buffer_event *event)
2646{
2647	struct ring_buffer_per_cpu *cpu_buffer;
2648	int cpu = raw_smp_processor_id();
2649
2650	cpu_buffer = buffer->buffers[cpu];
2651
2652	rb_commit(cpu_buffer, event);
2653
2654	rb_wakeups(buffer, cpu_buffer);
2655
2656	trace_recursive_unlock(cpu_buffer);
2657
2658	preempt_enable_notrace();
2659
2660	return 0;
2661}
2662EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
2663
2664static noinline void
2665rb_handle_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2666		    struct rb_event_info *info)
2667{
2668	WARN_ONCE(info->delta > (1ULL << 59),
2669		  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
2670		  (unsigned long long)info->delta,
2671		  (unsigned long long)info->ts,
2672		  (unsigned long long)cpu_buffer->write_stamp,
2673		  sched_clock_stable() ? "" :
2674		  "If you just came from a suspend/resume,\n"
2675		  "please switch to the trace global clock:\n"
2676		  "  echo global > /sys/kernel/debug/tracing/trace_clock\n");
2677	info->add_timestamp = 1;
2678}
2679
2680static struct ring_buffer_event *
2681__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
2682		  struct rb_event_info *info)
2683{
2684	struct ring_buffer_event *event;
2685	struct buffer_page *tail_page;
2686	unsigned long tail, write;
2687
2688	/*
2689	 * If the time delta since the last event is too big to
2690	 * hold in the time field of the event, then we append a
2691	 * TIME EXTEND event ahead of the data event.
2692	 */
2693	if (unlikely(info->add_timestamp))
2694		info->length += RB_LEN_TIME_EXTEND;
2695
2696	/* Don't let the compiler play games with cpu_buffer->tail_page */
2697	tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
2698	write = local_add_return(info->length, &tail_page->write);
2699
2700	/* set write to only the index of the write */
2701	write &= RB_WRITE_MASK;
 
2702	tail = write - info->length;
2703
2704	/*
2705	 * If this is the first commit on the page, then it has the same
2706	 * timestamp as the page itself.
2707	 */
2708	if (!tail)
 
2709		info->delta = 0;
2710
2711	/* See if we shot past the end of this buffer page */
2712	if (unlikely(write > BUF_PAGE_SIZE))
2713		return rb_move_tail(cpu_buffer, tail, info);
2714
2715	/* We reserved something on the buffer */
2716
2717	event = __rb_page_index(tail_page, tail);
2718	kmemcheck_annotate_bitfield(event, bitfield);
2719	rb_update_event(cpu_buffer, event, info);
2720
2721	local_inc(&tail_page->entries);
2722
2723	/*
2724	 * If this is the first commit on the page, then update
2725	 * its timestamp.
2726	 */
2727	if (!tail)
2728		tail_page->page->time_stamp = info->ts;
2729
2730	/* account for these added bytes */
2731	local_add(info->length, &cpu_buffer->entries_bytes);
2732
2733	return event;
2734}
2735
2736static struct ring_buffer_event *
2737rb_reserve_next_event(struct ring_buffer *buffer,
2738		      struct ring_buffer_per_cpu *cpu_buffer,
2739		      unsigned long length)
2740{
2741	struct ring_buffer_event *event;
2742	struct rb_event_info info;
2743	int nr_loops = 0;
2744	u64 diff;
2745
2746	rb_start_commit(cpu_buffer);
 
2747
2748#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2749	/*
2750	 * Due to the ability to swap a cpu buffer from a buffer
2751	 * it is possible it was swapped before we committed.
2752	 * (committing stops a swap). We check for it here and
2753	 * if it happened, we have to fail the write.
2754	 */
2755	barrier();
2756	if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
2757		local_dec(&cpu_buffer->committing);
2758		local_dec(&cpu_buffer->commits);
2759		return NULL;
2760	}
2761#endif
2762
2763	info.length = rb_calculate_event_length(length);
2764 again:
2765	info.add_timestamp = 0;
2766	info.delta = 0;
2767
2768	/*
2769	 * We allow for interrupts to reenter here and do a trace.
2770	 * If one does, it will cause this original code to loop
2771	 * back here. Even with heavy interrupts happening, this
2772	 * should only happen a few times in a row. If this happens
2773	 * 1000 times in a row, there must be either an interrupt
2774	 * storm or we have something buggy.
2775	 * Bail!
2776	 */
2777	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2778		goto out_fail;
2779
2780	info.ts = rb_time_stamp(cpu_buffer->buffer);
2781	diff = info.ts - cpu_buffer->write_stamp;
2782
2783	/* make sure this diff is calculated here */
2784	barrier();
2785
2786	/* Did the write stamp get updated already? */
2787	if (likely(info.ts >= cpu_buffer->write_stamp)) {
2788		info.delta = diff;
2789		if (unlikely(test_time_stamp(info.delta)))
2790			rb_handle_timestamp(cpu_buffer, &info);
2791	}
2792
2793	event = __rb_reserve_next(cpu_buffer, &info);
2794
2795	if (unlikely(PTR_ERR(event) == -EAGAIN)) {
2796		if (info.add_timestamp)
2797			info.length -= RB_LEN_TIME_EXTEND;
2798		goto again;
2799	}
2800
2801	if (!event)
2802		goto out_fail;
2803
2804	return event;
2805
2806 out_fail:
2807	rb_end_commit(cpu_buffer);
2808	return NULL;
2809}
2810
2811/**
2812 * ring_buffer_lock_reserve - reserve a part of the buffer
2813 * @buffer: the ring buffer to reserve from
2814 * @length: the length of the data to reserve (excluding event header)
2815 *
2816 * Returns a reserved event on the ring buffer to copy directly to.
2817 * The user of this interface will need to get the body to write into
2818 * and can use the ring_buffer_event_data() interface.
2819 *
2820 * The length is the length of the data needed, not the event length
2821 * which also includes the event header.
2822 *
2823 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
2824 * If NULL is returned, then nothing has been allocated or locked.
2825 */
2826struct ring_buffer_event *
2827ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
2828{
2829	struct ring_buffer_per_cpu *cpu_buffer;
2830	struct ring_buffer_event *event;
2831	int cpu;
2832
2833	/* If we are tracing schedule, we don't want to recurse */
2834	preempt_disable_notrace();
2835
2836	if (unlikely(atomic_read(&buffer->record_disabled)))
2837		goto out;
2838
2839	cpu = raw_smp_processor_id();
2840
2841	if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
2842		goto out;
2843
2844	cpu_buffer = buffer->buffers[cpu];
2845
2846	if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
2847		goto out;
2848
2849	if (unlikely(length > BUF_MAX_DATA_SIZE))
2850		goto out;
2851
2852	if (unlikely(trace_recursive_lock(cpu_buffer)))
2853		goto out;
2854
2855	event = rb_reserve_next_event(buffer, cpu_buffer, length);
2856	if (!event)
2857		goto out_unlock;
2858
2859	return event;
2860
2861 out_unlock:
2862	trace_recursive_unlock(cpu_buffer);
2863 out:
2864	preempt_enable_notrace();
2865	return NULL;
2866}
2867EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
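
/*
 * Illustrative usage sketch (hypothetical caller; "struct my_event" is
 * made up): reserve space, fill it through ring_buffer_event_data(), then
 * commit.  If the reserve fails, nothing was allocated and no commit is
 * needed.
 *
 *	struct ring_buffer_event *event;
 *	struct my_event *entry;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return -EBUSY;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;
 *	ring_buffer_unlock_commit(buffer, event);
 */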
2868
2869/*
2870 * Decrement the entries to the page that an event is on.
2871 * The event does not even need to exist, only the pointer
2872 * to the page it is on. This may only be called before the commit
2873 * takes place.
2874 */
2875static inline void
2876rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2877		   struct ring_buffer_event *event)
2878{
2879	unsigned long addr = (unsigned long)event;
2880	struct buffer_page *bpage = cpu_buffer->commit_page;
2881	struct buffer_page *start;
2882
2883	addr &= PAGE_MASK;
2884
2885	/* Do the likely case first */
2886	if (likely(bpage->page == (void *)addr)) {
2887		local_dec(&bpage->entries);
2888		return;
2889	}
2890
2891	/*
2892	 * Because the commit page may be on the reader page we
2893	 * start with the next page and check the end loop there.
2894	 */
2895	rb_inc_page(cpu_buffer, &bpage);
2896	start = bpage;
2897	do {
2898		if (bpage->page == (void *)addr) {
2899			local_dec(&bpage->entries);
2900			return;
2901		}
2902		rb_inc_page(cpu_buffer, &bpage);
2903	} while (bpage != start);
2904
2905	/* commit not part of this buffer?? */
2906	RB_WARN_ON(cpu_buffer, 1);
2907}
2908
2909/**
2910 * ring_buffer_commit_discard - discard an event that has not been committed
2911 * @buffer: the ring buffer
2912 * @event: non committed event to discard
2913 *
2914 * Sometimes an event that is in the ring buffer needs to be ignored.
2915 * This function lets the user discard an event in the ring buffer
2916 * and then that event will not be read later.
2917 *
2918 * This function only works if it is called before the item has been
2919 * committed. It will try to free the event from the ring buffer
2920 * if another event has not been added behind it.
2921 *
2922 * If another event has been added behind it, it will set the event
2923 * up as discarded, and perform the commit.
2924 *
2925 * If this function is called, do not call ring_buffer_unlock_commit on
2926 * the event.
2927 */
2928void ring_buffer_discard_commit(struct ring_buffer *buffer,
2929				struct ring_buffer_event *event)
2930{
2931	struct ring_buffer_per_cpu *cpu_buffer;
2932	int cpu;
2933
2934	/* The event is discarded regardless */
2935	rb_event_discard(event);
2936
2937	cpu = smp_processor_id();
2938	cpu_buffer = buffer->buffers[cpu];
2939
2940	/*
2941	 * This must only be called if the event has not been
2942	 * committed yet. Thus we can assume that preemption
2943	 * is still disabled.
2944	 */
2945	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
2946
2947	rb_decrement_entry(cpu_buffer, event);
2948	if (rb_try_to_discard(cpu_buffer, event))
2949		goto out;
2950
2951	/*
2952	 * The commit is still visible by the reader, so we
2953	 * must still update the timestamp.
2954	 */
2955	rb_update_write_stamp(cpu_buffer, event);
2956 out:
2957	rb_end_commit(cpu_buffer);
2958
2959	trace_recursive_unlock(cpu_buffer);
2960
2961	preempt_enable_notrace();
2962
2963}
2964EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
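
/*
 * Illustrative usage sketch (hypothetical caller; fill_my_event() is made
 * up): an event that turns out to be unwanted after it was reserved is
 * discarded instead of committed.
 *
 *	event = ring_buffer_lock_reserve(buffer, size);
 *	if (event) {
 *		if (fill_my_event(ring_buffer_event_data(event)) < 0)
 *			ring_buffer_discard_commit(buffer, event);
 *		else
 *			ring_buffer_unlock_commit(buffer, event);
 *	}
 */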
2965
2966/**
2967 * ring_buffer_write - write data to the buffer without reserving
2968 * @buffer: The ring buffer to write to.
2969 * @length: The length of the data being written (excluding the event header)
2970 * @data: The data to write to the buffer.
2971 *
2972 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
2973 * one function. If you already have the data to write to the buffer, it
2974 * may be easier to simply call this function.
2975 *
2976 * Note, like ring_buffer_lock_reserve, the length is the length of the data
2977 * and not the length of the event which would hold the header.
2978 */
2979int ring_buffer_write(struct ring_buffer *buffer,
2980		      unsigned long length,
2981		      void *data)
2982{
2983	struct ring_buffer_per_cpu *cpu_buffer;
2984	struct ring_buffer_event *event;
2985	void *body;
2986	int ret = -EBUSY;
2987	int cpu;
2988
2989	preempt_disable_notrace();
2990
2991	if (atomic_read(&buffer->record_disabled))
2992		goto out;
2993
2994	cpu = raw_smp_processor_id();
2995
2996	if (!cpumask_test_cpu(cpu, buffer->cpumask))
2997		goto out;
2998
2999	cpu_buffer = buffer->buffers[cpu];
3000
3001	if (atomic_read(&cpu_buffer->record_disabled))
3002		goto out;
3003
3004	if (length > BUF_MAX_DATA_SIZE)
3005		goto out;
3006
3007	if (unlikely(trace_recursive_lock(cpu_buffer)))
3008		goto out;
3009
3010	event = rb_reserve_next_event(buffer, cpu_buffer, length);
3011	if (!event)
3012		goto out_unlock;
3013
3014	body = rb_event_data(event);
3015
3016	memcpy(body, data, length);
3017
3018	rb_commit(cpu_buffer, event);
3019
3020	rb_wakeups(buffer, cpu_buffer);
3021
3022	ret = 0;
3023
3024 out_unlock:
3025	trace_recursive_unlock(cpu_buffer);
3026
3027 out:
3028	preempt_enable_notrace();
3029
3030	return ret;
3031}
3032EXPORT_SYMBOL_GPL(ring_buffer_write);
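
/*
 * Illustrative usage sketch (hypothetical caller; "struct my_event" is
 * made up): when the payload is already assembled, one call replaces the
 * reserve/commit pair.  A non-zero return means the event was not written
 * (recording disabled, CPU not in the buffer's cpumask, or data too big).
 *
 *	struct my_event entry = { .value = 42 };
 *
 *	if (ring_buffer_write(buffer, sizeof(entry), &entry))
 *		pr_debug("ring buffer event dropped\n");
 */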
3033
3034static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
3035{
3036	struct buffer_page *reader = cpu_buffer->reader_page;
3037	struct buffer_page *head = rb_set_head_page(cpu_buffer);
3038	struct buffer_page *commit = cpu_buffer->commit_page;
3039
3040	/* In case of error, head will be NULL */
3041	if (unlikely(!head))
3042		return true;
3043
3044	return reader->read == rb_page_commit(reader) &&
3045		(commit == reader ||
3046		 (commit == head &&
3047		  head->read == rb_page_commit(commit)));
3048}
3049
3050/**
3051 * ring_buffer_record_disable - stop all writes into the buffer
3052 * @buffer: The ring buffer to stop writes to.
3053 *
3054 * This prevents all writes to the buffer. Any attempt to write
3055 * to the buffer after this will fail and return NULL.
3056 *
3057 * The caller should call synchronize_sched() after this.
3058 */
3059void ring_buffer_record_disable(struct ring_buffer *buffer)
3060{
3061	atomic_inc(&buffer->record_disabled);
3062}
3063EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
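
/*
 * Illustrative usage sketch (hypothetical caller): quiesce all writers
 * before inspecting the buffer, then turn recording back on.  The
 * synchronize_sched() call waits for writers that were already inside a
 * reserve/commit section, as recommended above.
 *
 *	ring_buffer_record_disable(buffer);
 *	synchronize_sched();
 *	... read or reset the buffer here ...
 *	ring_buffer_record_enable(buffer);
 */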
3064
3065/**
3066 * ring_buffer_record_enable - enable writes to the buffer
3067 * @buffer: The ring buffer to enable writes
3068 *
3069 * Note, multiple disables will need the same number of enables
3070 * to truly enable the writing (much like preempt_disable).
3071 */
3072void ring_buffer_record_enable(struct ring_buffer *buffer)
3073{
3074	atomic_dec(&buffer->record_disabled);
3075}
3076EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
3077
3078/**
3079 * ring_buffer_record_off - stop all writes into the buffer
3080 * @buffer: The ring buffer to stop writes to.
3081 *
3082 * This prevents all writes to the buffer. Any attempt to write
3083 * to the buffer after this will fail and return NULL.
3084 *
3085 * This is different from ring_buffer_record_disable() as
3086 * it works like an on/off switch, whereas the disable() version
3087 * must be paired with an enable().
3088 */
3089void ring_buffer_record_off(struct ring_buffer *buffer)
3090{
3091	unsigned int rd;
3092	unsigned int new_rd;
3093
 
3094	do {
3095		rd = atomic_read(&buffer->record_disabled);
3096		new_rd = rd | RB_BUFFER_OFF;
3097	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3098}
3099EXPORT_SYMBOL_GPL(ring_buffer_record_off);
3100
3101/**
3102 * ring_buffer_record_on - restart writes into the buffer
3103 * @buffer: The ring buffer to start writes to.
3104 *
3105 * This enables all writes to the buffer that was disabled by
3106 * ring_buffer_record_off().
3107 *
3108 * This is different from ring_buffer_record_enable() as
3109 * it works like an on/off switch, whereas the enable() version
3110 * must be paired with a disable().
3111 */
3112void ring_buffer_record_on(struct ring_buffer *buffer)
3113{
3114	unsigned int rd;
3115	unsigned int new_rd;
3116
 
3117	do {
3118		rd = atomic_read(&buffer->record_disabled);
3119		new_rd = rd & ~RB_BUFFER_OFF;
3120	} while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3121}
3122EXPORT_SYMBOL_GPL(ring_buffer_record_on);
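
/*
 * Illustrative sketch (hypothetical caller): unlike the nested
 * disable()/enable() pair, off()/on() behave as a plain switch, so a
 * single ring_buffer_record_on() re-enables recording no matter how many
 * times ring_buffer_record_off() was called (assuming no other disables
 * are pending).
 *
 *	ring_buffer_record_off(buffer);
 *	ring_buffer_record_off(buffer);
 *	ring_buffer_record_on(buffer);	(recording is enabled again)
 */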
3123
3124/**
3125 * ring_buffer_record_is_on - return true if the ring buffer can write
3126 * @buffer: The ring buffer to see if write is enabled
3127 *
3128 * Returns true if the ring buffer is in a state that it accepts writes.
3129 */
3130int ring_buffer_record_is_on(struct ring_buffer *buffer)
3131{
3132	return !atomic_read(&buffer->record_disabled);
3133}
3134
3135/**
3136 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
3137 * @buffer: The ring buffer to stop writes to.
3138 * @cpu: The CPU buffer to stop
3139 *
3140 * This prevents all writes to the buffer. Any attempt to write
3141 * to the buffer after this will fail and return NULL.
3142 *
3143 * The caller should call synchronize_sched() after this.
3144 */
3145void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
3146{
3147	struct ring_buffer_per_cpu *cpu_buffer;
3148
3149	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3150		return;
3151
3152	cpu_buffer = buffer->buffers[cpu];
3153	atomic_inc(&cpu_buffer->record_disabled);
3154}
3155EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
3156
3157/**
3158 * ring_buffer_record_enable_cpu - enable writes to the buffer
3159 * @buffer: The ring buffer to enable writes
3160 * @cpu: The CPU to enable.
3161 *
3162 * Note, multiple disables will need the same number of enables
3163 * to truly enable the writing (much like preempt_disable).
3164 */
3165void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
3166{
3167	struct ring_buffer_per_cpu *cpu_buffer;
3168
3169	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3170		return;
3171
3172	cpu_buffer = buffer->buffers[cpu];
3173	atomic_dec(&cpu_buffer->record_disabled);
3174}
3175EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
3176
3177/*
3178 * The total entries in the ring buffer is the running counter
3179 * of entries entered into the ring buffer, minus the sum of
3180 * the entries read from the ring buffer and the number of
3181 * entries that were overwritten.
3182 */
3183static inline unsigned long
3184rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
3185{
3186	return local_read(&cpu_buffer->entries) -
3187		(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
3188}
3189
3190/**
3191 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
3192 * @buffer: The ring buffer
3193 * @cpu: The per CPU buffer to read from.
3194 */
3195u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
3196{
3197	unsigned long flags;
3198	struct ring_buffer_per_cpu *cpu_buffer;
3199	struct buffer_page *bpage;
3200	u64 ret = 0;
3201
3202	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3203		return 0;
3204
3205	cpu_buffer = buffer->buffers[cpu];
3206	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3207	/*
3208	 * if the tail is on reader_page, oldest time stamp is on the reader
3209	 * page
3210	 */
3211	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
3212		bpage = cpu_buffer->reader_page;
3213	else
3214		bpage = rb_set_head_page(cpu_buffer);
3215	if (bpage)
3216		ret = bpage->page->time_stamp;
3217	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3218
3219	return ret;
3220}
3221EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
3222
3223/**
3224 * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
3225 * @buffer: The ring buffer
3226 * @cpu: The per CPU buffer to read from.
3227 */
3228unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
3229{
3230	struct ring_buffer_per_cpu *cpu_buffer;
3231	unsigned long ret;
3232
3233	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3234		return 0;
3235
3236	cpu_buffer = buffer->buffers[cpu];
3237	ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
3238
3239	return ret;
3240}
3241EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
3242
3243/**
3244 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
3245 * @buffer: The ring buffer
3246 * @cpu: The per CPU buffer to get the entries from.
3247 */
3248unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
3249{
3250	struct ring_buffer_per_cpu *cpu_buffer;
3251
3252	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3253		return 0;
3254
3255	cpu_buffer = buffer->buffers[cpu];
3256
3257	return rb_num_of_entries(cpu_buffer);
3258}
3259EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
3260
3261/**
3262 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
3263 * buffer wrapping around (only if RB_FL_OVERWRITE is on).
3264 * @buffer: The ring buffer
3265 * @cpu: The per CPU buffer to get the number of overruns from
3266 */
3267unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
3268{
3269	struct ring_buffer_per_cpu *cpu_buffer;
3270	unsigned long ret;
3271
3272	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3273		return 0;
3274
3275	cpu_buffer = buffer->buffers[cpu];
3276	ret = local_read(&cpu_buffer->overrun);
3277
3278	return ret;
3279}
3280EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
3281
3282/**
3283 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
3284 * commits failing due to the buffer wrapping around while there are uncommitted
3285 * events, such as during an interrupt storm.
3286 * @buffer: The ring buffer
3287 * @cpu: The per CPU buffer to get the number of overruns from
3288 */
3289unsigned long
3290ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
3291{
3292	struct ring_buffer_per_cpu *cpu_buffer;
3293	unsigned long ret;
3294
3295	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3296		return 0;
3297
3298	cpu_buffer = buffer->buffers[cpu];
3299	ret = local_read(&cpu_buffer->commit_overrun);
3300
3301	return ret;
3302}
3303EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
3304
3305/**
3306 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
3307 * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
3308 * @buffer: The ring buffer
3309 * @cpu: The per CPU buffer to get the number of overruns from
3310 */
3311unsigned long
3312ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
3313{
3314	struct ring_buffer_per_cpu *cpu_buffer;
3315	unsigned long ret;
3316
3317	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3318		return 0;
3319
3320	cpu_buffer = buffer->buffers[cpu];
3321	ret = local_read(&cpu_buffer->dropped_events);
3322
3323	return ret;
3324}
3325EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
3326
3327/**
3328 * ring_buffer_read_events_cpu - get the number of events successfully read
3329 * @buffer: The ring buffer
3330 * @cpu: The per CPU buffer to get the number of events read
3331 */
3332unsigned long
3333ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
3334{
3335	struct ring_buffer_per_cpu *cpu_buffer;
3336
3337	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3338		return 0;
3339
3340	cpu_buffer = buffer->buffers[cpu];
3341	return cpu_buffer->read;
3342}
3343EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
3344
3345/**
3346 * ring_buffer_entries - get the number of entries in a buffer
3347 * @buffer: The ring buffer
3348 *
3349 * Returns the total number of entries in the ring buffer
3350 * (all CPU entries)
3351 */
3352unsigned long ring_buffer_entries(struct ring_buffer *buffer)
3353{
3354	struct ring_buffer_per_cpu *cpu_buffer;
3355	unsigned long entries = 0;
3356	int cpu;
3357
3358	/* if you care about this being correct, lock the buffer */
3359	for_each_buffer_cpu(buffer, cpu) {
3360		cpu_buffer = buffer->buffers[cpu];
3361		entries += rb_num_of_entries(cpu_buffer);
3362	}
3363
3364	return entries;
3365}
3366EXPORT_SYMBOL_GPL(ring_buffer_entries);
3367
3368/**
3369 * ring_buffer_overruns - get the number of overruns in buffer
3370 * @buffer: The ring buffer
3371 *
3372 * Returns the total number of overruns in the ring buffer
3373 * (all CPU entries)
3374 */
3375unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
3376{
3377	struct ring_buffer_per_cpu *cpu_buffer;
3378	unsigned long overruns = 0;
3379	int cpu;
3380
3381	/* if you care about this being correct, lock the buffer */
3382	for_each_buffer_cpu(buffer, cpu) {
3383		cpu_buffer = buffer->buffers[cpu];
3384		overruns += local_read(&cpu_buffer->overrun);
3385	}
3386
3387	return overruns;
3388}
3389EXPORT_SYMBOL_GPL(ring_buffer_overruns);
3390
3391static void rb_iter_reset(struct ring_buffer_iter *iter)
3392{
3393	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3394
3395	/* Iterator usage is expected to have record disabled */
3396	iter->head_page = cpu_buffer->reader_page;
3397	iter->head = cpu_buffer->reader_page->read;
3398
3399	iter->cache_reader_page = iter->head_page;
3400	iter->cache_read = cpu_buffer->read;
3401
3402	if (iter->head)
3403		iter->read_stamp = cpu_buffer->read_stamp;
3404	else
3405		iter->read_stamp = iter->head_page->page->time_stamp;
3406}
3407
3408/**
3409 * ring_buffer_iter_reset - reset an iterator
3410 * @iter: The iterator to reset
3411 *
3412 * Resets the iterator, so that it will start from the beginning
3413 * again.
3414 */
3415void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
3416{
3417	struct ring_buffer_per_cpu *cpu_buffer;
3418	unsigned long flags;
3419
3420	if (!iter)
3421		return;
3422
3423	cpu_buffer = iter->cpu_buffer;
3424
3425	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3426	rb_iter_reset(iter);
3427	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3428}
3429EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
3430
3431/**
3432 * ring_buffer_iter_empty - check if an iterator has no more to read
3433 * @iter: The iterator to check
3434 */
3435int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
3436{
3437	struct ring_buffer_per_cpu *cpu_buffer;
3438
3439	cpu_buffer = iter->cpu_buffer;
3440
3441	return iter->head_page == cpu_buffer->commit_page &&
3442		iter->head == rb_commit_index(cpu_buffer);
3443}
3444EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
3445
3446static void
3447rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
3448		     struct ring_buffer_event *event)
3449{
3450	u64 delta;
3451
3452	switch (event->type_len) {
3453	case RINGBUF_TYPE_PADDING:
3454		return;
3455
3456	case RINGBUF_TYPE_TIME_EXTEND:
3457		delta = event->array[0];
3458		delta <<= TS_SHIFT;
3459		delta += event->time_delta;
3460		cpu_buffer->read_stamp += delta;
3461		return;
3462
3463	case RINGBUF_TYPE_TIME_STAMP:
3464		/* FIXME: not implemented */
3465		return;
3466
3467	case RINGBUF_TYPE_DATA:
3468		cpu_buffer->read_stamp += event->time_delta;
3469		return;
3470
3471	default:
3472		BUG();
3473	}
3474	return;
3475}
3476
3477static void
3478rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
3479			  struct ring_buffer_event *event)
3480{
3481	u64 delta;
3482
3483	switch (event->type_len) {
3484	case RINGBUF_TYPE_PADDING:
3485		return;
3486
3487	case RINGBUF_TYPE_TIME_EXTEND:
3488		delta = event->array[0];
3489		delta <<= TS_SHIFT;
3490		delta += event->time_delta;
3491		iter->read_stamp += delta;
3492		return;
3493
3494	case RINGBUF_TYPE_TIME_STAMP:
3495		/* FIXME: not implemented */
3496		return;
3497
3498	case RINGBUF_TYPE_DATA:
3499		iter->read_stamp += event->time_delta;
3500		return;
3501
3502	default:
3503		BUG();
3504	}
3505	return;
3506}
3507
3508static struct buffer_page *
3509rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3510{
3511	struct buffer_page *reader = NULL;
 
3512	unsigned long overwrite;
3513	unsigned long flags;
3514	int nr_loops = 0;
3515	int ret;
3516
3517	local_irq_save(flags);
3518	arch_spin_lock(&cpu_buffer->lock);
3519
3520 again:
3521	/*
3522	 * This should normally only loop twice. But because the
3523	 * start of the reader inserts an empty page, it causes
3524	 * a case where we will loop three times. There should be no
3525	 * reason to loop four times (that I know of).
3526	 */
3527	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
3528		reader = NULL;
3529		goto out;
3530	}
3531
3532	reader = cpu_buffer->reader_page;
3533
3534	/* If there's more to read, return this page */
3535	if (cpu_buffer->reader_page->read < rb_page_size(reader))
3536		goto out;
3537
3538	/* Never should we have an index greater than the size */
3539	if (RB_WARN_ON(cpu_buffer,
3540		       cpu_buffer->reader_page->read > rb_page_size(reader)))
3541		goto out;
3542
3543	/* check if we caught up to the tail */
3544	reader = NULL;
3545	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
3546		goto out;
3547
3548	/* Don't bother swapping if the ring buffer is empty */
3549	if (rb_num_of_entries(cpu_buffer) == 0)
3550		goto out;
3551
3552	/*
3553	 * Reset the reader page to size zero.
3554	 */
3555	local_set(&cpu_buffer->reader_page->write, 0);
3556	local_set(&cpu_buffer->reader_page->entries, 0);
3557	local_set(&cpu_buffer->reader_page->page->commit, 0);
3558	cpu_buffer->reader_page->real_end = 0;
3559
3560 spin:
3561	/*
3562	 * Splice the empty reader page into the list around the head.
3563	 */
3564	reader = rb_set_head_page(cpu_buffer);
3565	if (!reader)
3566		goto out;
3567	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
3568	cpu_buffer->reader_page->list.prev = reader->list.prev;
3569
3570	/*
3571	 * cpu_buffer->pages just needs to point to the buffer, it
3572	 *  has no specific buffer page to point to. Let's move it out
3573	 *  of our way so we don't accidentally swap it.
3574	 */
3575	cpu_buffer->pages = reader->list.prev;
3576
3577	/* The reader page will be pointing to the new head */
3578	rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
3579
3580	/*
3581	 * We want to make sure we read the overruns after we set up our
3582	 * pointers to the next object. The writer side does a
3583	 * cmpxchg to cross pages which acts as the mb on the writer
3584	 * side. Note, the reader will constantly fail the swap
3585	 * while the writer is updating the pointers, so this
3586	 * guarantees that the overwrite recorded here is the one we
3587	 * want to compare with the last_overrun.
3588	 */
3589	smp_mb();
3590	overwrite = local_read(&(cpu_buffer->overrun));
3591
3592	/*
3593	 * Here's the tricky part.
3594	 *
3595	 * We need to move the pointer past the header page.
3596	 * But we can only do that if a writer is not currently
3597	 * moving it. The page before the header page has the
3598	 * flag bit '1' set if it is pointing to the page we want,
3599	 * but if the writer is in the process of moving it
3600	 * then it will be '2' or already moved '0'.
3601	 */
3602
3603	ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
3604
3605	/*
3606	 * If we did not convert it, then we must try again.
3607	 */
3608	if (!ret)
3609		goto spin;
3610
3611	/*
3612	 * Yeah! We succeeded in replacing the page.
3613	 *
3614	 * Now make the new head point back to the reader page.
3615	 */
3616	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
3617	rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
3618
3619	/* Finally update the reader page to the new head */
3620	cpu_buffer->reader_page = reader;
3621	cpu_buffer->reader_page->read = 0;
3622
3623	if (overwrite != cpu_buffer->last_overrun) {
3624		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
3625		cpu_buffer->last_overrun = overwrite;
3626	}
3627
3628	goto again;
3629
3630 out:
3631	/* Update the read_stamp on the first event */
3632	if (reader && reader->read == 0)
3633		cpu_buffer->read_stamp = reader->page->time_stamp;
3634
3635	arch_spin_unlock(&cpu_buffer->lock);
3636	local_irq_restore(flags);
3637
3638	return reader;
3639}
3640
3641static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
3642{
3643	struct ring_buffer_event *event;
3644	struct buffer_page *reader;
3645	unsigned length;
3646
3647	reader = rb_get_reader_page(cpu_buffer);
3648
3649	/* This function should not be called when buffer is empty */
3650	if (RB_WARN_ON(cpu_buffer, !reader))
3651		return;
3652
3653	event = rb_reader_event(cpu_buffer);
3654
3655	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
3656		cpu_buffer->read++;
3657
3658	rb_update_read_stamp(cpu_buffer, event);
3659
3660	length = rb_event_length(event);
3661	cpu_buffer->reader_page->read += length;
3662}
3663
3664static void rb_advance_iter(struct ring_buffer_iter *iter)
3665{
3666	struct ring_buffer_per_cpu *cpu_buffer;
3667	struct ring_buffer_event *event;
3668	unsigned length;
3669
3670	cpu_buffer = iter->cpu_buffer;
3671
3672	/*
3673	 * Check if we are at the end of the buffer.
3674	 */
3675	if (iter->head >= rb_page_size(iter->head_page)) {
3676		/* discarded commits can make the page empty */
3677		if (iter->head_page == cpu_buffer->commit_page)
3678			return;
3679		rb_inc_iter(iter);
3680		return;
3681	}
3682
3683	event = rb_iter_head_event(iter);
3684
3685	length = rb_event_length(event);
3686
3687	/*
3688	 * This should not be called to advance the header if we are
3689	 * at the tail of the buffer.
3690	 */
3691	if (RB_WARN_ON(cpu_buffer,
3692		       (iter->head_page == cpu_buffer->commit_page) &&
3693		       (iter->head + length > rb_commit_index(cpu_buffer))))
3694		return;
3695
3696	rb_update_iter_read_stamp(iter, event);
3697
3698	iter->head += length;
3699
3700	/* check for end of page padding */
3701	if ((iter->head >= rb_page_size(iter->head_page)) &&
3702	    (iter->head_page != cpu_buffer->commit_page))
3703		rb_inc_iter(iter);
3704}
3705
3706static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
3707{
3708	return cpu_buffer->lost_events;
3709}
3710
3711static struct ring_buffer_event *
3712rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
3713	       unsigned long *lost_events)
3714{
3715	struct ring_buffer_event *event;
3716	struct buffer_page *reader;
3717	int nr_loops = 0;
3718
3719 again:
3720	/*
3721	 * We repeat when a time extend is encountered.
3722	 * Since the time extend is always attached to a data event,
3723	 * we should never loop more than once.
3724	 * (We never hit the following condition more than twice).
3725	 */
3726	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3727		return NULL;
3728
3729	reader = rb_get_reader_page(cpu_buffer);
3730	if (!reader)
3731		return NULL;
3732
3733	event = rb_reader_event(cpu_buffer);
3734
3735	switch (event->type_len) {
3736	case RINGBUF_TYPE_PADDING:
3737		if (rb_null_event(event))
3738			RB_WARN_ON(cpu_buffer, 1);
3739		/*
3740		 * Because the writer could be discarding every
3741		 * event it creates (which would probably be bad)
3742		 * if we were to go back to "again" then we may never
3743		 * catch up, and will trigger the warn on, or lock
3744		 * the box. Return the padding, and we will release
3745		 * the current locks, and try again.
3746		 */
3747		return event;
3748
3749	case RINGBUF_TYPE_TIME_EXTEND:
3750		/* Internal data, OK to advance */
3751		rb_advance_reader(cpu_buffer);
3752		goto again;
3753
3754	case RINGBUF_TYPE_TIME_STAMP:
3755		/* FIXME: not implemented */
3756		rb_advance_reader(cpu_buffer);
3757		goto again;
3758
3759	case RINGBUF_TYPE_DATA:
3760		if (ts) {
3761			*ts = cpu_buffer->read_stamp + event->time_delta;
3762			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3763							 cpu_buffer->cpu, ts);
3764		}
3765		if (lost_events)
3766			*lost_events = rb_lost_events(cpu_buffer);
3767		return event;
3768
3769	default:
3770		BUG();
3771	}
3772
3773	return NULL;
3774}
3775EXPORT_SYMBOL_GPL(ring_buffer_peek);
3776
3777static struct ring_buffer_event *
3778rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3779{
3780	struct ring_buffer *buffer;
3781	struct ring_buffer_per_cpu *cpu_buffer;
3782	struct ring_buffer_event *event;
3783	int nr_loops = 0;
3784
3785	cpu_buffer = iter->cpu_buffer;
3786	buffer = cpu_buffer->buffer;
3787
3788	/*
3789	 * Check if someone performed a consuming read to
3790	 * the buffer. A consuming read invalidates the iterator
3791	 * and we need to reset the iterator in this case.
3792	 */
3793	if (unlikely(iter->cache_read != cpu_buffer->read ||
3794		     iter->cache_reader_page != cpu_buffer->reader_page))
3795		rb_iter_reset(iter);
3796
3797 again:
3798	if (ring_buffer_iter_empty(iter))
3799		return NULL;
3800
3801	/*
3802	 * We repeat when a time extend is encountered or we hit
3803	 * the end of the page. Since the time extend is always attached
3804	 * to a data event, we should never loop more than three times.
3805	 * Once for going to next page, once on time extend, and
3806	 * finally once to get the event.
3807	 * (We never hit the following condition more than thrice).
3808	 */
3809	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3))
3810		return NULL;
3811
3812	if (rb_per_cpu_empty(cpu_buffer))
3813		return NULL;
3814
3815	if (iter->head >= rb_page_size(iter->head_page)) {
3816		rb_inc_iter(iter);
3817		goto again;
3818	}
3819
3820	event = rb_iter_head_event(iter);
3821
3822	switch (event->type_len) {
3823	case RINGBUF_TYPE_PADDING:
3824		if (rb_null_event(event)) {
3825			rb_inc_iter(iter);
3826			goto again;
3827		}
3828		rb_advance_iter(iter);
3829		return event;
3830
3831	case RINGBUF_TYPE_TIME_EXTEND:
3832		/* Internal data, OK to advance */
3833		rb_advance_iter(iter);
3834		goto again;
3835
3836	case RINGBUF_TYPE_TIME_STAMP:
3837		/* FIXME: not implemented */
3838		rb_advance_iter(iter);
3839		goto again;
3840
3841	case RINGBUF_TYPE_DATA:
3842		if (ts) {
3843			*ts = iter->read_stamp + event->time_delta;
3844			ring_buffer_normalize_time_stamp(buffer,
3845							 cpu_buffer->cpu, ts);
3846		}
3847		return event;
3848
3849	default:
3850		BUG();
3851	}
3852
3853	return NULL;
3854}
3855EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
3856
3857static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
3858{
3859	if (likely(!in_nmi())) {
3860		raw_spin_lock(&cpu_buffer->reader_lock);
3861		return true;
3862	}
3863
3864	/*
3865	 * If an NMI die dumps out the content of the ring buffer
3866	 * trylock must be used to prevent a deadlock if the NMI
3867	 * preempted a task that holds the ring buffer locks. If
3868	 * we get the lock then all is fine, if not, then continue
3869	 * to do the read, but this can corrupt the ring buffer,
3870	 * so it must be permanently disabled from future writes.
3871	 * Reading from NMI is a oneshot deal.
3872	 */
3873	if (raw_spin_trylock(&cpu_buffer->reader_lock))
3874		return true;
3875
3876	/* Continue without locking, but disable the ring buffer */
3877	atomic_inc(&cpu_buffer->record_disabled);
3878	return false;
3879}
3880
3881static inline void
3882rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
3883{
3884	if (likely(locked))
3885		raw_spin_unlock(&cpu_buffer->reader_lock);
3886	return;
3887}
3888
3889/**
3890 * ring_buffer_peek - peek at the next event to be read
3891 * @buffer: The ring buffer to read
3892 * @cpu: The cpu to peek at
3893 * @ts: The timestamp counter of this event.
3894 * @lost_events: a variable to store if events were lost (may be NULL)
3895 *
3896 * This will return the event that will be read next, but does
3897 * not consume the data.
3898 */
3899struct ring_buffer_event *
3900ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
3901		 unsigned long *lost_events)
3902{
3903	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3904	struct ring_buffer_event *event;
3905	unsigned long flags;
3906	bool dolock;
3907
3908	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3909		return NULL;
3910
3911 again:
3912	local_irq_save(flags);
3913	dolock = rb_reader_lock(cpu_buffer);
3914	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3915	if (event && event->type_len == RINGBUF_TYPE_PADDING)
3916		rb_advance_reader(cpu_buffer);
3917	rb_reader_unlock(cpu_buffer, dolock);
3918	local_irq_restore(flags);
3919
3920	if (event && event->type_len == RINGBUF_TYPE_PADDING)
3921		goto again;
3922
3923	return event;
3924}
3925
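/*
 * Editor's sketch (not part of the kernel source): a minimal, hedged
 * example of using ring_buffer_peek() as documented above to look at the
 * next event's timestamp without consuming it.  The "deadline" policy and
 * the function name are assumptions invented for the illustration; only
 * the ring_buffer_peek() call itself comes from this file.
 */
#if 0	/* illustrative only, never compiled */
static bool next_event_due(struct ring_buffer *buffer, int cpu, u64 deadline)
{
	struct ring_buffer_event *event;
	u64 ts = 0;

	/* Peeking leaves the event in place for a later consuming read */
	event = ring_buffer_peek(buffer, cpu, &ts, NULL);

	return event && ts <= deadline;
}
#endif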
3926/**
3927 * ring_buffer_iter_peek - peek at the next event to be read
3928 * @iter: The ring buffer iterator
3929 * @ts: The timestamp counter of this event.
3930 *
3931 * This will return the event that will be read next, but does
3932 * not increment the iterator.
3933 */
3934struct ring_buffer_event *
3935ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3936{
3937	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3938	struct ring_buffer_event *event;
3939	unsigned long flags;
3940
3941 again:
3942	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3943	event = rb_iter_peek(iter, ts);
3944	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3945
3946	if (event && event->type_len == RINGBUF_TYPE_PADDING)
3947		goto again;
3948
3949	return event;
3950}
3951
3952/**
3953 * ring_buffer_consume - return an event and consume it
3954 * @buffer: The ring buffer to get the next event from
3955 * @cpu: the cpu to read the buffer from
3956 * @ts: a variable to store the timestamp (may be NULL)
3957 * @lost_events: a variable to store if events were lost (may be NULL)
3958 *
3959 * Returns the next event in the ring buffer, and that event is consumed.
3960 * Meaning that sequential reads will keep returning a different event,
3961 * and eventually empty the ring buffer if the producer is slower.
3962 */
3963struct ring_buffer_event *
3964ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
3965		    unsigned long *lost_events)
3966{
3967	struct ring_buffer_per_cpu *cpu_buffer;
3968	struct ring_buffer_event *event = NULL;
3969	unsigned long flags;
3970	bool dolock;
3971
3972 again:
3973	/* might be called in atomic */
3974	preempt_disable();
3975
3976	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3977		goto out;
3978
3979	cpu_buffer = buffer->buffers[cpu];
3980	local_irq_save(flags);
3981	dolock = rb_reader_lock(cpu_buffer);
3982
3983	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3984	if (event) {
3985		cpu_buffer->lost_events = 0;
3986		rb_advance_reader(cpu_buffer);
3987	}
3988
3989	rb_reader_unlock(cpu_buffer, dolock);
3990	local_irq_restore(flags);
3991
3992 out:
3993	preempt_enable();
3994
3995	if (event && event->type_len == RINGBUF_TYPE_PADDING)
3996		goto again;
3997
3998	return event;
3999}
4000EXPORT_SYMBOL_GPL(ring_buffer_consume);
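/*
 * Editor's sketch (not part of the kernel source): a minimal consuming
 * read loop built on ring_buffer_consume() above.  The handle_item()
 * callback is a made-up placeholder; the ring_buffer_* calls and their
 * argument order are taken from this file.
 */
#if 0	/* illustrative only, never compiled */
static void drain_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	unsigned long lost;
	u64 ts;

	/* Each successful call consumes exactly one event */
	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
		if (lost)
			pr_info("cpu%d: %lu events were overwritten\n",
				cpu, lost);
		handle_item(ring_buffer_event_data(event),
			    ring_buffer_event_length(event), ts);
	}
}
#endif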
4001
4002/**
4003 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
4004 * @buffer: The ring buffer to read from
4005 * @cpu: The cpu buffer to iterate over
4006 *
4007 * This performs the initial preparations necessary to iterate
4008 * through the buffer.  Memory is allocated, buffer recording
4009 * is disabled, and the iterator pointer is returned to the caller.
4010 *
4011 * Disabling buffer recording prevents the reading from being
4012 * corrupted. This is not a consuming read, so a producer is not
4013 * expected.
4014 *
4015 * After a sequence of ring_buffer_read_prepare calls, the user is
4016 * expected to make at least one call to ring_buffer_read_prepare_sync.
4017 * Afterwards, ring_buffer_read_start is invoked to get things going
4018 * for real.
4019 *
4020 * This overall must be paired with ring_buffer_read_finish.
4021 */
4022struct ring_buffer_iter *
4023ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
4024{
4025	struct ring_buffer_per_cpu *cpu_buffer;
4026	struct ring_buffer_iter *iter;
4027
4028	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4029		return NULL;
4030
4031	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
4032	if (!iter)
4033		return NULL;
4034
4035	cpu_buffer = buffer->buffers[cpu];
4036
4037	iter->cpu_buffer = cpu_buffer;
4038
4039	atomic_inc(&buffer->resize_disabled);
4040	atomic_inc(&cpu_buffer->record_disabled);
4041
4042	return iter;
4043}
4044EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
4045
4046/**
4047 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
4048 *
4049 * All previously invoked ring_buffer_read_prepare calls to prepare
4050 * iterators will be synchronized.  Afterwards, ring_buffer_read_start
4051 * calls on those iterators are allowed.
4052 */
4053void
4054ring_buffer_read_prepare_sync(void)
4055{
4056	synchronize_sched();
4057}
4058EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
4059
4060/**
4061 * ring_buffer_read_start - start a non consuming read of the buffer
4062 * @iter: The iterator returned by ring_buffer_read_prepare
4063 *
4064 * This finalizes the startup of an iteration through the buffer.
4065 * The iterator comes from a call to ring_buffer_read_prepare and
4066 * an intervening ring_buffer_read_prepare_sync must have been
4067 * performed.
4068 *
4069 * Must be paired with ring_buffer_read_finish.
4070 */
4071void
4072ring_buffer_read_start(struct ring_buffer_iter *iter)
4073{
4074	struct ring_buffer_per_cpu *cpu_buffer;
4075	unsigned long flags;
4076
4077	if (!iter)
4078		return;
4079
4080	cpu_buffer = iter->cpu_buffer;
4081
4082	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4083	arch_spin_lock(&cpu_buffer->lock);
4084	rb_iter_reset(iter);
4085	arch_spin_unlock(&cpu_buffer->lock);
4086	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4087}
4088EXPORT_SYMBOL_GPL(ring_buffer_read_start);
4089
4090/**
4091 * ring_buffer_read_finish - finish reading the iterator of the buffer
4092 * @iter: The iterator retrieved by ring_buffer_read_start
4093 *
4094 * This re-enables the recording to the buffer, and frees the
4095 * iterator.
4096 */
4097void
4098ring_buffer_read_finish(struct ring_buffer_iter *iter)
4099{
4100	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4101	unsigned long flags;
4102
4103	/*
4104	 * Ring buffer is disabled from recording, here's a good place
4105	 * to check the integrity of the ring buffer.
4106	 * Must prevent readers from trying to read, as the check
4107	 * clears the HEAD page and readers require it.
4108	 */
4109	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4110	rb_check_pages(cpu_buffer);
4111	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4112
4113	atomic_dec(&cpu_buffer->record_disabled);
4114	atomic_dec(&cpu_buffer->buffer->resize_disabled);
4115	kfree(iter);
4116}
4117EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
4118
4119/**
4120 * ring_buffer_read - read the next item in the ring buffer by the iterator
4121 * @iter: The ring buffer iterator
4122 * @ts: The time stamp of the event read.
4123 *
4124 * This reads the next event in the ring buffer and increments the iterator.
4125 */
4126struct ring_buffer_event *
4127ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
4128{
4129	struct ring_buffer_event *event;
4130	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4131	unsigned long flags;
4132
4133	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4134 again:
4135	event = rb_iter_peek(iter, ts);
4136	if (!event)
4137		goto out;
4138
4139	if (event->type_len == RINGBUF_TYPE_PADDING)
4140		goto again;
4141
4142	rb_advance_iter(iter);
4143 out:
4144	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4145
4146	return event;
4147}
4148EXPORT_SYMBOL_GPL(ring_buffer_read);
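/*
 * Editor's sketch (not part of the kernel source): the full non-consuming
 * read sequence described in the ring_buffer_read_prepare() comment above,
 * i.e. prepare, prepare_sync, read_start, a ring_buffer_read() loop and
 * read_finish.  The dump_event() helper and the single-CPU scope are
 * assumptions for the example.
 */
#if 0	/* illustrative only, never compiled */
static void dump_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_prepare(buffer, cpu);
	if (!iter)
		return;
	ring_buffer_read_prepare_sync();
	ring_buffer_read_start(iter);

	/* Walk the buffer without consuming it; recording stays disabled */
	while ((event = ring_buffer_read(iter, &ts)))
		dump_event(ring_buffer_event_data(event), ts);

	ring_buffer_read_finish(iter);
}
#endif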
4149
4150/**
4151 * ring_buffer_size - return the size of the ring buffer (in bytes)
4152 * @buffer: The ring buffer.
     * @cpu: The CPU to get ring buffer size from.
4153 */
4154unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
4155{
4156	/*
4157	 * Earlier, this method returned
4158	 *	BUF_PAGE_SIZE * buffer->nr_pages
4159	 * Since the nr_pages field is now removed, we have converted this to
4160	 * return the per cpu buffer value.
4161	 */
4162	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4163		return 0;
4164
4165	return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
4166}
4167EXPORT_SYMBOL_GPL(ring_buffer_size);
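/*
 * Editor's sketch (not part of the kernel source): since
 * ring_buffer_size() now reports a single CPU buffer (see the comment in
 * the function above), a caller that wants the old whole-buffer figure
 * has to sum the per-CPU sizes itself, roughly as below.  Iterating over
 * online CPUs rather than the buffer's own cpumask is an assumption made
 * for brevity.
 */
#if 0	/* illustrative only, never compiled */
static unsigned long total_ring_buffer_size(struct ring_buffer *buffer)
{
	unsigned long size = 0;
	int cpu;

	/* CPUs outside the buffer's cpumask contribute zero */
	for_each_online_cpu(cpu)
		size += ring_buffer_size(buffer, cpu);

	return size;
}
#endif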
4168
4169static void
4170rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
4171{
4172	rb_head_page_deactivate(cpu_buffer);
4173
4174	cpu_buffer->head_page
4175		= list_entry(cpu_buffer->pages, struct buffer_page, list);
4176	local_set(&cpu_buffer->head_page->write, 0);
4177	local_set(&cpu_buffer->head_page->entries, 0);
4178	local_set(&cpu_buffer->head_page->page->commit, 0);
4179
4180	cpu_buffer->head_page->read = 0;
4181
4182	cpu_buffer->tail_page = cpu_buffer->head_page;
4183	cpu_buffer->commit_page = cpu_buffer->head_page;
4184
4185	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
4186	INIT_LIST_HEAD(&cpu_buffer->new_pages);
4187	local_set(&cpu_buffer->reader_page->write, 0);
4188	local_set(&cpu_buffer->reader_page->entries, 0);
4189	local_set(&cpu_buffer->reader_page->page->commit, 0);
4190	cpu_buffer->reader_page->read = 0;
4191
4192	local_set(&cpu_buffer->entries_bytes, 0);
4193	local_set(&cpu_buffer->overrun, 0);
4194	local_set(&cpu_buffer->commit_overrun, 0);
4195	local_set(&cpu_buffer->dropped_events, 0);
4196	local_set(&cpu_buffer->entries, 0);
4197	local_set(&cpu_buffer->committing, 0);
4198	local_set(&cpu_buffer->commits, 0);
4199	cpu_buffer->read = 0;
4200	cpu_buffer->read_bytes = 0;
4201
4202	cpu_buffer->write_stamp = 0;
4203	cpu_buffer->read_stamp = 0;
4204
4205	cpu_buffer->lost_events = 0;
4206	cpu_buffer->last_overrun = 0;
4207
4208	rb_head_page_activate(cpu_buffer);
4209}
4210
4211/**
4212 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
4213 * @buffer: The ring buffer to reset a per cpu buffer of
4214 * @cpu: The CPU buffer to be reset
4215 */
4216void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
4217{
4218	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4219	unsigned long flags;
4220
4221	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4222		return;
4223
4224	atomic_inc(&buffer->resize_disabled);
4225	atomic_inc(&cpu_buffer->record_disabled);
4226
4227	/* Make sure all commits have finished */
4228	synchronize_sched();
4229
4230	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4231
4232	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
4233		goto out;
4234
4235	arch_spin_lock(&cpu_buffer->lock);
4236
4237	rb_reset_cpu(cpu_buffer);
4238
4239	arch_spin_unlock(&cpu_buffer->lock);
4240
4241 out:
4242	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4243
4244	atomic_dec(&cpu_buffer->record_disabled);
4245	atomic_dec(&buffer->resize_disabled);
4246}
4247EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
4248
4249/**
4250 * ring_buffer_reset - reset a ring buffer
4251 * @buffer: The ring buffer to reset all cpu buffers
4252 */
4253void ring_buffer_reset(struct ring_buffer *buffer)
4254{
4255	int cpu;
4256
4257	for_each_buffer_cpu(buffer, cpu)
4258		ring_buffer_reset_cpu(buffer, cpu);
4259}
4260EXPORT_SYMBOL_GPL(ring_buffer_reset);
4261
4262/**
4263 * ring_buffer_empty - is the ring buffer empty?
4264 * @buffer: The ring buffer to test
4265 */
4266bool ring_buffer_empty(struct ring_buffer *buffer)
4267{
4268	struct ring_buffer_per_cpu *cpu_buffer;
4269	unsigned long flags;
4270	bool dolock;
4271	int cpu;
4272	int ret;
4273
4274	/* yes this is racy, but if you don't like the race, lock the buffer */
4275	for_each_buffer_cpu(buffer, cpu) {
4276		cpu_buffer = buffer->buffers[cpu];
4277		local_irq_save(flags);
4278		dolock = rb_reader_lock(cpu_buffer);
4279		ret = rb_per_cpu_empty(cpu_buffer);
4280		rb_reader_unlock(cpu_buffer, dolock);
4281		local_irq_restore(flags);
4282
4283		if (!ret)
4284			return false;
4285	}
4286
4287	return true;
4288}
4289EXPORT_SYMBOL_GPL(ring_buffer_empty);
4290
4291/**
4292 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
4293 * @buffer: The ring buffer
4294 * @cpu: The CPU buffer to test
4295 */
4296bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
4297{
4298	struct ring_buffer_per_cpu *cpu_buffer;
4299	unsigned long flags;
4300	bool dolock;
4301	int ret;
4302
4303	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4304		return true;
4305
4306	cpu_buffer = buffer->buffers[cpu];
4307	local_irq_save(flags);
4308	dolock = rb_reader_lock(cpu_buffer);
4309	ret = rb_per_cpu_empty(cpu_buffer);
4310	rb_reader_unlock(cpu_buffer, dolock);
4311	local_irq_restore(flags);
4312
4313	return ret;
4314}
4315EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
4316
4317#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4318/**
4319 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
4320 * @buffer_a: One buffer to swap with
4321 * @buffer_b: The other buffer to swap with
     * @cpu: the CPU of the buffers to swap
4322 *
4323 * This function is useful for tracers that want to take a "snapshot"
4324 * of a CPU buffer and have another back up buffer lying around.
4325 * It is expected that the tracer handles the cpu buffer not being
4326 * used at the moment.
4327 */
4328int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
4329			 struct ring_buffer *buffer_b, int cpu)
4330{
4331	struct ring_buffer_per_cpu *cpu_buffer_a;
4332	struct ring_buffer_per_cpu *cpu_buffer_b;
4333	int ret = -EINVAL;
4334
4335	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
4336	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
4337		goto out;
4338
4339	cpu_buffer_a = buffer_a->buffers[cpu];
4340	cpu_buffer_b = buffer_b->buffers[cpu];
4341
4342	/* At least make sure the two buffers are somewhat the same */
4343	if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
4344		goto out;
4345
4346	ret = -EAGAIN;
4347
4348	if (atomic_read(&buffer_a->record_disabled))
4349		goto out;
4350
4351	if (atomic_read(&buffer_b->record_disabled))
4352		goto out;
4353
4354	if (atomic_read(&cpu_buffer_a->record_disabled))
4355		goto out;
4356
4357	if (atomic_read(&cpu_buffer_b->record_disabled))
4358		goto out;
4359
4360	/*
4361	 * We can't do a synchronize_sched here because this
4362	 * function can be called in atomic context.
4363	 * Normally this will be called from the same CPU as cpu.
4364	 * If not it's up to the caller to protect this.
4365	 */
4366	atomic_inc(&cpu_buffer_a->record_disabled);
4367	atomic_inc(&cpu_buffer_b->record_disabled);
4368
4369	ret = -EBUSY;
4370	if (local_read(&cpu_buffer_a->committing))
4371		goto out_dec;
4372	if (local_read(&cpu_buffer_b->committing))
4373		goto out_dec;
4374
4375	buffer_a->buffers[cpu] = cpu_buffer_b;
4376	buffer_b->buffers[cpu] = cpu_buffer_a;
4377
4378	cpu_buffer_b->buffer = buffer_a;
4379	cpu_buffer_a->buffer = buffer_b;
4380
4381	ret = 0;
4382
4383out_dec:
4384	atomic_dec(&cpu_buffer_a->record_disabled);
4385	atomic_dec(&cpu_buffer_b->record_disabled);
4386out:
4387	return ret;
4388}
4389EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
4390#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
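/*
 * Editor's sketch (not part of the kernel source): the "snapshot" use of
 * ring_buffer_swap_cpu() described above, exchanging the current CPU's
 * live buffer with an equally sized spare so it can be read out later.
 * The two buffer pointers are assumptions; the caller is expected to run
 * with preemption disabled so the CPU cannot change under it.
 */
#if 0	/* illustrative only, never compiled */
static int snapshot_this_cpu(struct ring_buffer *live,
			     struct ring_buffer *spare)
{
	/* Both buffers must have the same nr_pages or -EINVAL is returned */
	return ring_buffer_swap_cpu(live, spare, smp_processor_id());
}
#endif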
4391
4392/**
4393 * ring_buffer_alloc_read_page - allocate a page to read from buffer
4394 * @buffer: the buffer to allocate for.
4395 * @cpu: the cpu buffer to allocate.
4396 *
4397 * This function is used in conjunction with ring_buffer_read_page.
4398 * When reading a full page from the ring buffer, these functions
4399 * can be used to speed up the process. The calling function should
4400 * allocate a few pages first with this function. Then when it
4401 * needs to get pages from the ring buffer, it passes the result
4402 * of this function into ring_buffer_read_page, which will swap
4403 * the page that was allocated, with the read page of the buffer.
4404 *
4405 * Returns:
4406 *  The page allocated, or NULL on error.
4407 */
4408void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
4409{
4410	struct buffer_data_page *bpage;
4411	struct page *page;
4412
4413	page = alloc_pages_node(cpu_to_node(cpu),
4414				GFP_KERNEL | __GFP_NORETRY, 0);
4415	if (!page)
4416		return NULL;
4417
4418	bpage = page_address(page);
4419
4420	rb_init_page(bpage);
4421
4422	return bpage;
4423}
4424EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
4425
4426/**
4427 * ring_buffer_free_read_page - free an allocated read page
4428 * @buffer: the buffer the page was allocate for
4429 * @data: the page to free
4430 *
4431 * Free a page allocated from ring_buffer_alloc_read_page.
4432 */
4433void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
4434{
4435	free_page((unsigned long)data);
4436}
4437EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
4438
4439/**
4440 * ring_buffer_read_page - extract a page from the ring buffer
4441 * @buffer: buffer to extract from
4442 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
4443 * @len: amount to extract
4444 * @cpu: the cpu of the buffer to extract
4445 * @full: should the extraction only happen when the page is full.
4446 *
4447 * This function will pull out a page from the ring buffer and consume it.
4448 * @data_page must be the address of the variable that was returned
4449 * from ring_buffer_alloc_read_page. This is because the page might be used
4450 * to swap with a page in the ring buffer.
4451 *
4452 * for example:
4453 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
4454 *	if (!rpage)
4455 *		return error;
4456 *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
4457 *	if (ret >= 0)
4458 *		process_page(rpage, ret);
4459 *
4460 * When @full is set, the function will not return true unless
4461 * the writer is off the reader page.
4462 *
4463 * Note: it is up to the calling functions to handle sleeps and wakeups.
4464 *  The ring buffer can be used anywhere in the kernel and can not
4465 *  blindly call wake_up. The layer that uses the ring buffer must be
4466 *  responsible for that.
4467 *
4468 * Returns:
4469 *  >=0 if data has been transferred, returns the offset of consumed data.
4470 *  <0 if no data has been transferred.
4471 */
4472int ring_buffer_read_page(struct ring_buffer *buffer,
4473			  void **data_page, size_t len, int cpu, int full)
4474{
4475	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4476	struct ring_buffer_event *event;
4477	struct buffer_data_page *bpage;
4478	struct buffer_page *reader;
4479	unsigned long missed_events;
4480	unsigned long flags;
4481	unsigned int commit;
4482	unsigned int read;
4483	u64 save_timestamp;
4484	int ret = -1;
4485
4486	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4487		goto out;
4488
4489	/*
4490	 * If len is not big enough to hold the page header, then
4491	 * we can not copy anything.
4492	 */
4493	if (len <= BUF_PAGE_HDR_SIZE)
4494		goto out;
4495
4496	len -= BUF_PAGE_HDR_SIZE;
4497
4498	if (!data_page)
4499		goto out;
4500
4501	bpage = *data_page;
4502	if (!bpage)
4503		goto out;
4504
4505	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4506
4507	reader = rb_get_reader_page(cpu_buffer);
4508	if (!reader)
4509		goto out_unlock;
4510
4511	event = rb_reader_event(cpu_buffer);
4512
4513	read = reader->read;
4514	commit = rb_page_commit(reader);
4515
4516	/* Check if any events were dropped */
4517	missed_events = cpu_buffer->lost_events;
4518
4519	/*
4520	 * If this page has been partially read or
4521	 * if len is not big enough to read the rest of the page or
4522	 * a writer is still on the page, then
4523	 * we must copy the data from the page to the buffer.
4524	 * Otherwise, we can simply swap the page with the one passed in.
4525	 */
4526	if (read || (len < (commit - read)) ||
4527	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
4528		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
4529		unsigned int rpos = read;
4530		unsigned int pos = 0;
4531		unsigned int size;
4532
4533		if (full)
4534			goto out_unlock;
4535
4536		if (len > (commit - read))
4537			len = (commit - read);
4538
4539		/* Always keep the time extend and data together */
4540		size = rb_event_ts_length(event);
4541
4542		if (len < size)
4543			goto out_unlock;
4544
4545		/* save the current timestamp, since the user will need it */
4546		save_timestamp = cpu_buffer->read_stamp;
4547
4548		/* Need to copy one event at a time */
4549		do {
4550			/* We need the size of one event, because
4551			 * rb_advance_reader only advances by one event,
4552			 * whereas rb_event_ts_length may include the size of
4553			 * one or two events.
4554			 * We have already ensured there's enough space if this
4555			 * is a time extend. */
4556			size = rb_event_length(event);
4557			memcpy(bpage->data + pos, rpage->data + rpos, size);
4558
4559			len -= size;
4560
4561			rb_advance_reader(cpu_buffer);
4562			rpos = reader->read;
4563			pos += size;
4564
4565			if (rpos >= commit)
4566				break;
4567
4568			event = rb_reader_event(cpu_buffer);
4569			/* Always keep the time extend and data together */
4570			size = rb_event_ts_length(event);
4571		} while (len >= size);
4572
4573		/* update bpage */
4574		local_set(&bpage->commit, pos);
4575		bpage->time_stamp = save_timestamp;
4576
4577		/* we copied everything to the beginning */
4578		read = 0;
4579	} else {
4580		/* update the entry counter */
4581		cpu_buffer->read += rb_page_entries(reader);
4582		cpu_buffer->read_bytes += BUF_PAGE_SIZE;
4583
4584		/* swap the pages */
4585		rb_init_page(bpage);
4586		bpage = reader->page;
4587		reader->page = *data_page;
4588		local_set(&reader->write, 0);
4589		local_set(&reader->entries, 0);
4590		reader->read = 0;
4591		*data_page = bpage;
4592
4593		/*
4594		 * Use the real_end for the data size,
4595		 * This gives us a chance to store the lost events
4596		 * on the page.
4597		 */
4598		if (reader->real_end)
4599			local_set(&bpage->commit, reader->real_end);
4600	}
4601	ret = read;
4602
4603	cpu_buffer->lost_events = 0;
4604
4605	commit = local_read(&bpage->commit);
4606	/*
4607	 * Set a flag in the commit field if we lost events
4608	 */
4609	if (missed_events) {
4610		/* If there is room at the end of the page to save the
4611		 * missed events, then record it there.
4612		 */
4613		if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
4614			memcpy(&bpage->data[commit], &missed_events,
4615			       sizeof(missed_events));
4616			local_add(RB_MISSED_STORED, &bpage->commit);
4617			commit += sizeof(missed_events);
4618		}
4619		local_add(RB_MISSED_EVENTS, &bpage->commit);
4620	}
4621
4622	/*
4623	 * This page may be off to user land. Zero it out here.
4624	 */
4625	if (commit < BUF_PAGE_SIZE)
4626		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
4627
4628 out_unlock:
4629	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4630
4631 out:
4632	return ret;
4633}
4634EXPORT_SYMBOL_GPL(ring_buffer_read_page);
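/*
 * Editor's sketch (not part of the kernel source): the example from the
 * comment above expanded into a full allocate/read/free cycle for one
 * CPU, with full == 0 so a partially filled reader page may still be
 * copied out.  The consume_page() helper is a made-up placeholder.
 */
#if 0	/* illustrative only, never compiled */
static int read_one_page(struct ring_buffer *buffer, int cpu)
{
	void *page;
	int ret;

	page = ring_buffer_alloc_read_page(buffer, cpu);
	if (!page)
		return -ENOMEM;

	/* On success, ret is the page offset where the valid data starts */
	ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0);
	if (ret >= 0)
		consume_page(page, ret);

	ring_buffer_free_read_page(buffer, page);
	return ret;
}
#endif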
4635
4636#ifdef CONFIG_HOTPLUG_CPU
4637static int rb_cpu_notify(struct notifier_block *self,
4638			 unsigned long action, void *hcpu)
4639{
4640	struct ring_buffer *buffer =
4641		container_of(self, struct ring_buffer, cpu_notify);
4642	long cpu = (long)hcpu;
4643	int cpu_i, nr_pages_same;
4644	unsigned int nr_pages;
4645
4646	switch (action) {
4647	case CPU_UP_PREPARE:
4648	case CPU_UP_PREPARE_FROZEN:
4649		if (cpumask_test_cpu(cpu, buffer->cpumask))
4650			return NOTIFY_OK;
4651
4652		nr_pages = 0;
4653		nr_pages_same = 1;
4654		/* check if all cpu sizes are same */
4655		for_each_buffer_cpu(buffer, cpu_i) {
4656			/* fill in the size from first enabled cpu */
4657			if (nr_pages == 0)
4658				nr_pages = buffer->buffers[cpu_i]->nr_pages;
4659			if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
4660				nr_pages_same = 0;
4661				break;
4662			}
4663		}
4664		/* allocate minimum pages, user can later expand it */
4665		if (!nr_pages_same)
4666			nr_pages = 2;
4667		buffer->buffers[cpu] =
4668			rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
4669		if (!buffer->buffers[cpu]) {
4670			WARN(1, "failed to allocate ring buffer on CPU %ld\n",
4671			     cpu);
4672			return NOTIFY_OK;
4673		}
4674		smp_wmb();
4675		cpumask_set_cpu(cpu, buffer->cpumask);
4676		break;
4677	case CPU_DOWN_PREPARE:
4678	case CPU_DOWN_PREPARE_FROZEN:
4679		/*
4680		 * Do nothing.
4681		 *  If we were to free the buffer, then the user would
4682		 *  lose any trace that was in the buffer.
4683		 */
4684		break;
4685	default:
4686		break;
4687	}
4688	return NOTIFY_OK;
4689}
4690#endif
4691
4692#ifdef CONFIG_RING_BUFFER_STARTUP_TEST
4693/*
4694 * This is a basic integrity check of the ring buffer.
4695 * Late in the boot cycle this test will run when configured in.
4696 * It will kick off a thread per CPU that will go into a loop
4697 * writing to the per cpu ring buffer various sizes of data.
4698 * Some of the data will be large items, some small.
4699 *
4700 * Another thread is created that goes into a spin, sending out
4701 * IPIs to the other CPUs to also write into the ring buffer.
4702 * This is to test the nesting ability of the buffer.
4703 *
4704 * Basic stats are recorded and reported. If something in the
4705 * ring buffer should happen that's not expected, a big warning
4706 * is displayed and all ring buffers are disabled.
4707 */
4708static struct task_struct *rb_threads[NR_CPUS] __initdata;
4709
4710struct rb_test_data {
4711	struct ring_buffer	*buffer;
4712	unsigned long		events;
4713	unsigned long		bytes_written;
4714	unsigned long		bytes_alloc;
4715	unsigned long		bytes_dropped;
4716	unsigned long		events_nested;
4717	unsigned long		bytes_written_nested;
4718	unsigned long		bytes_alloc_nested;
4719	unsigned long		bytes_dropped_nested;
4720	int			min_size_nested;
4721	int			max_size_nested;
4722	int			max_size;
4723	int			min_size;
4724	int			cpu;
4725	int			cnt;
4726};
4727
4728static struct rb_test_data rb_data[NR_CPUS] __initdata;
4729
4730/* 1 meg per cpu */
4731#define RB_TEST_BUFFER_SIZE	1048576
4732
4733static char rb_string[] __initdata =
4734	"abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
4735	"?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
4736	"!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
4737
4738static bool rb_test_started __initdata;
4739
4740struct rb_item {
4741	int size;
4742	char str[];
4743};
4744
4745static __init int rb_write_something(struct rb_test_data *data, bool nested)
4746{
4747	struct ring_buffer_event *event;
4748	struct rb_item *item;
4749	bool started;
4750	int event_len;
4751	int size;
4752	int len;
4753	int cnt;
4754
4755	/* Have nested writes different than what is written */
4756	cnt = data->cnt + (nested ? 27 : 0);
4757
4758	/* Multiply cnt by ~e, to make some unique increment */
4759	size = (data->cnt * 68 / 25) % (sizeof(rb_string) - 1);
4760
4761	len = size + sizeof(struct rb_item);
4762
4763	started = rb_test_started;
4764	/* read rb_test_started before checking buffer enabled */
4765	smp_rmb();
4766
4767	event = ring_buffer_lock_reserve(data->buffer, len);
4768	if (!event) {
4769		/* Ignore dropped events before test starts. */
4770		if (started) {
4771			if (nested)
4772				data->bytes_dropped_nested += len;
4773			else
4774				data->bytes_dropped += len;
4775		}
4776		return len;
4777	}
4778
4779	event_len = ring_buffer_event_length(event);
4780
4781	if (RB_WARN_ON(data->buffer, event_len < len))
4782		goto out;
4783
4784	item = ring_buffer_event_data(event);
4785	item->size = size;
4786	memcpy(item->str, rb_string, size);
4787
4788	if (nested) {
4789		data->bytes_alloc_nested += event_len;
4790		data->bytes_written_nested += len;
4791		data->events_nested++;
4792		if (!data->min_size_nested || len < data->min_size_nested)
4793			data->min_size_nested = len;
4794		if (len > data->max_size_nested)
4795			data->max_size_nested = len;
4796	} else {
4797		data->bytes_alloc += event_len;
4798		data->bytes_written += len;
4799		data->events++;
4800		if (!data->min_size || len < data->min_size)
4801			data->min_size = len;
4802		if (len > data->max_size)
4803			data->max_size = len;
4804	}
4805
4806 out:
4807	ring_buffer_unlock_commit(data->buffer, event);
4808
4809	return 0;
4810}
4811
4812static __init int rb_test(void *arg)
4813{
4814	struct rb_test_data *data = arg;
4815
4816	while (!kthread_should_stop()) {
4817		rb_write_something(data, false);
4818		data->cnt++;
4819
4820		set_current_state(TASK_INTERRUPTIBLE);
4821		/* Now sleep between a min of 100-300us and a max of 1ms */
4822		usleep_range(((data->cnt % 3) + 1) * 100, 1000);
4823	}
4824
4825	return 0;
4826}
4827
4828static __init void rb_ipi(void *ignore)
4829{
4830	struct rb_test_data *data;
4831	int cpu = smp_processor_id();
4832
4833	data = &rb_data[cpu];
4834	rb_write_something(data, true);
4835}
4836
4837static __init int rb_hammer_test(void *arg)
4838{
4839	while (!kthread_should_stop()) {
4840
4841		/* Send an IPI to all cpus to write data! */
4842		smp_call_function(rb_ipi, NULL, 1);
4843		/* No sleep, but for non preempt, let others run */
4844		schedule();
4845	}
4846
4847	return 0;
4848}
4849
4850static __init int test_ringbuffer(void)
4851{
4852	struct task_struct *rb_hammer;
4853	struct ring_buffer *buffer;
4854	int cpu;
4855	int ret = 0;
4856
4857	pr_info("Running ring buffer tests...\n");
4858
4859	buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
4860	if (WARN_ON(!buffer))
4861		return 0;
4862
4863	/* Disable buffer so that threads can't write to it yet */
4864	ring_buffer_record_off(buffer);
4865
4866	for_each_online_cpu(cpu) {
4867		rb_data[cpu].buffer = buffer;
4868		rb_data[cpu].cpu = cpu;
4869		rb_data[cpu].cnt = cpu;
4870		rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
4871						 "rbtester/%d", cpu);
4872		if (WARN_ON(!rb_threads[cpu])) {
4873			pr_cont("FAILED\n");
4874			ret = -1;
4875			goto out_free;
4876		}
4877
4878		kthread_bind(rb_threads[cpu], cpu);
4879 		wake_up_process(rb_threads[cpu]);
4880	}
4881
4882	/* Now create the rb hammer! */
4883	rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
4884	if (WARN_ON(!rb_hammer)) {
4885		pr_cont("FAILED\n");
4886		ret = -1;
4887		goto out_free;
4888	}
4889
4890	ring_buffer_record_on(buffer);
4891	/*
4892	 * Show buffer is enabled before setting rb_test_started.
4893	 * Yes there's a small race window where events could be
4894 * dropped and the thread won't catch it. But when a ring
4895	 * buffer gets enabled, there will always be some kind of
4896	 * delay before other CPUs see it. Thus, we don't care about
4897	 * those dropped events. We care about events dropped after
4898	 * the threads see that the buffer is active.
4899	 */
4900	smp_wmb();
4901	rb_test_started = true;
4902
4903	set_current_state(TASK_INTERRUPTIBLE);
4904	/* Just run for 10 seconds */
4905	schedule_timeout(10 * HZ);
4906
4907	kthread_stop(rb_hammer);
4908
4909 out_free:
4910	for_each_online_cpu(cpu) {
4911		if (!rb_threads[cpu])
4912			break;
4913		kthread_stop(rb_threads[cpu]);
4914	}
4915	if (ret) {
4916		ring_buffer_free(buffer);
4917		return ret;
4918	}
4919
4920	/* Report! */
4921	pr_info("finished\n");
4922	for_each_online_cpu(cpu) {
4923		struct ring_buffer_event *event;
4924		struct rb_test_data *data = &rb_data[cpu];
4925		struct rb_item *item;
4926		unsigned long total_events;
4927		unsigned long total_dropped;
4928		unsigned long total_written;
4929		unsigned long total_alloc;
4930		unsigned long total_read = 0;
4931		unsigned long total_size = 0;
4932		unsigned long total_len = 0;
4933		unsigned long total_lost = 0;
4934		unsigned long lost;
4935		int big_event_size;
4936		int small_event_size;
4937
4938		ret = -1;
4939
4940		total_events = data->events + data->events_nested;
4941		total_written = data->bytes_written + data->bytes_written_nested;
4942		total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
4943		total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
4944
4945		big_event_size = data->max_size + data->max_size_nested;
4946		small_event_size = data->min_size + data->min_size_nested;
4947
4948		pr_info("CPU %d:\n", cpu);
4949		pr_info("              events:    %ld\n", total_events);
4950		pr_info("       dropped bytes:    %ld\n", total_dropped);
4951		pr_info("       alloced bytes:    %ld\n", total_alloc);
4952		pr_info("       written bytes:    %ld\n", total_written);
4953		pr_info("       biggest event:    %d\n", big_event_size);
4954		pr_info("      smallest event:    %d\n", small_event_size);
4955
4956		if (RB_WARN_ON(buffer, total_dropped))
4957			break;
4958
4959		ret = 0;
4960
4961		while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
4962			total_lost += lost;
4963			item = ring_buffer_event_data(event);
4964			total_len += ring_buffer_event_length(event);
4965			total_size += item->size + sizeof(struct rb_item);
4966			if (memcmp(&item->str[0], rb_string, item->size) != 0) {
4967				pr_info("FAILED!\n");
4968				pr_info("buffer had: %.*s\n", item->size, item->str);
4969				pr_info("expected:   %.*s\n", item->size, rb_string);
4970				RB_WARN_ON(buffer, 1);
4971				ret = -1;
4972				break;
4973			}
4974			total_read++;
4975		}
4976		if (ret)
4977			break;
4978
4979		ret = -1;
4980
4981		pr_info("         read events:   %ld\n", total_read);
4982		pr_info("         lost events:   %ld\n", total_lost);
4983		pr_info("        total events:   %ld\n", total_lost + total_read);
4984		pr_info("  recorded len bytes:   %ld\n", total_len);
4985		pr_info(" recorded size bytes:   %ld\n", total_size);
4986		if (total_lost)
4987			pr_info(" With dropped events, record len and size may not match\n"
4988				" alloced and written from above\n");
4989		if (!total_lost) {
4990			if (RB_WARN_ON(buffer, total_len != total_alloc ||
4991				       total_size != total_written))
4992				break;
4993		}
4994		if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
4995			break;
4996
4997		ret = 0;
4998	}
4999	if (!ret)
5000		pr_info("Ring buffer PASSED!\n");
5001
5002	ring_buffer_free(buffer);
5003	return 0;
5004}
5005
5006late_initcall(test_ringbuffer);
5007#endif /* CONFIG_RING_BUFFER_STARTUP_TEST */
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Generic ring buffer
   4 *
   5 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
   6 */
   7#include <linux/trace_recursion.h>
   8#include <linux/trace_events.h>
   9#include <linux/ring_buffer.h>
  10#include <linux/trace_clock.h>
  11#include <linux/sched/clock.h>
  12#include <linux/trace_seq.h>
  13#include <linux/spinlock.h>
  14#include <linux/irq_work.h>
  15#include <linux/security.h>
  16#include <linux/uaccess.h>
  17#include <linux/hardirq.h>
  18#include <linux/kthread.h>	/* for self test */
  19#include <linux/module.h>
  20#include <linux/percpu.h>
  21#include <linux/mutex.h>
  22#include <linux/delay.h>
  23#include <linux/slab.h>
  24#include <linux/init.h>
  25#include <linux/hash.h>
  26#include <linux/list.h>
  27#include <linux/cpu.h>
  28#include <linux/oom.h>
  29
  30#include <asm/local64.h>
  31#include <asm/local.h>
  32
  33/*
  34 * The "absolute" timestamp in the buffer is only 59 bits.
  35 * If a clock has the 5 MSBs set, it needs to be saved and
  36 * reinserted.
  37 */
  38#define TS_MSB		(0xf8ULL << 56)
  39#define ABS_TS_MASK	(~TS_MSB)
  40
  41static void update_pages_handler(struct work_struct *work);
  42
  43/*
  44 * The ring buffer header is special. We must manually up keep it.
  45 */
  46int ring_buffer_print_entry_header(struct trace_seq *s)
  47{
  48	trace_seq_puts(s, "# compressed entry header\n");
  49	trace_seq_puts(s, "\ttype_len    :    5 bits\n");
  50	trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
  51	trace_seq_puts(s, "\tarray       :   32 bits\n");
  52	trace_seq_putc(s, '\n');
  53	trace_seq_printf(s, "\tpadding     : type == %d\n",
  54			 RINGBUF_TYPE_PADDING);
  55	trace_seq_printf(s, "\ttime_extend : type == %d\n",
  56			 RINGBUF_TYPE_TIME_EXTEND);
  57	trace_seq_printf(s, "\ttime_stamp : type == %d\n",
  58			 RINGBUF_TYPE_TIME_STAMP);
  59	trace_seq_printf(s, "\tdata max type_len  == %d\n",
  60			 RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
  61
  62	return !trace_seq_has_overflowed(s);
  63}
  64
  65/*
  66 * The ring buffer is made up of a list of pages. A separate list of pages is
  67 * allocated for each CPU. A writer may only write to a buffer that is
  68 * associated with the CPU it is currently executing on.  A reader may read
  69 * from any per cpu buffer.
  70 *
  71 * The reader is special. For each per cpu buffer, the reader has its own
  72 * reader page. When a reader has read the entire reader page, this reader
  73 * page is swapped with another page in the ring buffer.
  74 *
  75 * Now, as long as the writer is off the reader page, the reader can do what
  76 * ever it wants with that page. The writer will never write to that page
  77 * again (as long as it is out of the ring buffer).
  78 *
  79 * Here's some silly ASCII art.
  80 *
  81 *   +------+
  82 *   |reader|          RING BUFFER
  83 *   |page  |
  84 *   +------+        +---+   +---+   +---+
  85 *                   |   |-->|   |-->|   |
  86 *                   +---+   +---+   +---+
  87 *                     ^               |
  88 *                     |               |
  89 *                     +---------------+
  90 *
  91 *
  92 *   +------+
  93 *   |reader|          RING BUFFER
  94 *   |page  |------------------v
  95 *   +------+        +---+   +---+   +---+
  96 *                   |   |-->|   |-->|   |
  97 *                   +---+   +---+   +---+
  98 *                     ^               |
  99 *                     |               |
 100 *                     +---------------+
 101 *
 102 *
 103 *   +------+
 104 *   |reader|          RING BUFFER
 105 *   |page  |------------------v
 106 *   +------+        +---+   +---+   +---+
 107 *      ^            |   |-->|   |-->|   |
 108 *      |            +---+   +---+   +---+
 109 *      |                              |
 110 *      |                              |
 111 *      +------------------------------+
 112 *
 113 *
 114 *   +------+
 115 *   |buffer|          RING BUFFER
 116 *   |page  |------------------v
 117 *   +------+        +---+   +---+   +---+
 118 *      ^            |   |   |   |-->|   |
 119 *      |   New      +---+   +---+   +---+
 120 *      |  Reader------^               |
 121 *      |   page                       |
 122 *      +------------------------------+
 123 *
 124 *
 125 * After we make this swap, the reader can hand this page off to the splice
 126 * code and be done with it. It can even allocate a new page if it needs to
 127 * and swap that into the ring buffer.
 128 *
 129 * We will be using cmpxchg soon to make all this lockless.
 130 *
 131 */
 132
 133/* Used for individual buffers (after the counter) */
 134#define RB_BUFFER_OFF		(1 << 20)
 135
 136#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
 137
 138#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
 139#define RB_ALIGNMENT		4U
 140#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 141#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */
 142
 143#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
 144# define RB_FORCE_8BYTE_ALIGNMENT	0
 145# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
 146#else
 147# define RB_FORCE_8BYTE_ALIGNMENT	1
 148# define RB_ARCH_ALIGNMENT		8U
 149#endif
 150
 151#define RB_ALIGN_DATA		__aligned(RB_ARCH_ALIGNMENT)
 152
 153/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
 154#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
 155
 156enum {
 157	RB_LEN_TIME_EXTEND = 8,
 158	RB_LEN_TIME_STAMP =  8,
 159};
 160
 161#define skip_time_extend(event) \
 162	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
 163
 164#define extended_time(event) \
 165	(event->type_len >= RINGBUF_TYPE_TIME_EXTEND)
 166
 167static inline bool rb_null_event(struct ring_buffer_event *event)
 168{
 169	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
 170}
 171
 172static void rb_event_set_padding(struct ring_buffer_event *event)
 173{
 174	/* padding has a NULL time_delta */
 175	event->type_len = RINGBUF_TYPE_PADDING;
 176	event->time_delta = 0;
 177}
 178
 179static unsigned
 180rb_event_data_length(struct ring_buffer_event *event)
 181{
 182	unsigned length;
 183
 184	if (event->type_len)
 185		length = event->type_len * RB_ALIGNMENT;
 186	else
 187		length = event->array[0];
 188	return length + RB_EVNT_HDR_SIZE;
 189}
 190
 191/*
 192 * Return the length of the given event. Will return
 193 * the length of the time extend if the event is a
 194 * time extend.
 195 */
 196static inline unsigned
 197rb_event_length(struct ring_buffer_event *event)
 198{
 199	switch (event->type_len) {
 200	case RINGBUF_TYPE_PADDING:
 201		if (rb_null_event(event))
 202			/* undefined */
 203			return -1;
 204		return  event->array[0] + RB_EVNT_HDR_SIZE;
 205
 206	case RINGBUF_TYPE_TIME_EXTEND:
 207		return RB_LEN_TIME_EXTEND;
 208
 209	case RINGBUF_TYPE_TIME_STAMP:
 210		return RB_LEN_TIME_STAMP;
 211
 212	case RINGBUF_TYPE_DATA:
 213		return rb_event_data_length(event);
 214	default:
 215		WARN_ON_ONCE(1);
 216	}
 217	/* not hit */
 218	return 0;
 219}
 220
 221/*
 222 * Return total length of time extend and data,
 223 *   or just the event length for all other events.
 224 */
 225static inline unsigned
 226rb_event_ts_length(struct ring_buffer_event *event)
 227{
 228	unsigned len = 0;
 229
 230	if (extended_time(event)) {
 231		/* time extends include the data event after it */
 232		len = RB_LEN_TIME_EXTEND;
 233		event = skip_time_extend(event);
 234	}
 235	return len + rb_event_length(event);
 236}
 237
 238/**
 239 * ring_buffer_event_length - return the length of the event
 240 * @event: the event to get the length of
 241 *
 242 * Returns the size of the data load of a data event.
 243 * If the event is something other than a data event, it
 244 * returns the size of the event itself. With the exception
 245 * of a TIME EXTEND, where it still returns the size of the
 246 * data load of the data event after it.
 247 */
 248unsigned ring_buffer_event_length(struct ring_buffer_event *event)
 249{
 250	unsigned length;
 251
 252	if (extended_time(event))
 253		event = skip_time_extend(event);
 254
 255	length = rb_event_length(event);
 256	if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 257		return length;
 258	length -= RB_EVNT_HDR_SIZE;
 259	if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
 260                length -= sizeof(event->array[0]);
 261	return length;
 262}
 263EXPORT_SYMBOL_GPL(ring_buffer_event_length);
 264
 265/* inline for ring buffer fast paths */
 266static __always_inline void *
 267rb_event_data(struct ring_buffer_event *event)
 268{
 269	if (extended_time(event))
 270		event = skip_time_extend(event);
 271	WARN_ON_ONCE(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
 272	/* If length is in len field, then array[0] has the data */
 273	if (event->type_len)
 274		return (void *)&event->array[0];
 275	/* Otherwise length is in array[0] and array[1] has the data */
 276	return (void *)&event->array[1];
 277}
 278
 279/**
 280 * ring_buffer_event_data - return the data of the event
 281 * @event: the event to get the data from
 282 */
 283void *ring_buffer_event_data(struct ring_buffer_event *event)
 284{
 285	return rb_event_data(event);
 286}
 287EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 288
 289#define for_each_buffer_cpu(buffer, cpu)		\
 290	for_each_cpu(cpu, buffer->cpumask)
 291
 292#define for_each_online_buffer_cpu(buffer, cpu)		\
 293	for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
 294
 295#define TS_SHIFT	27
 296#define TS_MASK		((1ULL << TS_SHIFT) - 1)
 297#define TS_DELTA_TEST	(~TS_MASK)
 298
 299static u64 rb_event_time_stamp(struct ring_buffer_event *event)
 300{
 301	u64 ts;
 302
 303	ts = event->array[0];
 304	ts <<= TS_SHIFT;
 305	ts += event->time_delta;
 306
 307	return ts;
 308}
 309
 310/* Flag when events were overwritten */
 311#define RB_MISSED_EVENTS	(1 << 31)
 312/* Missed count stored at end */
 313#define RB_MISSED_STORED	(1 << 30)
 314
 315struct buffer_data_page {
 316	u64		 time_stamp;	/* page time stamp */
 317	local_t		 commit;	/* write committed index */
 318	unsigned char	 data[] RB_ALIGN_DATA;	/* data of buffer page */
 319};
 320
 321struct buffer_data_read_page {
 322	unsigned		order;	/* order of the page */
 323	struct buffer_data_page	*data;	/* actual data, stored in this page */
 324};
 325
 326/*
 327 * Note, the buffer_page list must be first. The buffer pages
 328 * are allocated in cache lines, which means that each buffer
 329 * page will be at the beginning of a cache line, and thus
 330 * the least significant bits will be zero. We use this to
 331 * add flags in the list struct pointers, to make the ring buffer
 332 * lockless.
 333 */
 334struct buffer_page {
 335	struct list_head list;		/* list of buffer pages */
 336	local_t		 write;		/* index for next write */
 337	unsigned	 read;		/* index for next read */
 338	local_t		 entries;	/* entries on this page */
 339	unsigned long	 real_end;	/* real end of data */
 340	unsigned	 order;		/* order of the page */
 341	struct buffer_data_page *page;	/* Actual data page */
 342};
 343
 344/*
 345 * The buffer page counters, write and entries, must be reset
 346 * atomically when crossing page boundaries. To synchronize this
 347 * update, two counters are inserted into the number. One is
 348 * the actual counter for the write position or count on the page.
 349 *
 350 * The other is a counter of updaters. Before an update happens
 351 * the update partition of the counter is incremented. This will
 352 * allow the updater to update the counter atomically.
 353 *
 354 * The counter is 20 bits, and the state data is 12.
 355 */
 356#define RB_WRITE_MASK		0xfffff
 357#define RB_WRITE_INTCNT		(1 << 20)
 358
 359static void rb_init_page(struct buffer_data_page *bpage)
 360{
 361	local_set(&bpage->commit, 0);
 362}
 363
 364static __always_inline unsigned int rb_page_commit(struct buffer_page *bpage)
 365{
 366	return local_read(&bpage->page->commit);
 367}
 368
 369static void free_buffer_page(struct buffer_page *bpage)
 370{
 371	free_pages((unsigned long)bpage->page, bpage->order);
 372	kfree(bpage);
 373}
 374
 375/*
 376 * We need to fit the time_stamp delta into 27 bits.
 377 */
 378static inline bool test_time_stamp(u64 delta)
 379{
 380	return !!(delta & TS_DELTA_TEST);
 381}
 382
 383struct rb_irq_work {
 384	struct irq_work			work;
 385	wait_queue_head_t		waiters;
 386	wait_queue_head_t		full_waiters;
 387	atomic_t			seq;
 388	bool				waiters_pending;
 389	bool				full_waiters_pending;
 390	bool				wakeup_full;
 391};
 392
 393/*
 394 * Structure to hold event state and handle nested events.
 395 */
 396struct rb_event_info {
 397	u64			ts;
 398	u64			delta;
 399	u64			before;
 400	u64			after;
 401	unsigned long		length;
 402	struct buffer_page	*tail_page;
 403	int			add_timestamp;
 404};
 405
 406/*
 407 * Used for the add_timestamp
 408 *  NONE
 409 *  EXTEND - wants a time extend
 410 *  ABSOLUTE - the buffer requests all events to have absolute time stamps
 411 *  FORCE - force a full time stamp.
 412 */
 413enum {
 414	RB_ADD_STAMP_NONE		= 0,
 415	RB_ADD_STAMP_EXTEND		= BIT(1),
 416	RB_ADD_STAMP_ABSOLUTE		= BIT(2),
 417	RB_ADD_STAMP_FORCE		= BIT(3)
 418};
 419/*
 420 * Used for which event context the event is in.
 421 *  TRANSITION = 0
 422 *  NMI     = 1
 423 *  IRQ     = 2
 424 *  SOFTIRQ = 3
 425 *  NORMAL  = 4
 426 *
 427 * See trace_recursive_lock() comment below for more details.
 428 */
 429enum {
 430	RB_CTX_TRANSITION,
 431	RB_CTX_NMI,
 432	RB_CTX_IRQ,
 433	RB_CTX_SOFTIRQ,
 434	RB_CTX_NORMAL,
 435	RB_CTX_MAX
 436};
 437
 438struct rb_time_struct {
 439	local64_t	time;
 440};
 441typedef struct rb_time_struct rb_time_t;
 442
 443#define MAX_NEST	5
 444
 445/*
 446 * head_page == tail_page && head == tail then buffer is empty.
 447 */
 448struct ring_buffer_per_cpu {
 449	int				cpu;
 450	atomic_t			record_disabled;
 451	atomic_t			resize_disabled;
 452	struct trace_buffer	*buffer;
 453	raw_spinlock_t			reader_lock;	/* serialize readers */
 454	arch_spinlock_t			lock;
 455	struct lock_class_key		lock_key;
 456	struct buffer_data_page		*free_page;
 457	unsigned long			nr_pages;
 458	unsigned int			current_context;
 459	struct list_head		*pages;
 460	struct buffer_page		*head_page;	/* read from head */
 461	struct buffer_page		*tail_page;	/* write to tail */
 462	struct buffer_page		*commit_page;	/* committed pages */
 463	struct buffer_page		*reader_page;
 464	unsigned long			lost_events;
 465	unsigned long			last_overrun;
 466	unsigned long			nest;
 467	local_t				entries_bytes;
 468	local_t				entries;
 469	local_t				overrun;
 470	local_t				commit_overrun;
 471	local_t				dropped_events;
 472	local_t				committing;
 473	local_t				commits;
 474	local_t				pages_touched;
 475	local_t				pages_lost;
 476	local_t				pages_read;
 477	long				last_pages_touch;
 478	size_t				shortest_full;
 479	unsigned long			read;
 480	unsigned long			read_bytes;
 481	rb_time_t			write_stamp;
 482	rb_time_t			before_stamp;
 483	u64				event_stamp[MAX_NEST];
 484	u64				read_stamp;
 485	/* pages removed since last reset */
 486	unsigned long			pages_removed;
 487	/* ring buffer pages to update, > 0 to add, < 0 to remove */
 488	long				nr_pages_to_update;
 489	struct list_head		new_pages; /* new pages to add */
 490	struct work_struct		update_pages_work;
 491	struct completion		update_done;
 492
 493	struct rb_irq_work		irq_work;
 494};
 495
 496struct trace_buffer {
 497	unsigned			flags;
 498	int				cpus;
 499	atomic_t			record_disabled;
 500	atomic_t			resizing;
 501	cpumask_var_t			cpumask;
 502
 503	struct lock_class_key		*reader_lock_key;
 504
 505	struct mutex			mutex;
 506
 507	struct ring_buffer_per_cpu	**buffers;
 508
 509	struct hlist_node		node;
 
 
 510	u64				(*clock)(void);
 511
 512	struct rb_irq_work		irq_work;
 513	bool				time_stamp_abs;
 514
 515	unsigned int			subbuf_size;
 516	unsigned int			subbuf_order;
 517	unsigned int			max_data_size;
 518};
 519
 520struct ring_buffer_iter {
 521	struct ring_buffer_per_cpu	*cpu_buffer;
 522	unsigned long			head;
 523	unsigned long			next_event;
 524	struct buffer_page		*head_page;
 525	struct buffer_page		*cache_reader_page;
 526	unsigned long			cache_read;
 527	unsigned long			cache_pages_removed;
 528	u64				read_stamp;
 529	u64				page_stamp;
 530	struct ring_buffer_event	*event;
 531	size_t				event_size;
 532	int				missed_events;
 533};
 534
 535int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq *s)
 536{
 537	struct buffer_data_page field;
 538
 539	trace_seq_printf(s, "\tfield: u64 timestamp;\t"
 540			 "offset:0;\tsize:%u;\tsigned:%u;\n",
 541			 (unsigned int)sizeof(field.time_stamp),
 542			 (unsigned int)is_signed_type(u64));
 543
 544	trace_seq_printf(s, "\tfield: local_t commit;\t"
 545			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
 546			 (unsigned int)offsetof(typeof(field), commit),
 547			 (unsigned int)sizeof(field.commit),
 548			 (unsigned int)is_signed_type(long));
 549
 550	trace_seq_printf(s, "\tfield: int overwrite;\t"
 551			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
 552			 (unsigned int)offsetof(typeof(field), commit),
 553			 1,
 554			 (unsigned int)is_signed_type(long));
 555
 556	trace_seq_printf(s, "\tfield: char data;\t"
 557			 "offset:%u;\tsize:%u;\tsigned:%u;\n",
 558			 (unsigned int)offsetof(typeof(field), data),
 559			 (unsigned int)buffer->subbuf_size,
 560			 (unsigned int)is_signed_type(char));
 561
 562	return !trace_seq_has_overflowed(s);
 563}
 564
 565static inline void rb_time_read(rb_time_t *t, u64 *ret)
 566{
 567	*ret = local64_read(&t->time);
 568}
 569static void rb_time_set(rb_time_t *t, u64 val)
 570{
 571	local64_set(&t->time, val);
 572}
 573
 574/*
 575 * Enable this to make sure that the event passed to
 576 * ring_buffer_event_time_stamp() is not committed and also
 577 * is on the buffer that it passed in.
 578 */
 579//#define RB_VERIFY_EVENT
 580#ifdef RB_VERIFY_EVENT
 581static struct list_head *rb_list_head(struct list_head *list);
 582static void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
 583			 void *event)
 584{
 585	struct buffer_page *page = cpu_buffer->commit_page;
 586	struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page);
 587	struct list_head *next;
 588	long commit, write;
 589	unsigned long addr = (unsigned long)event;
 590	bool done = false;
 591	int stop = 0;
 592
 593	/* Make sure the event exists and is not committed yet */
 594	do {
 595		if (page == tail_page || WARN_ON_ONCE(stop++ > 100))
 596			done = true;
 597		commit = local_read(&page->page->commit);
 598		write = local_read(&page->write);
 599		if (addr >= (unsigned long)&page->page->data[commit] &&
 600		    addr < (unsigned long)&page->page->data[write])
 601			return;
 602
 603		next = rb_list_head(page->list.next);
 604		page = list_entry(next, struct buffer_page, list);
 605	} while (!done);
 606	WARN_ON_ONCE(1);
 607}
 608#else
 609static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer,
 610			 void *event)
 611{
 612}
 613#endif
 614
 615/*
 616 * The absolute time stamp drops the 5 MSBs and some clocks may
 617 * require them. The rb_fix_abs_ts() will take a previous full
 618 * time stamp, and add the 5 MSB of that time stamp on to the
 619 * saved absolute time stamp. Then they are compared in case of
 620 * the unlikely event that the latest time stamp incremented
 621 * the 5 MSB.
 622 */
 623static inline u64 rb_fix_abs_ts(u64 abs, u64 save_ts)
 624{
 625	if (save_ts & TS_MSB) {
 626		abs |= save_ts & TS_MSB;
 627		/* Check for overflow */
 628		if (unlikely(abs < save_ts))
 629			abs += 1ULL << 59;
 630	}
 631	return abs;
 632}
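/*
 * Worked example: if the previously saved full time stamp had a top-5-bit
 * value of 1 (save_ts = (1ULL << 59) + 0x100) and the new absolute stamp
 * wrapped its 59-bit field back to a small value (abs = 0x5), OR-ing in the
 * MSBs gives (1ULL << 59) + 0x5, which is less than save_ts, so 1ULL << 59
 * is added and the result lands in the next 5-MSB period as expected.
 */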
 633
 634static inline u64 rb_time_stamp(struct trace_buffer *buffer);
 635
 636/**
 637 * ring_buffer_event_time_stamp - return the event's current time stamp
 638 * @buffer: The buffer that the event is on
 639 * @event: the event to get the time stamp of
 640 *
 641 * Note, this must be called after @event is reserved, and before it is
 642 * committed to the ring buffer. And must be called from the same
 643 * context where the event was reserved (normal, softirq, irq, etc).
 644 *
 645 * Returns the time stamp associated with the current event.
 646 * If the event has an extended time stamp, then that is used as
 647 * the time stamp to return.
 648 * In the highly unlikely case that the event was nested more than
 649 * the max nesting, then the write_stamp of the buffer is returned,
 650 * otherwise the current time is returned; neither of
 651 * the last two cases should ever happen.
 652 */
 653u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
 654				 struct ring_buffer_event *event)
 655{
 656	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()];
 657	unsigned int nest;
 658	u64 ts;
 659
 660	/* If the event includes an absolute time, then just use that */
 661	if (event->type_len == RINGBUF_TYPE_TIME_STAMP) {
 662		ts = rb_event_time_stamp(event);
 663		return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp);
 664	}
 665
 666	nest = local_read(&cpu_buffer->committing);
 667	verify_event(cpu_buffer, event);
 668	if (WARN_ON_ONCE(!nest))
 669		goto fail;
 670
 671	/* Read the current saved nesting level time stamp */
 672	if (likely(--nest < MAX_NEST))
 673		return cpu_buffer->event_stamp[nest];
 674
 675	/* Shouldn't happen, warn if it does */
 676	WARN_ONCE(1, "nest (%d) greater than max", nest);
 677
 678 fail:
 679	rb_time_read(&cpu_buffer->write_stamp, &ts);
 680
 681	return ts;
 682}
 683
 684/**
 685 * ring_buffer_nr_pages - get the number of buffer pages in the ring buffer
 686 * @buffer: The ring_buffer to get the number of pages from
 687 * @cpu: The cpu of the ring_buffer to get the number of pages from
 688 *
 689 * Returns the number of pages used by a per_cpu buffer of the ring buffer.
 690 */
 691size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
 692{
 693	return buffer->buffers[cpu]->nr_pages;
 694}
 695
 696/**
 697 * ring_buffer_nr_dirty_pages - get the number of used pages in the ring buffer
 698 * @buffer: The ring_buffer to get the number of pages from
 699 * @cpu: The cpu of the ring_buffer to get the number of pages from
 700 *
 701 * Returns the number of pages that have content in the ring buffer.
 702 */
 703size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
 704{
 705	size_t read;
 706	size_t lost;
 707	size_t cnt;
 708
 709	read = local_read(&buffer->buffers[cpu]->pages_read);
 710	lost = local_read(&buffer->buffers[cpu]->pages_lost);
 711	cnt = local_read(&buffer->buffers[cpu]->pages_touched);
 712
 713	if (WARN_ON_ONCE(cnt < lost))
 714		return 0;
 715
 716	cnt -= lost;
 717
 718	/* The reader can read an empty page, but not more than that */
 719	if (cnt < read) {
 720		WARN_ON_ONCE(read > cnt + 1);
 721		return 0;
 722	}
 723
 724	return cnt - read;
 725}
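/*
 * Example of the arithmetic above: with pages_touched == 10, pages_lost == 2
 * and pages_read == 3, the function reports 10 - 2 - 3 = 5 dirty pages.
 * A reader may legitimately have consumed one empty page, which is why
 * read == cnt + 1 is tolerated and anything beyond that triggers a warning.
 */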
 726
 727static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full)
 728{
 729	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 730	size_t nr_pages;
 731	size_t dirty;
 732
 733	nr_pages = cpu_buffer->nr_pages;
 734	if (!nr_pages || !full)
 735		return true;
 736
 737	/*
 738	 * Add one as dirty will never equal nr_pages, as the sub-buffer
 739	 * that the writer is on is not counted as dirty.
 740	 * This is needed if "buffer_percent" is set to 100.
 741	 */
 742	dirty = ring_buffer_nr_dirty_pages(buffer, cpu) + 1;
 743
 744	return (dirty * 100) >= (full * nr_pages);
 745}
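/*
 * Example: with nr_pages == 16 and full == 75 (percent), the watermark is
 * hit once dirty * 100 >= 75 * 16, i.e. dirty >= 12. Because one is added
 * for the sub-buffer the writer is on, ring_buffer_nr_dirty_pages() only
 * needs to report 11 pages for full_hit() to return true.
 */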
 746
 747/*
 748 * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
 749 *
 750 * Runs from irq_work context and wakes up any task that is blocked on the
 751 * ring buffer waiters queue.
 752 */
 753static void rb_wake_up_waiters(struct irq_work *work)
 754{
 755	struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
 756
 757	/* For waiters waiting for the first wake up */
 758	(void)atomic_fetch_inc_release(&rbwork->seq);
 759
 760	wake_up_all(&rbwork->waiters);
 761	if (rbwork->full_waiters_pending || rbwork->wakeup_full) {
 762		/* Only cpu_buffer sets the above flags */
 763		struct ring_buffer_per_cpu *cpu_buffer =
 764			container_of(rbwork, struct ring_buffer_per_cpu, irq_work);
 765
 766		/* Called from interrupt context */
 767		raw_spin_lock(&cpu_buffer->reader_lock);
 768		rbwork->wakeup_full = false;
 769		rbwork->full_waiters_pending = false;
 770
 771		/* Waking up all waiters, they will reset the shortest full */
 772		cpu_buffer->shortest_full = 0;
 773		raw_spin_unlock(&cpu_buffer->reader_lock);
 774
 775		wake_up_all(&rbwork->full_waiters);
 776	}
 777}
 778
 779/**
 780 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
 781 * @buffer: The ring buffer to wake waiters on
 782 * @cpu: The CPU buffer to wake waiters on
 783 *
 784 * In the case that a file which represents a ring buffer is closing,
 785 * it is prudent to wake up any waiters that are on this.
 786 */
 787void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
 788{
 789	struct ring_buffer_per_cpu *cpu_buffer;
 790	struct rb_irq_work *rbwork;
 791
 792	if (!buffer)
 793		return;
 794
 795	if (cpu == RING_BUFFER_ALL_CPUS) {
 796
 797		/* Wake up individual ones too. One level recursion */
 798		for_each_buffer_cpu(buffer, cpu)
 799			ring_buffer_wake_waiters(buffer, cpu);
 800
 801		rbwork = &buffer->irq_work;
 802	} else {
 803		if (WARN_ON_ONCE(!buffer->buffers))
 804			return;
 805		if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
 806			return;
 807
 808		cpu_buffer = buffer->buffers[cpu];
 809		/* The CPU buffer may not have been initialized yet */
 810		if (!cpu_buffer)
 811			return;
 812		rbwork = &cpu_buffer->irq_work;
 813	}
 814
 815	/* This can be called in any context */
 816	irq_work_queue(&rbwork->work);
 817}
 818
 819static bool rb_watermark_hit(struct trace_buffer *buffer, int cpu, int full)
 820{
 821	struct ring_buffer_per_cpu *cpu_buffer;
 822	bool ret = false;
 823
 824	/* A read of all CPUs always waits for any data */
 825	if (cpu == RING_BUFFER_ALL_CPUS)
 826		return !ring_buffer_empty(buffer);
 827
 828	cpu_buffer = buffer->buffers[cpu];
 829
 830	if (!ring_buffer_empty_cpu(buffer, cpu)) {
 831		unsigned long flags;
 832		bool pagebusy;
 833
 834		if (!full)
 835			return true;
 836
 837		raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 838		pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
 839		ret = !pagebusy && full_hit(buffer, cpu, full);
 840
 841		if (!ret && (!cpu_buffer->shortest_full ||
 842			     cpu_buffer->shortest_full > full)) {
 843			cpu_buffer->shortest_full = full;
 844		}
 845		raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 846	}
 847	return ret;
 848}
 849
 850static inline bool
 851rb_wait_cond(struct rb_irq_work *rbwork, struct trace_buffer *buffer,
 852	     int cpu, int full, ring_buffer_cond_fn cond, void *data)
 853{
 854	if (rb_watermark_hit(buffer, cpu, full))
 855		return true;
 856
 857	if (cond(data))
 858		return true;
 859
 860	/*
 861	 * The events can happen in critical sections where
 862	 * checking a work queue can cause deadlocks.
 863	 * After adding a task to the queue, this flag is set
 864	 * only to notify events to try to wake up the queue
 865	 * using irq_work.
 866	 *
 867	 * We don't clear it even if the buffer is no longer
 868	 * empty. The flag only causes the next event to run
 869		 * irq_work to do the work queue wake up. The worst
 870	 * that can happen if we race with !trace_empty() is that
 871	 * an event will cause an irq_work to try to wake up
 872	 * an empty queue.
 873	 *
 874	 * There's no reason to protect this flag either, as
 875	 * the work queue and irq_work logic will do the necessary
 876	 * synchronization for the wake ups. The only thing
 877	 * that is necessary is that the wake up happens after
 878	 * a task has been queued. It's OK for spurious wake ups.
 879	 */
 880	if (full)
 881		rbwork->full_waiters_pending = true;
 882	else
 883		rbwork->waiters_pending = true;
 884
 885	return false;
 886}
 887
 888struct rb_wait_data {
 889	struct rb_irq_work		*irq_work;
 890	int				seq;
 891};
 892
 893/*
 894 * The default wait condition for ring_buffer_wait() is to just exit the
 895 * wait loop the first time it is woken up.
 896 */
 897static bool rb_wait_once(void *data)
 898{
 899	struct rb_wait_data *rdata = data;
 900	struct rb_irq_work *rbwork = rdata->irq_work;
 901
 902	return atomic_read_acquire(&rbwork->seq) != rdata->seq;
 903}
 904
 905/**
 906 * ring_buffer_wait - wait for input to the ring buffer
 907 * @buffer: buffer to wait on
 908 * @cpu: the cpu buffer to wait on
 909 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
 910 * @cond: condition function to break out of wait (NULL to run once)
 911 * @data: the data to pass to @cond.
 912 *
 913 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 914 * as data is added to any of the @buffer's cpu buffers. Otherwise
 915 * it will wait for data to be added to a specific cpu buffer.
 916 */
 917int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full,
 918		     ring_buffer_cond_fn cond, void *data)
 919{
 920	struct ring_buffer_per_cpu *cpu_buffer;
 921	struct wait_queue_head *waitq;
 922	struct rb_irq_work *rbwork;
 923	struct rb_wait_data rdata;
 924	int ret = 0;
 925
 926	/*
 927	 * Depending on what the caller is waiting for, either any
 928	 * data in any cpu buffer, or a specific buffer, put the
 929	 * caller on the appropriate wait queue.
 930	 */
 931	if (cpu == RING_BUFFER_ALL_CPUS) {
 932		rbwork = &buffer->irq_work;
 933		/* Full only makes sense on per cpu reads */
 934		full = 0;
 935	} else {
 936		if (!cpumask_test_cpu(cpu, buffer->cpumask))
 937			return -ENODEV;
 938		cpu_buffer = buffer->buffers[cpu];
 939		rbwork = &cpu_buffer->irq_work;
 940	}
 941
 942	if (full)
 943		waitq = &rbwork->full_waiters;
 944	else
 945		waitq = &rbwork->waiters;
 946
 947	/* Set up to exit loop as soon as it is woken */
 948	if (!cond) {
 949		cond = rb_wait_once;
 950		rdata.irq_work = rbwork;
 951		rdata.seq = atomic_read_acquire(&rbwork->seq);
 952		data = &rdata;
 953	}
 954
 955	ret = wait_event_interruptible((*waitq),
 956				rb_wait_cond(rbwork, buffer, cpu, full, cond, data));
 957
 958	return ret;
 959}
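/*
 * A minimal usage sketch (illustrative only): a consumer that blocks until
 * cpu 0 has any data and then pulls one event could look roughly like:
 *
 *	if (!ring_buffer_wait(buffer, 0, 0, NULL, NULL))
 *		event = ring_buffer_consume(buffer, 0, &ts, &lost_events);
 *
 * Passing a NULL @cond uses rb_wait_once() above, so the wait returns after
 * the first wake up even if no new data arrived.
 */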
 960
 961/**
 962 * ring_buffer_poll_wait - poll on buffer input
 963 * @buffer: buffer to wait on
 964 * @cpu: the cpu buffer to wait on
 965 * @filp: the file descriptor
 966 * @poll_table: The poll descriptor
 967 * @full: wait until the percentage of pages are available, if @cpu != RING_BUFFER_ALL_CPUS
 968 *
 969 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
 970 * as data is added to any of the @buffer's cpu buffers. Otherwise
 971 * it will wait for data to be added to a specific cpu buffer.
 972 *
 973 * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
 974 * zero otherwise.
 975 */
 976__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
 977			  struct file *filp, poll_table *poll_table, int full)
 978{
 979	struct ring_buffer_per_cpu *cpu_buffer;
 980	struct rb_irq_work *rbwork;
 981
 982	if (cpu == RING_BUFFER_ALL_CPUS) {
 983		rbwork = &buffer->irq_work;
 984		full = 0;
 985	} else {
 986		if (!cpumask_test_cpu(cpu, buffer->cpumask))
 987			return EPOLLERR;
 988
 989		cpu_buffer = buffer->buffers[cpu];
 990		rbwork = &cpu_buffer->irq_work;
 991	}
 992
 993	if (full) {
 994		poll_wait(filp, &rbwork->full_waiters, poll_table);
 995
 996		if (rb_watermark_hit(buffer, cpu, full))
 997			return EPOLLIN | EPOLLRDNORM;
 998		/*
 999		 * Only allow full_waiters_pending update to be seen after
1000		 * the shortest_full is set (in rb_watermark_hit). If the
1001		 * writer sees the full_waiters_pending flag set, it will
1002		 * compare the amount in the ring buffer to shortest_full.
1003		 * If the amount in the ring buffer is greater than the
1004		 * shortest_full percent, it will call the irq_work handler
1005		 * to wake up this list. The irq_handler will reset shortest_full
1006		 * back to zero. That's done under the reader_lock, but
1007		 * the below smp_mb() makes sure that the update to
1008		 * full_waiters_pending doesn't leak up into the above.
1009		 */
1010		smp_mb();
1011		rbwork->full_waiters_pending = true;
1012		return 0;
1013	}
1014
1015	poll_wait(filp, &rbwork->waiters, poll_table);
1016	rbwork->waiters_pending = true;
1017
1018	/*
1019	 * There's a tight race between setting the waiters_pending and
1020	 * checking if the ring buffer is empty.  Once the waiters_pending bit
1021	 * is set, the next event will wake the task up, but we can get stuck
1022	 * if there's only a single event in.
1023	 *
1024	 * FIXME: Ideally, we need a memory barrier on the writer side as well,
1025	 * but adding a memory barrier to all events will cause too much of a
1026	 * performance hit in the fast path.  We only need a memory barrier when
1027	 * the buffer goes from empty to having content.  But as this race is
1028	 * extremely small, and it's not a problem if another event comes in, we
1029	 * will fix it later.
1030	 */
1031	smp_mb();
1032
1033	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
1034	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
1035		return EPOLLIN | EPOLLRDNORM;
1036	return 0;
1037}
1038
1039/* buffer may be either ring_buffer or ring_buffer_per_cpu */
1040#define RB_WARN_ON(b, cond)						\
1041	({								\
1042		int _____ret = unlikely(cond);				\
1043		if (_____ret) {						\
1044			if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
1045				struct ring_buffer_per_cpu *__b =	\
1046					(void *)b;			\
1047				atomic_inc(&__b->buffer->record_disabled); \
1048			} else						\
1049				atomic_inc(&b->record_disabled);	\
1050			WARN_ON(1);					\
1051		}							\
1052		_____ret;						\
1053	})
1054
1055/* Up this if you want to test the TIME_EXTENTS and normalization */
1056#define DEBUG_SHIFT 0
1057
1058static inline u64 rb_time_stamp(struct trace_buffer *buffer)
1059{
1060	u64 ts;
1061
1062	/* Skip retpolines :-( */
1063	if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && likely(buffer->clock == trace_clock_local))
1064		ts = trace_clock_local();
1065	else
1066		ts = buffer->clock();
1067
1068	/* shift to debug/test normalization and TIME_EXTENTS */
1069	return ts << DEBUG_SHIFT;
1070}
1071
1072u64 ring_buffer_time_stamp(struct trace_buffer *buffer)
1073{
1074	u64 time;
1075
1076	preempt_disable_notrace();
1077	time = rb_time_stamp(buffer);
1078	preempt_enable_notrace();
1079
1080	return time;
1081}
1082EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
1083
1084void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
1085				      int cpu, u64 *ts)
1086{
1087	/* Just stupid testing the normalize function and deltas */
1088	*ts >>= DEBUG_SHIFT;
1089}
1090EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
1091
1092/*
1093 * Making the ring buffer lockless makes things tricky.
1094 * Writes only happen on the CPU that they are on, and they
1095 * only need to worry about interrupts. Reads, however, can
1096 * happen on any CPU.
1097 *
1098 * The reader page is always off the ring buffer, but when the
1099 * reader finishes with a page, it needs to swap its page with
1100 * a new one from the buffer. The reader needs to take from
1101 * the head (writes go to the tail). But if a writer is in overwrite
1102 * mode and wraps, it must push the head page forward.
1103 *
1104 * Here lies the problem.
1105 *
1106 * The reader must be careful to replace only the head page, and
1107 * not another one. As described at the top of the file in the
1108 * ASCII art, the reader sets its old page to point to the next
1109 * page after head. It then sets the page after head to point to
1110 * the old reader page. But if the writer moves the head page
1111 * during this operation, the reader could end up with the tail.
1112 *
1113 * We use cmpxchg to help prevent this race. We also do something
1114 * special with the page before head. We set the LSB to 1.
1115 *
1116 * When the writer must push the page forward, it will clear the
1117 * bit that points to the head page, move the head, and then set
1118 * the bit that points to the new head page.
1119 *
1120 * We also don't want an interrupt coming in and moving the head
1121 * page on another writer. Thus we use the second LSB to catch
1122 * that too. Thus:
1123 *
1124 * head->list->prev->next        bit 1          bit 0
1125 *                              -------        -------
1126 * Normal page                     0              0
1127 * Points to head page             0              1
1128 * New head page                   1              0
1129 *
1130 * Note we can not trust the prev pointer of the head page, because:
1131 *
1132 * +----+       +-----+        +-----+
1133 * |    |------>|  T  |---X--->|  N  |
1134 * |    |<------|     |        |     |
1135 * +----+       +-----+        +-----+
1136 *   ^                           ^ |
1137 *   |          +-----+          | |
1138 *   +----------|  R  |----------+ |
1139 *              |     |<-----------+
1140 *              +-----+
1141 *
1142 * Key:  ---X-->  HEAD flag set in pointer
1143 *         T      Tail page
1144 *         R      Reader page
1145 *         N      Next page
1146 *
1147 * (see __rb_reserve_next() to see where this happens)
1148 *
1149 *  What the above shows is that the reader just swapped out
1150 *  the reader page with a page in the buffer, but before it
1151 *  could make the new header point back to the new page added
1152 *  it was preempted by a writer. The writer moved forward onto
1153 *  the new page added by the reader and is about to move forward
1154 *  again.
1155 *
1156 *  You can see, it is legitimate for the previous pointer of
1157 *  the head (or any page) not to point back to itself. But only
1158 *  temporarily.
1159 */
1160
1161#define RB_PAGE_NORMAL		0UL
1162#define RB_PAGE_HEAD		1UL
1163#define RB_PAGE_UPDATE		2UL
1164
1165
1166#define RB_FLAG_MASK		3UL
1167
1168/* PAGE_MOVED is not part of the mask */
1169#define RB_PAGE_MOVED		4UL
1170
1171/*
1172 * rb_list_head - remove any bit
1173 */
1174static struct list_head *rb_list_head(struct list_head *list)
1175{
1176	unsigned long val = (unsigned long)list;
1177
1178	return (struct list_head *)(val & ~RB_FLAG_MASK);
1179}
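/*
 * Example (illustrative address): because buffer pages are cache-line
 * aligned, a ->next value of 0xffff888012345681 really points at
 * 0xffff888012345680 with bit 0 set, i.e. "the page after this one is the
 * head page". rb_list_head() masks RB_FLAG_MASK off so the pointer can be
 * followed normally.
 */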
1180
1181/*
1182 * rb_is_head_page - test if the given page is the head page
1183 *
1184 * Because the reader may move the head_page pointer, we can
1185 * not trust what the head page is (it may be pointing to
1186 * the reader page). But if the next page is a header page,
1187 * its flags will be non zero.
1188 */
1189static inline int
1190rb_is_head_page(struct buffer_page *page, struct list_head *list)
1191{
1192	unsigned long val;
1193
1194	val = (unsigned long)list->next;
1195
1196	if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
1197		return RB_PAGE_MOVED;
1198
1199	return val & RB_FLAG_MASK;
1200}
1201
1202/*
1203 * rb_is_reader_page
1204 *
1205 * The unique thing about the reader page is that, if the
1206 * writer is ever on it, the previous pointer never points
1207 * back to the reader page.
1208 */
1209static bool rb_is_reader_page(struct buffer_page *page)
1210{
1211	struct list_head *list = page->list.prev;
1212
1213	return rb_list_head(list->next) != &page->list;
1214}
1215
1216/*
1217 * rb_set_list_to_head - set a list_head to be pointing to head.
1218 */
1219static void rb_set_list_to_head(struct list_head *list)
1220{
1221	unsigned long *ptr;
1222
1223	ptr = (unsigned long *)&list->next;
1224	*ptr |= RB_PAGE_HEAD;
1225	*ptr &= ~RB_PAGE_UPDATE;
1226}
1227
1228/*
1229 * rb_head_page_activate - sets up head page
1230 */
1231static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
1232{
1233	struct buffer_page *head;
1234
1235	head = cpu_buffer->head_page;
1236	if (!head)
1237		return;
1238
1239	/*
1240	 * Set the previous list pointer to have the HEAD flag.
1241	 */
1242	rb_set_list_to_head(head->list.prev);
1243}
1244
1245static void rb_list_head_clear(struct list_head *list)
1246{
1247	unsigned long *ptr = (unsigned long *)&list->next;
1248
1249	*ptr &= ~RB_FLAG_MASK;
1250}
1251
1252/*
1253 * rb_head_page_deactivate - clears head page ptr (for free list)
1254 */
1255static void
1256rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
1257{
1258	struct list_head *hd;
1259
1260	/* Go through the whole list and clear any pointers found. */
1261	rb_list_head_clear(cpu_buffer->pages);
1262
1263	list_for_each(hd, cpu_buffer->pages)
1264		rb_list_head_clear(hd);
1265}
1266
1267static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
1268			    struct buffer_page *head,
1269			    struct buffer_page *prev,
1270			    int old_flag, int new_flag)
1271{
1272	struct list_head *list;
1273	unsigned long val = (unsigned long)&head->list;
1274	unsigned long ret;
1275
1276	list = &prev->list;
1277
1278	val &= ~RB_FLAG_MASK;
1279
1280	ret = cmpxchg((unsigned long *)&list->next,
1281		      val | old_flag, val | new_flag);
1282
1283	/* check if the reader took the page */
1284	if ((ret & ~RB_FLAG_MASK) != val)
1285		return RB_PAGE_MOVED;
1286
1287	return ret & RB_FLAG_MASK;
1288}
1289
1290static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
1291				   struct buffer_page *head,
1292				   struct buffer_page *prev,
1293				   int old_flag)
1294{
1295	return rb_head_page_set(cpu_buffer, head, prev,
1296				old_flag, RB_PAGE_UPDATE);
1297}
1298
1299static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
1300				 struct buffer_page *head,
1301				 struct buffer_page *prev,
1302				 int old_flag)
1303{
1304	return rb_head_page_set(cpu_buffer, head, prev,
1305				old_flag, RB_PAGE_HEAD);
1306}
1307
1308static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
1309				   struct buffer_page *head,
1310				   struct buffer_page *prev,
1311				   int old_flag)
1312{
1313	return rb_head_page_set(cpu_buffer, head, prev,
1314				old_flag, RB_PAGE_NORMAL);
1315}
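/*
 * Taken together, these helpers implement the flag dance described in the
 * big comment above: roughly, a writer that needs to push the head first
 * moves the pointer from HEAD to UPDATE, advances the head page, marks the
 * new head with rb_head_page_set_head(), and finally returns the old
 * pointer to NORMAL. A return value of RB_PAGE_MOVED at any step means a
 * reader swapped the page out underneath the writer.
 */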
1316
1317static inline void rb_inc_page(struct buffer_page **bpage)
1318{
1319	struct list_head *p = rb_list_head((*bpage)->list.next);
1320
1321	*bpage = list_entry(p, struct buffer_page, list);
1322}
1323
1324static struct buffer_page *
1325rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
1326{
1327	struct buffer_page *head;
1328	struct buffer_page *page;
1329	struct list_head *list;
1330	int i;
1331
1332	if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
1333		return NULL;
1334
1335	/* sanity check */
1336	list = cpu_buffer->pages;
1337	if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
1338		return NULL;
1339
1340	page = head = cpu_buffer->head_page;
1341	/*
1342	 * It is possible that the writer moves the header behind
1343	 * where we started, and we miss in one loop.
1344	 * A second loop should grab the header, but we'll do
1345	 * three loops just because I'm paranoid.
1346	 */
1347	for (i = 0; i < 3; i++) {
1348		do {
1349			if (rb_is_head_page(page, page->list.prev)) {
1350				cpu_buffer->head_page = page;
1351				return page;
1352			}
1353			rb_inc_page(&page);
1354		} while (page != head);
1355	}
1356
1357	RB_WARN_ON(cpu_buffer, 1);
1358
1359	return NULL;
1360}
1361
1362static bool rb_head_page_replace(struct buffer_page *old,
1363				struct buffer_page *new)
1364{
1365	unsigned long *ptr = (unsigned long *)&old->list.prev->next;
1366	unsigned long val;
 
1367
1368	val = *ptr & ~RB_FLAG_MASK;
1369	val |= RB_PAGE_HEAD;
1370
1371	return try_cmpxchg(ptr, &val, (unsigned long)&new->list);
1372}
1373
1374/*
1375 * rb_tail_page_update - move the tail page forward
1376 */
1377static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
1378			       struct buffer_page *tail_page,
1379			       struct buffer_page *next_page)
1380{
1381	unsigned long old_entries;
1382	unsigned long old_write;
1383
1384	/*
1385	 * The tail page now needs to be moved forward.
1386	 *
1387	 * We need to reset the tail page, but without messing
1388	 * with possible erasing of data brought in by interrupts
1389	 * that have moved the tail page and are currently on it.
1390	 *
1391	 * We add a counter to the write field to denote this.
1392	 */
1393	old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
1394	old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
1395
1396	/*
1397	 * Just make sure we have seen our old_write and synchronize
1398	 * with any interrupts that come in.
1399	 */
1400	barrier();
1401
1402	/*
1403	 * If the tail page is still the same as what we think
1404	 * it is, then it is up to us to update the tail
1405	 * pointer.
1406	 */
1407	if (tail_page == READ_ONCE(cpu_buffer->tail_page)) {
1408		/* Zero the write counter */
1409		unsigned long val = old_write & ~RB_WRITE_MASK;
1410		unsigned long eval = old_entries & ~RB_WRITE_MASK;
1411
1412		/*
1413		 * This will only succeed if an interrupt did
1414		 * not come in and change it. In which case, we
1415		 * do not want to modify it.
1416		 *
1417		 * We add (void) to let the compiler know that we do not care
1418		 * about the return value of these functions. We use the
1419		 * cmpxchg to only update if an interrupt did not already
1420		 * do it for us. If the cmpxchg fails, we don't care.
1421		 */
1422		(void)local_cmpxchg(&next_page->write, old_write, val);
1423		(void)local_cmpxchg(&next_page->entries, old_entries, eval);
1424
1425		/*
1426		 * No need to worry about races with clearing out the commit.
1427		 * It can only increment when a commit takes place. But that
1428		 * only happens in the outermost nested commit.
1429		 */
1430		local_set(&next_page->page->commit, 0);
1431
1432		/* Either we update tail_page or an interrupt does */
1433		if (try_cmpxchg(&cpu_buffer->tail_page, &tail_page, next_page))
1434			local_inc(&cpu_buffer->pages_touched);
1435	}
1436}
1437
1438static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
1439			  struct buffer_page *bpage)
1440{
1441	unsigned long val = (unsigned long)bpage;
1442
1443	RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK);
1444}
1445
1446/**
1447 * rb_check_pages - integrity check of buffer pages
1448 * @cpu_buffer: CPU buffer with pages to test
1449 *
1450 * As a safety measure we check to make sure the data pages have not
1451 * been corrupted.
1452 *
1453 * Callers of this function need to guarantee that the list of pages doesn't get
1454 * modified during the check. In particular, if it's possible that the function
1455 * is invoked with concurrent readers which can swap in a new reader page then
1456 * the caller should take cpu_buffer->reader_lock.
1457 */
1458static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
1459{
1460	struct list_head *head = rb_list_head(cpu_buffer->pages);
1461	struct list_head *tmp;
1462
1463	if (RB_WARN_ON(cpu_buffer,
1464			rb_list_head(rb_list_head(head->next)->prev) != head))
1465		return;
1466
1467	if (RB_WARN_ON(cpu_buffer,
1468			rb_list_head(rb_list_head(head->prev)->next) != head))
1469		return;
1470
1471	for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) {
1472		if (RB_WARN_ON(cpu_buffer,
1473				rb_list_head(rb_list_head(tmp->next)->prev) != tmp))
1474			return;
1475
1476		if (RB_WARN_ON(cpu_buffer,
1477				rb_list_head(rb_list_head(tmp->prev)->next) != tmp))
1478			return;
1479	}
1480}
1481
1482static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1483		long nr_pages, struct list_head *pages)
1484{
 
1485	struct buffer_page *bpage, *tmp;
1486	bool user_thread = current->mm != NULL;
1487	gfp_t mflags;
1488	long i;
1489
1490	/*
1491	 * Check if the available memory is there first.
1492	 * Note, si_mem_available() only gives us a rough estimate of available
1493	 * memory. It may not be accurate. But we don't care, we just want
1494	 * to prevent doing any allocation when it is obvious that it is
1495	 * not going to succeed.
1496	 */
1497	i = si_mem_available();
1498	if (i < nr_pages)
1499		return -ENOMEM;
1500
1501	/*
1502	 * __GFP_RETRY_MAYFAIL flag makes sure that the allocation fails
1503	 * gracefully without invoking oom-killer and the system is not
1504	 * destabilized.
1505	 */
1506	mflags = GFP_KERNEL | __GFP_RETRY_MAYFAIL;
1507
1508	/*
1509	 * If a user thread allocates too much, si_mem_available() may have
1510	 * reported there was enough memory even though there was not.
1511	 * Make sure the OOM killer kills this thread. This can happen
1512	 * even with RETRY_MAYFAIL because another task may be doing
1513	 * an allocation after this task has taken all memory.
1514	 * This is the task the OOM killer needs to take out during this
1515	 * loop, even if it was triggered by an allocation somewhere else.
1516	 */
1517	if (user_thread)
1518		set_current_oom_origin();
1519	for (i = 0; i < nr_pages; i++) {
1520		struct page *page;
1521
1522		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1523				    mflags, cpu_to_node(cpu_buffer->cpu));
1524		if (!bpage)
1525			goto free_pages;
1526
1527		rb_check_bpage(cpu_buffer, bpage);
1528
1529		list_add(&bpage->list, pages);
1530
1531		page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
1532					mflags | __GFP_ZERO,
1533					cpu_buffer->buffer->subbuf_order);
1534		if (!page)
1535			goto free_pages;
1536		bpage->page = page_address(page);
1537		bpage->order = cpu_buffer->buffer->subbuf_order;
1538		rb_init_page(bpage->page);
1539
1540		if (user_thread && fatal_signal_pending(current))
1541			goto free_pages;
1542	}
1543	if (user_thread)
1544		clear_current_oom_origin();
1545
1546	return 0;
1547
1548free_pages:
1549	list_for_each_entry_safe(bpage, tmp, pages, list) {
1550		list_del_init(&bpage->list);
1551		free_buffer_page(bpage);
1552	}
1553	if (user_thread)
1554		clear_current_oom_origin();
1555
1556	return -ENOMEM;
1557}
1558
1559static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1560			     unsigned long nr_pages)
1561{
1562	LIST_HEAD(pages);
1563
1564	WARN_ON(!nr_pages);
1565
1566	if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages))
1567		return -ENOMEM;
1568
1569	/*
1570	 * The ring buffer page list is a circular list that does not
1571	 * start and end with a list head. All page list items point to
1572	 * other pages.
1573	 */
1574	cpu_buffer->pages = pages.next;
1575	list_del(&pages);
1576
1577	cpu_buffer->nr_pages = nr_pages;
1578
1579	rb_check_pages(cpu_buffer);
1580
1581	return 0;
1582}
1583
1584static struct ring_buffer_per_cpu *
1585rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
1586{
1587	struct ring_buffer_per_cpu *cpu_buffer;
1588	struct buffer_page *bpage;
1589	struct page *page;
1590	int ret;
1591
1592	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1593				  GFP_KERNEL, cpu_to_node(cpu));
1594	if (!cpu_buffer)
1595		return NULL;
1596
1597	cpu_buffer->cpu = cpu;
1598	cpu_buffer->buffer = buffer;
1599	raw_spin_lock_init(&cpu_buffer->reader_lock);
1600	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1601	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1602	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
1603	init_completion(&cpu_buffer->update_done);
1604	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
1605	init_waitqueue_head(&cpu_buffer->irq_work.waiters);
1606	init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);
1607
1608	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1609			    GFP_KERNEL, cpu_to_node(cpu));
1610	if (!bpage)
1611		goto fail_free_buffer;
1612
1613	rb_check_bpage(cpu_buffer, bpage);
1614
1615	cpu_buffer->reader_page = bpage;
1616
1617	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL | __GFP_ZERO,
1618				cpu_buffer->buffer->subbuf_order);
1619	if (!page)
1620		goto fail_free_reader;
1621	bpage->page = page_address(page);
1622	rb_init_page(bpage->page);
1623
1624	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1625	INIT_LIST_HEAD(&cpu_buffer->new_pages);
1626
1627	ret = rb_allocate_pages(cpu_buffer, nr_pages);
1628	if (ret < 0)
1629		goto fail_free_reader;
1630
1631	cpu_buffer->head_page
1632		= list_entry(cpu_buffer->pages, struct buffer_page, list);
1633	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1634
1635	rb_head_page_activate(cpu_buffer);
1636
1637	return cpu_buffer;
1638
1639 fail_free_reader:
1640	free_buffer_page(cpu_buffer->reader_page);
1641
1642 fail_free_buffer:
1643	kfree(cpu_buffer);
1644	return NULL;
1645}
1646
1647static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1648{
1649	struct list_head *head = cpu_buffer->pages;
1650	struct buffer_page *bpage, *tmp;
1651
1652	irq_work_sync(&cpu_buffer->irq_work.work);
1653
1654	free_buffer_page(cpu_buffer->reader_page);
1655
1656	if (head) {
1657		rb_head_page_deactivate(cpu_buffer);
1658
1659		list_for_each_entry_safe(bpage, tmp, head, list) {
1660			list_del_init(&bpage->list);
1661			free_buffer_page(bpage);
1662		}
1663		bpage = list_entry(head, struct buffer_page, list);
1664		free_buffer_page(bpage);
1665	}
1666
1667	free_page((unsigned long)cpu_buffer->free_page);
1668
1669	kfree(cpu_buffer);
1670}
1671
1672/**
1673 * __ring_buffer_alloc - allocate a new ring_buffer
1674 * @size: the size in bytes per cpu that is needed.
1675 * @flags: attributes to set for the ring buffer.
1676 * @key: ring buffer reader_lock_key.
1677 *
1678 * Currently the only flag that is available is the RB_FL_OVERWRITE
1679 * flag. This flag means that the buffer will overwrite old data
1680 * when the buffer wraps. If this flag is not set, the buffer will
1681 * drop data when the tail hits the head.
1682 */
1683struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1684					struct lock_class_key *key)
1685{
1686	struct trace_buffer *buffer;
1687	long nr_pages;
1688	int bsize;
1689	int cpu;
1690	int ret;
1691
1692	/* keep it in its own cache line */
1693	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1694			 GFP_KERNEL);
1695	if (!buffer)
1696		return NULL;
1697
1698	if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1699		goto fail_free_buffer;
1700
1701	/* Default buffer page size - one system page */
1702	buffer->subbuf_order = 0;
1703	buffer->subbuf_size = PAGE_SIZE - BUF_PAGE_HDR_SIZE;
1704
1705	/* Max payload is buffer page size - header (8bytes) */
1706	buffer->max_data_size = buffer->subbuf_size - (sizeof(u32) * 2);
1707
1708	nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
1709	buffer->flags = flags;
1710	buffer->clock = trace_clock_local;
1711	buffer->reader_lock_key = key;
1712
1713	init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
1714	init_waitqueue_head(&buffer->irq_work.waiters);
1715
1716	/* need at least two pages */
1717	if (nr_pages < 2)
1718		nr_pages = 2;
1719
1720	buffer->cpus = nr_cpu_ids;
1721
1722	bsize = sizeof(void *) * nr_cpu_ids;
1723	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1724				  GFP_KERNEL);
1725	if (!buffer->buffers)
1726		goto fail_free_cpumask;
1727
1728	cpu = raw_smp_processor_id();
1729	cpumask_set_cpu(cpu, buffer->cpumask);
1730	buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
1731	if (!buffer->buffers[cpu])
1732		goto fail_free_buffers;
 
1733
1734	ret = cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
1735	if (ret < 0)
1736		goto fail_free_buffers;
1737
1738	mutex_init(&buffer->mutex);
1739
1740	return buffer;
1741
1742 fail_free_buffers:
1743	for_each_buffer_cpu(buffer, cpu) {
1744		if (buffer->buffers[cpu])
1745			rb_free_cpu_buffer(buffer->buffers[cpu]);
1746	}
1747	kfree(buffer->buffers);
1748
1749 fail_free_cpumask:
1750	free_cpumask_var(buffer->cpumask);
1751
1752 fail_free_buffer:
1753	kfree(buffer);
1754	return NULL;
1755}
1756EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
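/*
 * Callers normally go through the ring_buffer_alloc() wrapper macro from
 * linux/ring_buffer.h, which supplies the lock class key. A rough sketch of
 * typical usage (error handling elided):
 *
 *	struct trace_buffer *buffer;
 *
 *	buffer = ring_buffer_alloc(1024 * 1024, RB_FL_OVERWRITE);
 *	if (!buffer)
 *		return -ENOMEM;
 *
 * The size is in bytes per CPU and is rounded up to whole sub-buffers, with
 * a minimum of two pages as enforced above.
 */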
1757
1758/**
1759 * ring_buffer_free - free a ring buffer.
1760 * @buffer: the buffer to free.
1761 */
1762void
1763ring_buffer_free(struct trace_buffer *buffer)
1764{
1765	int cpu;
1766
1767	cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
1768
1769	irq_work_sync(&buffer->irq_work.work);
 
1770
1771	for_each_buffer_cpu(buffer, cpu)
1772		rb_free_cpu_buffer(buffer->buffers[cpu]);
1773
1774	kfree(buffer->buffers);
1775	free_cpumask_var(buffer->cpumask);
1776
1777	kfree(buffer);
1778}
1779EXPORT_SYMBOL_GPL(ring_buffer_free);
1780
1781void ring_buffer_set_clock(struct trace_buffer *buffer,
1782			   u64 (*clock)(void))
1783{
1784	buffer->clock = clock;
1785}
1786
1787void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs)
1788{
1789	buffer->time_stamp_abs = abs;
1790}
1791
1792bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer)
1793{
1794	return buffer->time_stamp_abs;
1795}
1796
1797static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1798
1799static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1800{
1801	return local_read(&bpage->entries) & RB_WRITE_MASK;
1802}
1803
1804static inline unsigned long rb_page_write(struct buffer_page *bpage)
1805{
1806	return local_read(&bpage->write) & RB_WRITE_MASK;
1807}
1808
1809static bool
1810rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
1811{
1812	struct list_head *tail_page, *to_remove, *next_page;
1813	struct buffer_page *to_remove_page, *tmp_iter_page;
1814	struct buffer_page *last_page, *first_page;
1815	unsigned long nr_removed;
1816	unsigned long head_bit;
1817	int page_entries;
1818
1819	head_bit = 0;
1820
1821	raw_spin_lock_irq(&cpu_buffer->reader_lock);
1822	atomic_inc(&cpu_buffer->record_disabled);
1823	/*
1824	 * We don't race with the readers since we have acquired the reader
1825	 * lock. We also don't race with writers after disabling recording.
1826	 * This makes it easy to figure out the first and the last page to be
1827	 * removed from the list. We unlink all the pages in between including
1828	 * the first and last pages. This is done in a busy loop so that we
1829	 * lose the least number of traces.
1830	 * The pages are freed after we restart recording and unlock readers.
1831	 */
1832	tail_page = &cpu_buffer->tail_page->list;
1833
1834	/*
1835	 * The tail page might be on the reader page; in that case we
1836	 * remove the next page from the ring buffer instead.
1837	 */
1838	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1839		tail_page = rb_list_head(tail_page->next);
1840	to_remove = tail_page;
1841
1842	/* start of pages to remove */
1843	first_page = list_entry(rb_list_head(to_remove->next),
1844				struct buffer_page, list);
1845
1846	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
1847		to_remove = rb_list_head(to_remove)->next;
1848		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
1849	}
1850	/* Read iterators need to reset themselves when some pages removed */
1851	cpu_buffer->pages_removed += nr_removed;
1852
1853	next_page = rb_list_head(to_remove)->next;
1854
1855	/*
1856	 * Now we remove all pages between tail_page and next_page.
1857	 * Make sure that we have head_bit value preserved for the
1858	 * next page
1859	 */
1860	tail_page->next = (struct list_head *)((unsigned long)next_page |
1861						head_bit);
1862	next_page = rb_list_head(next_page);
1863	next_page->prev = tail_page;
1864
1865	/* make sure pages points to a valid page in the ring buffer */
1866	cpu_buffer->pages = next_page;
1867
1868	/* update head page */
1869	if (head_bit)
1870		cpu_buffer->head_page = list_entry(next_page,
1871						struct buffer_page, list);
1872
1873	/* pages are removed, resume tracing and then free the pages */
1874	atomic_dec(&cpu_buffer->record_disabled);
1875	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1876
1877	RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
1878
1879	/* last buffer page to remove */
1880	last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
1881				list);
1882	tmp_iter_page = first_page;
1883
1884	do {
1885		cond_resched();
1886
1887		to_remove_page = tmp_iter_page;
1888		rb_inc_page(&tmp_iter_page);
1889
1890		/* update the counters */
1891		page_entries = rb_page_entries(to_remove_page);
1892		if (page_entries) {
1893			/*
1894			 * If something was added to this page, it was full
1895			 * since it is not the tail page. So we deduct the
1896			 * bytes consumed in ring buffer from here.
1897			 * Increment overrun to account for the lost events.
1898			 */
1899			local_add(page_entries, &cpu_buffer->overrun);
1900			local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes);
1901			local_inc(&cpu_buffer->pages_lost);
1902		}
1903
1904		/*
1905		 * We have already removed references to this list item, just
1906		 * free up the buffer_page and its page
1907		 */
1908		free_buffer_page(to_remove_page);
1909		nr_removed--;
1910
1911	} while (to_remove_page != last_page);
1912
1913	RB_WARN_ON(cpu_buffer, nr_removed);
1914
1915	return nr_removed == 0;
1916}
1917
1918static bool
1919rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
1920{
1921	struct list_head *pages = &cpu_buffer->new_pages;
1922	unsigned long flags;
1923	bool success;
1924	int retries;
1925
1926	/* Can be called at early boot up, where interrupts must not be enabled */
1927	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1928	/*
1929	 * We are holding the reader lock, so the reader page won't be swapped
1930	 * in the ring buffer. Now we are racing with the writer trying to
1931	 * move head page and the tail page.
1932	 * We are going to adapt the reader page update process where:
1933	 * 1. We first splice the start and end of list of new pages between
1934	 *    the head page and its previous page.
1935	 * 2. We cmpxchg the prev_page->next to point from head page to the
1936	 *    start of new pages list.
1937	 * 3. Finally, we update the head->prev to the end of new list.
1938	 *
1939	 * We will try this process 10 times, to make sure that we don't keep
1940	 * spinning.
1941	 */
1942	retries = 10;
1943	success = false;
1944	while (retries--) {
1945		struct list_head *head_page, *prev_page;
1946		struct list_head *last_page, *first_page;
1947		struct list_head *head_page_with_bit;
1948		struct buffer_page *hpage = rb_set_head_page(cpu_buffer);
1949
1950		if (!hpage)
1951			break;
1952		head_page = &hpage->list;
1953		prev_page = head_page->prev;
1954
1955		first_page = pages->next;
1956		last_page  = pages->prev;
1957
1958		head_page_with_bit = (struct list_head *)
1959				     ((unsigned long)head_page | RB_PAGE_HEAD);
1960
1961		last_page->next = head_page_with_bit;
1962		first_page->prev = prev_page;
1963
1964		/* caution: head_page_with_bit gets updated on cmpxchg failure */
1965		if (try_cmpxchg(&prev_page->next,
1966				&head_page_with_bit, first_page)) {
1967			/*
1968			 * yay, we replaced the page pointer to our new list,
1969			 * now, we just have to update to head page's prev
1970			 * pointer to point to end of list
1971			 */
1972			head_page->prev = last_page;
1973			success = true;
1974			break;
1975		}
1976	}
1977
1978	if (success)
1979		INIT_LIST_HEAD(pages);
1980	/*
1981	 * If we weren't successful in adding in new pages, warn and stop
1982	 * tracing
1983	 */
1984	RB_WARN_ON(cpu_buffer, !success);
1985	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1986
1987	/* free pages if they weren't inserted */
1988	if (!success) {
1989		struct buffer_page *bpage, *tmp;
1990		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1991					 list) {
1992			list_del_init(&bpage->list);
1993			free_buffer_page(bpage);
1994		}
1995	}
1996	return success;
1997}
1998
1999static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
2000{
2001	bool success;
2002
2003	if (cpu_buffer->nr_pages_to_update > 0)
2004		success = rb_insert_pages(cpu_buffer);
2005	else
2006		success = rb_remove_pages(cpu_buffer,
2007					-cpu_buffer->nr_pages_to_update);
2008
2009	if (success)
2010		cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
2011}
2012
2013static void update_pages_handler(struct work_struct *work)
2014{
2015	struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
2016			struct ring_buffer_per_cpu, update_pages_work);
2017	rb_update_pages(cpu_buffer);
2018	complete(&cpu_buffer->update_done);
2019}
2020
2021/**
2022 * ring_buffer_resize - resize the ring buffer
2023 * @buffer: the buffer to resize.
2024 * @size: the new size.
2025 * @cpu_id: the cpu buffer to resize
2026 *
2027 * Minimum size is 2 * buffer->subbuf_size.
2028 *
2029 * Returns 0 on success and < 0 on failure.
2030 */
2031int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
2032			int cpu_id)
2033{
2034	struct ring_buffer_per_cpu *cpu_buffer;
2035	unsigned long nr_pages;
2036	int cpu, err;
2037
2038	/*
2039	 * Always succeed at resizing a non-existent buffer:
2040	 */
2041	if (!buffer)
2042		return 0;
2043
2044	/* Make sure the requested buffer exists */
2045	if (cpu_id != RING_BUFFER_ALL_CPUS &&
2046	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
2047		return 0;
2048
2049	nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
 
2050
2051	/* we need a minimum of two pages */
2052	if (nr_pages < 2)
2053		nr_pages = 2;
2054
2055	/* prevent another thread from changing buffer sizes */
2056	mutex_lock(&buffer->mutex);
2057	atomic_inc(&buffer->resizing);
2058
2059	if (cpu_id == RING_BUFFER_ALL_CPUS) {
2060		/*
2061		 * Don't succeed if resizing is disabled, as a reader might be
2062		 * manipulating the ring buffer and is expecting a sane state while
2063		 * this is true.
2064		 */
2065		for_each_buffer_cpu(buffer, cpu) {
2066			cpu_buffer = buffer->buffers[cpu];
2067			if (atomic_read(&cpu_buffer->resize_disabled)) {
2068				err = -EBUSY;
2069				goto out_err_unlock;
2070			}
2071		}
2072
2073		/* calculate the pages to update */
2074		for_each_buffer_cpu(buffer, cpu) {
2075			cpu_buffer = buffer->buffers[cpu];
2076
2077			cpu_buffer->nr_pages_to_update = nr_pages -
2078							cpu_buffer->nr_pages;
2079			/*
2080			 * Nothing more to do when removing pages or when there is no update
2081			 */
2082			if (cpu_buffer->nr_pages_to_update <= 0)
2083				continue;
2084			/*
2085			 * to add pages, make sure all new pages can be
2086			 * allocated without receiving ENOMEM
2087			 */
2088			INIT_LIST_HEAD(&cpu_buffer->new_pages);
2089			if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
2090						&cpu_buffer->new_pages)) {
2091				/* not enough memory for new pages */
2092				err = -ENOMEM;
2093				goto out_err;
2094			}
2095
2096			cond_resched();
2097		}
2098
2099		cpus_read_lock();
2100		/*
2101		 * Fire off all the required work handlers
2102		 * We can't schedule on offline CPUs, but it's not necessary
2103		 * since we can change their buffer sizes without any race.
2104		 */
2105		for_each_buffer_cpu(buffer, cpu) {
2106			cpu_buffer = buffer->buffers[cpu];
2107			if (!cpu_buffer->nr_pages_to_update)
2108				continue;
2109
2110			/* Can't run something on an offline CPU. */
2111			if (!cpu_online(cpu)) {
2112				rb_update_pages(cpu_buffer);
2113				cpu_buffer->nr_pages_to_update = 0;
2114			} else {
2115				/* Run directly if possible. */
2116				migrate_disable();
2117				if (cpu != smp_processor_id()) {
2118					migrate_enable();
2119					schedule_work_on(cpu,
2120							 &cpu_buffer->update_pages_work);
2121				} else {
2122					update_pages_handler(&cpu_buffer->update_pages_work);
2123					migrate_enable();
2124				}
2125			}
2126		}
2127
2128		/* wait for all the updates to complete */
2129		for_each_buffer_cpu(buffer, cpu) {
2130			cpu_buffer = buffer->buffers[cpu];
2131			if (!cpu_buffer->nr_pages_to_update)
2132				continue;
2133
2134			if (cpu_online(cpu))
2135				wait_for_completion(&cpu_buffer->update_done);
2136			cpu_buffer->nr_pages_to_update = 0;
2137		}
2138
2139		cpus_read_unlock();
2140	} else {
2141		cpu_buffer = buffer->buffers[cpu_id];
2142
2143		if (nr_pages == cpu_buffer->nr_pages)
2144			goto out;
2145
2146		/*
2147		 * Don't succeed if resizing is disabled, as a reader might be
2148		 * manipulating the ring buffer and is expecting a sane state while
2149		 * this is true.
2150		 */
2151		if (atomic_read(&cpu_buffer->resize_disabled)) {
2152			err = -EBUSY;
2153			goto out_err_unlock;
2154		}
2155
2156		cpu_buffer->nr_pages_to_update = nr_pages -
2157						cpu_buffer->nr_pages;
2158
2159		INIT_LIST_HEAD(&cpu_buffer->new_pages);
2160		if (cpu_buffer->nr_pages_to_update > 0 &&
2161			__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update,
2162					    &cpu_buffer->new_pages)) {
2163			err = -ENOMEM;
2164			goto out_err;
2165		}
2166
2167		cpus_read_lock();
2168
2169		/* Can't run something on an offline CPU. */
2170		if (!cpu_online(cpu_id))
2171			rb_update_pages(cpu_buffer);
2172		else {
2173			/* Run directly if possible. */
2174			migrate_disable();
2175			if (cpu_id == smp_processor_id()) {
2176				rb_update_pages(cpu_buffer);
2177				migrate_enable();
2178			} else {
2179				migrate_enable();
2180				schedule_work_on(cpu_id,
2181						 &cpu_buffer->update_pages_work);
2182				wait_for_completion(&cpu_buffer->update_done);
2183			}
2184		}
2185
2186		cpu_buffer->nr_pages_to_update = 0;
2187		cpus_read_unlock();
2188	}
2189
2190 out:
2191	/*
2192	 * The ring buffer resize can happen with the ring buffer
2193	 * enabled, so that the update disturbs the tracing as little
2194	 * as possible. But if the buffer is disabled, we do not need
2195	 * to worry about that, and we can take the time to verify
2196	 * that the buffer is not corrupt.
2197	 */
2198	if (atomic_read(&buffer->record_disabled)) {
2199		atomic_inc(&buffer->record_disabled);
2200		/*
2201		 * Even though the buffer was disabled, we must make sure
2202		 * that it is truly disabled before calling rb_check_pages.
2203		 * There could have been a race between checking
2204		 * record_disabled and incrementing it.
2205		 */
2206		synchronize_rcu();
2207		for_each_buffer_cpu(buffer, cpu) {
2208			unsigned long flags;
2209
2210			cpu_buffer = buffer->buffers[cpu];
2211			raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2212			rb_check_pages(cpu_buffer);
2213			raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2214		}
2215		atomic_dec(&buffer->record_disabled);
2216	}
2217
2218	atomic_dec(&buffer->resizing);
2219	mutex_unlock(&buffer->mutex);
2220	return 0;
2221
2222 out_err:
2223	for_each_buffer_cpu(buffer, cpu) {
2224		struct buffer_page *bpage, *tmp;
2225
2226		cpu_buffer = buffer->buffers[cpu];
2227		cpu_buffer->nr_pages_to_update = 0;
2228
2229		if (list_empty(&cpu_buffer->new_pages))
2230			continue;
2231
2232		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
2233					list) {
2234			list_del_init(&bpage->list);
2235			free_buffer_page(bpage);
2236		}
2237	}
2238 out_err_unlock:
2239	atomic_dec(&buffer->resizing);
2240	mutex_unlock(&buffer->mutex);
2241	return err;
2242}
2243EXPORT_SYMBOL_GPL(ring_buffer_resize);
2244
2245void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val)
2246{
2247	mutex_lock(&buffer->mutex);
2248	if (val)
2249		buffer->flags |= RB_FL_OVERWRITE;
2250	else
2251		buffer->flags &= ~RB_FL_OVERWRITE;
2252	mutex_unlock(&buffer->mutex);
2253}
2254EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
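/*
 * Illustrative sketch, not part of this file: clearing the overwrite flag
 * makes the producer drop new events once the buffer fills (counted in
 * dropped_events), instead of overwriting the oldest data.
 *
 *	ring_buffer_change_overwrite(buffer, 0);	stop-when-full mode
 *	... run the workload being traced ...
 *	ring_buffer_change_overwrite(buffer, 1);	back to overwrite mode
 */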
2255
2256static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
2257{
2258	return bpage->page->data + index;
2259}
2260
2261static __always_inline struct ring_buffer_event *
2262rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
2263{
2264	return __rb_page_index(cpu_buffer->reader_page,
2265			       cpu_buffer->reader_page->read);
2266}
2267
2268static struct ring_buffer_event *
2269rb_iter_head_event(struct ring_buffer_iter *iter)
2270{
2271	struct ring_buffer_event *event;
2272	struct buffer_page *iter_head_page = iter->head_page;
2273	unsigned long commit;
2274	unsigned length;
2275
2276	if (iter->head != iter->next_event)
2277		return iter->event;
2278
2279	/*
2280	 * When the writer goes across pages, it issues a cmpxchg which
2281	 * is a mb(), which will synchronize with the rmb here.
2282	 * (see rb_tail_page_update() and __rb_reserve_next())
2283	 */
2284	commit = rb_page_commit(iter_head_page);
2285	smp_rmb();
2286
2287	/* An event needs to be at least 8 bytes in size */
2288	if (iter->head > commit - 8)
2289		goto reset;
2290
2291	event = __rb_page_index(iter_head_page, iter->head);
2292	length = rb_event_length(event);
2293
2294	/*
2295	 * READ_ONCE() doesn't work on functions and we don't want the
2296	 * compiler doing any crazy optimizations with length.
2297	 */
2298	barrier();
2299
2300	if ((iter->head + length) > commit || length > iter->event_size)
2301		/* Writer corrupted the read? */
2302		goto reset;
2303
2304	memcpy(iter->event, event, length);
2305	/*
2306	 * If the page stamp is still the same after this rmb() then the
2307	 * event was safely copied without the writer entering the page.
2308	 */
2309	smp_rmb();
2310
2311	/* Make sure the page didn't change since we read this */
2312	if (iter->page_stamp != iter_head_page->page->time_stamp ||
2313	    commit > rb_page_commit(iter_head_page))
2314		goto reset;
2315
2316	iter->next_event = iter->head + length;
2317	return iter->event;
2318 reset:
2319	/* Reset to the beginning */
2320	iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
2321	iter->head = 0;
2322	iter->next_event = 0;
2323	iter->missed_events = 1;
2324	return NULL;
2325}
2326
2327/* Size is determined by what has been committed */
2328static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
2329{
2330	return rb_page_commit(bpage);
2331}
2332
2333static __always_inline unsigned
2334rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
2335{
2336	return rb_page_commit(cpu_buffer->commit_page);
2337}
2338
2339static __always_inline unsigned
2340rb_event_index(struct ring_buffer_per_cpu *cpu_buffer, struct ring_buffer_event *event)
2341{
2342	unsigned long addr = (unsigned long)event;
2343
2344	addr &= (PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1;
2345
2346	return addr - BUF_PAGE_HDR_SIZE;
2347}
2348
2349static void rb_inc_iter(struct ring_buffer_iter *iter)
2350{
2351	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2352
2353	/*
2354	 * The iterator could be on the reader page (it starts there).
2355	 * But the head could have moved, since the reader was
2356	 * found. Check for this case and assign the iterator
2357	 * to the head page instead of next.
2358	 */
2359	if (iter->head_page == cpu_buffer->reader_page)
2360		iter->head_page = rb_set_head_page(cpu_buffer);
2361	else
2362		rb_inc_page(&iter->head_page);
2363
2364	iter->page_stamp = iter->read_stamp = iter->head_page->page->time_stamp;
2365	iter->head = 0;
2366	iter->next_event = 0;
2367}
2368
2369/*
2370 * rb_handle_head_page - writer hit the head page
2371 *
2372 * Returns: +1 to retry page
2373 *           0 to continue
2374 *          -1 on error
2375 */
2376static int
2377rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
2378		    struct buffer_page *tail_page,
2379		    struct buffer_page *next_page)
2380{
2381	struct buffer_page *new_head;
2382	int entries;
2383	int type;
2384	int ret;
2385
2386	entries = rb_page_entries(next_page);
2387
2388	/*
2389	 * The hard part is here. We need to move the head
2390	 * forward, and protect against both readers on
2391	 * other CPUs and writers coming in via interrupts.
2392	 */
2393	type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
2394				       RB_PAGE_HEAD);
2395
2396	/*
2397	 * type can be one of four:
2398	 *  NORMAL - an interrupt already moved it for us
2399	 *  HEAD   - we are the first to get here.
2400	 *  UPDATE - we are the interrupt interrupting
2401	 *           a current move.
2402	 *  MOVED  - a reader on another CPU moved the next
2403	 *           pointer to its reader page. Give up
2404	 *           and try again.
2405	 */
2406
2407	switch (type) {
2408	case RB_PAGE_HEAD:
2409		/*
2410		 * We changed the head to UPDATE, thus
2411		 * it is our responsibility to update
2412		 * the counters.
2413		 */
2414		local_add(entries, &cpu_buffer->overrun);
2415		local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes);
2416		local_inc(&cpu_buffer->pages_lost);
2417
2418		/*
2419		 * The entries will be zeroed out when we move the
2420		 * tail page.
2421		 */
2422
2423		/* still more to do */
2424		break;
2425
2426	case RB_PAGE_UPDATE:
2427		/*
2428		 * This is an interrupt that interrupted the
2429		 * previous update. Still more to do.
2430		 */
2431		break;
2432	case RB_PAGE_NORMAL:
2433		/*
2434		 * An interrupt came in before the update
2435		 * and processed this for us.
2436		 * Nothing left to do.
2437		 */
2438		return 1;
2439	case RB_PAGE_MOVED:
2440		/*
2441		 * The reader is on another CPU and just did
2442		 * a swap with our next_page.
2443		 * Try again.
2444		 */
2445		return 1;
2446	default:
2447		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
2448		return -1;
2449	}
2450
2451	/*
2452	 * Now that we are here, the old head pointer is
2453	 * set to UPDATE. This will keep the reader from
2454	 * swapping the head page with the reader page.
2455	 * The reader (on another CPU) will spin till
2456	 * we are finished.
2457	 *
2458	 * We just need to protect against interrupts
2459	 * doing the job. We will set the next pointer
2460	 * to HEAD. After that, we set the old pointer
2461		 * to NORMAL, but only if it was HEAD before;
2462		 * otherwise we are an interrupt, and only
2463		 * want the outermost commit to reset it.
2464	 */
2465	new_head = next_page;
2466	rb_inc_page(&new_head);
2467
2468	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
2469				    RB_PAGE_NORMAL);
2470
2471	/*
2472	 * Valid returns are:
2473	 *  HEAD   - an interrupt came in and already set it.
2474	 *  NORMAL - One of two things:
2475	 *            1) We really set it.
2476	 *            2) A bunch of interrupts came in and moved
2477	 *               the page forward again.
2478	 */
2479	switch (ret) {
2480	case RB_PAGE_HEAD:
2481	case RB_PAGE_NORMAL:
2482		/* OK */
2483		break;
2484	default:
2485		RB_WARN_ON(cpu_buffer, 1);
2486		return -1;
2487	}
2488
2489	/*
2490	 * It is possible that an interrupt came in,
2491	 * set the head up, then more interrupts came in
2492	 * and moved it again. When we get back here,
2493	 * the page would have been set to NORMAL but we
2494	 * just set it back to HEAD.
2495	 *
2496	 * How do you detect this? Well, if that happened
2497	 * the tail page would have moved.
2498	 */
2499	if (ret == RB_PAGE_NORMAL) {
2500		struct buffer_page *buffer_tail_page;
2501
2502		buffer_tail_page = READ_ONCE(cpu_buffer->tail_page);
2503		/*
2504		 * If the tail had moved past next, then we need
2505		 * to reset the pointer.
2506		 */
2507		if (buffer_tail_page != tail_page &&
2508		    buffer_tail_page != next_page)
2509			rb_head_page_set_normal(cpu_buffer, new_head,
2510						next_page,
2511						RB_PAGE_HEAD);
2512	}
2513
2514	/*
2515	 * If this was the outermost commit (the one that
2516	 * changed the original pointer from HEAD to UPDATE),
2517	 * then it is up to us to reset it to NORMAL.
2518	 */
2519	if (type == RB_PAGE_HEAD) {
2520		ret = rb_head_page_set_normal(cpu_buffer, next_page,
2521					      tail_page,
2522					      RB_PAGE_UPDATE);
2523		if (RB_WARN_ON(cpu_buffer,
2524			       ret != RB_PAGE_UPDATE))
2525			return -1;
2526	}
2527
2528	return 0;
2529}
2530
2531static inline void
2532rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2533	      unsigned long tail, struct rb_event_info *info)
2534{
2535	unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size);
2536	struct buffer_page *tail_page = info->tail_page;
2537	struct ring_buffer_event *event;
2538	unsigned long length = info->length;
2539
2540	/*
2541	 * Only the event that crossed the page boundary
2542	 * must fill the old tail_page with padding.
2543	 */
2544	if (tail >= bsize) {
2545		/*
2546		 * If the page was filled, then we still need
2547		 * to update the real_end. Reset it to zero
2548		 * and the reader will ignore it.
2549		 */
2550		if (tail == bsize)
2551			tail_page->real_end = 0;
2552
2553		local_sub(length, &tail_page->write);
2554		return;
2555	}
2556
2557	event = __rb_page_index(tail_page, tail);
2558
2559	/*
2560	 * Save the original length to the metadata.
2561	 * This will be used by the reader to add the lost
2562	 * event counter.
2563	 */
2564	tail_page->real_end = tail;
2565
2566	/*
2567	 * If this event is bigger than the minimum size, then
2568	 * we need to be careful that we don't subtract the
2569	 * write counter enough to allow another writer to slip
2570	 * in on this page.
2571	 * We put in a discarded commit instead, to make sure
2572	 * that this space is not used again, and this space will
2573	 * not be accounted into 'entries_bytes'.
2574	 *
2575	 * If we are less than the minimum size, we don't need to
2576	 * worry about it.
2577	 */
2578	if (tail > (bsize - RB_EVNT_MIN_SIZE)) {
2579		/* No room for any events */
2580
2581		/* Mark the rest of the page with padding */
2582		rb_event_set_padding(event);
2583
2584		/* Make sure the padding is visible before the write update */
2585		smp_wmb();
2586
2587		/* Set the write back to the previous setting */
2588		local_sub(length, &tail_page->write);
2589		return;
2590	}
2591
2592	/* Put in a discarded event */
2593	event->array[0] = (bsize - tail) - RB_EVNT_HDR_SIZE;
2594	event->type_len = RINGBUF_TYPE_PADDING;
2595	/* time delta must be non zero */
2596	event->time_delta = 1;
2597
2598	/* account for padding bytes */
2599	local_add(bsize - tail, &cpu_buffer->entries_bytes);
2600
2601	/* Make sure the padding is visible before the tail_page->write update */
2602	smp_wmb();
2603
2604	/* Set write to end of buffer */
2605	length = (tail + length) - bsize;
2606	local_sub(length, &tail_page->write);
2607}
2608
2609static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
2610
2611/*
2612 * This is the slow path, force gcc not to inline it.
2613 */
2614static noinline struct ring_buffer_event *
2615rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2616	     unsigned long tail, struct rb_event_info *info)
2617{
2618	struct buffer_page *tail_page = info->tail_page;
2619	struct buffer_page *commit_page = cpu_buffer->commit_page;
2620	struct trace_buffer *buffer = cpu_buffer->buffer;
2621	struct buffer_page *next_page;
2622	int ret;
2623
2624	next_page = tail_page;
2625
2626	rb_inc_page(&next_page);
2627
2628	/*
2629	 * If for some reason, we had an interrupt storm that made
2630	 * it all the way around the buffer, bail, and warn
2631	 * about it.
2632	 */
2633	if (unlikely(next_page == commit_page)) {
2634		local_inc(&cpu_buffer->commit_overrun);
2635		goto out_reset;
2636	}
2637
2638	/*
2639	 * This is where the fun begins!
2640	 *
2641	 * We are fighting against races between a reader that
2642	 * could be on another CPU trying to swap its reader
2643	 * page with the buffer head.
2644	 *
2645	 * We are also fighting against interrupts coming in and
2646	 * moving the head or tail on us as well.
2647	 *
2648	 * If the next page is the head page then we have filled
2649	 * the buffer, unless the commit page is still on the
2650	 * reader page.
2651	 */
2652	if (rb_is_head_page(next_page, &tail_page->list)) {
2653
2654		/*
2655		 * If the commit is not on the reader page, then
2656		 * move the header page.
2657		 */
2658		if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2659			/*
2660			 * If we are not in overwrite mode,
2661			 * this is easy, just stop here.
2662			 */
2663			if (!(buffer->flags & RB_FL_OVERWRITE)) {
2664				local_inc(&cpu_buffer->dropped_events);
2665				goto out_reset;
2666			}
2667
2668			ret = rb_handle_head_page(cpu_buffer,
2669						  tail_page,
2670						  next_page);
2671			if (ret < 0)
2672				goto out_reset;
2673			if (ret)
2674				goto out_again;
2675		} else {
2676			/*
2677			 * We need to be careful here too. The
2678			 * commit page could still be on the reader
2679			 * page. We could have a small buffer, and
2680			 * have filled up the buffer with events
2681			 * from interrupts and such, and wrapped.
2682			 *
2683			 * Note, if the tail page is also on the
2684			 * reader_page, we let it move out.
2685			 */
2686			if (unlikely((cpu_buffer->commit_page !=
2687				      cpu_buffer->tail_page) &&
2688				     (cpu_buffer->commit_page ==
2689				      cpu_buffer->reader_page))) {
2690				local_inc(&cpu_buffer->commit_overrun);
2691				goto out_reset;
2692			}
2693		}
2694	}
2695
2696	rb_tail_page_update(cpu_buffer, tail_page, next_page);
2697
2698 out_again:
2699
2700	rb_reset_tail(cpu_buffer, tail, info);
2701
2702	/* Commit what we have for now. */
2703	rb_end_commit(cpu_buffer);
2704	/* rb_end_commit() decs committing */
2705	local_inc(&cpu_buffer->committing);
2706
2707	/* fail and let the caller try again */
2708	return ERR_PTR(-EAGAIN);
2709
2710 out_reset:
2711	/* reset write */
2712	rb_reset_tail(cpu_buffer, tail, info);
2713
2714	return NULL;
2715}
2716
2717/* Slow path */
2718static struct ring_buffer_event *
2719rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2720		  struct ring_buffer_event *event, u64 delta, bool abs)
2721{
2722	if (abs)
2723		event->type_len = RINGBUF_TYPE_TIME_STAMP;
2724	else
2725		event->type_len = RINGBUF_TYPE_TIME_EXTEND;
2726
2727	/* Not the first event on the page, or not delta? */
2728	if (abs || rb_event_index(cpu_buffer, event)) {
2729		event->time_delta = delta & TS_MASK;
2730		event->array[0] = delta >> TS_SHIFT;
2731	} else {
2732		/* nope, just zero it */
2733		event->time_delta = 0;
2734		event->array[0] = 0;
2735	}
2736
2737	return skip_time_extend(event);
2738}
2739
2740#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2741static inline bool sched_clock_stable(void)
2742{
2743	return true;
2744}
2745#endif
2746
2747static void
2748rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2749		   struct rb_event_info *info)
2750{
2751	u64 write_stamp;
2752
2753	WARN_ONCE(1, "Delta way too big! %llu ts=%llu before=%llu after=%llu write stamp=%llu\n%s",
2754		  (unsigned long long)info->delta,
2755		  (unsigned long long)info->ts,
2756		  (unsigned long long)info->before,
2757		  (unsigned long long)info->after,
2758		  (unsigned long long)({rb_time_read(&cpu_buffer->write_stamp, &write_stamp); write_stamp;}),
2759		  sched_clock_stable() ? "" :
2760		  "If you just came from a suspend/resume,\n"
2761		  "please switch to the trace global clock:\n"
2762		  "  echo global > /sys/kernel/tracing/trace_clock\n"
2763		  "or add trace_clock=global to the kernel command line\n");
2764}
2765
2766static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer,
2767				      struct ring_buffer_event **event,
2768				      struct rb_event_info *info,
2769				      u64 *delta,
2770				      unsigned int *length)
2771{
2772	bool abs = info->add_timestamp &
2773		(RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE);
2774
2775	if (unlikely(info->delta > (1ULL << 59))) {
2776		/*
2777		 * Some timers can use more than 59 bits, and when a timestamp
2778		 * is added to the buffer, it will lose those bits.
2779		 */
2780		if (abs && (info->ts & TS_MSB)) {
2781			info->delta &= ABS_TS_MASK;
2782
2783		/* did the clock go backwards */
2784		} else if (info->before == info->after && info->before > info->ts) {
2785			/* not interrupted */
2786			static int once;
2787
2788			/*
2789			 * This is possible with a recalibration of the TSC.
2790			 * Do not produce a call stack, but just report it.
2791			 */
2792			if (!once) {
2793				once++;
2794				pr_warn("Ring buffer clock went backwards: %llu -> %llu\n",
2795					info->before, info->ts);
2796			}
2797		} else
2798			rb_check_timestamp(cpu_buffer, info);
2799		if (!abs)
2800			info->delta = 0;
2801	}
2802	*event = rb_add_time_stamp(cpu_buffer, *event, info->delta, abs);
2803	*length -= RB_LEN_TIME_EXTEND;
2804	*delta = 0;
2805}
2806
2807/**
2808 * rb_update_event - update event type and data
2809 * @cpu_buffer: The per cpu buffer of the @event
2810 * @event: the event to update
2811 * @info: The info to update the @event with (contains length and delta)
2812 *
2813 * Update the type and data fields of the @event. The length
2814 * is the actual size that is written to the ring buffer,
2815 * and with this, we can determine what to place into the
2816 * data field.
2817 */
2818static void
2819rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
2820		struct ring_buffer_event *event,
2821		struct rb_event_info *info)
2822{
2823	unsigned length = info->length;
2824	u64 delta = info->delta;
2825	unsigned int nest = local_read(&cpu_buffer->committing) - 1;
2826
2827	if (!WARN_ON_ONCE(nest >= MAX_NEST))
2828		cpu_buffer->event_stamp[nest] = info->ts;
 
2829
2830	/*
2831	 * If we need to add a timestamp, then we
2832	 * add it to the start of the reserved space.
2833	 */
2834	if (unlikely(info->add_timestamp))
2835		rb_add_timestamp(cpu_buffer, &event, info, &delta, &length);
2836
2837	event->time_delta = delta;
2838	length -= RB_EVNT_HDR_SIZE;
2839	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
2840		event->type_len = 0;
2841		event->array[0] = length;
2842	} else
2843		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
2844}
2845
2846static unsigned rb_calculate_event_length(unsigned length)
2847{
2848	struct ring_buffer_event event; /* Used only for sizeof array */
2849
2850	/* zero length can cause confusion */
2851	if (!length)
2852		length++;
2853
2854	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
2855		length += sizeof(event.array[0]);
2856
2857	length += RB_EVNT_HDR_SIZE;
2858	length = ALIGN(length, RB_ARCH_ALIGNMENT);
2859
2860	/*
2861	 * In case the time delta is larger than the 27 bits for it
2862	 * in the header, we need to add a timestamp. If another
2863	 * event comes in when trying to discard this one to increase
2864	 * the length, then the timestamp will be added in the allocated
2865	 * space of this event. If length is bigger than the size needed
2866	 * for the TIME_EXTEND, then padding has to be used. The events
2867	 * length must be either RB_LEN_TIME_EXTEND, or greater than or equal
2868	 * to RB_LEN_TIME_EXTEND + 8, as 8 is the minimum size for padding.
2869	 * As length is a multiple of 4, we only need to worry if it
2870	 * is 12 (RB_LEN_TIME_EXTEND + 4).
2871	 */
2872	if (length == RB_LEN_TIME_EXTEND + RB_ALIGNMENT)
2873		length += RB_ALIGNMENT;
2874
2875	return length;
2876}
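/*
 * Worked example (a sketch, assuming a 4-byte event header, 4-byte
 * RB_ALIGNMENT and no RB_FORCE_8BYTE_ALIGNMENT): a request for 8 bytes of
 * data gives 8 + RB_EVNT_HDR_SIZE = 12, which is exactly
 * RB_LEN_TIME_EXTEND + RB_ALIGNMENT, so it is bumped to 16 as described
 * in the comment above.
 */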
2877
2878static inline bool
2879rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2880		  struct ring_buffer_event *event)
2881{
2882	unsigned long new_index, old_index;
2883	struct buffer_page *bpage;
 
2884	unsigned long addr;
2885
2886	new_index = rb_event_index(cpu_buffer, event);
2887	old_index = new_index + rb_event_ts_length(event);
2888	addr = (unsigned long)event;
2889	addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1);
2890
2891	bpage = READ_ONCE(cpu_buffer->tail_page);
2892
2893	/*
2894	 * Make sure the tail_page is still the same and
2895	 * the next write location is the end of this event
2896	 */
2897	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2898		unsigned long write_mask =
2899			local_read(&bpage->write) & ~RB_WRITE_MASK;
2900		unsigned long event_length = rb_event_length(event);
2901
2902		/*
2903		 * Force the before_stamp to be different from the write_stamp
2904		 * to make sure that the next event adds an absolute
2905		 * value and does not rely on the saved write stamp, which
2906		 * is now going to be bogus.
2907		 *
2908		 * By setting the before_stamp to zero, the next event
2909		 * is not going to use the write_stamp and will instead
2910		 * create an absolute timestamp. This means there's no
2911		 * reason to update the write_stamp!
2912		 */
2913		rb_time_set(&cpu_buffer->before_stamp, 0);
2914
2915		/*
2916		 * If an event were to come in now, it would see that the
2917		 * write_stamp and the before_stamp are different, and assume
2918		 * that this event just added itself before updating
2919		 * the write stamp. The interrupting event will fix the
2920		 * write stamp for us, and use an absolute timestamp.
2921		 */
2922
2923		/*
2924		 * This is on the tail page. It is possible that
2925		 * a write could come in and move the tail page
2926		 * and write to the next page. That is fine
2927		 * because we just shorten what is on this page.
2928		 */
2929		old_index += write_mask;
2930		new_index += write_mask;
2931
2932		/* caution: old_index gets updated on cmpxchg failure */
2933		if (local_try_cmpxchg(&bpage->write, &old_index, new_index)) {
2934			/* update counters */
2935			local_sub(event_length, &cpu_buffer->entries_bytes);
2936			return true;
2937		}
2938	}
2939
2940	/* could not discard */
2941	return false;
2942}
2943
2944static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2945{
2946	local_inc(&cpu_buffer->committing);
2947	local_inc(&cpu_buffer->commits);
2948}
2949
2950static __always_inline void
2951rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
2952{
2953	unsigned long max_count;
2954
2955	/*
2956	 * We only race with interrupts and NMIs on this CPU.
2957	 * If we own the commit event, then we can commit
2958	 * all others that interrupted us, since the interruptions
2959	 * are in stack format (they finish before they come
2960	 * back to us). This allows us to do a simple loop to
2961	 * assign the commit to the tail.
2962	 */
2963 again:
2964	max_count = cpu_buffer->nr_pages * 100;
2965
2966	while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) {
2967		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
2968			return;
2969		if (RB_WARN_ON(cpu_buffer,
2970			       rb_is_reader_page(cpu_buffer->tail_page)))
2971			return;
2972		/*
2973		 * No need for a memory barrier here, as the update
2974		 * of the tail_page did it for this page.
2975		 */
2976		local_set(&cpu_buffer->commit_page->page->commit,
2977			  rb_page_write(cpu_buffer->commit_page));
2978		rb_inc_page(&cpu_buffer->commit_page);
2979		/* add barrier to keep gcc from optimizing too much */
2980		barrier();
2981	}
2982	while (rb_commit_index(cpu_buffer) !=
2983	       rb_page_write(cpu_buffer->commit_page)) {
2984
2985		/* Make sure the readers see the content of what is committed. */
2986		smp_wmb();
2987		local_set(&cpu_buffer->commit_page->page->commit,
2988			  rb_page_write(cpu_buffer->commit_page));
2989		RB_WARN_ON(cpu_buffer,
2990			   local_read(&cpu_buffer->commit_page->page->commit) &
2991			   ~RB_WRITE_MASK);
2992		barrier();
2993	}
2994
2995	/* again, keep gcc from optimizing */
2996	barrier();
2997
2998	/*
2999	 * If an interrupt came in just after the first while loop
3000	 * and pushed the tail page forward, we will be left with
3001	 * a dangling commit that will never go forward.
3002	 */
3003	if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)))
3004		goto again;
3005}
3006
3007static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
3008{
3009	unsigned long commits;
3010
3011	if (RB_WARN_ON(cpu_buffer,
3012		       !local_read(&cpu_buffer->committing)))
3013		return;
3014
3015 again:
3016	commits = local_read(&cpu_buffer->commits);
3017	/* synchronize with interrupts */
3018	barrier();
3019	if (local_read(&cpu_buffer->committing) == 1)
3020		rb_set_commit_to_write(cpu_buffer);
3021
3022	local_dec(&cpu_buffer->committing);
3023
3024	/* synchronize with interrupts */
3025	barrier();
3026
3027	/*
3028	 * Need to account for interrupts coming in between the
3029	 * updating of the commit page and the clearing of the
3030	 * committing counter.
3031	 */
3032	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
3033	    !local_read(&cpu_buffer->committing)) {
3034		local_inc(&cpu_buffer->committing);
3035		goto again;
3036	}
3037}
3038
3039static inline void rb_event_discard(struct ring_buffer_event *event)
3040{
3041	if (extended_time(event))
3042		event = skip_time_extend(event);
3043
3044	/* array[0] holds the actual length for the discarded event */
3045	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
3046	event->type_len = RINGBUF_TYPE_PADDING;
3047	/* time delta must be non zero */
3048	if (!event->time_delta)
3049		event->time_delta = 1;
3050}
3051
3052static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer)
3053{
3054	local_inc(&cpu_buffer->entries);
 
3055	rb_end_commit(cpu_buffer);
3056}
3057
3058static __always_inline void
3059rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
3060{
3061	if (buffer->irq_work.waiters_pending) {
3062		buffer->irq_work.waiters_pending = false;
3063		/* irq_work_queue() supplies its own memory barriers */
3064		irq_work_queue(&buffer->irq_work.work);
3065	}
3066
3067	if (cpu_buffer->irq_work.waiters_pending) {
3068		cpu_buffer->irq_work.waiters_pending = false;
3069		/* irq_work_queue() supplies its own memory barriers */
3070		irq_work_queue(&cpu_buffer->irq_work.work);
3071	}
3072
3073	if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched))
3074		return;
3075
3076	if (cpu_buffer->reader_page == cpu_buffer->commit_page)
3077		return;
3078
3079	if (!cpu_buffer->irq_work.full_waiters_pending)
3080		return;
3081
3082	cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched);
3083
3084	if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full))
3085		return;
3086
3087	cpu_buffer->irq_work.wakeup_full = true;
3088	cpu_buffer->irq_work.full_waiters_pending = false;
3089	/* irq_work_queue() supplies its own memory barriers */
3090	irq_work_queue(&cpu_buffer->irq_work.work);
3091}
3092
3093#ifdef CONFIG_RING_BUFFER_RECORD_RECURSION
3094# define do_ring_buffer_record_recursion()	\
3095	do_ftrace_record_recursion(_THIS_IP_, _RET_IP_)
3096#else
3097# define do_ring_buffer_record_recursion() do { } while (0)
3098#endif
3099
3100/*
3101 * The lock and unlock are done within a preempt disable section.
3102 * The current_context per_cpu variable can only be modified
3103 * by the current task between lock and unlock. But it can
3104 * be modified more than once via an interrupt. To pass this
3105 * information from the lock to the unlock without having to
3106 * access the 'in_interrupt()' functions again (which do show
3107 * a bit of overhead in something as critical as function tracing),
3108 * we use a bitmask trick.
3109 *
3110 *  bit 1 =  NMI context
3111 *  bit 2 =  IRQ context
3112 *  bit 3 =  SoftIRQ context
3113 *  bit 4 =  normal context.
3114 *
3115 * This works because this is the order of contexts that can
3116 * preempt other contexts. A SoftIRQ never preempts an IRQ
3117 * context.
3118 *
3119 * When the context is determined, the corresponding bit is
3120 * checked and set (if it was set, then a recursion of that context
3121 * happened).
3122 *
3123 * On unlock, we need to clear this bit. To do so, just subtract
3124 * 1 from the current_context and AND it to itself.
3125 *
3126 * (binary)
3127 *  101 - 1 = 100
3128 *  101 & 100 = 100 (clearing bit zero)
3129 *
3130 *  1010 - 1 = 1001
3131 *  1010 & 1001 = 1000 (clearing bit 1)
3132 *
3133 * The least significant bit can be cleared this way, and it
3134 * just so happens that it is the same bit corresponding to
3135 * the current context.
3136 *
3137 * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit
3138 * is set when a recursion is detected at the current context, and if
3139 * the TRANSITION bit is already set, it will fail the recursion.
3140 * This is needed because there's a lag between the changing of
3141 * interrupt context and updating the preempt count. In this case,
3142 * a false positive will be found. To handle this, one extra recursion
3143 * is allowed, and this is done by the TRANSITION bit. If the TRANSITION
3144 * bit is already set, then it is considered a recursion and the function
3145 * ends. Otherwise, the TRANSITION bit is set, and that bit is returned.
3146 *
3147 * On the trace_recursive_unlock(), the TRANSITION bit will be the first
3148 * to be cleared. Even if it wasn't the context that set it. That is,
3149 * if an interrupt comes in while NORMAL bit is set and the ring buffer
3150 * is called before preempt_count() is updated, since the check will
3151 * be on the NORMAL bit, the TRANSITION bit will then be set. If an
3152 * NMI then comes in, it will set the NMI bit, but when the NMI code
3153 * does the trace_recursive_unlock() it will clear the TRANSITION bit
3154 * and leave the NMI bit set. But this is fine, because the interrupt
3155 * code that set the TRANSITION bit will then clear the NMI bit when it
3156 * calls trace_recursive_unlock(). If another NMI comes in, it will
3157 * set the TRANSITION bit and continue.
3158 *
3159 * Note: The TRANSITION bit only handles a single transition between contexts.
3160 */
3161
3162static __always_inline bool
3163trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
3164{
3165	unsigned int val = cpu_buffer->current_context;
3166	int bit = interrupt_context_level();
3167
3168	bit = RB_CTX_NORMAL - bit;
3169
3170	if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) {
3171		/*
3172		 * It is possible that this was called by transitioning
3173		 * between interrupt context, and preempt_count() has not
3174		 * been updated yet. In this case, use the TRANSITION bit.
3175		 */
3176		bit = RB_CTX_TRANSITION;
3177		if (val & (1 << (bit + cpu_buffer->nest))) {
3178			do_ring_buffer_record_recursion();
3179			return true;
3180		}
3181	}
3182
3183	val |= (1 << (bit + cpu_buffer->nest));
3184	cpu_buffer->current_context = val;
3185
3186	return false;
3187}
3188
3189static __always_inline void
3190trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
3191{
3192	cpu_buffer->current_context &=
3193		cpu_buffer->current_context - (1 << cpu_buffer->nest);
3194}
3195
3196/* The recursive locking above uses 5 bits */
3197#define NESTED_BITS 5
3198
3199/**
3200 * ring_buffer_nest_start - Allow tracing while nested
3201 * @buffer: The ring buffer to modify
3202 *
3203 * The ring buffer has a safety mechanism to prevent recursion.
3204 * But there may be a case where a trace needs to be done while
3205 * tracing something else. In this case, calling this function
3206 * allows another ring_buffer_lock_reserve() to nest within the
3207 * currently active one.
3208 *
3209 * Call this function before calling another ring_buffer_lock_reserve() and
3210 * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
3211 */
3212void ring_buffer_nest_start(struct trace_buffer *buffer)
3213{
3214	struct ring_buffer_per_cpu *cpu_buffer;
3215	int cpu;
3216
3217	/* Enabled by ring_buffer_nest_end() */
3218	preempt_disable_notrace();
3219	cpu = raw_smp_processor_id();
3220	cpu_buffer = buffer->buffers[cpu];
3221	/* This is the shift value for the above recursive locking */
3222	cpu_buffer->nest += NESTED_BITS;
3223}
3224
3225/**
3226 * ring_buffer_nest_end - Allow tracing while nested
3227 * @buffer: The ring buffer to modify
3228 *
3229 * Must be called after ring_buffer_nest_start() and after the
3230 * ring_buffer_unlock_commit().
3231 */
3232void ring_buffer_nest_end(struct trace_buffer *buffer)
3233{
3234	struct ring_buffer_per_cpu *cpu_buffer;
3235	int cpu;
3236
3237	/* disabled by ring_buffer_nest_start() */
3238	cpu = raw_smp_processor_id();
3239	cpu_buffer = buffer->buffers[cpu];
3240	/* This is the shift value for the above recursive locking */
3241	cpu_buffer->nest -= NESTED_BITS;
3242	preempt_enable_notrace();
3243}
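/*
 * Illustrative usage sketch (not part of this file), following the pairing
 * rules in the kernel-doc above. It assumes an outer
 * ring_buffer_lock_reserve() is already active on this CPU:
 *
 *	ring_buffer_nest_start(buffer);
 *	event = ring_buffer_lock_reserve(buffer, size);
 *	if (event) {
 *		... fill ring_buffer_event_data(event) ...
 *		ring_buffer_unlock_commit(buffer);
 *	}
 *	ring_buffer_nest_end(buffer);
 */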
3244
3245/**
3246 * ring_buffer_unlock_commit - commit a reserved event
3247 * @buffer: The buffer to commit to
3248 *
3249 * This commits the data to the ring buffer, and releases any locks held.
3250 *
3251 * Must be paired with ring_buffer_lock_reserve.
3252 */
3253int ring_buffer_unlock_commit(struct trace_buffer *buffer)
3254{
3255	struct ring_buffer_per_cpu *cpu_buffer;
3256	int cpu = raw_smp_processor_id();
3257
3258	cpu_buffer = buffer->buffers[cpu];
3259
3260	rb_commit(cpu_buffer);
3261
3262	rb_wakeups(buffer, cpu_buffer);
3263
3264	trace_recursive_unlock(cpu_buffer);
3265
3266	preempt_enable_notrace();
3267
3268	return 0;
3269}
3270EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
3271
3272/* Special value to validate all deltas on a page. */
3273#define CHECK_FULL_PAGE		1L
3274
3275#ifdef CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS
3276
3277static const char *show_irq_str(int bits)
3278{
3279	const char *type[] = {
3280		".",	// 0
3281		"s",	// 1
3282		"h",	// 2
3283		"Hs",	// 3
3284		"n",	// 4
3285		"Ns",	// 5
3286		"Nh",	// 6
3287		"NHs",	// 7
3288	};
3289
3290	return type[bits];
3291}
3292
3293/* Assume this is a trace event */
3294static const char *show_flags(struct ring_buffer_event *event)
3295{
3296	struct trace_entry *entry;
3297	int bits = 0;
3298
3299	if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry))
3300		return "X";
3301
3302	entry = ring_buffer_event_data(event);
3303
3304	if (entry->flags & TRACE_FLAG_SOFTIRQ)
3305		bits |= 1;
3306
3307	if (entry->flags & TRACE_FLAG_HARDIRQ)
3308		bits |= 2;
3309
3310	if (entry->flags & TRACE_FLAG_NMI)
3311		bits |= 4;
3312
3313	return show_irq_str(bits);
3314}
3315
3316static const char *show_irq(struct ring_buffer_event *event)
3317{
3318	struct trace_entry *entry;
3319
3320	if (rb_event_data_length(event) - RB_EVNT_HDR_SIZE < sizeof(*entry))
3321		return "";
3322
3323	entry = ring_buffer_event_data(event);
3324	if (entry->flags & TRACE_FLAG_IRQS_OFF)
3325		return "d";
3326	return "";
3327}
3328
3329static const char *show_interrupt_level(void)
3330{
3331	unsigned long pc = preempt_count();
3332	unsigned char level = 0;
3333
3334	if (pc & SOFTIRQ_OFFSET)
3335		level |= 1;
3336
3337	if (pc & HARDIRQ_MASK)
3338		level |= 2;
3339
3340	if (pc & NMI_MASK)
3341		level |= 4;
3342
3343	return show_irq_str(level);
3344}
3345
3346static void dump_buffer_page(struct buffer_data_page *bpage,
3347			     struct rb_event_info *info,
3348			     unsigned long tail)
3349{
3350	struct ring_buffer_event *event;
3351	u64 ts, delta;
3352	int e;
3353
3354	ts = bpage->time_stamp;
3355	pr_warn("  [%lld] PAGE TIME STAMP\n", ts);
3356
3357	for (e = 0; e < tail; e += rb_event_length(event)) {
3358
3359		event = (struct ring_buffer_event *)(bpage->data + e);
3360
3361		switch (event->type_len) {
3362
3363		case RINGBUF_TYPE_TIME_EXTEND:
3364			delta = rb_event_time_stamp(event);
3365			ts += delta;
3366			pr_warn(" 0x%x: [%lld] delta:%lld TIME EXTEND\n",
3367				e, ts, delta);
3368			break;
3369
3370		case RINGBUF_TYPE_TIME_STAMP:
3371			delta = rb_event_time_stamp(event);
3372			ts = rb_fix_abs_ts(delta, ts);
3373			pr_warn(" 0x%x:  [%lld] absolute:%lld TIME STAMP\n",
3374				e, ts, delta);
3375			break;
3376
3377		case RINGBUF_TYPE_PADDING:
3378			ts += event->time_delta;
3379			pr_warn(" 0x%x:  [%lld] delta:%d PADDING\n",
3380				e, ts, event->time_delta);
3381			break;
3382
3383		case RINGBUF_TYPE_DATA:
3384			ts += event->time_delta;
3385			pr_warn(" 0x%x:  [%lld] delta:%d %s%s\n",
3386				e, ts, event->time_delta,
3387				show_flags(event), show_irq(event));
3388			break;
3389
3390		default:
3391			break;
3392		}
3393	}
3394	pr_warn("expected end:0x%lx last event actually ended at:0x%x\n", tail, e);
3395}
3396
3397static DEFINE_PER_CPU(atomic_t, checking);
3398static atomic_t ts_dump;
3399
3400#define buffer_warn_return(fmt, ...)					\
3401	do {								\
3402		/* If another report is happening, ignore this one */	\
3403		if (atomic_inc_return(&ts_dump) != 1) {			\
3404			atomic_dec(&ts_dump);				\
3405			goto out;					\
3406		}							\
3407		atomic_inc(&cpu_buffer->record_disabled);		\
3408		pr_warn(fmt, ##__VA_ARGS__);				\
3409		dump_buffer_page(bpage, info, tail);			\
3410		atomic_dec(&ts_dump);					\
3411		/* There are some cases during boot up where this can happen */ \
3412		if (WARN_ON_ONCE(system_state != SYSTEM_BOOTING))	\
3413			/* Do not re-enable checking */			\
3414			return;						\
3415	} while (0)
3416
3417/*
3418 * Check if the current event time stamp matches the deltas on
3419 * the buffer page.
3420 */
3421static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
3422			 struct rb_event_info *info,
3423			 unsigned long tail)
3424{
3425	struct ring_buffer_event *event;
3426	struct buffer_data_page *bpage;
3427	u64 ts, delta;
3428	bool full = false;
3429	int e;
3430
3431	bpage = info->tail_page->page;
3432
3433	if (tail == CHECK_FULL_PAGE) {
3434		full = true;
3435		tail = local_read(&bpage->commit);
3436	} else if (info->add_timestamp &
3437		   (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE)) {
3438		/* Ignore events with absolute time stamps */
3439		return;
3440	}
3441
3442	/*
3443	 * Do not check the first event (skip possible extends too).
3444	 * Also do not check if previous events have not been committed.
3445	 */
3446	if (tail <= 8 || tail > local_read(&bpage->commit))
3447		return;
3448
3449	/*
3450	 * If this event interrupted another event that is already being checked, skip it.
3451	 */
3452	if (atomic_inc_return(this_cpu_ptr(&checking)) != 1)
3453		goto out;
3454
3455	ts = bpage->time_stamp;
3456
3457	for (e = 0; e < tail; e += rb_event_length(event)) {
3458
3459		event = (struct ring_buffer_event *)(bpage->data + e);
3460
3461		switch (event->type_len) {
3462
3463		case RINGBUF_TYPE_TIME_EXTEND:
3464			delta = rb_event_time_stamp(event);
3465			ts += delta;
3466			break;
3467
3468		case RINGBUF_TYPE_TIME_STAMP:
3469			delta = rb_event_time_stamp(event);
3470			delta = rb_fix_abs_ts(delta, ts);
3471			if (delta < ts) {
3472				buffer_warn_return("[CPU: %d]ABSOLUTE TIME WENT BACKWARDS: last ts: %lld absolute ts: %lld\n",
3473						   cpu_buffer->cpu, ts, delta);
3474			}
3475			ts = delta;
3476			break;
3477
3478		case RINGBUF_TYPE_PADDING:
3479			if (event->time_delta == 1)
3480				break;
3481			fallthrough;
3482		case RINGBUF_TYPE_DATA:
3483			ts += event->time_delta;
3484			break;
3485
3486		default:
3487			RB_WARN_ON(cpu_buffer, 1);
3488		}
3489	}
3490	if ((full && ts > info->ts) ||
3491	    (!full && ts + info->delta != info->ts)) {
3492		buffer_warn_return("[CPU: %d]TIME DOES NOT MATCH expected:%lld actual:%lld delta:%lld before:%lld after:%lld%s context:%s\n",
3493				   cpu_buffer->cpu,
3494				   ts + info->delta, info->ts, info->delta,
3495				   info->before, info->after,
3496				   full ? " (full)" : "", show_interrupt_level());
3497	}
3498out:
3499	atomic_dec(this_cpu_ptr(&checking));
3500}
3501#else
3502static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
3503			 struct rb_event_info *info,
3504			 unsigned long tail)
3505{
3506}
3507#endif /* CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS */
3508
3509static struct ring_buffer_event *
3510__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
3511		  struct rb_event_info *info)
3512{
3513	struct ring_buffer_event *event;
3514	struct buffer_page *tail_page;
3515	unsigned long tail, write, w;
3516
3517	/* Don't let the compiler play games with cpu_buffer->tail_page */
3518	tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page);
3519
3520 /*A*/	w = local_read(&tail_page->write) & RB_WRITE_MASK;
3521	barrier();
3522	rb_time_read(&cpu_buffer->before_stamp, &info->before);
3523	rb_time_read(&cpu_buffer->write_stamp, &info->after);
3524	barrier();
3525	info->ts = rb_time_stamp(cpu_buffer->buffer);
3526
3527	if ((info->add_timestamp & RB_ADD_STAMP_ABSOLUTE)) {
3528		info->delta = info->ts;
3529	} else {
3530		/*
3531		 * If interrupting an event time update, we may need an
3532		 * absolute timestamp.
3533		 * Don't bother if this is the start of a new page (w == 0).
3534		 */
3535		if (!w) {
3536			/* Use the sub-buffer timestamp */
3537			info->delta = 0;
3538		} else if (unlikely(info->before != info->after)) {
3539			info->add_timestamp |= RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND;
3540			info->length += RB_LEN_TIME_EXTEND;
3541		} else {
3542			info->delta = info->ts - info->after;
3543			if (unlikely(test_time_stamp(info->delta))) {
3544				info->add_timestamp |= RB_ADD_STAMP_EXTEND;
3545				info->length += RB_LEN_TIME_EXTEND;
3546			}
3547		}
3548	}
3549
3550 /*B*/	rb_time_set(&cpu_buffer->before_stamp, info->ts);
3551
3552 /*C*/	write = local_add_return(info->length, &tail_page->write);
3553
3554	/* set write to only the index of the write */
3555	write &= RB_WRITE_MASK;
3556
3557	tail = write - info->length;
3558
3559	/* See if we shot past the end of this buffer page */
3560	if (unlikely(write > cpu_buffer->buffer->subbuf_size)) {
3561		check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
3562		return rb_move_tail(cpu_buffer, tail, info);
3563	}
3564
3565	if (likely(tail == w)) {
3566		/* Nothing interrupted us between A and C */
3567 /*D*/		rb_time_set(&cpu_buffer->write_stamp, info->ts);
3568		/*
3569		 * If something came in between C and D, the write stamp
3570		 * may now not be in sync. But that's fine as the before_stamp
3571		 * will be different and then the next event will just be forced
3572		 * to use an absolute timestamp.
3573		 */
3574		if (likely(!(info->add_timestamp &
3575			     (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
3576			/* This did not interrupt any time update */
3577			info->delta = info->ts - info->after;
3578		else
3579			/* Just use full timestamp for interrupting event */
3580			info->delta = info->ts;
3581		check_buffer(cpu_buffer, info, tail);
3582	} else {
3583		u64 ts;
3584		/* SLOW PATH - Interrupted between A and C */
3585
3586		/* Save the old before_stamp */
3587		rb_time_read(&cpu_buffer->before_stamp, &info->before);
3588
3589		/*
3590		 * Read a new timestamp and update the before_stamp to force
3591		 * the next event after this one to use an absolute
3592		 * timestamp. This is in case an interrupt were to come in
3593		 * between E and F.
3594		 */
3595		ts = rb_time_stamp(cpu_buffer->buffer);
3596		rb_time_set(&cpu_buffer->before_stamp, ts);
3597
3598		barrier();
3599 /*E*/		rb_time_read(&cpu_buffer->write_stamp, &info->after);
3600		barrier();
3601 /*F*/		if (write == (local_read(&tail_page->write) & RB_WRITE_MASK) &&
3602		    info->after == info->before && info->after < ts) {
3603			/*
3604			 * Nothing came after this event between C and F, it is
3605			 * safe to use info->after for the delta as it
3606			 * matched info->before and is still valid.
3607			 */
3608			info->delta = ts - info->after;
3609		} else {
3610			/*
3611			 * Interrupted between C and F:
3612			 * Lost the previous event's time stamp. Just set the
3613			 * delta to zero, and this will be the same time as
3614			 * the event this event interrupted. And the events that
3615			 * came after this will still be correct (as they would
3616			 * have built their delta on the previous event).
3617			 */
3618			info->delta = 0;
3619		}
3620		info->ts = ts;
3621		info->add_timestamp &= ~RB_ADD_STAMP_FORCE;
3622	}
3623
3624	/*
3625	 * If this is the first commit on the page, then it has the same
3626	 * timestamp as the page itself.
3627	 */
3628	if (unlikely(!tail && !(info->add_timestamp &
3629				(RB_ADD_STAMP_FORCE | RB_ADD_STAMP_ABSOLUTE))))
3630		info->delta = 0;
3631
3632	/* We reserved something on the buffer */
3633
3634	event = __rb_page_index(tail_page, tail);
 
3635	rb_update_event(cpu_buffer, event, info);
3636
3637	local_inc(&tail_page->entries);
3638
3639	/*
3640	 * If this is the first commit on the page, then update
3641	 * its timestamp.
3642	 */
3643	if (unlikely(!tail))
3644		tail_page->page->time_stamp = info->ts;
3645
3646	/* account for these added bytes */
3647	local_add(info->length, &cpu_buffer->entries_bytes);
3648
3649	return event;
3650}
3651
3652static __always_inline struct ring_buffer_event *
3653rb_reserve_next_event(struct trace_buffer *buffer,
3654		      struct ring_buffer_per_cpu *cpu_buffer,
3655		      unsigned long length)
3656{
3657	struct ring_buffer_event *event;
3658	struct rb_event_info info;
3659	int nr_loops = 0;
3660	int add_ts_default;
3661
3662	/* ring buffer does cmpxchg, make sure it is safe in NMI context */
3663	if (!IS_ENABLED(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG) &&
3664	    (unlikely(in_nmi()))) {
3665		return NULL;
3666	}
3667
3668	rb_start_commit(cpu_buffer);
3669	/* The commit page can not change after this */
3670
3671#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3672	/*
3673	 * Due to the ability to swap a cpu buffer from a buffer
3674	 * it is possible it was swapped before we committed.
3675	 * (committing stops a swap). We check for it here and
3676	 * if it happened, we have to fail the write.
3677	 */
3678	barrier();
3679	if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) {
3680		local_dec(&cpu_buffer->committing);
3681		local_dec(&cpu_buffer->commits);
3682		return NULL;
3683	}
3684#endif
3685
3686	info.length = rb_calculate_event_length(length);
3687
3688	if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) {
3689		add_ts_default = RB_ADD_STAMP_ABSOLUTE;
3690		info.length += RB_LEN_TIME_EXTEND;
3691		if (info.length > cpu_buffer->buffer->max_data_size)
3692			goto out_fail;
3693	} else {
3694		add_ts_default = RB_ADD_STAMP_NONE;
3695	}
3696
3697 again:
3698	info.add_timestamp = add_ts_default;
3699	info.delta = 0;
3700
3701	/*
3702	 * We allow for interrupts to reenter here and do a trace.
3703	 * If one does, it will cause this original code to loop
3704	 * back here. Even with heavy interrupts happening, this
3705	 * should only happen a few times in a row. If this happens
3706	 * 1000 times in a row, there must be either an interrupt
3707	 * storm or we have something buggy.
3708	 * Bail!
3709	 */
3710	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
3711		goto out_fail;
3712
3713	event = __rb_reserve_next(cpu_buffer, &info);
3714
3715	if (unlikely(PTR_ERR(event) == -EAGAIN)) {
3716		if (info.add_timestamp & (RB_ADD_STAMP_FORCE | RB_ADD_STAMP_EXTEND))
3717			info.length -= RB_LEN_TIME_EXTEND;
3718		goto again;
3719	}
3720
3721	if (likely(event))
3722		return event;
3723 out_fail:
3724	rb_end_commit(cpu_buffer);
3725	return NULL;
3726}
3727
3728/**
3729 * ring_buffer_lock_reserve - reserve a part of the buffer
3730 * @buffer: the ring buffer to reserve from
3731 * @length: the length of the data to reserve (excluding event header)
3732 *
3733 * Returns a reserved event on the ring buffer to copy directly to.
3734 * The user of this interface will need to get the body to write into
3735 * and can use the ring_buffer_event_data() interface.
3736 *
3737 * The length is the length of the data needed, not the event length
3738 * which also includes the event header.
3739 *
3740 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
3741 * If NULL is returned, then nothing has been allocated or locked.
3742 */
3743struct ring_buffer_event *
3744ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length)
3745{
3746	struct ring_buffer_per_cpu *cpu_buffer;
3747	struct ring_buffer_event *event;
3748	int cpu;
3749
3750	/* If we are tracing schedule, we don't want to recurse */
3751	preempt_disable_notrace();
3752
3753	if (unlikely(atomic_read(&buffer->record_disabled)))
3754		goto out;
3755
3756	cpu = raw_smp_processor_id();
3757
3758	if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
3759		goto out;
3760
3761	cpu_buffer = buffer->buffers[cpu];
3762
3763	if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
3764		goto out;
3765
3766	if (unlikely(length > buffer->max_data_size))
3767		goto out;
3768
3769	if (unlikely(trace_recursive_lock(cpu_buffer)))
3770		goto out;
3771
3772	event = rb_reserve_next_event(buffer, cpu_buffer, length);
3773	if (!event)
3774		goto out_unlock;
3775
3776	return event;
3777
3778 out_unlock:
3779	trace_recursive_unlock(cpu_buffer);
3780 out:
3781	preempt_enable_notrace();
3782	return NULL;
3783}
3784EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
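/*
 * Minimal usage sketch (illustrative only; "struct my_entry" is a made-up
 * payload type): reserve space, fill the body returned by
 * ring_buffer_event_data(), then commit. If the reserve returns NULL,
 * nothing was locked and no commit may be issued.
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->field = value;
 *	ring_buffer_unlock_commit(buffer);
 */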
3785
3786/*
3787 * Decrement the entries to the page that an event is on.
3788 * The event does not even need to exist, only the pointer
3789 * to the page it is on. This may only be called before the commit
3790 * takes place.
3791 */
3792static inline void
3793rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
3794		   struct ring_buffer_event *event)
3795{
3796	unsigned long addr = (unsigned long)event;
3797	struct buffer_page *bpage = cpu_buffer->commit_page;
3798	struct buffer_page *start;
3799
3800	addr &= ~((PAGE_SIZE << cpu_buffer->buffer->subbuf_order) - 1);
3801
3802	/* Do the likely case first */
3803	if (likely(bpage->page == (void *)addr)) {
3804		local_dec(&bpage->entries);
3805		return;
3806	}
3807
3808	/*
3809	 * Because the commit page may be on the reader page we
3810	 * start with the next page and check the end loop there.
3811	 */
3812	rb_inc_page(&bpage);
3813	start = bpage;
3814	do {
3815		if (bpage->page == (void *)addr) {
3816			local_dec(&bpage->entries);
3817			return;
3818		}
3819		rb_inc_page(&bpage);
3820	} while (bpage != start);
3821
3822	/* commit not part of this buffer?? */
3823	RB_WARN_ON(cpu_buffer, 1);
3824}
3825
3826/**
3827 * ring_buffer_discard_commit - discard an event that has not been committed
3828 * @buffer: the ring buffer
3829 * @event: non committed event to discard
3830 *
3831 * Sometimes an event that is in the ring buffer needs to be ignored.
3832 * This function lets the user discard an event in the ring buffer
3833 * and then that event will not be read later.
3834 *
3835 * This function only works if it is called before the item has been
3836 * committed. It will try to free the event from the ring buffer
3837 * if another event has not been added behind it.
3838 *
3839 * If another event has been added behind it, it will set the event
3840 * up as discarded, and perform the commit.
3841 *
3842 * If this function is called, do not call ring_buffer_unlock_commit on
3843 * the event.
3844 */
3845void ring_buffer_discard_commit(struct trace_buffer *buffer,
3846				struct ring_buffer_event *event)
3847{
3848	struct ring_buffer_per_cpu *cpu_buffer;
3849	int cpu;
3850
3851	/* The event is discarded regardless */
3852	rb_event_discard(event);
3853
3854	cpu = smp_processor_id();
3855	cpu_buffer = buffer->buffers[cpu];
3856
3857	/*
3858	 * This must only be called if the event has not been
3859	 * committed yet. Thus we can assume that preemption
3860	 * is still disabled.
3861	 */
3862	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
3863
3864	rb_decrement_entry(cpu_buffer, event);
3865	if (rb_try_to_discard(cpu_buffer, event))
3866		goto out;
3867
 
 
 
 
 
3868 out:
3869	rb_end_commit(cpu_buffer);
3870
3871	trace_recursive_unlock(cpu_buffer);
3872
3873	preempt_enable_notrace();
3874
3875}
3876EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
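/*
 * Illustrative sketch: a reserved event that turns out to be unwanted is
 * discarded instead of committed; exactly one of ring_buffer_discard_commit()
 * or ring_buffer_unlock_commit() is called on it ("should_drop" here is a
 * hypothetical filter, not a real helper):
 *
 *	event = ring_buffer_lock_reserve(buffer, len);
 *	if (event) {
 *		if (should_drop(ring_buffer_event_data(event)))
 *			ring_buffer_discard_commit(buffer, event);
 *		else
 *			ring_buffer_unlock_commit(buffer);
 *	}
 */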
3877
3878/**
3879 * ring_buffer_write - write data to the buffer without reserving
3880 * @buffer: The ring buffer to write to.
3881 * @length: The length of the data being written (excluding the event header)
3882 * @data: The data to write to the buffer.
3883 *
3884 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
3885 * one function. If you already have the data to write to the buffer, it
3886 * may be easier to simply call this function.
3887 *
3888 * Note, like ring_buffer_lock_reserve, the length is the length of the data
3889 * and not the length of the event which would hold the header.
3890 */
3891int ring_buffer_write(struct trace_buffer *buffer,
3892		      unsigned long length,
3893		      void *data)
3894{
3895	struct ring_buffer_per_cpu *cpu_buffer;
3896	struct ring_buffer_event *event;
3897	void *body;
3898	int ret = -EBUSY;
3899	int cpu;
3900
3901	preempt_disable_notrace();
3902
3903	if (atomic_read(&buffer->record_disabled))
3904		goto out;
3905
3906	cpu = raw_smp_processor_id();
3907
3908	if (!cpumask_test_cpu(cpu, buffer->cpumask))
3909		goto out;
3910
3911	cpu_buffer = buffer->buffers[cpu];
3912
3913	if (atomic_read(&cpu_buffer->record_disabled))
3914		goto out;
3915
3916	if (length > buffer->max_data_size)
3917		goto out;
3918
3919	if (unlikely(trace_recursive_lock(cpu_buffer)))
3920		goto out;
3921
3922	event = rb_reserve_next_event(buffer, cpu_buffer, length);
3923	if (!event)
3924		goto out_unlock;
3925
3926	body = rb_event_data(event);
3927
3928	memcpy(body, data, length);
3929
3930	rb_commit(cpu_buffer);
3931
3932	rb_wakeups(buffer, cpu_buffer);
3933
3934	ret = 0;
3935
3936 out_unlock:
3937	trace_recursive_unlock(cpu_buffer);
3938
3939 out:
3940	preempt_enable_notrace();
3941
3942	return ret;
3943}
3944EXPORT_SYMBOL_GPL(ring_buffer_write);
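/*
 * Illustrative one-shot write (sketch only; "entry" stands for any payload
 * already assembled by the caller). On success 0 is returned; -EBUSY means
 * the write could not be done (recording disabled, CPU not in the buffer's
 * cpumask, data too large, or the reservation failed):
 *
 *	if (ring_buffer_write(buffer, sizeof(entry), &entry))
 *		pr_debug("ring buffer write failed\n");
 */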
3945
3946static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
3947{
3948	struct buffer_page *reader = cpu_buffer->reader_page;
3949	struct buffer_page *head = rb_set_head_page(cpu_buffer);
3950	struct buffer_page *commit = cpu_buffer->commit_page;
3951
3952	/* In case of error, head will be NULL */
3953	if (unlikely(!head))
3954		return true;
3955
3956	/* Reader should exhaust content in reader page */
3957	if (reader->read != rb_page_commit(reader))
3958		return false;
3959
3960	/*
3961	 * If writers are committing on the reader page, knowing all
3962	 * committed content has been read, the ring buffer is empty.
3963	 */
3964	if (commit == reader)
3965		return true;
3966
3967	/*
3968	 * If writers are committing on a page other than reader page
3969	 * and head page, there should always be content to read.
3970	 */
3971	if (commit != head)
3972		return false;
3973
3974	/*
3975	 * Writers are committing on the head page; we just need
3976	 * to care about whether there is committed data, and the reader will
3977	 * swap reader page with head page when it is to read data.
3978	 */
3979	return rb_page_commit(commit) == 0;
3980}
3981
3982/**
3983 * ring_buffer_record_disable - stop all writes into the buffer
3984 * @buffer: The ring buffer to stop writes to.
3985 *
3986 * This prevents all writes to the buffer. Any attempt to write
3987 * to the buffer after this will fail and return NULL.
3988 *
3989 * The caller should call synchronize_rcu() after this.
3990 */
3991void ring_buffer_record_disable(struct trace_buffer *buffer)
3992{
3993	atomic_inc(&buffer->record_disabled);
3994}
3995EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
3996
3997/**
3998 * ring_buffer_record_enable - enable writes to the buffer
3999 * @buffer: The ring buffer to enable writes
4000 *
4001 * Note, multiple disables will need the same number of enables
4002 * to truly enable the writing (much like preempt_disable).
4003 */
4004void ring_buffer_record_enable(struct trace_buffer *buffer)
4005{
4006	atomic_dec(&buffer->record_disabled);
4007}
4008EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
4009
4010/**
4011 * ring_buffer_record_off - stop all writes into the buffer
4012 * @buffer: The ring buffer to stop writes to.
4013 *
4014 * This prevents all writes to the buffer. Any attempt to write
4015 * to the buffer after this will fail and return NULL.
4016 *
4017 * This is different from ring_buffer_record_disable() as
4018 * it works like an on/off switch, whereas the disable() version
4019 * must be paired with an enable().
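 *
 * An illustrative sketch of the on/off pair (the work done in between
 * is hypothetical):
 *
 *	ring_buffer_record_off(buffer);
 *	... recording stays off no matter how many times _off() was called ...
 *	ring_buffer_record_on(buffer);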
4020 */
4021void ring_buffer_record_off(struct trace_buffer *buffer)
4022{
4023	unsigned int rd;
4024	unsigned int new_rd;
4025
4026	rd = atomic_read(&buffer->record_disabled);
4027	do {
4028		new_rd = rd | RB_BUFFER_OFF;
4029	} while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd));
4030}
4031EXPORT_SYMBOL_GPL(ring_buffer_record_off);
4032
4033/**
4034 * ring_buffer_record_on - restart writes into the buffer
4035 * @buffer: The ring buffer to start writes to.
4036 *
4037 * This enables all writes to the buffer that was disabled by
4038 * ring_buffer_record_off().
4039 *
4040 * This is different from ring_buffer_record_enable() as
4041 * it works like an on/off switch, whereas the enable() version
4042 * must be paired with a disable().
4043 */
4044void ring_buffer_record_on(struct trace_buffer *buffer)
4045{
4046	unsigned int rd;
4047	unsigned int new_rd;
4048
4049	rd = atomic_read(&buffer->record_disabled);
4050	do {
4051		new_rd = rd & ~RB_BUFFER_OFF;
4052	} while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd));
4053}
4054EXPORT_SYMBOL_GPL(ring_buffer_record_on);
4055
4056/**
4057 * ring_buffer_record_is_on - return true if the ring buffer can write
4058 * @buffer: The ring buffer to see if write is enabled
4059 *
4060 * Returns true if the ring buffer is in a state that it accepts writes.
4061 */
4062bool ring_buffer_record_is_on(struct trace_buffer *buffer)
4063{
4064	return !atomic_read(&buffer->record_disabled);
4065}
4066
4067/**
4068 * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
4069 * @buffer: The ring buffer to see if write is set enabled
4070 *
4071 * Returns true if the ring buffer is set writable by ring_buffer_record_on().
4072 * Note that this does NOT mean it is in a writable state.
4073 *
4074 * It may return true when the ring buffer has been disabled by
4075 * ring_buffer_record_disable(), as that is a temporary disabling of
4076 * the ring buffer.
4077 */
4078bool ring_buffer_record_is_set_on(struct trace_buffer *buffer)
4079{
4080	return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
4081}
4082
4083/**
4084 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
4085 * @buffer: The ring buffer to stop writes to.
4086 * @cpu: The CPU buffer to stop
4087 *
4088 * This prevents all writes to the buffer. Any attempt to write
4089 * to the buffer after this will fail and return NULL.
4090 *
4091 * The caller should call synchronize_rcu() after this.
4092 */
4093void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu)
4094{
4095	struct ring_buffer_per_cpu *cpu_buffer;
4096
4097	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4098		return;
4099
4100	cpu_buffer = buffer->buffers[cpu];
4101	atomic_inc(&cpu_buffer->record_disabled);
4102}
4103EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
4104
4105/**
4106 * ring_buffer_record_enable_cpu - enable writes to the buffer
4107 * @buffer: The ring buffer to enable writes
4108 * @cpu: The CPU to enable.
4109 *
4110 * Note, multiple disables will need the same number of enables
4111 * to truly enable the writing (much like preempt_disable).
4112 */
4113void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu)
4114{
4115	struct ring_buffer_per_cpu *cpu_buffer;
4116
4117	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4118		return;
4119
4120	cpu_buffer = buffer->buffers[cpu];
4121	atomic_dec(&cpu_buffer->record_disabled);
4122}
4123EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
4124
4125/*
4126 * The total number of entries in the ring buffer is the running counter
4127 * of entries entered into the ring buffer, minus the sum of
4128 * the entries read from the ring buffer and the number of
4129 * entries that were overwritten.
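 *
 * For example, if 1000 entries were written, 150 were overwritten and
 * 200 have already been read, 1000 - (150 + 200) = 650 entries remain.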
4130 */
4131static inline unsigned long
4132rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
4133{
4134	return local_read(&cpu_buffer->entries) -
4135		(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
4136}
4137
4138/**
4139 * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
4140 * @buffer: The ring buffer
4141 * @cpu: The per CPU buffer to read from.
4142 */
4143u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
4144{
4145	unsigned long flags;
4146	struct ring_buffer_per_cpu *cpu_buffer;
4147	struct buffer_page *bpage;
4148	u64 ret = 0;
4149
4150	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4151		return 0;
4152
4153	cpu_buffer = buffer->buffers[cpu];
4154	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4155	/*
4156	 * If the tail is on the reader_page, the oldest time stamp is on
4157	 * the reader page.
4158	 */
4159	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
4160		bpage = cpu_buffer->reader_page;
4161	else
4162		bpage = rb_set_head_page(cpu_buffer);
4163	if (bpage)
4164		ret = bpage->page->time_stamp;
4165	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4166
4167	return ret;
4168}
4169EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
4170
4171/**
4172 * ring_buffer_bytes_cpu - get the number of bytes unconsumed in a cpu buffer
4173 * @buffer: The ring buffer
4174 * @cpu: The per CPU buffer to read from.
4175 */
4176unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu)
4177{
4178	struct ring_buffer_per_cpu *cpu_buffer;
4179	unsigned long ret;
4180
4181	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4182		return 0;
4183
4184	cpu_buffer = buffer->buffers[cpu];
4185	ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
4186
4187	return ret;
4188}
4189EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
4190
4191/**
4192 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
4193 * @buffer: The ring buffer
4194 * @cpu: The per CPU buffer to get the entries from.
4195 */
4196unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu)
4197{
4198	struct ring_buffer_per_cpu *cpu_buffer;
4199
4200	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4201		return 0;
4202
4203	cpu_buffer = buffer->buffers[cpu];
4204
4205	return rb_num_of_entries(cpu_buffer);
4206}
4207EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
4208
4209/**
4210 * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
4211 * buffer wrapping around (only if RB_FL_OVERWRITE is on).
4212 * @buffer: The ring buffer
4213 * @cpu: The per CPU buffer to get the number of overruns from
4214 */
4215unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu)
4216{
4217	struct ring_buffer_per_cpu *cpu_buffer;
4218	unsigned long ret;
4219
4220	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4221		return 0;
4222
4223	cpu_buffer = buffer->buffers[cpu];
4224	ret = local_read(&cpu_buffer->overrun);
4225
4226	return ret;
4227}
4228EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
4229
4230/**
4231 * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
4232 * commits failing due to the buffer wrapping around while there are uncommitted
4233 * events, such as during an interrupt storm.
4234 * @buffer: The ring buffer
4235 * @cpu: The per CPU buffer to get the number of overruns from
4236 */
4237unsigned long
4238ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu)
4239{
4240	struct ring_buffer_per_cpu *cpu_buffer;
4241	unsigned long ret;
4242
4243	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4244		return 0;
4245
4246	cpu_buffer = buffer->buffers[cpu];
4247	ret = local_read(&cpu_buffer->commit_overrun);
4248
4249	return ret;
4250}
4251EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
4252
4253/**
4254 * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
4255 * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
4256 * @buffer: The ring buffer
4257 * @cpu: The per CPU buffer to get the number of overruns from
4258 */
4259unsigned long
4260ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu)
4261{
4262	struct ring_buffer_per_cpu *cpu_buffer;
4263	unsigned long ret;
4264
4265	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4266		return 0;
4267
4268	cpu_buffer = buffer->buffers[cpu];
4269	ret = local_read(&cpu_buffer->dropped_events);
4270
4271	return ret;
4272}
4273EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
4274
4275/**
4276 * ring_buffer_read_events_cpu - get the number of events successfully read
4277 * @buffer: The ring buffer
4278 * @cpu: The per CPU buffer to get the number of events read
4279 */
4280unsigned long
4281ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu)
4282{
4283	struct ring_buffer_per_cpu *cpu_buffer;
4284
4285	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4286		return 0;
4287
4288	cpu_buffer = buffer->buffers[cpu];
4289	return cpu_buffer->read;
4290}
4291EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
4292
4293/**
4294 * ring_buffer_entries - get the number of entries in a buffer
4295 * @buffer: The ring buffer
4296 *
4297 * Returns the total number of entries in the ring buffer
4298 * (all CPU entries)
4299 */
4300unsigned long ring_buffer_entries(struct trace_buffer *buffer)
4301{
4302	struct ring_buffer_per_cpu *cpu_buffer;
4303	unsigned long entries = 0;
4304	int cpu;
4305
4306	/* if you care about this being correct, lock the buffer */
4307	for_each_buffer_cpu(buffer, cpu) {
4308		cpu_buffer = buffer->buffers[cpu];
4309		entries += rb_num_of_entries(cpu_buffer);
4310	}
4311
4312	return entries;
4313}
4314EXPORT_SYMBOL_GPL(ring_buffer_entries);
4315
4316/**
4317 * ring_buffer_overruns - get the number of overruns in buffer
4318 * @buffer: The ring buffer
4319 *
4320 * Returns the total number of overruns in the ring buffer
4321 * (all CPU entries)
4322 */
4323unsigned long ring_buffer_overruns(struct trace_buffer *buffer)
4324{
4325	struct ring_buffer_per_cpu *cpu_buffer;
4326	unsigned long overruns = 0;
4327	int cpu;
4328
4329	/* if you care about this being correct, lock the buffer */
4330	for_each_buffer_cpu(buffer, cpu) {
4331		cpu_buffer = buffer->buffers[cpu];
4332		overruns += local_read(&cpu_buffer->overrun);
4333	}
4334
4335	return overruns;
4336}
4337EXPORT_SYMBOL_GPL(ring_buffer_overruns);
4338
4339static void rb_iter_reset(struct ring_buffer_iter *iter)
4340{
4341	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4342
4343	/* Iterator usage is expected to have record disabled */
4344	iter->head_page = cpu_buffer->reader_page;
4345	iter->head = cpu_buffer->reader_page->read;
4346	iter->next_event = iter->head;
4347
4348	iter->cache_reader_page = iter->head_page;
4349	iter->cache_read = cpu_buffer->read;
4350	iter->cache_pages_removed = cpu_buffer->pages_removed;
4351
4352	if (iter->head) {
4353		iter->read_stamp = cpu_buffer->read_stamp;
4354		iter->page_stamp = cpu_buffer->reader_page->page->time_stamp;
4355	} else {
4356		iter->read_stamp = iter->head_page->page->time_stamp;
4357		iter->page_stamp = iter->read_stamp;
4358	}
4359}
4360
4361/**
4362 * ring_buffer_iter_reset - reset an iterator
4363 * @iter: The iterator to reset
4364 *
4365 * Resets the iterator, so that it will start from the beginning
4366 * again.
4367 */
4368void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
4369{
4370	struct ring_buffer_per_cpu *cpu_buffer;
4371	unsigned long flags;
4372
4373	if (!iter)
4374		return;
4375
4376	cpu_buffer = iter->cpu_buffer;
4377
4378	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4379	rb_iter_reset(iter);
4380	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4381}
4382EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
4383
4384/**
4385 * ring_buffer_iter_empty - check if an iterator has no more to read
4386 * @iter: The iterator to check
4387 */
4388int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
4389{
4390	struct ring_buffer_per_cpu *cpu_buffer;
4391	struct buffer_page *reader;
4392	struct buffer_page *head_page;
4393	struct buffer_page *commit_page;
4394	struct buffer_page *curr_commit_page;
4395	unsigned commit;
4396	u64 curr_commit_ts;
4397	u64 commit_ts;
4398
4399	cpu_buffer = iter->cpu_buffer;
4400	reader = cpu_buffer->reader_page;
4401	head_page = cpu_buffer->head_page;
4402	commit_page = READ_ONCE(cpu_buffer->commit_page);
4403	commit_ts = commit_page->page->time_stamp;
4404
4405	/*
4406	 * When the writer goes across pages, it issues a cmpxchg which
4407	 * is a mb(), which will synchronize with the rmb here.
4408	 * (see rb_tail_page_update())
4409	 */
4410	smp_rmb();
4411	commit = rb_page_commit(commit_page);
4412	/* We want to make sure that the commit page doesn't change */
4413	smp_rmb();
4414
4415	/* Make sure commit page didn't change */
4416	curr_commit_page = READ_ONCE(cpu_buffer->commit_page);
4417	curr_commit_ts = READ_ONCE(curr_commit_page->page->time_stamp);
4418
4419	/* If the commit page changed, then there's more data */
4420	if (curr_commit_page != commit_page ||
4421	    curr_commit_ts != commit_ts)
4422		return 0;
4423
4424	/* Still racy, as it may return a false positive, but that's OK */
4425	return ((iter->head_page == commit_page && iter->head >= commit) ||
4426		(iter->head_page == reader && commit_page == head_page &&
4427		 head_page->read == commit &&
4428		 iter->head == rb_page_commit(cpu_buffer->reader_page)));
4429}
4430EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
4431
4432static void
4433rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
4434		     struct ring_buffer_event *event)
4435{
4436	u64 delta;
4437
4438	switch (event->type_len) {
4439	case RINGBUF_TYPE_PADDING:
4440		return;
4441
4442	case RINGBUF_TYPE_TIME_EXTEND:
4443		delta = rb_event_time_stamp(event);
4444		cpu_buffer->read_stamp += delta;
4445		return;
4446
4447	case RINGBUF_TYPE_TIME_STAMP:
4448		delta = rb_event_time_stamp(event);
4449		delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp);
4450		cpu_buffer->read_stamp = delta;
4451		return;
4452
4453	case RINGBUF_TYPE_DATA:
4454		cpu_buffer->read_stamp += event->time_delta;
4455		return;
4456
4457	default:
4458		RB_WARN_ON(cpu_buffer, 1);
4459	}
4460}
4461
4462static void
4463rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
4464			  struct ring_buffer_event *event)
4465{
4466	u64 delta;
4467
4468	switch (event->type_len) {
4469	case RINGBUF_TYPE_PADDING:
4470		return;
4471
4472	case RINGBUF_TYPE_TIME_EXTEND:
4473		delta = rb_event_time_stamp(event);
4474		iter->read_stamp += delta;
4475		return;
4476
4477	case RINGBUF_TYPE_TIME_STAMP:
4478		delta = rb_event_time_stamp(event);
4479		delta = rb_fix_abs_ts(delta, iter->read_stamp);
4480		iter->read_stamp = delta;
4481		return;
4482
4483	case RINGBUF_TYPE_DATA:
4484		iter->read_stamp += event->time_delta;
4485		return;
4486
4487	default:
4488		RB_WARN_ON(iter->cpu_buffer, 1);
4489	}
4490}
4491
4492static struct buffer_page *
4493rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
4494{
4495	struct buffer_page *reader = NULL;
4496	unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size);
4497	unsigned long overwrite;
4498	unsigned long flags;
4499	int nr_loops = 0;
4500	bool ret;
4501
4502	local_irq_save(flags);
4503	arch_spin_lock(&cpu_buffer->lock);
4504
4505 again:
4506	/*
4507	 * This should normally only loop twice. But because the
4508	 * start of the reader inserts an empty page, it causes
4509	 * a case where we will loop three times. There should be no
4510	 * reason to loop four times (that I know of).
4511	 */
4512	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
4513		reader = NULL;
4514		goto out;
4515	}
4516
4517	reader = cpu_buffer->reader_page;
4518
4519	/* If there's more to read, return this page */
4520	if (cpu_buffer->reader_page->read < rb_page_size(reader))
4521		goto out;
4522
4523	/* Never should we have an index greater than the size */
4524	if (RB_WARN_ON(cpu_buffer,
4525		       cpu_buffer->reader_page->read > rb_page_size(reader)))
4526		goto out;
4527
4528	/* check if we caught up to the tail */
4529	reader = NULL;
4530	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
4531		goto out;
4532
4533	/* Don't bother swapping if the ring buffer is empty */
4534	if (rb_num_of_entries(cpu_buffer) == 0)
4535		goto out;
4536
4537	/*
4538	 * Reset the reader page to size zero.
4539	 */
4540	local_set(&cpu_buffer->reader_page->write, 0);
4541	local_set(&cpu_buffer->reader_page->entries, 0);
4542	local_set(&cpu_buffer->reader_page->page->commit, 0);
4543	cpu_buffer->reader_page->real_end = 0;
4544
4545 spin:
4546	/*
4547	 * Splice the empty reader page into the list around the head.
4548	 */
4549	reader = rb_set_head_page(cpu_buffer);
4550	if (!reader)
4551		goto out;
4552	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
4553	cpu_buffer->reader_page->list.prev = reader->list.prev;
4554
4555	/*
4556	 * cpu_buffer->pages just needs to point to the buffer; it
4557	 * has no specific buffer page to point to. Let's move it out
4558	 * of our way so we don't accidentally swap it.
4559	 */
4560	cpu_buffer->pages = reader->list.prev;
4561
4562	/* The reader page will be pointing to the new head */
4563	rb_set_list_to_head(&cpu_buffer->reader_page->list);
4564
4565	/*
4566	 * We want to make sure we read the overruns after we set up our
4567	 * pointers to the next object. The writer side does a
4568	 * cmpxchg to cross pages which acts as the mb on the writer
4569	 * side. Note, the reader will constantly fail the swap
4570	 * while the writer is updating the pointers, so this
4571	 * guarantees that the overwrite recorded here is the one we
4572	 * want to compare with the last_overrun.
4573	 */
4574	smp_mb();
4575	overwrite = local_read(&(cpu_buffer->overrun));
4576
4577	/*
4578	 * Here's the tricky part.
4579	 *
4580	 * We need to move the pointer past the header page.
4581	 * But we can only do that if a writer is not currently
4582	 * moving it. The page before the header page has the
4583	 * flag bit '1' set if it is pointing to the page we want.
4584	 * But if the writer is in the process of moving it,
4585	 * then it will be '2', or '0' if it has already moved.
4586	 */
4587
4588	ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
4589
4590	/*
4591	 * If we did not convert it, then we must try again.
4592	 */
4593	if (!ret)
4594		goto spin;
4595
4596	/*
4597	 * Yay! We succeeded in replacing the page.
4598	 *
4599	 * Now make the new head point back to the reader page.
4600	 */
4601	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
4602	rb_inc_page(&cpu_buffer->head_page);
4603
4604	local_inc(&cpu_buffer->pages_read);
4605
4606	/* Finally update the reader page to the new head */
4607	cpu_buffer->reader_page = reader;
4608	cpu_buffer->reader_page->read = 0;
4609
4610	if (overwrite != cpu_buffer->last_overrun) {
4611		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
4612		cpu_buffer->last_overrun = overwrite;
4613	}
4614
4615	goto again;
4616
4617 out:
4618	/* Update the read_stamp on the first event */
4619	if (reader && reader->read == 0)
4620		cpu_buffer->read_stamp = reader->page->time_stamp;
4621
4622	arch_spin_unlock(&cpu_buffer->lock);
4623	local_irq_restore(flags);
4624
4625	/*
4626	 * The writer has preemption disabled; wait for it, but not forever.
4627	 * Although, 1 second is pretty much "forever".
4628	 */
4629#define USECS_WAIT	1000000
4630	for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) {
4631		/* If the write is past the end of page, a writer is still updating it */
4632		if (likely(!reader || rb_page_write(reader) <= bsize))
4633			break;
4634
4635		udelay(1);
4636
4637		/* Get the latest version of the reader write value */
4638		smp_rmb();
4639	}
4640
4641	/* The writer is not moving forward? Something is wrong */
4642	if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT))
4643		reader = NULL;
4644
4645	/*
4646	 * Make sure we see any padding after the write update
4647	 * (see rb_reset_tail()).
4648	 *
4649	 * In addition, a writer may be writing on the reader page
4650	 * if the page has not been fully filled, so the read barrier
4651	 * is also needed to make sure we see the content of what is
4652	 * committed by the writer (see rb_set_commit_to_write()).
4653	 */
4654	smp_rmb();
4655
4656
4657	return reader;
4658}
4659
4660static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
4661{
4662	struct ring_buffer_event *event;
4663	struct buffer_page *reader;
4664	unsigned length;
4665
4666	reader = rb_get_reader_page(cpu_buffer);
4667
4668	/* This function should not be called when buffer is empty */
4669	if (RB_WARN_ON(cpu_buffer, !reader))
4670		return;
4671
4672	event = rb_reader_event(cpu_buffer);
4673
4674	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
4675		cpu_buffer->read++;
4676
4677	rb_update_read_stamp(cpu_buffer, event);
4678
4679	length = rb_event_length(event);
4680	cpu_buffer->reader_page->read += length;
4681	cpu_buffer->read_bytes += length;
4682}
4683
4684static void rb_advance_iter(struct ring_buffer_iter *iter)
4685{
4686	struct ring_buffer_per_cpu *cpu_buffer;
4687
4688	cpu_buffer = iter->cpu_buffer;
4689
4690	/* If head == next_event then we need to jump to the next event */
4691	if (iter->head == iter->next_event) {
4692		/* If the event gets overwritten again, there's nothing to do */
4693		if (rb_iter_head_event(iter) == NULL)
4694			return;
4695	}
4696
4697	iter->head = iter->next_event;
4698
4699	/*
4700	 * Check if we are at the end of the buffer.
4701	 */
4702	if (iter->next_event >= rb_page_size(iter->head_page)) {
4703		/* discarded commits can make the page empty */
4704		if (iter->head_page == cpu_buffer->commit_page)
4705			return;
4706		rb_inc_iter(iter);
4707		return;
4708	}
4709
4710	rb_update_iter_read_stamp(iter, iter->event);
4711}
4712
4713static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
4714{
4715	return cpu_buffer->lost_events;
4716}
4717
4718static struct ring_buffer_event *
4719rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
4720	       unsigned long *lost_events)
4721{
4722	struct ring_buffer_event *event;
4723	struct buffer_page *reader;
4724	int nr_loops = 0;
4725
4726	if (ts)
4727		*ts = 0;
4728 again:
4729	/*
4730	 * We repeat when a time extend is encountered.
4731	 * Since the time extend is always attached to a data event,
4732	 * we should never loop more than once.
4733	 * (We never hit the following condition more than twice).
4734	 */
4735	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
4736		return NULL;
4737
4738	reader = rb_get_reader_page(cpu_buffer);
4739	if (!reader)
4740		return NULL;
4741
4742	event = rb_reader_event(cpu_buffer);
4743
4744	switch (event->type_len) {
4745	case RINGBUF_TYPE_PADDING:
4746		if (rb_null_event(event))
4747			RB_WARN_ON(cpu_buffer, 1);
4748		/*
4749		 * Because the writer could be discarding every
4750		 * event it creates (which would probably be bad)
4751		 * if we were to go back to "again" then we may never
4752		 * catch up, and will trigger the warn on, or lock
4753		 * the box. Return the padding, and we will release
4754		 * the current locks, and try again.
4755		 */
4756		return event;
4757
4758	case RINGBUF_TYPE_TIME_EXTEND:
4759		/* Internal data, OK to advance */
4760		rb_advance_reader(cpu_buffer);
4761		goto again;
4762
4763	case RINGBUF_TYPE_TIME_STAMP:
4764		if (ts) {
4765			*ts = rb_event_time_stamp(event);
4766			*ts = rb_fix_abs_ts(*ts, reader->page->time_stamp);
4767			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4768							 cpu_buffer->cpu, ts);
4769		}
4770		/* Internal data, OK to advance */
4771		rb_advance_reader(cpu_buffer);
4772		goto again;
4773
4774	case RINGBUF_TYPE_DATA:
4775		if (ts && !(*ts)) {
4776			*ts = cpu_buffer->read_stamp + event->time_delta;
4777			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4778							 cpu_buffer->cpu, ts);
4779		}
4780		if (lost_events)
4781			*lost_events = rb_lost_events(cpu_buffer);
4782		return event;
4783
4784	default:
4785		RB_WARN_ON(cpu_buffer, 1);
4786	}
4787
4788	return NULL;
4789}
4790EXPORT_SYMBOL_GPL(ring_buffer_peek);
4791
4792static struct ring_buffer_event *
4793rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
4794{
4795	struct trace_buffer *buffer;
4796	struct ring_buffer_per_cpu *cpu_buffer;
4797	struct ring_buffer_event *event;
4798	int nr_loops = 0;
4799
4800	if (ts)
4801		*ts = 0;
4802
4803	cpu_buffer = iter->cpu_buffer;
4804	buffer = cpu_buffer->buffer;
4805
4806	/*
4807	 * Check if someone performed a consuming read to the buffer
4808	 * or removed some pages from the buffer. In these cases,
4809	 * iterator was invalidated and we need to reset it.
4810	 */
4811	if (unlikely(iter->cache_read != cpu_buffer->read ||
4812		     iter->cache_reader_page != cpu_buffer->reader_page ||
4813		     iter->cache_pages_removed != cpu_buffer->pages_removed))
4814		rb_iter_reset(iter);
4815
4816 again:
4817	if (ring_buffer_iter_empty(iter))
4818		return NULL;
4819
4820	/*
4821	 * As the writer can mess with what the iterator is trying
4822	 * to read, just give up if we fail to get an event after
4823	 * three tries. The iterator is not as reliable when reading
4824	 * the ring buffer with an active write as the consumer is.
4825	 * Do not warn if three failures are reached.
4826	 */
4827	if (++nr_loops > 3)
4828		return NULL;
4829
4830	if (rb_per_cpu_empty(cpu_buffer))
4831		return NULL;
4832
4833	if (iter->head >= rb_page_size(iter->head_page)) {
4834		rb_inc_iter(iter);
4835		goto again;
4836	}
4837
4838	event = rb_iter_head_event(iter);
4839	if (!event)
4840		goto again;
4841
4842	switch (event->type_len) {
4843	case RINGBUF_TYPE_PADDING:
4844		if (rb_null_event(event)) {
4845			rb_inc_iter(iter);
4846			goto again;
4847		}
4848		rb_advance_iter(iter);
4849		return event;
4850
4851	case RINGBUF_TYPE_TIME_EXTEND:
4852		/* Internal data, OK to advance */
4853		rb_advance_iter(iter);
4854		goto again;
4855
4856	case RINGBUF_TYPE_TIME_STAMP:
4857		if (ts) {
4858			*ts = rb_event_time_stamp(event);
4859			*ts = rb_fix_abs_ts(*ts, iter->head_page->page->time_stamp);
4860			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
4861							 cpu_buffer->cpu, ts);
4862		}
4863		/* Internal data, OK to advance */
4864		rb_advance_iter(iter);
4865		goto again;
4866
4867	case RINGBUF_TYPE_DATA:
4868		if (ts && !(*ts)) {
4869			*ts = iter->read_stamp + event->time_delta;
4870			ring_buffer_normalize_time_stamp(buffer,
4871							 cpu_buffer->cpu, ts);
4872		}
4873		return event;
4874
4875	default:
4876		RB_WARN_ON(cpu_buffer, 1);
4877	}
4878
4879	return NULL;
4880}
4881EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
4882
4883static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer)
4884{
4885	if (likely(!in_nmi())) {
4886		raw_spin_lock(&cpu_buffer->reader_lock);
4887		return true;
4888	}
4889
4890	/*
4891	 * If an NMI die dumps out the content of the ring buffer, a
4892	 * trylock must be used to prevent a deadlock if the NMI
4893	 * preempted a task that holds the ring buffer locks. If
4894	 * we get the lock then all is fine, if not, then continue
4895	 * to do the read, but this can corrupt the ring buffer,
4896	 * so it must be permanently disabled from future writes.
4897	 * Reading from NMI is a one-shot deal.
4898	 */
4899	if (raw_spin_trylock(&cpu_buffer->reader_lock))
4900		return true;
4901
4902	/* Continue without locking, but disable the ring buffer */
4903	atomic_inc(&cpu_buffer->record_disabled);
4904	return false;
4905}
4906
4907static inline void
4908rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
4909{
4910	if (likely(locked))
4911		raw_spin_unlock(&cpu_buffer->reader_lock);
4912}
4913
4914/**
4915 * ring_buffer_peek - peek at the next event to be read
4916 * @buffer: The ring buffer to read
4917 * @cpu: The cpu to peek at
4918 * @ts: The timestamp counter of this event.
4919 * @lost_events: a variable to store if events were lost (may be NULL)
4920 *
4921 * This will return the event that will be read next, but does
4922 * not consume the data.
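 *
 * An illustrative sketch (the printout is hypothetical):
 *
 *	u64 ts;
 *	unsigned long lost;
 *	struct ring_buffer_event *event;
 *
 *	event = ring_buffer_peek(buffer, cpu, &ts, &lost);
 *	if (event)
 *		pr_info("next event ts=%llu, %lu events lost\n", ts, lost);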
4923 */
4924struct ring_buffer_event *
4925ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
4926		 unsigned long *lost_events)
4927{
4928	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4929	struct ring_buffer_event *event;
4930	unsigned long flags;
4931	bool dolock;
4932
4933	if (!cpumask_test_cpu(cpu, buffer->cpumask))
4934		return NULL;
4935
4936 again:
4937	local_irq_save(flags);
4938	dolock = rb_reader_lock(cpu_buffer);
4939	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
4940	if (event && event->type_len == RINGBUF_TYPE_PADDING)
4941		rb_advance_reader(cpu_buffer);
4942	rb_reader_unlock(cpu_buffer, dolock);
4943	local_irq_restore(flags);
4944
4945	if (event && event->type_len == RINGBUF_TYPE_PADDING)
4946		goto again;
4947
4948	return event;
4949}
4950
4951/**
 * ring_buffer_iter_dropped - report if there are dropped events
4952 * @iter: The ring buffer iterator
4953 *
4954 * Returns true if there were dropped events since the last peek.
4955 */
4956bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter)
4957{
4958	bool ret = iter->missed_events != 0;
4959
4960	iter->missed_events = 0;
4961	return ret;
4962}
4963EXPORT_SYMBOL_GPL(ring_buffer_iter_dropped);
4964
4965/**
4966 * ring_buffer_iter_peek - peek at the next event to be read
4967 * @iter: The ring buffer iterator
4968 * @ts: The timestamp counter of this event.
4969 *
4970 * This will return the event that will be read next, but does
4971 * not increment the iterator.
4972 */
4973struct ring_buffer_event *
4974ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
4975{
4976	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4977	struct ring_buffer_event *event;
4978	unsigned long flags;
4979
4980 again:
4981	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4982	event = rb_iter_peek(iter, ts);
4983	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4984
4985	if (event && event->type_len == RINGBUF_TYPE_PADDING)
4986		goto again;
4987
4988	return event;
4989}
4990
4991/**
4992 * ring_buffer_consume - return an event and consume it
4993 * @buffer: The ring buffer to get the next event from
4994 * @cpu: the cpu to read the buffer from
4995 * @ts: a variable to store the timestamp (may be NULL)
4996 * @lost_events: a variable to store if events were lost (may be NULL)
4997 *
4998 * Returns the next event in the ring buffer, and that event is consumed.
4999 * Meaning that sequential reads will keep returning a different event,
5000 * and eventually empty the ring buffer if the producer is slower.
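 *
 * A typical consuming-read loop might look like this (process_event()
 * is a hypothetical callback, not part of this API):
 *
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, NULL)))
 *		process_event(event, ts);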
5001 */
5002struct ring_buffer_event *
5003ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
5004		    unsigned long *lost_events)
5005{
5006	struct ring_buffer_per_cpu *cpu_buffer;
5007	struct ring_buffer_event *event = NULL;
5008	unsigned long flags;
5009	bool dolock;
5010
5011 again:
5012	/* might be called in atomic context */
5013	preempt_disable();
5014
5015	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5016		goto out;
5017
5018	cpu_buffer = buffer->buffers[cpu];
5019	local_irq_save(flags);
5020	dolock = rb_reader_lock(cpu_buffer);
5021
5022	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
5023	if (event) {
5024		cpu_buffer->lost_events = 0;
5025		rb_advance_reader(cpu_buffer);
5026	}
5027
5028	rb_reader_unlock(cpu_buffer, dolock);
5029	local_irq_restore(flags);
5030
5031 out:
5032	preempt_enable();
5033
5034	if (event && event->type_len == RINGBUF_TYPE_PADDING)
5035		goto again;
5036
5037	return event;
5038}
5039EXPORT_SYMBOL_GPL(ring_buffer_consume);
5040
5041/**
5042 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
5043 * @buffer: The ring buffer to read from
5044 * @cpu: The cpu buffer to iterate over
5045 * @flags: gfp flags to use for memory allocation
5046 *
5047 * This performs the initial preparations necessary to iterate
5048 * through the buffer.  Memory is allocated, buffer recording
5049 * is disabled, and the iterator pointer is returned to the caller.
5050 *
5051 * Disabling buffer recording prevents the reading from being
5052 * corrupted. This is not a consuming read, so a producer is not
5053 * expected.
5054 *
5055 * After a sequence of ring_buffer_read_prepare calls, the user is
5056 * expected to make at least one call to ring_buffer_read_prepare_sync.
5057 * Afterwards, ring_buffer_read_start is invoked to get things going
5058 * for real.
5059 *
5060 * This overall must be paired with ring_buffer_read_finish.
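 *
 * An illustrative sketch of the whole sequence for one CPU (error
 * handling omitted):
 *
 *	struct ring_buffer_iter *iter;
 *
 *	iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
 *	ring_buffer_read_prepare_sync();
 *	ring_buffer_read_start(iter);
 *	... walk events with ring_buffer_iter_peek()/ring_buffer_iter_advance() ...
 *	ring_buffer_read_finish(iter);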
5061 */
5062struct ring_buffer_iter *
5063ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
5064{
5065	struct ring_buffer_per_cpu *cpu_buffer;
5066	struct ring_buffer_iter *iter;
5067
5068	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5069		return NULL;
5070
5071	iter = kzalloc(sizeof(*iter), flags);
5072	if (!iter)
5073		return NULL;
5074
5075	/* Holds the entire event: data and meta data */
5076	iter->event_size = buffer->subbuf_size;
5077	iter->event = kmalloc(iter->event_size, flags);
5078	if (!iter->event) {
5079		kfree(iter);
5080		return NULL;
5081	}
5082
5083	cpu_buffer = buffer->buffers[cpu];
5084
5085	iter->cpu_buffer = cpu_buffer;
5086
5087	atomic_inc(&cpu_buffer->resize_disabled);
5088
5089	return iter;
5090}
5091EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
5092
5093/**
5094 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
5095 *
5096 * All previously invoked ring_buffer_read_prepare calls to prepare
5097 * iterators will be synchronized.  Afterwards, ring_buffer_read_start
5098 * calls on those iterators are allowed.
5099 */
5100void
5101ring_buffer_read_prepare_sync(void)
5102{
5103	synchronize_rcu();
5104}
5105EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
5106
5107/**
5108 * ring_buffer_read_start - start a non consuming read of the buffer
5109 * @iter: The iterator returned by ring_buffer_read_prepare
5110 *
5111 * This finalizes the startup of an iteration through the buffer.
5112 * The iterator comes from a call to ring_buffer_read_prepare and
5113 * an intervening ring_buffer_read_prepare_sync must have been
5114 * performed.
5115 *
5116 * Must be paired with ring_buffer_read_finish.
5117 */
5118void
5119ring_buffer_read_start(struct ring_buffer_iter *iter)
5120{
5121	struct ring_buffer_per_cpu *cpu_buffer;
5122	unsigned long flags;
5123
5124	if (!iter)
5125		return;
5126
5127	cpu_buffer = iter->cpu_buffer;
5128
5129	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5130	arch_spin_lock(&cpu_buffer->lock);
5131	rb_iter_reset(iter);
5132	arch_spin_unlock(&cpu_buffer->lock);
5133	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5134}
5135EXPORT_SYMBOL_GPL(ring_buffer_read_start);
5136
5137/**
5138 * ring_buffer_read_finish - finish reading the iterator of the buffer
5139 * @iter: The iterator retrieved by ring_buffer_read_prepare
5140 *
5141 * This re-enables the recording to the buffer, and frees the
5142 * iterator.
5143 */
5144void
5145ring_buffer_read_finish(struct ring_buffer_iter *iter)
5146{
5147	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5148	unsigned long flags;
5149
5150	/*
5151	 * Ring buffer is disabled from recording, here's a good place
5152	 * to check the integrity of the ring buffer.
5153	 * Must prevent readers from trying to read, as the check
5154	 * clears the HEAD page and readers require it.
5155	 */
5156	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5157	rb_check_pages(cpu_buffer);
5158	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5159
5160	atomic_dec(&cpu_buffer->resize_disabled);
5161	kfree(iter->event);
5162	kfree(iter);
5163}
5164EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
5165
5166/**
5167 * ring_buffer_iter_advance - advance the iterator to the next location
5168 * @iter: The ring buffer iterator
5169 *
5170 * Move the location of the iterator such that the next read will
5171 * be the next location of the iterator.
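 *
 * A minimal iteration sketch (handle_event() is hypothetical):
 *
 *	while ((event = ring_buffer_iter_peek(iter, &ts))) {
 *		handle_event(event, ts);
 *		ring_buffer_iter_advance(iter);
 *	}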
5172 */
5173void ring_buffer_iter_advance(struct ring_buffer_iter *iter)
5174{
5175	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
5176	unsigned long flags;
5177
5178	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5179
5180	rb_advance_iter(iter);
5181
5182	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5183}
5184EXPORT_SYMBOL_GPL(ring_buffer_iter_advance);
5185
5186/**
5187 * ring_buffer_size - return the size of the ring buffer (in bytes)
5188 * @buffer: The ring buffer.
5189 * @cpu: The CPU to get ring buffer size from.
5190 */
5191unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
5192{
5193	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5194		return 0;
5195
5196	return buffer->subbuf_size * buffer->buffers[cpu]->nr_pages;
5197}
5198EXPORT_SYMBOL_GPL(ring_buffer_size);
5199
5200/**
5201 * ring_buffer_max_event_size - return the max data size of an event
5202 * @buffer: The ring buffer.
5203 *
5204 * Returns the maximum size an event can be.
5205 */
5206unsigned long ring_buffer_max_event_size(struct trace_buffer *buffer)
5207{
5208	/* If abs timestamp is requested, events have a timestamp too */
5209	if (ring_buffer_time_stamp_abs(buffer))
5210		return buffer->max_data_size - RB_LEN_TIME_EXTEND;
5211	return buffer->max_data_size;
5212}
5213EXPORT_SYMBOL_GPL(ring_buffer_max_event_size);
5214
5215static void rb_clear_buffer_page(struct buffer_page *page)
5216{
5217	local_set(&page->write, 0);
5218	local_set(&page->entries, 0);
5219	rb_init_page(page->page);
5220	page->read = 0;
5221}
5222
5223static void
5224rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
5225{
5226	struct buffer_page *page;
5227
5228	rb_head_page_deactivate(cpu_buffer);
5229
5230	cpu_buffer->head_page
5231		= list_entry(cpu_buffer->pages, struct buffer_page, list);
5232	rb_clear_buffer_page(cpu_buffer->head_page);
5233	list_for_each_entry(page, cpu_buffer->pages, list) {
5234		rb_clear_buffer_page(page);
5235	}
5236
5237	cpu_buffer->tail_page = cpu_buffer->head_page;
5238	cpu_buffer->commit_page = cpu_buffer->head_page;
5239
5240	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
5241	INIT_LIST_HEAD(&cpu_buffer->new_pages);
5242	rb_clear_buffer_page(cpu_buffer->reader_page);
5243
5244	local_set(&cpu_buffer->entries_bytes, 0);
5245	local_set(&cpu_buffer->overrun, 0);
5246	local_set(&cpu_buffer->commit_overrun, 0);
5247	local_set(&cpu_buffer->dropped_events, 0);
5248	local_set(&cpu_buffer->entries, 0);
5249	local_set(&cpu_buffer->committing, 0);
5250	local_set(&cpu_buffer->commits, 0);
5251	local_set(&cpu_buffer->pages_touched, 0);
5252	local_set(&cpu_buffer->pages_lost, 0);
5253	local_set(&cpu_buffer->pages_read, 0);
5254	cpu_buffer->last_pages_touch = 0;
5255	cpu_buffer->shortest_full = 0;
5256	cpu_buffer->read = 0;
5257	cpu_buffer->read_bytes = 0;
5258
5259	rb_time_set(&cpu_buffer->write_stamp, 0);
5260	rb_time_set(&cpu_buffer->before_stamp, 0);
5261
5262	memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp));
5263
5264	cpu_buffer->lost_events = 0;
5265	cpu_buffer->last_overrun = 0;
5266
5267	rb_head_page_activate(cpu_buffer);
5268	cpu_buffer->pages_removed = 0;
5269}
5270
5271/* Must have disabled the cpu buffer then done a synchronize_rcu */
5272static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
5273{
5274	unsigned long flags;
5275
5276	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5277
5278	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
5279		goto out;
5280
5281	arch_spin_lock(&cpu_buffer->lock);
5282
5283	rb_reset_cpu(cpu_buffer);
5284
5285	arch_spin_unlock(&cpu_buffer->lock);
5286
5287 out:
5288	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5289}
5290
5291/**
5292 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
5293 * @buffer: The ring buffer to reset a per cpu buffer of
5294 * @cpu: The CPU buffer to be reset
5295 */
5296void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
5297{
5298	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5299
5300	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5301		return;
5302
5303	/* prevent another thread from changing buffer sizes */
5304	mutex_lock(&buffer->mutex);
5305
5306	atomic_inc(&cpu_buffer->resize_disabled);
5307	atomic_inc(&cpu_buffer->record_disabled);
5308
5309	/* Make sure all commits have finished */
5310	synchronize_rcu();
5311
5312	reset_disabled_cpu_buffer(cpu_buffer);
5313
5314	atomic_dec(&cpu_buffer->record_disabled);
5315	atomic_dec(&cpu_buffer->resize_disabled);
5316
5317	mutex_unlock(&buffer->mutex);
5318}
5319EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
5320
5321/* Flag to ensure proper resetting of atomic variables */
5322#define RESET_BIT	(1 << 30)
5323
5324/**
5325 * ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer
5326 * @buffer: The ring buffer to reset a per cpu buffer of
5327 */
5328void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
5329{
5330	struct ring_buffer_per_cpu *cpu_buffer;
5331	int cpu;
5332
5333	/* prevent another thread from changing buffer sizes */
5334	mutex_lock(&buffer->mutex);
5335
5336	for_each_online_buffer_cpu(buffer, cpu) {
5337		cpu_buffer = buffer->buffers[cpu];
5338
5339		atomic_add(RESET_BIT, &cpu_buffer->resize_disabled);
5340		atomic_inc(&cpu_buffer->record_disabled);
5341	}
5342
5343	/* Make sure all commits have finished */
5344	synchronize_rcu();
5345
5346	for_each_buffer_cpu(buffer, cpu) {
5347		cpu_buffer = buffer->buffers[cpu];
5348
5349		/*
5350		 * If a CPU came online during the synchronize_rcu(), then
5351		 * ignore it.
5352		 */
5353		if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT))
5354			continue;
5355
5356		reset_disabled_cpu_buffer(cpu_buffer);
5357
5358		atomic_dec(&cpu_buffer->record_disabled);
5359		atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled);
5360	}
5361
5362	mutex_unlock(&buffer->mutex);
5363}
5364
5365/**
5366 * ring_buffer_reset - reset a ring buffer
5367 * @buffer: The ring buffer to reset all cpu buffers
5368 */
5369void ring_buffer_reset(struct trace_buffer *buffer)
5370{
5371	struct ring_buffer_per_cpu *cpu_buffer;
5372	int cpu;
5373
5374	/* prevent another thread from changing buffer sizes */
5375	mutex_lock(&buffer->mutex);
5376
5377	for_each_buffer_cpu(buffer, cpu) {
5378		cpu_buffer = buffer->buffers[cpu];
5379
5380		atomic_inc(&cpu_buffer->resize_disabled);
5381		atomic_inc(&cpu_buffer->record_disabled);
5382	}
5383
5384	/* Make sure all commits have finished */
5385	synchronize_rcu();
5386
5387	for_each_buffer_cpu(buffer, cpu) {
5388		cpu_buffer = buffer->buffers[cpu];
5389
5390		reset_disabled_cpu_buffer(cpu_buffer);
5391
5392		atomic_dec(&cpu_buffer->record_disabled);
5393		atomic_dec(&cpu_buffer->resize_disabled);
5394	}
5395
5396	mutex_unlock(&buffer->mutex);
5397}
5398EXPORT_SYMBOL_GPL(ring_buffer_reset);
5399
5400/**
5401 * ring_buffer_empty - is the ring buffer empty?
5402 * @buffer: The ring buffer to test
5403 */
5404bool ring_buffer_empty(struct trace_buffer *buffer)
5405{
5406	struct ring_buffer_per_cpu *cpu_buffer;
5407	unsigned long flags;
5408	bool dolock;
5409	bool ret;
5410	int cpu;
5411
5412	/* yes this is racy, but if you don't like the race, lock the buffer */
5413	for_each_buffer_cpu(buffer, cpu) {
5414		cpu_buffer = buffer->buffers[cpu];
5415		local_irq_save(flags);
5416		dolock = rb_reader_lock(cpu_buffer);
5417		ret = rb_per_cpu_empty(cpu_buffer);
5418		rb_reader_unlock(cpu_buffer, dolock);
5419		local_irq_restore(flags);
5420
5421		if (!ret)
5422			return false;
5423	}
5424
5425	return true;
5426}
5427EXPORT_SYMBOL_GPL(ring_buffer_empty);
5428
5429/**
5430 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
5431 * @buffer: The ring buffer
5432 * @cpu: The CPU buffer to test
5433 */
5434bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
5435{
5436	struct ring_buffer_per_cpu *cpu_buffer;
5437	unsigned long flags;
5438	bool dolock;
5439	bool ret;
5440
5441	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5442		return true;
5443
5444	cpu_buffer = buffer->buffers[cpu];
5445	local_irq_save(flags);
5446	dolock = rb_reader_lock(cpu_buffer);
5447	ret = rb_per_cpu_empty(cpu_buffer);
5448	rb_reader_unlock(cpu_buffer, dolock);
5449	local_irq_restore(flags);
5450
5451	return ret;
5452}
5453EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
5454
5455#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
5456/**
5457 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
5458 * @buffer_a: One buffer to swap with
5459 * @buffer_b: The other buffer to swap with
5460 * @cpu: the CPU of the buffers to swap
5461 *
5462 * This function is useful for tracers that want to take a "snapshot"
5463 * of a CPU buffer and have another backup buffer lying around.
5464 * It is expected that the tracer handles the cpu buffer not being
5465 * used at the moment.
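 *
 * An illustrative snapshot sketch (live_buffer, snap_buffer and
 * read_snapshot() are hypothetical and owned by the caller):
 *
 *	if (ring_buffer_swap_cpu(live_buffer, snap_buffer, cpu) == 0)
 *		read_snapshot(snap_buffer, cpu);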
5466 */
5467int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
5468			 struct trace_buffer *buffer_b, int cpu)
5469{
5470	struct ring_buffer_per_cpu *cpu_buffer_a;
5471	struct ring_buffer_per_cpu *cpu_buffer_b;
5472	int ret = -EINVAL;
5473
5474	if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
5475	    !cpumask_test_cpu(cpu, buffer_b->cpumask))
5476		goto out;
5477
5478	cpu_buffer_a = buffer_a->buffers[cpu];
5479	cpu_buffer_b = buffer_b->buffers[cpu];
5480
5481	/* At least make sure the two buffers are somewhat the same */
5482	if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
5483		goto out;
5484
5485	if (buffer_a->subbuf_order != buffer_b->subbuf_order)
5486		goto out;
5487
5488	ret = -EAGAIN;
5489
5490	if (atomic_read(&buffer_a->record_disabled))
5491		goto out;
5492
5493	if (atomic_read(&buffer_b->record_disabled))
5494		goto out;
5495
5496	if (atomic_read(&cpu_buffer_a->record_disabled))
5497		goto out;
5498
5499	if (atomic_read(&cpu_buffer_b->record_disabled))
5500		goto out;
5501
5502	/*
5503	 * We can't do a synchronize_rcu here because this
5504	 * function can be called in atomic context.
5505	 * Normally this will be called from the same CPU as cpu.
5506	 * If not, it's up to the caller to protect this.
5507	 */
5508	atomic_inc(&cpu_buffer_a->record_disabled);
5509	atomic_inc(&cpu_buffer_b->record_disabled);
5510
5511	ret = -EBUSY;
5512	if (local_read(&cpu_buffer_a->committing))
5513		goto out_dec;
5514	if (local_read(&cpu_buffer_b->committing))
5515		goto out_dec;
5516
5517	/*
5518	 * When resize is in progress, we cannot swap it because
5519	 * it will mess up the state of the cpu buffer.
5520	 */
5521	if (atomic_read(&buffer_a->resizing))
5522		goto out_dec;
5523	if (atomic_read(&buffer_b->resizing))
5524		goto out_dec;
5525
5526	buffer_a->buffers[cpu] = cpu_buffer_b;
5527	buffer_b->buffers[cpu] = cpu_buffer_a;
5528
5529	cpu_buffer_b->buffer = buffer_a;
5530	cpu_buffer_a->buffer = buffer_b;
5531
5532	ret = 0;
5533
5534out_dec:
5535	atomic_dec(&cpu_buffer_a->record_disabled);
5536	atomic_dec(&cpu_buffer_b->record_disabled);
5537out:
5538	return ret;
5539}
5540EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
5541#endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
5542
5543/**
5544 * ring_buffer_alloc_read_page - allocate a page to read from buffer
5545 * @buffer: the buffer to allocate for.
5546 * @cpu: the cpu buffer to allocate.
5547 *
5548 * This function is used in conjunction with ring_buffer_read_page.
5549 * When reading a full page from the ring buffer, these functions
5550 * can be used to speed up the process. The calling function should
5551 * allocate a few pages first with this function. Then when it
5552 * needs to get pages from the ring buffer, it passes the result
5553 * of this function into ring_buffer_read_page, which will swap
5554 * the page that was allocated, with the read page of the buffer.
5555 *
5556 * Returns:
5557 *  The page allocated, or ERR_PTR
5558 */
5559struct buffer_data_read_page *
5560ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
5561{
5562	struct ring_buffer_per_cpu *cpu_buffer;
5563	struct buffer_data_read_page *bpage = NULL;
5564	unsigned long flags;
5565	struct page *page;
5566
5567	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5568		return ERR_PTR(-ENODEV);
5569
5570	bpage = kzalloc(sizeof(*bpage), GFP_KERNEL);
5571	if (!bpage)
5572		return ERR_PTR(-ENOMEM);
5573
5574	bpage->order = buffer->subbuf_order;
5575	cpu_buffer = buffer->buffers[cpu];
5576	local_irq_save(flags);
5577	arch_spin_lock(&cpu_buffer->lock);
5578
5579	if (cpu_buffer->free_page) {
5580		bpage->data = cpu_buffer->free_page;
5581		cpu_buffer->free_page = NULL;
5582	}
5583
5584	arch_spin_unlock(&cpu_buffer->lock);
5585	local_irq_restore(flags);
5586
5587	if (bpage->data)
5588		goto out;
5589
5590	page = alloc_pages_node(cpu_to_node(cpu),
5591				GFP_KERNEL | __GFP_NORETRY | __GFP_ZERO,
5592				cpu_buffer->buffer->subbuf_order);
5593	if (!page) {
5594		kfree(bpage);
5595		return ERR_PTR(-ENOMEM);
5596	}
5597
5598	bpage->data = page_address(page);
5599
5600 out:
5601	rb_init_page(bpage->data);
5602
5603	return bpage;
5604}
5605EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
5606
5607/**
5608 * ring_buffer_free_read_page - free an allocated read page
5609 * @buffer: the buffer the page was allocated for
5610 * @cpu: the cpu buffer the page came from
5611 * @data_page: the page to free
5612 *
5613 * Free a page allocated from ring_buffer_alloc_read_page.
5614 */
5615void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu,
5616				struct buffer_data_read_page *data_page)
5617{
5618	struct ring_buffer_per_cpu *cpu_buffer;
5619	struct buffer_data_page *bpage = data_page->data;
5620	struct page *page = virt_to_page(bpage);
5621	unsigned long flags;
5622
5623	if (!buffer || !buffer->buffers || !buffer->buffers[cpu])
5624		return;
5625
5626	cpu_buffer = buffer->buffers[cpu];
5627
5628	/*
5629	 * If the page is still in use someplace else, or the order of the
5630	 * page is different from the sub-buffer order of the buffer,
5631	 * we can't reuse it.
5632	 */
5633	if (page_ref_count(page) > 1 || data_page->order != buffer->subbuf_order)
5634		goto out;
5635
5636	local_irq_save(flags);
5637	arch_spin_lock(&cpu_buffer->lock);
5638
5639	if (!cpu_buffer->free_page) {
5640		cpu_buffer->free_page = bpage;
5641		bpage = NULL;
5642	}
5643
5644	arch_spin_unlock(&cpu_buffer->lock);
5645	local_irq_restore(flags);
5646
5647 out:
5648	free_pages((unsigned long)bpage, data_page->order);
5649	kfree(data_page);
5650}
5651EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
5652
5653/**
5654 * ring_buffer_read_page - extract a page from the ring buffer
5655 * @buffer: buffer to extract from
5656 * @data_page: the page to use allocated from ring_buffer_alloc_read_page
5657 * @len: amount to extract
5658 * @cpu: the cpu of the buffer to extract
5659 * @full: should the extraction only happen when the page is full.
5660 *
5661 * This function will pull out a page from the ring buffer and consume it.
5662 * @data_page must be the address of the variable that was returned
5663 * from ring_buffer_alloc_read_page. This is because the page might be used
5664 * to swap with a page in the ring buffer.
5665 *
5666 * for example:
5667 *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
5668 *	if (IS_ERR(rpage))
5669 *		return PTR_ERR(rpage);
5670 *	ret = ring_buffer_read_page(buffer, rpage, len, cpu, 0);
5671 *	if (ret >= 0)
5672 *		process_page(ring_buffer_read_page_data(rpage), ret);
5673 *	ring_buffer_free_read_page(buffer, cpu, rpage);
5674 *
5675 * When @full is set, the function will not succeed unless
5676 * the writer is off the reader page.
5677 *
5678 * Note: it is up to the calling functions to handle sleeps and wakeups.
5679 *  The ring buffer can be used anywhere in the kernel and can not
5680 *  blindly call wake_up. The layer that uses the ring buffer must be
5681 *  responsible for that.
5682 *
5683 * Returns:
5684 *  >=0 if data has been transferred, returns the offset of consumed data.
5685 *  <0 if no data has been transferred.
5686 */
5687int ring_buffer_read_page(struct trace_buffer *buffer,
5688			  struct buffer_data_read_page *data_page,
5689			  size_t len, int cpu, int full)
5690{
5691	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
5692	struct ring_buffer_event *event;
5693	struct buffer_data_page *bpage;
5694	struct buffer_page *reader;
5695	unsigned long missed_events;
5696	unsigned long flags;
5697	unsigned int commit;
5698	unsigned int read;
5699	u64 save_timestamp;
5700	int ret = -1;
5701
5702	if (!cpumask_test_cpu(cpu, buffer->cpumask))
5703		goto out;
5704
5705	/*
5706	 * If len is not big enough to hold the page header, then
5707	 * we can not copy anything.
5708	 */
5709	if (len <= BUF_PAGE_HDR_SIZE)
5710		goto out;
5711
5712	len -= BUF_PAGE_HDR_SIZE;
5713
5714	if (!data_page || !data_page->data)
5715		goto out;
5716	if (data_page->order != buffer->subbuf_order)
5717		goto out;
5718
5719	bpage = data_page->data;
5720	if (!bpage)
5721		goto out;
5722
5723	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
5724
5725	reader = rb_get_reader_page(cpu_buffer);
5726	if (!reader)
5727		goto out_unlock;
5728
5729	event = rb_reader_event(cpu_buffer);
5730
5731	read = reader->read;
5732	commit = rb_page_commit(reader);
5733
5734	/* Check if any events were dropped */
5735	missed_events = cpu_buffer->lost_events;
5736
5737	/*
5738	 * If this page has been partially read or
5739	 * if len is not big enough to read the rest of the page or
5740	 * a writer is still on the page, then
5741	 * we must copy the data from the page to the buffer.
5742	 * Otherwise, we can simply swap the page with the one passed in.
5743	 */
5744	if (read || (len < (commit - read)) ||
5745	    cpu_buffer->reader_page == cpu_buffer->commit_page) {
5746		struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
5747		unsigned int rpos = read;
5748		unsigned int pos = 0;
5749		unsigned int size;
5750
5751		/*
5752		 * If a full page is expected, this can still be returned
5753		 * if there's been a previous partial read and the
5754		 * rest of the page can be read and the commit page is off
5755		 * the reader page.
5756		 */
5757		if (full &&
5758		    (!read || (len < (commit - read)) ||
5759		     cpu_buffer->reader_page == cpu_buffer->commit_page))
5760			goto out_unlock;
5761
5762		if (len > (commit - read))
5763			len = (commit - read);
5764
5765		/* Always keep the time extend and data together */
5766		size = rb_event_ts_length(event);
5767
5768		if (len < size)
5769			goto out_unlock;
5770
5771		/* save the current timestamp, since the user will need it */
5772		save_timestamp = cpu_buffer->read_stamp;
5773
5774		/* Need to copy one event at a time */
5775		do {
5776			/* We need the size of one event, because
5777			 * rb_advance_reader only advances by one event,
5778			 * whereas rb_event_ts_length may include the size of
5779			 * one or two events.
5780			 * We have already ensured there's enough space if this
5781			 * is a time extend. */
5782			size = rb_event_length(event);
5783			memcpy(bpage->data + pos, rpage->data + rpos, size);
5784
5785			len -= size;
5786
5787			rb_advance_reader(cpu_buffer);
5788			rpos = reader->read;
5789			pos += size;
5790
5791			if (rpos >= commit)
5792				break;
5793
5794			event = rb_reader_event(cpu_buffer);
5795			/* Always keep the time extend and data together */
5796			size = rb_event_ts_length(event);
5797		} while (len >= size);
5798
5799		/* update bpage */
5800		local_set(&bpage->commit, pos);
5801		bpage->time_stamp = save_timestamp;
5802
5803		/* we copied everything to the beginning */
5804		read = 0;
5805	} else {
5806		/* update the entry counter */
5807		cpu_buffer->read += rb_page_entries(reader);
5808		cpu_buffer->read_bytes += rb_page_commit(reader);
5809
5810		/* swap the pages */
5811		rb_init_page(bpage);
5812		bpage = reader->page;
5813		reader->page = data_page->data;
5814		local_set(&reader->write, 0);
5815		local_set(&reader->entries, 0);
5816		reader->read = 0;
5817		data_page->data = bpage;
5818
5819		/*
5820		 * Use the real_end for the data size.
5821		 * This gives us a chance to store the lost events
5822		 * on the page.
5823		 */
5824		if (reader->real_end)
5825			local_set(&bpage->commit, reader->real_end);
5826	}
5827	ret = read;
5828
5829	cpu_buffer->lost_events = 0;
5830
5831	commit = local_read(&bpage->commit);
5832	/*
5833	 * Set a flag in the commit field if we lost events
5834	 */
5835	if (missed_events) {
5836		/* If there is room at the end of the page to save the
5837		 * missed events, then record it there.
5838		 */
5839		if (buffer->subbuf_size - commit >= sizeof(missed_events)) {
5840			memcpy(&bpage->data[commit], &missed_events,
5841			       sizeof(missed_events));
5842			local_add(RB_MISSED_STORED, &bpage->commit);
5843			commit += sizeof(missed_events);
5844		}
5845		local_add(RB_MISSED_EVENTS, &bpage->commit);
5846	}
5847
5848	/*
5849	 * This page may be off to user land. Zero it out here.
5850	 */
5851	if (commit < buffer->subbuf_size)
5852		memset(&bpage->data[commit], 0, buffer->subbuf_size - commit);
5853
5854 out_unlock:
5855	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
5856
5857 out:
5858	return ret;
5859}
5860EXPORT_SYMBOL_GPL(ring_buffer_read_page);
5861
5862/**
5863 * ring_buffer_read_page_data - get pointer to the data in the page.
5864 * @page:  the page to get the data from
5865 *
5866 * Returns pointer to the actual data in this page.
5867 */
5868void *ring_buffer_read_page_data(struct buffer_data_read_page *page)
5869{
5870	return page->data;
5871}
5872EXPORT_SYMBOL_GPL(ring_buffer_read_page_data);
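
/*
 * Editor's sketch (not part of the kernel source): a minimal example of the
 * read-page path above.  The helpers ring_buffer_alloc_read_page() and
 * ring_buffer_free_read_page(), and the exact ring_buffer_read_page()
 * prototype, are assumed from the ring buffer API declared in
 * <linux/ring_buffer.h>; error handling is abbreviated.
 */
static int __maybe_unused example_read_one_subbuf(struct trace_buffer *buffer,
						  int cpu)
{
	struct buffer_data_read_page *dpage;
	int ret;

	dpage = ring_buffer_alloc_read_page(buffer, cpu);
	if (IS_ERR(dpage))
		return PTR_ERR(dpage);

	/* Pull up to one sub buffer of data; this may swap or copy pages */
	ret = ring_buffer_read_page(buffer, dpage,
				    ring_buffer_subbuf_size_get(buffer),
				    cpu, 0);
	if (ret >= 0) {
		void *data = ring_buffer_read_page_data(dpage);

		/*
		 * Events start at offset @ret within @data.  If events were
		 * lost, the page's commit field carries the RB_MISSED_EVENTS
		 * (and possibly RB_MISSED_STORED) flags set above.
		 */
		pr_debug("read sub buffer at %p, reader offset %d\n", data, ret);
	}

	ring_buffer_free_read_page(buffer, cpu, dpage);
	return ret < 0 ? ret : 0;
}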
5873
5874/**
5875 * ring_buffer_subbuf_size_get - get size of the sub buffer.
5876 * @buffer: the buffer to get the sub buffer size from
5877 *
5878 * Returns size of the sub buffer, in bytes.
5879 */
5880int ring_buffer_subbuf_size_get(struct trace_buffer *buffer)
5881{
5882	return buffer->subbuf_size + BUF_PAGE_HDR_SIZE;
5883}
5884EXPORT_SYMBOL_GPL(ring_buffer_subbuf_size_get);
5885
5886/**
5887 * ring_buffer_subbuf_order_get - get order of system sub pages in one buffer page.
5888 * @buffer: The ring_buffer to get the system sub page order from
5889 *
5890 * By default, one ring buffer sub page equals one system page. This parameter
5891 * is configurable per ring buffer. The size of the ring buffer sub page can be
5892 * extended, but must be a power-of-two multiple of the system page size.
5893 *
5894 * Returns the order of buffer sub page size, in system pages:
5895 * 0 means the sub buffer size is 1 system page and so forth.
5896 * In case of an error < 0 is returned.
5897 */
5898int ring_buffer_subbuf_order_get(struct trace_buffer *buffer)
5899{
5900	if (!buffer)
5901		return -EINVAL;
5902
5903	return buffer->subbuf_order;
5904}
5905EXPORT_SYMBOL_GPL(ring_buffer_subbuf_order_get);
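
/*
 * Editor's sketch (not part of the kernel source): per the definitions above,
 * ring_buffer_subbuf_size_get() adds the page header back onto subbuf_size,
 * so for a given order the full sub buffer size works out to
 * (1 << order) * PAGE_SIZE.
 */
static void __maybe_unused example_check_subbuf_geometry(struct trace_buffer *buffer)
{
	int order = ring_buffer_subbuf_order_get(buffer);
	int size = ring_buffer_subbuf_size_get(buffer);

	/* e.g. order 0 -> PAGE_SIZE, order 2 -> 4 * PAGE_SIZE */
	WARN_ON(order >= 0 && size != (1 << order) * PAGE_SIZE);
}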
5906
5907/**
5908 * ring_buffer_subbuf_order_set - set the size of ring buffer sub page.
5909 * @buffer: The ring_buffer to set the new page size for.
5910 * @order: Order of the system pages in one sub buffer page
5911 *
5912 * By default, one ring buffer page equals one system page. This API can be
5913 * used to set a new size of the ring buffer page. The size must be a
5914 * power-of-two multiple of the system page size, which is why the input
5915 * parameter @order is the order of system pages allocated for one ring buffer page:
5916 *  0 - 1 system page
5917 *  1 - 2 system pages
5918 *  2 - 4 system pages
5919 *  ...
5920 *
5921 * Returns 0 on success or < 0 in case of an error.
5922 */
5923int ring_buffer_subbuf_order_set(struct trace_buffer *buffer, int order)
5924{
5925	struct ring_buffer_per_cpu *cpu_buffer;
5926	struct buffer_page *bpage, *tmp;
5927	int old_order, old_size;
5928	int nr_pages;
5929	int psize;
5930	int err;
5931	int cpu;
5932
5933	if (!buffer || order < 0)
5934		return -EINVAL;
5935
5936	if (buffer->subbuf_order == order)
5937		return 0;
5938
5939	psize = (1 << order) * PAGE_SIZE;
5940	if (psize <= BUF_PAGE_HDR_SIZE)
5941		return -EINVAL;
5942
5943	/* Size of a subbuf cannot be greater than the write counter */
5944	if (psize > RB_WRITE_MASK + 1)
5945		return -EINVAL;
5946
5947	old_order = buffer->subbuf_order;
5948	old_size = buffer->subbuf_size;
5949
5950	/* prevent another thread from changing buffer sizes */
5951	mutex_lock(&buffer->mutex);
5952	atomic_inc(&buffer->record_disabled);
5953
5954	/* Make sure all commits have finished */
5955	synchronize_rcu();
5956
5957	buffer->subbuf_order = order;
5958	buffer->subbuf_size = psize - BUF_PAGE_HDR_SIZE;
5959
5960	/* Make sure all new buffers are allocated before deleting the old ones */
5961	for_each_buffer_cpu(buffer, cpu) {
5962
5963		if (!cpumask_test_cpu(cpu, buffer->cpumask))
5964			continue;
5965
5966		cpu_buffer = buffer->buffers[cpu];
5967
5968		/* Update the number of pages to match the new size */
5969		nr_pages = old_size * buffer->buffers[cpu]->nr_pages;
5970		nr_pages = DIV_ROUND_UP(nr_pages, buffer->subbuf_size);
5971
5972		/* we need a minimum of two pages */
5973		if (nr_pages < 2)
5974			nr_pages = 2;
5975
5976		cpu_buffer->nr_pages_to_update = nr_pages;
5977
5978		/* Include the reader page */
5979		nr_pages++;
5980
5981		/* Allocate the new size buffer */
5982		INIT_LIST_HEAD(&cpu_buffer->new_pages);
5983		if (__rb_allocate_pages(cpu_buffer, nr_pages,
5984					&cpu_buffer->new_pages)) {
5985			/* not enough memory for new pages */
5986			err = -ENOMEM;
5987			goto error;
5988		}
5989	}
5990
5991	for_each_buffer_cpu(buffer, cpu) {
5992
5993		if (!cpumask_test_cpu(cpu, buffer->cpumask))
5994			continue;
5995
5996		cpu_buffer = buffer->buffers[cpu];
5997
5998		/* Clear the head bit to make the linked list normal to read */
5999		rb_head_page_deactivate(cpu_buffer);
6000
6001		/* Now walk the list and free all the old sub buffers */
6002		list_for_each_entry_safe(bpage, tmp, cpu_buffer->pages, list) {
6003			list_del_init(&bpage->list);
6004			free_buffer_page(bpage);
6005		}
6006		/* The above loop stopped before freeing the last page */
6007		bpage = list_entry(cpu_buffer->pages, struct buffer_page, list);
6008		free_buffer_page(bpage);
6009
6010		/* Free the current reader page */
6011		free_buffer_page(cpu_buffer->reader_page);
6012
6013		/* One page was allocated for the reader page */
6014		cpu_buffer->reader_page = list_entry(cpu_buffer->new_pages.next,
6015						     struct buffer_page, list);
6016		list_del_init(&cpu_buffer->reader_page->list);
6017
6018		/* The cpu_buffer pages are a linked list with no head */
6019		cpu_buffer->pages = cpu_buffer->new_pages.next;
6020		cpu_buffer->new_pages.next->prev = cpu_buffer->new_pages.prev;
6021		cpu_buffer->new_pages.prev->next = cpu_buffer->new_pages.next;
6022
6023		/* Clear the new_pages list */
6024		INIT_LIST_HEAD(&cpu_buffer->new_pages);
6025
6026		cpu_buffer->head_page
6027			= list_entry(cpu_buffer->pages, struct buffer_page, list);
6028		cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
6029
6030		cpu_buffer->nr_pages = cpu_buffer->nr_pages_to_update;
6031		cpu_buffer->nr_pages_to_update = 0;
6032
6033		free_pages((unsigned long)cpu_buffer->free_page, old_order);
6034		cpu_buffer->free_page = NULL;
6035
6036		rb_head_page_activate(cpu_buffer);
6037
6038		rb_check_pages(cpu_buffer);
6039	}
6040
6041	atomic_dec(&buffer->record_disabled);
6042	mutex_unlock(&buffer->mutex);
6043
6044	return 0;
6045
6046error:
6047	buffer->subbuf_order = old_order;
6048	buffer->subbuf_size = old_size;
6049
6050	atomic_dec(&buffer->record_disabled);
6051	mutex_unlock(&buffer->mutex);
6052
6053	for_each_buffer_cpu(buffer, cpu) {
6054		cpu_buffer = buffer->buffers[cpu];
6055
6056		if (!cpu_buffer->nr_pages_to_update)
6057			continue;
6058
6059		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, list) {
6060			list_del_init(&bpage->list);
6061			free_buffer_page(bpage);
6062		}
6063	}
6064
6065	return err;
6066}
6067EXPORT_SYMBOL_GPL(ring_buffer_subbuf_order_set);
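
/*
 * Editor's sketch (not part of the kernel source): switching a buffer to
 * 4-page sub buffers (order 2) with the API above.  On failure the buffer
 * keeps its previous order and size, as the error path above restores them.
 */
static int __maybe_unused example_use_4page_subbufs(struct trace_buffer *buffer)
{
	int err;

	err = ring_buffer_subbuf_order_set(buffer, 2);
	if (err) {
		pr_warn("sub buffer resize failed: %d (order still %d)\n",
			err, ring_buffer_subbuf_order_get(buffer));
		return err;
	}

	return 0;
}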
6068
6069/*
6070 * We only allocate new buffers, never free them if the CPU goes down.
6071 * If we were to free the buffer, then the user would lose any trace that was in
6072 * the buffer.
6073 */
6074int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
6075{
6076	struct trace_buffer *buffer;
6077	long nr_pages_same;
6078	int cpu_i;
6079	unsigned long nr_pages;
6080
6081	buffer = container_of(node, struct trace_buffer, node);
6082	if (cpumask_test_cpu(cpu, buffer->cpumask))
6083		return 0;
6084
6085	nr_pages = 0;
6086	nr_pages_same = 1;
6087	/* check if all cpu sizes are same */
6088	for_each_buffer_cpu(buffer, cpu_i) {
6089		/* fill in the size from first enabled cpu */
6090		if (nr_pages == 0)
6091			nr_pages = buffer->buffers[cpu_i]->nr_pages;
6092		if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
6093			nr_pages_same = 0;
6094			break;
6095		}
6096	}
6097	/* allocate minimum pages, user can later expand it */
6098	if (!nr_pages_same)
6099		nr_pages = 2;
6100	buffer->buffers[cpu] =
6101		rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
6102	if (!buffer->buffers[cpu]) {
6103		WARN(1, "failed to allocate ring buffer on CPU %u\n",
6104		     cpu);
6105		return -ENOMEM;
6106	}
6107	smp_wmb();
6108	cpumask_set_cpu(cpu, buffer->cpumask);
6109	return 0;
6110}
6111
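/*
 * Editor's note: trace_rb_cpu_prepare() above is a multi-instance CPU hotplug
 * "prepare" callback.  The registration itself lives in the tracing core
 * rather than in this file; roughly (a sketch, names assumed):
 *
 *	cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE, "trace/RB:prepare",
 *				trace_rb_cpu_prepare, NULL);
 *
 * Each ring buffer then adds its @node instance with
 * cpuhp_state_add_instance(), so a newly onlined CPU gets a per-CPU buffer
 * allocated, while buffers are intentionally never freed when a CPU goes
 * offline (see the comment above).
 */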
6112#ifdef CONFIG_RING_BUFFER_STARTUP_TEST
6113/*
6114 * This is a basic integrity check of the ring buffer.
6115 * Late in the boot cycle this test will run when configured in.
6116 * It will kick off a thread per CPU that will go into a loop
6117 * writing to the per cpu ring buffer various sizes of data.
6118 * Some of the data will be large items, some small.
6119 *
6120 * Another thread is created that goes into a spin, sending out
6121 * IPIs to the other CPUs to also write into the ring buffer.
6122 * This is to test the nesting ability of the buffer.
6123 *
6124 * Basic stats are recorded and reported. If something unexpected
6125 * happens in the ring buffer, a big warning is displayed and all
6126 * ring buffers are disabled.
6127 */
6128static struct task_struct *rb_threads[NR_CPUS] __initdata;
6129
6130struct rb_test_data {
6131	struct trace_buffer *buffer;
6132	unsigned long		events;
6133	unsigned long		bytes_written;
6134	unsigned long		bytes_alloc;
6135	unsigned long		bytes_dropped;
6136	unsigned long		events_nested;
6137	unsigned long		bytes_written_nested;
6138	unsigned long		bytes_alloc_nested;
6139	unsigned long		bytes_dropped_nested;
6140	int			min_size_nested;
6141	int			max_size_nested;
6142	int			max_size;
6143	int			min_size;
6144	int			cpu;
6145	int			cnt;
6146};
6147
6148static struct rb_test_data rb_data[NR_CPUS] __initdata;
6149
6150/* 1 meg per cpu */
6151#define RB_TEST_BUFFER_SIZE	1048576
6152
6153static char rb_string[] __initdata =
6154	"abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
6155	"?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
6156	"!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
6157
6158static bool rb_test_started __initdata;
6159
6160struct rb_item {
6161	int size;
6162	char str[];
6163};
6164
6165static __init int rb_write_something(struct rb_test_data *data, bool nested)
6166{
6167	struct ring_buffer_event *event;
6168	struct rb_item *item;
6169	bool started;
6170	int event_len;
6171	int size;
6172	int len;
6173	int cnt;
6174
6175	/* Have nested writes different than what is written */
6176	cnt = data->cnt + (nested ? 27 : 0);
6177
6178	/* Multiply cnt by ~e, to make some unique increment */
6179	size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
6180
6181	len = size + sizeof(struct rb_item);
6182
6183	started = rb_test_started;
6184	/* read rb_test_started before checking buffer enabled */
6185	smp_rmb();
6186
6187	event = ring_buffer_lock_reserve(data->buffer, len);
6188	if (!event) {
6189		/* Ignore dropped events before test starts. */
6190		if (started) {
6191			if (nested)
6192				data->bytes_dropped_nested += len;
6193			else
6194				data->bytes_dropped += len;
6195		}
6196		return len;
6197	}
6198
6199	event_len = ring_buffer_event_length(event);
6200
6201	if (RB_WARN_ON(data->buffer, event_len < len))
6202		goto out;
6203
6204	item = ring_buffer_event_data(event);
6205	item->size = size;
6206	memcpy(item->str, rb_string, size);
6207
6208	if (nested) {
6209		data->bytes_alloc_nested += event_len;
6210		data->bytes_written_nested += len;
6211		data->events_nested++;
6212		if (!data->min_size_nested || len < data->min_size_nested)
6213			data->min_size_nested = len;
6214		if (len > data->max_size_nested)
6215			data->max_size_nested = len;
6216	} else {
6217		data->bytes_alloc += event_len;
6218		data->bytes_written += len;
6219		data->events++;
6220		if (!data->min_size || len < data->min_size)
6221			data->min_size = len;
6222		if (len > data->max_size)
6223			data->max_size = len;
6224	}
6225
6226 out:
6227	ring_buffer_unlock_commit(data->buffer);
6228
6229	return 0;
6230}
6231
6232static __init int rb_test(void *arg)
6233{
6234	struct rb_test_data *data = arg;
6235
6236	while (!kthread_should_stop()) {
6237		rb_write_something(data, false);
6238		data->cnt++;
6239
6240		set_current_state(TASK_INTERRUPTIBLE);
6241		/* Now sleep between a min of 100-300us and a max of 1ms */
6242		usleep_range(((data->cnt % 3) + 1) * 100, 1000);
6243	}
6244
6245	return 0;
6246}
6247
6248static __init void rb_ipi(void *ignore)
6249{
6250	struct rb_test_data *data;
6251	int cpu = smp_processor_id();
6252
6253	data = &rb_data[cpu];
6254	rb_write_something(data, true);
6255}
6256
6257static __init int rb_hammer_test(void *arg)
6258{
6259	while (!kthread_should_stop()) {
6260
6261		/* Send an IPI to all cpus to write data! */
6262		smp_call_function(rb_ipi, NULL, 1);
6263		/* No sleep, but for non preempt, let others run */
6264		schedule();
6265	}
6266
6267	return 0;
6268}
6269
6270static __init int test_ringbuffer(void)
6271{
6272	struct task_struct *rb_hammer;
6273	struct trace_buffer *buffer;
6274	int cpu;
6275	int ret = 0;
6276
6277	if (security_locked_down(LOCKDOWN_TRACEFS)) {
6278		pr_warn("Lockdown is enabled, skipping ring buffer tests\n");
6279		return 0;
6280	}
6281
6282	pr_info("Running ring buffer tests...\n");
6283
6284	buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
6285	if (WARN_ON(!buffer))
6286		return 0;
6287
6288	/* Disable buffer so that threads can't write to it yet */
6289	ring_buffer_record_off(buffer);
6290
6291	for_each_online_cpu(cpu) {
6292		rb_data[cpu].buffer = buffer;
6293		rb_data[cpu].cpu = cpu;
6294		rb_data[cpu].cnt = cpu;
6295		rb_threads[cpu] = kthread_run_on_cpu(rb_test, &rb_data[cpu],
6296						     cpu, "rbtester/%u");
6297		if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
6298			pr_cont("FAILED\n");
6299			ret = PTR_ERR(rb_threads[cpu]);
6300			goto out_free;
6301		}
6302	}
6303
6304	/* Now create the rb hammer! */
6305	rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
6306	if (WARN_ON(IS_ERR(rb_hammer))) {
6307		pr_cont("FAILED\n");
6308		ret = PTR_ERR(rb_hammer);
6309		goto out_free;
6310	}
6311
6312	ring_buffer_record_on(buffer);
6313	/*
6314	 * Show buffer is enabled before setting rb_test_started.
6315	 * Yes there's a small race window where events could be
6316	 * dropped and the thread won't catch it. But when a ring
6317	 * buffer gets enabled, there will always be some kind of
6318	 * delay before other CPUs see it. Thus, we don't care about
6319	 * those dropped events. We care about events dropped after
6320	 * the threads see that the buffer is active.
6321	 */
6322	smp_wmb();
6323	rb_test_started = true;
6324
6325	set_current_state(TASK_INTERRUPTIBLE);
6326	/* Just run for 10 seconds */
6327	schedule_timeout(10 * HZ);
6328
6329	kthread_stop(rb_hammer);
6330
6331 out_free:
6332	for_each_online_cpu(cpu) {
6333		if (!rb_threads[cpu])
6334			break;
6335		kthread_stop(rb_threads[cpu]);
6336	}
6337	if (ret) {
6338		ring_buffer_free(buffer);
6339		return ret;
6340	}
6341
6342	/* Report! */
6343	pr_info("finished\n");
6344	for_each_online_cpu(cpu) {
6345		struct ring_buffer_event *event;
6346		struct rb_test_data *data = &rb_data[cpu];
6347		struct rb_item *item;
6348		unsigned long total_events;
6349		unsigned long total_dropped;
6350		unsigned long total_written;
6351		unsigned long total_alloc;
6352		unsigned long total_read = 0;
6353		unsigned long total_size = 0;
6354		unsigned long total_len = 0;
6355		unsigned long total_lost = 0;
6356		unsigned long lost;
6357		int big_event_size;
6358		int small_event_size;
6359
6360		ret = -1;
6361
6362		total_events = data->events + data->events_nested;
6363		total_written = data->bytes_written + data->bytes_written_nested;
6364		total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
6365		total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
6366
6367		big_event_size = data->max_size + data->max_size_nested;
6368		small_event_size = data->min_size + data->min_size_nested;
6369
6370		pr_info("CPU %d:\n", cpu);
6371		pr_info("              events:    %ld\n", total_events);
6372		pr_info("       dropped bytes:    %ld\n", total_dropped);
6373		pr_info("       alloced bytes:    %ld\n", total_alloc);
6374		pr_info("       written bytes:    %ld\n", total_written);
6375		pr_info("       biggest event:    %d\n", big_event_size);
6376		pr_info("      smallest event:    %d\n", small_event_size);
6377
6378		if (RB_WARN_ON(buffer, total_dropped))
6379			break;
6380
6381		ret = 0;
6382
6383		while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
6384			total_lost += lost;
6385			item = ring_buffer_event_data(event);
6386			total_len += ring_buffer_event_length(event);
6387			total_size += item->size + sizeof(struct rb_item);
6388			if (memcmp(&item->str[0], rb_string, item->size) != 0) {
6389				pr_info("FAILED!\n");
6390				pr_info("buffer had: %.*s\n", item->size, item->str);
6391				pr_info("expected:   %.*s\n", item->size, rb_string);
6392				RB_WARN_ON(buffer, 1);
6393				ret = -1;
6394				break;
6395			}
6396			total_read++;
6397		}
6398		if (ret)
6399			break;
6400
6401		ret = -1;
6402
6403		pr_info("         read events:   %ld\n", total_read);
6404		pr_info("         lost events:   %ld\n", total_lost);
6405		pr_info("        total events:   %ld\n", total_lost + total_read);
6406		pr_info("  recorded len bytes:   %ld\n", total_len);
6407		pr_info(" recorded size bytes:   %ld\n", total_size);
6408		if (total_lost) {
6409			pr_info(" With dropped events, record len and size may not match\n"
6410				" alloced and written from above\n");
6411		} else {
6412			if (RB_WARN_ON(buffer, total_len != total_alloc ||
6413				       total_size != total_written))
6414				break;
6415		}
6416		if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
6417			break;
6418
6419		ret = 0;
6420	}
6421	if (!ret)
6422		pr_info("Ring buffer PASSED!\n");
6423
6424	ring_buffer_free(buffer);
6425	return 0;
6426}
6427
6428late_initcall(test_ringbuffer);
6429#endif /* CONFIG_RING_BUFFER_STARTUP_TEST */