   1// SPDX-License-Identifier: GPL-2.0
   2
   3#include <linux/kernel.h>
   4#include <linux/irqflags.h>
   5#include <linux/string.h>
   6#include <linux/errno.h>
   7#include <linux/bug.h>
   8#include "printk_ringbuffer.h"
   9
  10/**
  11 * DOC: printk_ringbuffer overview
  12 *
  13 * Data Structure
  14 * --------------
  15 * The printk_ringbuffer is made up of 2 internal ringbuffers:
  16 *
  17 *   desc_ring
  18 *     A ring of descriptors and their meta data (such as sequence number,
  19 *     timestamp, loglevel, etc.) as well as internal state information about
  20 *     the record and logical positions specifying where in the other
  21 *     ringbuffer the text strings are located.
  22 *
  23 *   text_data_ring
  24 *     A ring of data blocks. A data block consists of an unsigned long
  25 *     integer (ID) that maps to a desc_ring index followed by the text
  26 *     string of the record.
  27 *
  28 * The internal state information of a descriptor is the key element to allow
  29 * readers and writers to locklessly synchronize access to the data.
  30 *
  31 * Implementation
  32 * --------------
  33 *
  34 * Descriptor Ring
  35 * ~~~~~~~~~~~~~~~
  36 * The descriptor ring is an array of descriptors. A descriptor contains
  37 * essential meta data to track the data of a printk record using
  38 * blk_lpos structs pointing to associated text data blocks (see
  39 * "Data Ring" below). Each descriptor is assigned an ID that maps
  40 * directly to index values of the descriptor array and has a state. The ID
  41 * and the state are bitwise combined into a single descriptor field named
  42 * @state_var, allowing ID and state to be synchronously and atomically
  43 * updated.
  44 *
  45 * Descriptors have four states:
  46 *
  47 *   reserved
  48 *     A writer is modifying the record.
  49 *
  50 *   committed
  51 *     The record and all its data are written. A writer can reopen the
  52 *     descriptor (transitioning it back to reserved), but in the committed
  53 *     state the data is consistent.
  54 *
  55 *   finalized
  56 *     The record and all its data are complete and available for reading. A
  57 *     writer cannot reopen the descriptor.
  58 *
  59 *   reusable
  60 *     The record exists, but its text and/or meta data may no longer be
  61 *     available.
  62 *
  63 * Querying the @state_var of a record requires providing the ID of the
  64 * descriptor to query. This can yield a possible fifth (pseudo) state:
  65 *
  66 *   miss
  67 *     The descriptor being queried has an unexpected ID.
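     *
     * For illustration, the ID/state packing can be queried as follows
     * (a sketch that mirrors get_desc_state() below, using the DESC_SV(),
     * DESC_ID() and DESC_STATE() helpers from printk_ringbuffer.h)::
     *
     *	// pack an ID and a state into a @state_var value
     *	state_val = DESC_SV(id, desc_reserved);
     *
     *	// query: unpack and verify the ID
     *	if (DESC_ID(state_val) != id)
     *		return desc_miss;
     *	return DESC_STATE(state_val);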
  68 *
  69 * The descriptor ring has a @tail_id that contains the ID of the oldest
  70 * descriptor and @head_id that contains the ID of the newest descriptor.
  71 *
  72 * When a new descriptor should be created (and the ring is full), the tail
  73 * descriptor is invalidated by first transitioning to the reusable state and
  74 * then invalidating all tail data blocks up to and including the data blocks
  75 * associated with the tail descriptor (for the text ring). Then
  76 * @tail_id is advanced, followed by advancing @head_id. And finally the
  77 * @state_var of the new descriptor is initialized to the new ID and reserved
  78 * state.
  79 *
  80 * The @tail_id can only be advanced if the new @tail_id would be in the
  81 * finalized or reusable queried state. This ensures that a valid sequence
  82 * number for the tail is always available.
  83 *
  84 * Descriptor Finalization
  85 * ~~~~~~~~~~~~~~~~~~~~~~~
  86 * When a writer calls the commit function prb_commit(), record data is
  87 * fully stored and is consistent within the ringbuffer. However, a writer can
  88 * reopen that record, claiming exclusive access (as with prb_reserve()), and
  89 * modify that record. When finished, the writer must again commit the record.
  90 *
  91 * In order for a record to be made available to readers (and also become
  92 * recyclable for writers), it must be finalized. A finalized record cannot be
  93 * reopened and can never become "unfinalized". Record finalization can occur
  94 * in three different scenarios:
  95 *
  96 *   1) A writer can simultaneously commit and finalize its record by calling
  97 *      prb_final_commit() instead of prb_commit().
  98 *
  99 *   2) When a new record is reserved and the previous record has been
 100 *      committed via prb_commit(), that previous record is automatically
 101 *      finalized.
 102 *
 103 *   3) When a record is committed via prb_commit() and a newer record
 104 *      already exists, the record being committed is automatically finalized.
 105 *
 106 * Data Ring
 107 * ~~~~~~~~~
 108 * The text data ring is a byte array composed of data blocks. Data blocks are
 109 * referenced by blk_lpos structs that point to the logical position of the
 110 * beginning of a data block and the beginning of the next adjacent data
 111 * block. Logical positions are mapped directly to index values of the byte
 112 * array ringbuffer.
 113 *
 114 * Each data block consists of an ID followed by the writer data. The ID is
 115 * the identifier of a descriptor that is associated with the data block. A
 116 * given data block is considered valid if all of the following conditions
 117 * are met:
 118 *
 119 *   1) The descriptor associated with the data block is in the committed
 120 *      or finalized queried state.
 121 *
 122 *   2) The blk_lpos struct within the descriptor associated with the data
 123 *      block references back to the same data block.
 124 *
 125 *   3) The data block is within the head/tail logical position range.
 126 *
 127 * If the writer data of a data block would extend beyond the end of the
 128 * byte array, only the ID of the data block is stored at the logical
 129 * position and the full data block (ID and writer data) is stored at the
 130 * beginning of the byte array. The referencing blk_lpos will point to the
 131 * ID before the wrap and the next data block will be at the logical
 132 * position adjacent to the full data block after the wrap.
 133 *
 134 * Data rings have a @tail_lpos that points to the beginning of the oldest
 135 * data block and a @head_lpos that points to the logical position of the
 136 * next (not yet existing) data block.
 137 *
 138 * When a new data block should be created (and the ring is full), tail data
 139 * blocks will first be invalidated by putting their associated descriptors
 140 * into the reusable state and then pushing the @tail_lpos forward beyond
 141 * them. Then the @head_lpos is pushed forward and is associated with a new
 142 * descriptor. If a data block is not valid, the @tail_lpos cannot be
 143 * advanced beyond it.
 144 *
 145 * Info Array
 146 * ~~~~~~~~~~
 147 * The general meta data of printk records is stored in printk_info structs,
 148 * held in an array with the same number of elements as the descriptor ring.
 149 * Each info corresponds to the descriptor at the same index in the
 150 * descriptor ring. Info validity is confirmed by evaluating the corresponding
 151 * descriptor before and after loading the info.
 152 *
 153 * Usage
 154 * -----
 155 * Here are some simple examples demonstrating writers and readers. For the
 156 * examples a global ringbuffer (test_rb) is available (which is not the
 157 * actual ringbuffer used by printk)::
 158 *
 159 *	DEFINE_PRINTKRB(test_rb, 15, 5);
 160 *
 161 * This ringbuffer allows up to 32768 records (2 ^ 15) and has a size of
 162 * 1 MiB (2 ^ (15 + 5)) for text data.
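     *
     * As another (hypothetical) example, a much smaller instance could be
     * declared as::
     *
     *	DEFINE_PRINTKRB(small_rb, 8, 6);
     *
     * which would allow up to 256 records (2 ^ 8) with 16 KiB
     * (2 ^ (8 + 6)) of text data, following the same sizing rule.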
 163 *
 164 * Sample writer code::
 165 *
 166 *	const char *textstr = "message text";
 167 *	struct prb_reserved_entry e;
 168 *	struct printk_record r;
 169 *
 170 *	// specify how much to allocate
 171 *	prb_rec_init_wr(&r, strlen(textstr) + 1);
 172 *
 173 *	if (prb_reserve(&e, &test_rb, &r)) {
 174 *		snprintf(r.text_buf, r.text_buf_size, "%s", textstr);
 175 *
 176 *		r.info->text_len = strlen(textstr);
 177 *		r.info->ts_nsec = local_clock();
 178 *		r.info->caller_id = printk_caller_id();
 179 *
 180 *		// commit and finalize the record
 181 *		prb_final_commit(&e);
 182 *	}
 183 *
 184 * Note that additional writer functions are available to extend a record
 185 * after it has been committed but not yet finalized. This can be done as
 186 * long as no new records have been reserved and the caller is the same.
 187 *
 188 * Sample writer code (record extending)::
 189 *
 190 *		// alternate rest of previous example
 191 *
 192 *		r.info->text_len = strlen(textstr);
 193 *		r.info->ts_nsec = local_clock();
 194 *		r.info->caller_id = printk_caller_id();
 195 *
 196 *		// commit the record (but do not finalize yet)
 197 *		prb_commit(&e);
 198 *	}
 199 *
 200 *	...
 201 *
 202 *	// specify additional 5 bytes text space to extend
 203 *	prb_rec_init_wr(&r, 5);
 204 *
 205 *	// try to extend, but only if it does not exceed 32 bytes
 206 *	if (prb_reserve_in_last(&e, &test_rb, &r, printk_caller_id(), 32)) {
 207 *		snprintf(&r.text_buf[r.info->text_len],
 208 *			 r.text_buf_size - r.info->text_len, "hello");
 209 *
 210 *		r.info->text_len += 5;
 211 *
 212 *		// commit and finalize the record
 213 *		prb_final_commit(&e);
 214 *	}
 215 *
 216 * Sample reader code::
 217 *
 218 *	struct printk_info info;
 219 *	struct printk_record r;
 220 *	char text_buf[32];
 221 *	u64 seq;
 222 *
 223 *	prb_rec_init_rd(&r, &info, &text_buf[0], sizeof(text_buf));
 224 *
 225 *	prb_for_each_record(0, &test_rb, &seq, &r) {
 226 *		if (info.seq != seq)
 227 *			pr_warn("lost %llu records\n", info.seq - seq);
 228 *
 229 *		if (info.text_len > r.text_buf_size) {
 230 *			pr_warn("record %llu text truncated\n", info.seq);
 231 *			text_buf[r.text_buf_size - 1] = 0;
 232 *		}
 233 *
 234 *		pr_info("%llu: %llu: %s\n", info.seq, info.ts_nsec,
 235 *			&text_buf[0]);
 236 *	}
 237 *
 238 * Note that additional, less convenient reader functions are available to
 239 * allow more complex record access, as sketched below.
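     *
     * Sample reader code (lower-level interface; a sketch equivalent to
     * the prb_for_each_record() loop above)::
     *
     *	prb_rec_init_rd(&r, &info, &text_buf[0], sizeof(text_buf));
     *
     *	seq = 0;
     *	while (prb_read_valid(&test_rb, seq, &r)) {
     *		// process the record in @r here ...
     *
     *		seq = r.info->seq + 1;
     *	}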
 240 *
 241 * ABA Issues
 242 * ~~~~~~~~~~
 243 * To help avoid ABA issues, descriptors are referenced by IDs (array index
 244 * values combined with tagged bits counting array wraps) and data blocks are
 245 * referenced by logical positions (array index values combined with tagged
 246 * bits counting array wraps). However, on 32-bit systems the number of
 247 * tagged bits is relatively small such that an ABA incident is (at least
 248 * theoretically) possible. For example, if 4 million maximally sized (1KiB)
 249 * printk messages were to occur in NMI context on a 32-bit system, the
 250 * interrupted context would not be able to recognize that the 32-bit integer
 251 * completely wrapped and thus represents a different data block than the one
 252 * the interrupted context expects.
 253 *
 254 * To help combat this possibility, additional state checking is performed
 255 * (such as using cmpxchg() even though set() would suffice). These extra
 256 * checks are commented as such and will hopefully catch any ABA issue that
 257 * a 32-bit system might experience.
 258 *
 259 * Memory Barriers
 260 * ~~~~~~~~~~~~~~~
 261 * Multiple memory barriers are used. To simplify proving correctness and
 262 * generating litmus tests, lines of code related to memory barriers
 263 * (loads, stores, and the associated memory barriers) are labeled::
 264 *
 265 *	LMM(function:letter)
 266 *
 267 * Comments reference the labels using only the "function:letter" part.
 268 *
 269 * The memory barrier pairs and their ordering are:
 270 *
 271 *   desc_reserve:D / desc_reserve:B
 272 *     push descriptor tail (id), then push descriptor head (id)
 273 *
 274 *   desc_reserve:D / data_push_tail:B
 275 *     push data tail (lpos), then set new descriptor reserved (state)
 276 *
 277 *   desc_reserve:D / desc_push_tail:C
 278 *     push descriptor tail (id), then set new descriptor reserved (state)
 279 *
 280 *   desc_reserve:D / prb_first_seq:C
 281 *     push descriptor tail (id), then set new descriptor reserved (state)
 282 *
 283 *   desc_reserve:F / desc_read:D
 284 *     set new descriptor id and reserved (state), then allow writer changes
 285 *
 286 *   data_alloc:A (or data_realloc:A) / desc_read:D
 287 *     set old descriptor reusable (state), then modify new data block area
 288 *
 289 *   data_alloc:A (or data_realloc:A) / data_push_tail:B
 290 *     push data tail (lpos), then modify new data block area
 291 *
 292 *   _prb_commit:B / desc_read:B
 293 *     store writer changes, then set new descriptor committed (state)
 294 *
 295 *   desc_reopen_last:A / _prb_commit:B
 296 *     set descriptor reserved (state), then read descriptor data
 297 *
 298 *   _prb_commit:B / desc_reserve:D
 299 *     set new descriptor committed (state), then check descriptor head (id)
 300 *
 301 *   data_push_tail:D / data_push_tail:A
 302 *     set descriptor reusable (state), then push data tail (lpos)
 303 *
 304 *   desc_push_tail:B / desc_reserve:D
 305 *     set descriptor reusable (state), then push descriptor tail (id)
 306 */
 307
 308#define DATA_SIZE(data_ring)		_DATA_SIZE((data_ring)->size_bits)
 309#define DATA_SIZE_MASK(data_ring)	(DATA_SIZE(data_ring) - 1)
 310
 311#define DESCS_COUNT(desc_ring)		_DESCS_COUNT((desc_ring)->count_bits)
 312#define DESCS_COUNT_MASK(desc_ring)	(DESCS_COUNT(desc_ring) - 1)
 313
 314/* Determine the data array index from a logical position. */
 315#define DATA_INDEX(data_ring, lpos)	((lpos) & DATA_SIZE_MASK(data_ring))
 316
 317/* Determine the desc array index from an ID or sequence number. */
 318#define DESC_INDEX(desc_ring, n)	((n) & DESCS_COUNT_MASK(desc_ring))
 319
 320/* Determine how many times the data array has wrapped. */
 321#define DATA_WRAPS(data_ring, lpos)	((lpos) >> (data_ring)->size_bits)
 322
 323/* Determine if a logical position refers to a data-less block. */
 324#define LPOS_DATALESS(lpos)		((lpos) & 1UL)
 325#define BLK_DATALESS(blk)		(LPOS_DATALESS((blk)->begin) && \
 326					 LPOS_DATALESS((blk)->next))
 327
 328/* Get the logical position at index 0 of the current wrap. */
 329#define DATA_THIS_WRAP_START_LPOS(data_ring, lpos) \
 330	((lpos) & ~DATA_SIZE_MASK(data_ring))
 331
 332/* Get the ID for the same index of the previous wrap as the given ID. */
 333#define DESC_ID_PREV_WRAP(desc_ring, id) \
 334	DESC_ID((id) - DESCS_COUNT(desc_ring))
 335
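    /*
     * Worked example of the lpos arithmetic above (hypothetical values,
     * assuming size_bits = 5, i.e. DATA_SIZE() = 32):
     *
     *   lpos = 70: DATA_INDEX() = 70 & 31 = 6, DATA_WRAPS() = 70 >> 5 = 2,
     *              DATA_THIS_WRAP_START_LPOS() = 70 & ~31 = 64
     *
     * The wrap count kept in the upper bits is what lets logical positions
     * act as tagged values (see "ABA Issues" above).
     */
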
 336/*
 337 * A data block: mapped directly to the beginning of the data block area
 338 * specified as a logical position within the data ring.
 339 *
 340 * @id:   the ID of the associated descriptor
 341 * @data: the writer data
 342 *
 343 * Note that the size of a data block is only known by its associated
 344 * descriptor.
 345 */
 346struct prb_data_block {
 347	unsigned long	id;
 348	char		data[];
 349};
 350
 351/*
 352 * Return the descriptor associated with @n. @n can be either a
 353 * descriptor ID or a sequence number.
 354 */
 355static struct prb_desc *to_desc(struct prb_desc_ring *desc_ring, u64 n)
 356{
 357	return &desc_ring->descs[DESC_INDEX(desc_ring, n)];
 358}
 359
 360/*
 361 * Return the printk_info associated with @n. @n can be either a
 362 * descriptor ID or a sequence number.
 363 */
 364static struct printk_info *to_info(struct prb_desc_ring *desc_ring, u64 n)
 365{
 366	return &desc_ring->infos[DESC_INDEX(desc_ring, n)];
 367}
 368
 369static struct prb_data_block *to_block(struct prb_data_ring *data_ring,
 370				       unsigned long begin_lpos)
 371{
 372	return (void *)&data_ring->data[DATA_INDEX(data_ring, begin_lpos)];
 373}
 374
 375/*
 376 * Increase the data size to account for data block meta data plus any
 377 * padding so that the adjacent data block is aligned on the ID size.
 378 */
 379static unsigned int to_blk_size(unsigned int size)
 380{
 381	struct prb_data_block *db = NULL;
 382
 383	size += sizeof(*db);
 384	size = ALIGN(size, sizeof(db->id));
 385	return size;
 386}
 387
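    /*
     * For example with to_blk_size() above (hypothetical, on a 64-bit
     * system where the block ID is 8 bytes): a request for 13 bytes of
     * writer data becomes 13 + 8 = 21 bytes and is padded to 24 bytes so
     * that the next data block's ID stays aligned.
     */
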
 388/*
 389 * Sanity checker for reserve size. The ringbuffer code assumes that a data
 390 * block does not exceed the maximum possible size that could fit within the
 391 * ringbuffer. This function provides that basic size check so that the
 392 * assumption is safe.
 393 */
 394static bool data_check_size(struct prb_data_ring *data_ring, unsigned int size)
 395{
 396	struct prb_data_block *db = NULL;
 397
 398	if (size == 0)
 399		return true;
 400
 401	/*
 402	 * Ensure the alignment padded size could possibly fit in the data
 403	 * array. The largest possible data block must still leave room for
 404	 * at least the ID of the next block.
 405	 */
 406	size = to_blk_size(size);
 407	if (size > DATA_SIZE(data_ring) - sizeof(db->id))
 408		return false;
 409
 410	return true;
 411}
 412
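    /*
     * For example with data_check_size() above (hypothetical, a data ring
     * of DATA_SIZE() = 32 bytes with an 8-byte block ID): the largest
     * acceptable request is 16 bytes of writer data, since
     * to_blk_size(16) = 24 still leaves room for the ID of the next
     * data block.
     */
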
 413/* Query the state of a descriptor. */
 414static enum desc_state get_desc_state(unsigned long id,
 415				      unsigned long state_val)
 416{
 417	if (id != DESC_ID(state_val))
 418		return desc_miss;
 419
 420	return DESC_STATE(state_val);
 421}
 422
 423/*
 424 * Get a copy of a specified descriptor and return its queried state. If the
 425 * descriptor is in an inconsistent state (miss or reserved), the caller can
 426 * only expect the descriptor's @state_var field to be valid.
 427 *
 428 * The sequence number and caller_id can be optionally retrieved. Like all
 429 * non-state_var data, they are only valid if the descriptor is in a
 430 * consistent state.
 431 */
 432static enum desc_state desc_read(struct prb_desc_ring *desc_ring,
 433				 unsigned long id, struct prb_desc *desc_out,
 434				 u64 *seq_out, u32 *caller_id_out)
 435{
 436	struct printk_info *info = to_info(desc_ring, id);
 437	struct prb_desc *desc = to_desc(desc_ring, id);
 438	atomic_long_t *state_var = &desc->state_var;
 439	enum desc_state d_state;
 440	unsigned long state_val;
 441
 442	/* Check the descriptor state. */
 443	state_val = atomic_long_read(state_var); /* LMM(desc_read:A) */
 444	d_state = get_desc_state(id, state_val);
 445	if (d_state == desc_miss || d_state == desc_reserved) {
 446		/*
 447		 * The descriptor is in an inconsistent state. Set at least
 448		 * @state_var so that the caller can see the details of
 449		 * the inconsistent state.
 450		 */
 451		goto out;
 452	}
 453
 454	/*
 455	 * Guarantee the state is loaded before copying the descriptor
 456	 * content. This avoids copying obsolete descriptor content that might
 457	 * not apply to the descriptor state. This pairs with _prb_commit:B.
 458	 *
 459	 * Memory barrier involvement:
 460	 *
 461	 * If desc_read:A reads from _prb_commit:B, then desc_read:C reads
 462	 * from _prb_commit:A.
 463	 *
 464	 * Relies on:
 465	 *
 466	 * WMB from _prb_commit:A to _prb_commit:B
 467	 *    matching
 468	 * RMB from desc_read:A to desc_read:C
 469	 */
 470	smp_rmb(); /* LMM(desc_read:B) */
 471
 472	/*
 473	 * Copy the descriptor data. The data is not valid until the
 474	 * state has been re-checked. A memcpy() for all of @desc
 475	 * cannot be used because of the atomic_t @state_var field.
 476	 */
 477	if (desc_out) {
 478		memcpy(&desc_out->text_blk_lpos, &desc->text_blk_lpos,
 479		       sizeof(desc_out->text_blk_lpos)); /* LMM(desc_read:C) */
 480	}
 481	if (seq_out)
 482		*seq_out = info->seq; /* also part of desc_read:C */
 483	if (caller_id_out)
 484		*caller_id_out = info->caller_id; /* also part of desc_read:C */
 485
 486	/*
 487	 * 1. Guarantee the descriptor content is loaded before re-checking
 488	 *    the state. This avoids reading an obsolete descriptor state
 489	 *    that may not apply to the copied content. This pairs with
 490	 *    desc_reserve:F.
 491	 *
 492	 *    Memory barrier involvement:
 493	 *
 494	 *    If desc_read:C reads from desc_reserve:G, then desc_read:E
 495	 *    reads from desc_reserve:F.
 496	 *
 497	 *    Relies on:
 498	 *
 499	 *    WMB from desc_reserve:F to desc_reserve:G
 500	 *       matching
 501	 *    RMB from desc_read:C to desc_read:E
 502	 *
 503	 * 2. Guarantee the record data is loaded before re-checking the
 504	 *    state. This avoids reading an obsolete descriptor state that may
 505	 *    not apply to the copied data. This pairs with data_alloc:A and
 506	 *    data_realloc:A.
 507	 *
 508	 *    Memory barrier involvement:
 509	 *
 510	 *    If copy_data:A reads from data_alloc:B, then desc_read:E
 511	 *    reads from desc_make_reusable:A.
 512	 *
 513	 *    Relies on:
 514	 *
 515	 *    MB from desc_make_reusable:A to data_alloc:B
 516	 *       matching
 517	 *    RMB from desc_read:C to desc_read:E
 518	 *
 519	 *    Note: desc_make_reusable:A and data_alloc:B can be different
 520	 *          CPUs. However, the data_alloc:B CPU (which performs the
 521	 *          full memory barrier) must have previously seen
 522	 *          desc_make_reusable:A.
 523	 */
 524	smp_rmb(); /* LMM(desc_read:D) */
 525
 526	/*
 527	 * The data has been copied. Return the current descriptor state,
 528	 * which may have changed since the load above.
 529	 */
 530	state_val = atomic_long_read(state_var); /* LMM(desc_read:E) */
 531	d_state = get_desc_state(id, state_val);
 532out:
 533	if (desc_out)
 534		atomic_long_set(&desc_out->state_var, state_val);
 535	return d_state;
 536}
 537
 538/*
 539 * Take a specified descriptor out of the finalized state by attempting
 540 * the transition from finalized to reusable. Either this context or some
 541 * other context will have been successful.
 542 */
 543static void desc_make_reusable(struct prb_desc_ring *desc_ring,
 544			       unsigned long id)
 545{
 546	unsigned long val_finalized = DESC_SV(id, desc_finalized);
 547	unsigned long val_reusable = DESC_SV(id, desc_reusable);
 548	struct prb_desc *desc = to_desc(desc_ring, id);
 549	atomic_long_t *state_var = &desc->state_var;
 550
 551	atomic_long_cmpxchg_relaxed(state_var, val_finalized,
 552				    val_reusable); /* LMM(desc_make_reusable:A) */
 553}
 554
 555/*
 556 * Given the text data ring, put the associated descriptor of each
 557 * data block from @lpos_begin until @lpos_end into the reusable state.
 558 *
 559 * If there is any problem making the associated descriptor reusable, either
 560 * the descriptor has not yet been finalized or another writer context has
 561 * already pushed the tail lpos past the problematic data block. Regardless,
 562 * on error the caller can re-load the tail lpos to determine the situation.
 563 */
 564static bool data_make_reusable(struct printk_ringbuffer *rb,
 565			       unsigned long lpos_begin,
 566			       unsigned long lpos_end,
 567			       unsigned long *lpos_out)
 568{
 569
 570	struct prb_data_ring *data_ring = &rb->text_data_ring;
 571	struct prb_desc_ring *desc_ring = &rb->desc_ring;
 572	struct prb_data_block *blk;
 573	enum desc_state d_state;
 574	struct prb_desc desc;
 575	struct prb_data_blk_lpos *blk_lpos = &desc.text_blk_lpos;
 576	unsigned long id;
 577
 578	/* Loop until @lpos_begin has advanced to or beyond @lpos_end. */
 579	while ((lpos_end - lpos_begin) - 1 < DATA_SIZE(data_ring)) {
 580		blk = to_block(data_ring, lpos_begin);
 581
 582		/*
 583		 * Load the block ID from the data block. This is a data race
 584		 * against a writer that may have newly reserved this data
 585		 * area. If the loaded value matches a valid descriptor ID,
 586		 * the blk_lpos of that descriptor will be checked to make
 587		 * sure it points back to this data block. If the check fails,
 588		 * the data area has been recycled by another writer.
 589		 */
 590		id = blk->id; /* LMM(data_make_reusable:A) */
 591
 592		d_state = desc_read(desc_ring, id, &desc,
 593				    NULL, NULL); /* LMM(data_make_reusable:B) */
 594
 595		switch (d_state) {
 596		case desc_miss:
 597		case desc_reserved:
 598		case desc_committed:
 599			return false;
 600		case desc_finalized:
 601			/*
 602			 * This data block is invalid if the descriptor
 603			 * does not point back to it.
 604			 */
 605			if (blk_lpos->begin != lpos_begin)
 606				return false;
 607			desc_make_reusable(desc_ring, id);
 608			break;
 609		case desc_reusable:
 610			/*
 611			 * This data block is invalid if the descriptor
 612			 * does not point back to it.
 613			 */
 614			if (blk_lpos->begin != lpos_begin)
 615				return false;
 616			break;
 617		}
 618
 619		/* Advance @lpos_begin to the next data block. */
 620		lpos_begin = blk_lpos->next;
 621	}
 622
 623	*lpos_out = lpos_begin;
 624	return true;
 625}
 626
 627/*
 628 * Advance the data ring tail to at least @lpos. This function puts
 629 * descriptors into the reusable state if the tail is pushed beyond
 630 * their associated data block.
 631 */
 632static bool data_push_tail(struct printk_ringbuffer *rb, unsigned long lpos)
 633{
 634	struct prb_data_ring *data_ring = &rb->text_data_ring;
 635	unsigned long tail_lpos_new;
 636	unsigned long tail_lpos;
 637	unsigned long next_lpos;
 638
 639	/* If @lpos is from a data-less block, there is nothing to do. */
 640	if (LPOS_DATALESS(lpos))
 641		return true;
 642
 643	/*
 644	 * Any descriptor states that have transitioned to reusable due to the
 645	 * data tail being pushed to this loaded value will be visible to this
 646	 * CPU. This pairs with data_push_tail:D.
 647	 *
 648	 * Memory barrier involvement:
 649	 *
 650	 * If data_push_tail:A reads from data_push_tail:D, then this CPU can
 651	 * see desc_make_reusable:A.
 652	 *
 653	 * Relies on:
 654	 *
 655	 * MB from desc_make_reusable:A to data_push_tail:D
 656	 *    matches
 657	 * READFROM from data_push_tail:D to data_push_tail:A
 658	 *    thus
 659	 * READFROM from desc_make_reusable:A to this CPU
 660	 */
 661	tail_lpos = atomic_long_read(&data_ring->tail_lpos); /* LMM(data_push_tail:A) */
 662
 663	/*
 664	 * Loop until the tail lpos is at or beyond @lpos. This condition
 665	 * may already be satisfied, resulting in no full memory barrier
 666	 * from data_push_tail:D being performed. However, since this CPU
 667	 * sees the new tail lpos, any descriptor states that transitioned to
 668	 * the reusable state must already be visible.
 669	 */
 670	while ((lpos - tail_lpos) - 1 < DATA_SIZE(data_ring)) {
 671		/*
 672		 * Make all descriptors reusable that are associated with
 673		 * data blocks before @lpos.
 674		 */
 675		if (!data_make_reusable(rb, tail_lpos, lpos, &next_lpos)) {
 676			/*
 677			 * 1. Guarantee the block ID loaded in
 678			 *    data_make_reusable() is performed before
 679			 *    reloading the tail lpos. The failed
 680			 *    data_make_reusable() may be due to a newly
 681			 *    recycled data area causing the tail lpos to
 682			 *    have been previously pushed. This pairs with
 683			 *    data_alloc:A and data_realloc:A.
 684			 *
 685			 *    Memory barrier involvement:
 686			 *
 687			 *    If data_make_reusable:A reads from data_alloc:B,
 688			 *    then data_push_tail:C reads from
 689			 *    data_push_tail:D.
 690			 *
 691			 *    Relies on:
 692			 *
 693			 *    MB from data_push_tail:D to data_alloc:B
 694			 *       matching
 695			 *    RMB from data_make_reusable:A to
 696			 *    data_push_tail:C
 697			 *
 698			 *    Note: data_push_tail:D and data_alloc:B can be
 699			 *          different CPUs. However, the data_alloc:B
 700			 *          CPU (which performs the full memory
 701			 *          barrier) must have previously seen
 702			 *          data_push_tail:D.
 703			 *
 704			 * 2. Guarantee the descriptor state loaded in
 705			 *    data_make_reusable() is performed before
 706			 *    reloading the tail lpos. The failed
 707			 *    data_make_reusable() may be due to a newly
 708			 *    recycled descriptor causing the tail lpos to
 709			 *    have been previously pushed. This pairs with
 710			 *    desc_reserve:D.
 711			 *
 712			 *    Memory barrier involvement:
 713			 *
 714			 *    If data_make_reusable:B reads from
 715			 *    desc_reserve:F, then data_push_tail:C reads
 716			 *    from data_push_tail:D.
 717			 *
 718			 *    Relies on:
 719			 *
 720			 *    MB from data_push_tail:D to desc_reserve:F
 721			 *       matching
 722			 *    RMB from data_make_reusable:B to
 723			 *    data_push_tail:C
 724			 *
 725			 *    Note: data_push_tail:D and desc_reserve:F can
 726			 *          be different CPUs. However, the
 727			 *          desc_reserve:F CPU (which performs the
 728			 *          full memory barrier) must have previously
 729			 *          seen data_push_tail:D.
 730			 */
 731			smp_rmb(); /* LMM(data_push_tail:B) */
 732
 733			tail_lpos_new = atomic_long_read(&data_ring->tail_lpos
 734							); /* LMM(data_push_tail:C) */
 735			if (tail_lpos_new == tail_lpos)
 736				return false;
 737
 738			/* Another CPU pushed the tail. Try again. */
 739			tail_lpos = tail_lpos_new;
 740			continue;
 741		}
 742
 743		/*
 744		 * Guarantee any descriptor states that have transitioned to
 745		 * reusable are stored before pushing the tail lpos. A full
 746		 * memory barrier is needed since other CPUs may have made
 747		 * the descriptor states reusable. This pairs with
 748		 * data_push_tail:A.
 749		 */
 750		if (atomic_long_try_cmpxchg(&data_ring->tail_lpos, &tail_lpos,
 751					    next_lpos)) { /* LMM(data_push_tail:D) */
 752			break;
 753		}
 754	}
 755
 756	return true;
 757}
 758
 759/*
 760 * Advance the desc ring tail. This function advances the tail by one
 761 * descriptor, thus invalidating the oldest descriptor. Before advancing
 762 * the tail, the tail descriptor is made reusable and all data blocks up to
 763 * and including the descriptor's data block are invalidated (i.e. the data
 764 * ring tail is pushed past the data block of the descriptor being made
 765 * reusable).
 766 */
 767static bool desc_push_tail(struct printk_ringbuffer *rb,
 768			   unsigned long tail_id)
 769{
 770	struct prb_desc_ring *desc_ring = &rb->desc_ring;
 771	enum desc_state d_state;
 772	struct prb_desc desc;
 773
 774	d_state = desc_read(desc_ring, tail_id, &desc, NULL, NULL);
 775
 776	switch (d_state) {
 777	case desc_miss:
 778		/*
 779		 * If the ID is exactly 1 wrap behind the expected, it is
 780		 * in the process of being reserved by another writer and
 781		 * must be considered reserved.
 782		 */
 783		if (DESC_ID(atomic_long_read(&desc.state_var)) ==
 784		    DESC_ID_PREV_WRAP(desc_ring, tail_id)) {
 785			return false;
 786		}
 787
 788		/*
 789		 * The ID has changed. Another writer must have pushed the
 790		 * tail and recycled the descriptor already. Success is
 791		 * returned because the caller is only interested in the
 792		 * specified tail being pushed, which it was.
 793		 */
 794		return true;
 795	case desc_reserved:
 796	case desc_committed:
 797		return false;
 798	case desc_finalized:
 799		desc_make_reusable(desc_ring, tail_id);
 800		break;
 801	case desc_reusable:
 802		break;
 803	}
 804
 805	/*
 806	 * Data blocks must be invalidated before their associated
 807	 * descriptor can be made available for recycling. Invalidating
 808	 * them later is not possible because there is no way to trust
 809	 * data blocks once their associated descriptor is gone.
 810	 */
 811
 812	if (!data_push_tail(rb, desc.text_blk_lpos.next))
 813		return false;
 814
 815	/*
 816	 * Check the next descriptor after @tail_id before pushing the tail
 817	 * to it because the tail must always be in a finalized or reusable
 818	 * state. The implementation of prb_first_seq() relies on this.
 819	 *
 820	 * A successful read implies that the next descriptor is less than or
 821	 * equal to @head_id so there is no risk of pushing the tail past the
 822	 * head.
 823	 */
 824	d_state = desc_read(desc_ring, DESC_ID(tail_id + 1), &desc,
 825			    NULL, NULL); /* LMM(desc_push_tail:A) */
 826
 827	if (d_state == desc_finalized || d_state == desc_reusable) {
 828		/*
 829		 * Guarantee any descriptor states that have transitioned to
 830		 * reusable are stored before pushing the tail ID. This allows
 831		 * verifying the recycled descriptor state. A full memory
 832		 * barrier is needed since other CPUs may have made the
 833		 * descriptor states reusable. This pairs with desc_reserve:D.
 834		 */
 835		atomic_long_cmpxchg(&desc_ring->tail_id, tail_id,
 836				    DESC_ID(tail_id + 1)); /* LMM(desc_push_tail:B) */
 837	} else {
 838		/*
 839		 * Guarantee the last state load from desc_read() is before
 840		 * reloading @tail_id in order to see a new tail ID in the
 841		 * case that the descriptor has been recycled. This pairs
 842		 * with desc_reserve:D.
 843		 *
 844		 * Memory barrier involvement:
 845		 *
 846		 * If desc_push_tail:A reads from desc_reserve:F, then
 847		 * desc_push_tail:D reads from desc_push_tail:B.
 848		 *
 849		 * Relies on:
 850		 *
 851		 * MB from desc_push_tail:B to desc_reserve:F
 852		 *    matching
 853		 * RMB from desc_push_tail:A to desc_push_tail:D
 854		 *
 855		 * Note: desc_push_tail:B and desc_reserve:F can be different
 856		 *       CPUs. However, the desc_reserve:F CPU (which performs
 857		 *       the full memory barrier) must have previously seen
 858		 *       desc_push_tail:B.
 859		 */
 860		smp_rmb(); /* LMM(desc_push_tail:C) */
 861
 862		/*
 863		 * Re-check the tail ID. The descriptor following @tail_id is
 864		 * not in an allowed tail state. But if the tail has since
 865		 * been moved by another CPU, then it does not matter.
 866		 */
 867		if (atomic_long_read(&desc_ring->tail_id) == tail_id) /* LMM(desc_push_tail:D) */
 868			return false;
 869	}
 870
 871	return true;
 872}
 873
 874/* Reserve a new descriptor, invalidating the oldest if necessary. */
 875static bool desc_reserve(struct printk_ringbuffer *rb, unsigned long *id_out)
 876{
 877	struct prb_desc_ring *desc_ring = &rb->desc_ring;
 878	unsigned long prev_state_val;
 879	unsigned long id_prev_wrap;
 880	struct prb_desc *desc;
 881	unsigned long head_id;
 882	unsigned long id;
 883
 884	head_id = atomic_long_read(&desc_ring->head_id); /* LMM(desc_reserve:A) */
 885
 886	do {
 887		id = DESC_ID(head_id + 1);
 888		id_prev_wrap = DESC_ID_PREV_WRAP(desc_ring, id);
 889
 890		/*
 891		 * Guarantee the head ID is read before reading the tail ID.
 892		 * Since the tail ID is updated before the head ID, this
 893		 * guarantees that @id_prev_wrap is never ahead of the tail
 894		 * ID. This pairs with desc_reserve:D.
 895		 *
 896		 * Memory barrier involvement:
 897		 *
 898		 * If desc_reserve:A reads from desc_reserve:D, then
 899		 * desc_reserve:C reads from desc_push_tail:B.
 900		 *
 901		 * Relies on:
 902		 *
 903		 * MB from desc_push_tail:B to desc_reserve:D
 904		 *    matching
 905		 * RMB from desc_reserve:A to desc_reserve:C
 906		 *
 907		 * Note: desc_push_tail:B and desc_reserve:D can be different
 908		 *       CPUs. However, the desc_reserve:D CPU (which performs
 909		 *       the full memory barrier) must have previously seen
 910		 *       desc_push_tail:B.
 911		 */
 912		smp_rmb(); /* LMM(desc_reserve:B) */
 913
 914		if (id_prev_wrap == atomic_long_read(&desc_ring->tail_id
 915						    )) { /* LMM(desc_reserve:C) */
 916			/*
 917			 * Make space for the new descriptor by
 918			 * advancing the tail.
 919			 */
 920			if (!desc_push_tail(rb, id_prev_wrap))
 921				return false;
 922		}
 923
 924		/*
 925		 * 1. Guarantee the tail ID is read before validating the
 926		 *    recycled descriptor state. A read memory barrier is
 927		 *    sufficient for this. This pairs with desc_push_tail:B.
 928		 *
 929		 *    Memory barrier involvement:
 930		 *
 931		 *    If desc_reserve:C reads from desc_push_tail:B, then
 932		 *    desc_reserve:E reads from desc_make_reusable:A.
 933		 *
 934		 *    Relies on:
 935		 *
 936		 *    MB from desc_make_reusable:A to desc_push_tail:B
 937		 *       matching
 938		 *    RMB from desc_reserve:C to desc_reserve:E
 939		 *
 940		 *    Note: desc_make_reusable:A and desc_push_tail:B can be
 941		 *          different CPUs. However, the desc_push_tail:B CPU
 942		 *          (which performs the full memory barrier) must have
 943		 *          previously seen desc_make_reusable:A.
 944		 *
 945		 * 2. Guarantee the tail ID is stored before storing the head
 946		 *    ID. This pairs with desc_reserve:B.
 947		 *
 948		 * 3. Guarantee any data ring tail changes are stored before
 949		 *    recycling the descriptor. Data ring tail changes can
 950		 *    happen via desc_push_tail()->data_push_tail(). A full
 951		 *    memory barrier is needed since another CPU may have
 952		 *    pushed the data ring tails. This pairs with
 953		 *    data_push_tail:B.
 954		 *
 955		 * 4. Guarantee a new tail ID is stored before recycling the
 956		 *    descriptor. A full memory barrier is needed since
 957		 *    another CPU may have pushed the tail ID. This pairs
 958		 *    with desc_push_tail:C and this also pairs with
 959		 *    prb_first_seq:C.
 960		 *
 961		 * 5. Guarantee the head ID is stored before trying to
 962		 *    finalize the previous descriptor. This pairs with
 963		 *    _prb_commit:B.
 964		 */
 965	} while (!atomic_long_try_cmpxchg(&desc_ring->head_id, &head_id,
 966					  id)); /* LMM(desc_reserve:D) */
 967
 968	desc = to_desc(desc_ring, id);
 969
 970	/*
 971	 * If the descriptor has been recycled, verify the old state val.
 972	 * See "ABA Issues" about why this verification is performed.
 973	 */
 974	prev_state_val = atomic_long_read(&desc->state_var); /* LMM(desc_reserve:E) */
 975	if (prev_state_val &&
 976	    get_desc_state(id_prev_wrap, prev_state_val) != desc_reusable) {
 977		WARN_ON_ONCE(1);
 978		return false;
 979	}
 980
 981	/*
 982	 * Assign the descriptor a new ID and set its state to reserved.
 983	 * See "ABA Issues" about why cmpxchg() instead of set() is used.
 984	 *
 985	 * Guarantee the new descriptor ID and state is stored before making
 986	 * any other changes. A write memory barrier is sufficient for this.
 987	 * This pairs with desc_read:D.
 988	 */
 989	if (!atomic_long_try_cmpxchg(&desc->state_var, &prev_state_val,
 990			DESC_SV(id, desc_reserved))) { /* LMM(desc_reserve:F) */
 991		WARN_ON_ONCE(1);
 992		return false;
 993	}
 994
 995	/* Now data in @desc can be modified: LMM(desc_reserve:G) */
 996
 997	*id_out = id;
 998	return true;
 999}
1000
1001/* Determine the end of a data block. */
1002static unsigned long get_next_lpos(struct prb_data_ring *data_ring,
1003				   unsigned long lpos, unsigned int size)
1004{
1005	unsigned long begin_lpos;
1006	unsigned long next_lpos;
1007
1008	begin_lpos = lpos;
1009	next_lpos = lpos + size;
1010
1011	/* First check if the data block does not wrap. */
1012	if (DATA_WRAPS(data_ring, begin_lpos) == DATA_WRAPS(data_ring, next_lpos))
1013		return next_lpos;
1014
1015	/* Wrapping data blocks store their data at the beginning. */
1016	return (DATA_THIS_WRAP_START_LPOS(data_ring, next_lpos) + size);
1017}
1018
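    /*
     * Worked example for get_next_lpos() above (hypothetical,
     * DATA_SIZE() = 32): for a block of size 12 beginning at lpos 56, the
     * unwrapped end would be 68, which lies in the next wrap. The block's
     * data is therefore stored at the start of that wrap and the returned
     * value is 64 + 12 = 76.
     */
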
1019/*
1020 * Allocate a new data block, invalidating the oldest data block(s)
1021 * if necessary. This function also associates the data block with
1022 * a specified descriptor.
1023 */
1024static char *data_alloc(struct printk_ringbuffer *rb, unsigned int size,
1025			struct prb_data_blk_lpos *blk_lpos, unsigned long id)
1026{
1027	struct prb_data_ring *data_ring = &rb->text_data_ring;
1028	struct prb_data_block *blk;
1029	unsigned long begin_lpos;
1030	unsigned long next_lpos;
1031
1032	if (size == 0) {
1033		/* Specify a data-less block. */
1034		blk_lpos->begin = NO_LPOS;
1035		blk_lpos->next = NO_LPOS;
1036		return NULL;
1037	}
1038
1039	size = to_blk_size(size);
1040
1041	begin_lpos = atomic_long_read(&data_ring->head_lpos);
1042
1043	do {
1044		next_lpos = get_next_lpos(data_ring, begin_lpos, size);
1045
1046		if (!data_push_tail(rb, next_lpos - DATA_SIZE(data_ring))) {
1047			/* Failed to allocate, specify a data-less block. */
1048			blk_lpos->begin = FAILED_LPOS;
1049			blk_lpos->next = FAILED_LPOS;
1050			return NULL;
1051		}
1052
1053		/*
1054		 * 1. Guarantee any descriptor states that have transitioned
1055		 *    to reusable are stored before modifying the newly
1056		 *    allocated data area. A full memory barrier is needed
1057		 *    since other CPUs may have made the descriptor states
1058		 *    reusable. See data_push_tail:A about why the reusable
1059		 *    states are visible. This pairs with desc_read:D.
1060		 *
1061		 * 2. Guarantee any updated tail lpos is stored before
1062		 *    modifying the newly allocated data area. Another CPU may
1063		 *    be in data_make_reusable() and is reading a block ID
1064		 *    from this area. data_make_reusable() can handle reading
1065		 *    a garbage block ID value, but then it must be able to
1066		 *    load a new tail lpos. A full memory barrier is needed
1067		 *    since other CPUs may have updated the tail lpos. This
1068		 *    pairs with data_push_tail:B.
1069		 */
1070	} while (!atomic_long_try_cmpxchg(&data_ring->head_lpos, &begin_lpos,
1071					  next_lpos)); /* LMM(data_alloc:A) */
1072
1073	blk = to_block(data_ring, begin_lpos);
1074	blk->id = id; /* LMM(data_alloc:B) */
1075
1076	if (DATA_WRAPS(data_ring, begin_lpos) != DATA_WRAPS(data_ring, next_lpos)) {
1077		/* Wrapping data blocks store their data at the beginning. */
1078		blk = to_block(data_ring, 0);
1079
1080		/*
1081		 * Store the ID on the wrapped block for consistency.
1082		 * The printk_ringbuffer does not actually use it.
1083		 */
1084		blk->id = id;
1085	}
1086
1087	blk_lpos->begin = begin_lpos;
1088	blk_lpos->next = next_lpos;
1089
1090	return &blk->data[0];
1091}
1092
1093/*
1094 * Try to resize an existing data block associated with the descriptor
1095 * specified by @id. If the resized data block should become wrapped, it
1096 * copies the old data to the new data block. If @size yields a data block
 1097 * with the same or a smaller size, the data block is left as is.
1098 *
1099 * Fail if this is not the last allocated data block or if there is not
 1100 * enough space or it is not possible to make enough space.
1101 *
1102 * Return a pointer to the beginning of the entire data buffer or NULL on
1103 * failure.
1104 */
1105static char *data_realloc(struct printk_ringbuffer *rb, unsigned int size,
1106			  struct prb_data_blk_lpos *blk_lpos, unsigned long id)
1107{
1108	struct prb_data_ring *data_ring = &rb->text_data_ring;
1109	struct prb_data_block *blk;
1110	unsigned long head_lpos;
1111	unsigned long next_lpos;
1112	bool wrapped;
1113
1114	/* Reallocation only works if @blk_lpos is the newest data block. */
1115	head_lpos = atomic_long_read(&data_ring->head_lpos);
1116	if (head_lpos != blk_lpos->next)
1117		return NULL;
1118
1119	/* Keep track if @blk_lpos was a wrapping data block. */
1120	wrapped = (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, blk_lpos->next));
1121
1122	size = to_blk_size(size);
1123
1124	next_lpos = get_next_lpos(data_ring, blk_lpos->begin, size);
1125
1126	/* If the data block does not increase, there is nothing to do. */
1127	if (head_lpos - next_lpos < DATA_SIZE(data_ring)) {
1128		if (wrapped)
1129			blk = to_block(data_ring, 0);
1130		else
1131			blk = to_block(data_ring, blk_lpos->begin);
1132		return &blk->data[0];
1133	}
1134
1135	if (!data_push_tail(rb, next_lpos - DATA_SIZE(data_ring)))
1136		return NULL;
1137
1138	/* The memory barrier involvement is the same as data_alloc:A. */
1139	if (!atomic_long_try_cmpxchg(&data_ring->head_lpos, &head_lpos,
1140				     next_lpos)) { /* LMM(data_realloc:A) */
1141		return NULL;
1142	}
1143
1144	blk = to_block(data_ring, blk_lpos->begin);
1145
1146	if (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, next_lpos)) {
1147		struct prb_data_block *old_blk = blk;
1148
1149		/* Wrapping data blocks store their data at the beginning. */
1150		blk = to_block(data_ring, 0);
1151
1152		/*
1153		 * Store the ID on the wrapped block for consistency.
1154		 * The printk_ringbuffer does not actually use it.
1155		 */
1156		blk->id = id;
1157
1158		if (!wrapped) {
1159			/*
1160			 * Since the allocated space is now in the newly
1161			 * created wrapping data block, copy the content
1162			 * from the old data block.
1163			 */
1164			memcpy(&blk->data[0], &old_blk->data[0],
1165			       (blk_lpos->next - blk_lpos->begin) - sizeof(blk->id));
1166		}
1167	}
1168
1169	blk_lpos->next = next_lpos;
1170
1171	return &blk->data[0];
1172}
1173
1174/* Return the number of bytes used by a data block. */
1175static unsigned int space_used(struct prb_data_ring *data_ring,
1176			       struct prb_data_blk_lpos *blk_lpos)
1177{
1178	/* Data-less blocks take no space. */
1179	if (BLK_DATALESS(blk_lpos))
1180		return 0;
1181
1182	if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next)) {
1183		/* Data block does not wrap. */
1184		return (DATA_INDEX(data_ring, blk_lpos->next) -
1185			DATA_INDEX(data_ring, blk_lpos->begin));
1186	}
1187
1188	/*
1189	 * For wrapping data blocks, the trailing (wasted) space is
1190	 * also counted.
1191	 */
1192	return (DATA_INDEX(data_ring, blk_lpos->next) +
1193		DATA_SIZE(data_ring) - DATA_INDEX(data_ring, blk_lpos->begin));
1194}
1195
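    /*
     * Worked example for space_used() above (hypothetical, DATA_SIZE() =
     * 32): a non-wrapping block with begin = 40 and next = 56 uses
     * 56 - 40 = 16 bytes. A wrapping block with begin = 56 and next = 76
     * uses 12 + 32 - 24 = 20 bytes, which includes the 8 trailing bytes
     * wasted before the wrap.
     */
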
1196/*
1197 * Given @blk_lpos, return a pointer to the writer data from the data block
1198 * and calculate the size of the data part. A NULL pointer is returned if
1199 * @blk_lpos specifies values that could never be legal.
1200 *
1201 * This function (used by readers) performs strict validation on the lpos
1202 * values to possibly detect bugs in the writer code. A WARN_ON_ONCE() is
1203 * triggered if an internal error is detected.
1204 */
1205static const char *get_data(struct prb_data_ring *data_ring,
1206			    struct prb_data_blk_lpos *blk_lpos,
1207			    unsigned int *data_size)
1208{
1209	struct prb_data_block *db;
1210
1211	/* Data-less data block description. */
1212	if (BLK_DATALESS(blk_lpos)) {
1213		if (blk_lpos->begin == NO_LPOS && blk_lpos->next == NO_LPOS) {
1214			*data_size = 0;
1215			return "";
1216		}
1217		return NULL;
1218	}
1219
1220	/* Regular data block: @begin less than @next and in same wrap. */
1221	if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next) &&
1222	    blk_lpos->begin < blk_lpos->next) {
1223		db = to_block(data_ring, blk_lpos->begin);
1224		*data_size = blk_lpos->next - blk_lpos->begin;
1225
1226	/* Wrapping data block: @begin is one wrap behind @next. */
1227	} else if (DATA_WRAPS(data_ring, blk_lpos->begin + DATA_SIZE(data_ring)) ==
1228		   DATA_WRAPS(data_ring, blk_lpos->next)) {
1229		db = to_block(data_ring, 0);
1230		*data_size = DATA_INDEX(data_ring, blk_lpos->next);
1231
1232	/* Illegal block description. */
1233	} else {
1234		WARN_ON_ONCE(1);
1235		return NULL;
1236	}
1237
1238	/* A valid data block will always be aligned to the ID size. */
1239	if (WARN_ON_ONCE(blk_lpos->begin != ALIGN(blk_lpos->begin, sizeof(db->id))) ||
1240	    WARN_ON_ONCE(blk_lpos->next != ALIGN(blk_lpos->next, sizeof(db->id)))) {
1241		return NULL;
1242	}
1243
1244	/* A valid data block will always have at least an ID. */
1245	if (WARN_ON_ONCE(*data_size < sizeof(db->id)))
1246		return NULL;
1247
1248	/* Subtract block ID space from size to reflect data size. */
1249	*data_size -= sizeof(db->id);
1250
1251	return &db->data[0];
1252}
1253
1254/*
1255 * Attempt to transition the newest descriptor from committed back to reserved
1256 * so that the record can be modified by a writer again. This is only possible
1257 * if the descriptor is not yet finalized and the provided @caller_id matches.
1258 */
1259static struct prb_desc *desc_reopen_last(struct prb_desc_ring *desc_ring,
1260					 u32 caller_id, unsigned long *id_out)
1261{
1262	unsigned long prev_state_val;
1263	enum desc_state d_state;
1264	struct prb_desc desc;
1265	struct prb_desc *d;
1266	unsigned long id;
1267	u32 cid;
1268
1269	id = atomic_long_read(&desc_ring->head_id);
1270
1271	/*
 1272	 * To reduce unnecessary reopening, first check if the descriptor
1273	 * state and caller ID are correct.
1274	 */
1275	d_state = desc_read(desc_ring, id, &desc, NULL, &cid);
1276	if (d_state != desc_committed || cid != caller_id)
1277		return NULL;
1278
1279	d = to_desc(desc_ring, id);
1280
1281	prev_state_val = DESC_SV(id, desc_committed);
1282
1283	/*
1284	 * Guarantee the reserved state is stored before reading any
1285	 * record data. A full memory barrier is needed because @state_var
1286	 * modification is followed by reading. This pairs with _prb_commit:B.
1287	 *
1288	 * Memory barrier involvement:
1289	 *
1290	 * If desc_reopen_last:A reads from _prb_commit:B, then
1291	 * prb_reserve_in_last:A reads from _prb_commit:A.
1292	 *
1293	 * Relies on:
1294	 *
1295	 * WMB from _prb_commit:A to _prb_commit:B
1296	 *    matching
 1297	 * MB from desc_reopen_last:A to prb_reserve_in_last:A
1298	 */
1299	if (!atomic_long_try_cmpxchg(&d->state_var, &prev_state_val,
1300			DESC_SV(id, desc_reserved))) { /* LMM(desc_reopen_last:A) */
1301		return NULL;
1302	}
1303
1304	*id_out = id;
1305	return d;
1306}
1307
1308/**
1309 * prb_reserve_in_last() - Re-reserve and extend the space in the ringbuffer
1310 *                         used by the newest record.
1311 *
1312 * @e:         The entry structure to setup.
1313 * @rb:        The ringbuffer to re-reserve and extend data in.
1314 * @r:         The record structure to allocate buffers for.
1315 * @caller_id: The caller ID of the caller (reserving writer).
1316 * @max_size:  Fail if the extended size would be greater than this.
1317 *
1318 * This is the public function available to writers to re-reserve and extend
1319 * data.
1320 *
1321 * The writer specifies the text size to extend (not the new total size) by
1322 * setting the @text_buf_size field of @r. To ensure proper initialization
1323 * of @r, prb_rec_init_wr() should be used.
1324 *
1325 * This function will fail if @caller_id does not match the caller ID of the
1326 * newest record. In that case the caller must reserve new data using
1327 * prb_reserve().
1328 *
1329 * Context: Any context. Disables local interrupts on success.
1330 * Return: true if text data could be extended, otherwise false.
1331 *
1332 * On success:
1333 *
1334 *   - @r->text_buf points to the beginning of the entire text buffer.
1335 *
1336 *   - @r->text_buf_size is set to the new total size of the buffer.
1337 *
1338 *   - @r->info is not touched so that @r->info->text_len could be used
1339 *     to append the text.
1340 *
1341 *   - prb_record_text_space() can be used on @e to query the new
1342 *     actually used space.
1343 *
1344 * Important: All @r->info fields will already be set with the current values
1345 *            for the record. I.e. @r->info->text_len will be less than
1346 *            @text_buf_size. Writers can use @r->info->text_len to know
1347 *            where concatenation begins and writers should update
1348 *            @r->info->text_len after concatenating.
1349 */
1350bool prb_reserve_in_last(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
1351			 struct printk_record *r, u32 caller_id, unsigned int max_size)
1352{
1353	struct prb_desc_ring *desc_ring = &rb->desc_ring;
1354	struct printk_info *info;
1355	unsigned int data_size;
1356	struct prb_desc *d;
1357	unsigned long id;
1358
1359	local_irq_save(e->irqflags);
1360
1361	/* Transition the newest descriptor back to the reserved state. */
1362	d = desc_reopen_last(desc_ring, caller_id, &id);
1363	if (!d) {
1364		local_irq_restore(e->irqflags);
1365		goto fail_reopen;
1366	}
1367
1368	/* Now the writer has exclusive access: LMM(prb_reserve_in_last:A) */
1369
1370	info = to_info(desc_ring, id);
1371
1372	/*
1373	 * Set the @e fields here so that prb_commit() can be used if
1374	 * anything fails from now on.
1375	 */
1376	e->rb = rb;
1377	e->id = id;
1378
1379	/*
1380	 * desc_reopen_last() checked the caller_id, but there was no
1381	 * exclusive access at that point. The descriptor may have
1382	 * changed since then.
1383	 */
1384	if (caller_id != info->caller_id)
1385		goto fail;
1386
1387	if (BLK_DATALESS(&d->text_blk_lpos)) {
1388		if (WARN_ON_ONCE(info->text_len != 0)) {
1389			pr_warn_once("wrong text_len value (%hu, expecting 0)\n",
1390				     info->text_len);
1391			info->text_len = 0;
1392		}
1393
1394		if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
1395			goto fail;
1396
1397		if (r->text_buf_size > max_size)
1398			goto fail;
1399
1400		r->text_buf = data_alloc(rb, r->text_buf_size,
1401					 &d->text_blk_lpos, id);
1402	} else {
1403		if (!get_data(&rb->text_data_ring, &d->text_blk_lpos, &data_size))
1404			goto fail;
1405
1406		/*
1407		 * Increase the buffer size to include the original size. If
1408		 * the meta data (@text_len) is not sane, use the full data
1409		 * block size.
1410		 */
1411		if (WARN_ON_ONCE(info->text_len > data_size)) {
1412			pr_warn_once("wrong text_len value (%hu, expecting <=%u)\n",
1413				     info->text_len, data_size);
1414			info->text_len = data_size;
1415		}
1416		r->text_buf_size += info->text_len;
1417
1418		if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
1419			goto fail;
1420
1421		if (r->text_buf_size > max_size)
1422			goto fail;
1423
1424		r->text_buf = data_realloc(rb, r->text_buf_size,
1425					   &d->text_blk_lpos, id);
1426	}
1427	if (r->text_buf_size && !r->text_buf)
1428		goto fail;
1429
1430	r->info = info;
1431
1432	e->text_space = space_used(&rb->text_data_ring, &d->text_blk_lpos);
1433
1434	return true;
1435fail:
1436	prb_commit(e);
1437	/* prb_commit() re-enabled interrupts. */
1438fail_reopen:
1439	/* Make it clear to the caller that the re-reserve failed. */
1440	memset(r, 0, sizeof(*r));
1441	return false;
1442}
1443
1444/*
1445 * Attempt to finalize a specified descriptor. If this fails, the descriptor
1446 * is either already final or it will finalize itself when the writer commits.
1447 */
1448static void desc_make_final(struct prb_desc_ring *desc_ring, unsigned long id)
1449{
1450	unsigned long prev_state_val = DESC_SV(id, desc_committed);
1451	struct prb_desc *d = to_desc(desc_ring, id);
1452
1453	atomic_long_cmpxchg_relaxed(&d->state_var, prev_state_val,
1454			DESC_SV(id, desc_finalized)); /* LMM(desc_make_final:A) */
1455
1456	/* Best effort to remember the last finalized @id. */
1457	atomic_long_set(&desc_ring->last_finalized_id, id);
1458}
1459
1460/**
1461 * prb_reserve() - Reserve space in the ringbuffer.
1462 *
1463 * @e:  The entry structure to setup.
1464 * @rb: The ringbuffer to reserve data in.
1465 * @r:  The record structure to allocate buffers for.
1466 *
1467 * This is the public function available to writers to reserve data.
1468 *
1469 * The writer specifies the text size to reserve by setting the
1470 * @text_buf_size field of @r. To ensure proper initialization of @r,
1471 * prb_rec_init_wr() should be used.
1472 *
1473 * Context: Any context. Disables local interrupts on success.
1474 * Return: true if at least text data could be allocated, otherwise false.
1475 *
1476 * On success, the fields @info and @text_buf of @r will be set by this
1477 * function and should be filled in by the writer before committing. Also
1478 * on success, prb_record_text_space() can be used on @e to query the actual
1479 * space used for the text data block.
1480 *
1481 * Important: @info->text_len needs to be set correctly by the writer in
1482 *            order for data to be readable and/or extended. Its value
1483 *            is initialized to 0.
1484 */
1485bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
1486		 struct printk_record *r)
1487{
1488	struct prb_desc_ring *desc_ring = &rb->desc_ring;
1489	struct printk_info *info;
1490	struct prb_desc *d;
1491	unsigned long id;
1492	u64 seq;
1493
1494	if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
1495		goto fail;
1496
1497	/*
1498	 * Descriptors in the reserved state act as blockers to all further
1499	 * reservations once the desc_ring has fully wrapped. Disable
1500	 * interrupts during the reserve/commit window in order to minimize
1501	 * the likelihood of this happening.
1502	 */
1503	local_irq_save(e->irqflags);
1504
1505	if (!desc_reserve(rb, &id)) {
1506		/* Descriptor reservation failures are tracked. */
1507		atomic_long_inc(&rb->fail);
1508		local_irq_restore(e->irqflags);
1509		goto fail;
1510	}
1511
1512	d = to_desc(desc_ring, id);
1513	info = to_info(desc_ring, id);
1514
1515	/*
1516	 * All @info fields (except @seq) are cleared and must be filled in
1517	 * by the writer. Save @seq before clearing because it is used to
1518	 * determine the new sequence number.
1519	 */
1520	seq = info->seq;
1521	memset(info, 0, sizeof(*info));
1522
1523	/*
1524	 * Set the @e fields here so that prb_commit() can be used if
1525	 * text data allocation fails.
1526	 */
1527	e->rb = rb;
1528	e->id = id;
1529
1530	/*
1531	 * Initialize the sequence number if it has "never been set".
1532	 * Otherwise just increment it by a full wrap.
1533	 *
1534	 * @seq is considered "never been set" if it has a value of 0,
1535	 * _except_ for @infos[0], which was specially set up by the ringbuffer
1536	 * initializer and therefore is always considered as set.
1537	 *
1538	 * See the "Bootstrap" comment block in printk_ringbuffer.h for
1539	 * details about how the initializer bootstraps the descriptors.
1540	 */
1541	if (seq == 0 && DESC_INDEX(desc_ring, id) != 0)
1542		info->seq = DESC_INDEX(desc_ring, id);
1543	else
1544		info->seq = seq + DESCS_COUNT(desc_ring);
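	/*
	 * Worked example (editorial note, not from the original source):
	 * with 2^15 descriptors, the descriptor at array index 10 gets
	 * seq 10 on its first reservation (seq was 0 and index != 0),
	 * then 10 + 32768 on its next wrap, 10 + 65536 after that, and
	 * so on. Index 0 is the bootstrap exception: infos[0].seq is
	 * pre-set to -32768 by prb_init(), so its first reservation
	 * yields seq 0 via the "+ DESCS_COUNT" branch.
	 */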
1545
1546	/*
1547	 * New data is about to be reserved. Once that happens, previous
1548	 * descriptors are no longer able to be extended. Finalize the
1549	 * previous descriptor now so that it can be made available to
1550	 * readers. (For seq==0 there is no previous descriptor.)
1551	 */
1552	if (info->seq > 0)
1553		desc_make_final(desc_ring, DESC_ID(id - 1));
1554
1555	r->text_buf = data_alloc(rb, r->text_buf_size, &d->text_blk_lpos, id);
1556	/* If text data allocation fails, a data-less record is committed. */
1557	if (r->text_buf_size && !r->text_buf) {
1558		prb_commit(e);
1559		/* prb_commit() re-enabled interrupts. */
1560		goto fail;
1561	}
1562
1563	r->info = info;
1564
1565	/* Record full text space used by record. */
1566	e->text_space = space_used(&rb->text_data_ring, &d->text_blk_lpos);
1567
1568	return true;
1569fail:
1570	/* Make it clear to the caller that the reserve failed. */
1571	memset(r, 0, sizeof(*r));
1572	return false;
1573}
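/*
 * Editorial sketch (not part of the original source): a minimal writer
 * sequence using prb_reserve(), mirroring the "Usage" DOC section at the
 * top of this file. The ringbuffer @test_rb is hypothetical, e.g. created
 * with DEFINE_PRINTKRB(); prb_commit()/prb_final_commit() are defined
 * further below::
 *
 *	const char *text = "message text";
 *	struct prb_reserved_entry e;
 *	struct printk_record r;
 *
 *	prb_rec_init_wr(&r, strlen(text) + 1);
 *
 *	if (prb_reserve(&e, &test_rb, &r)) {
 *		snprintf(r.text_buf, r.text_buf_size, "%s", text);
 *		r.info->text_len = strlen(text);
 *		prb_final_commit(&e);
 *	}
 */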
1574
1575/* Commit the data (possibly finalizing it) and restore interrupts. */
1576static void _prb_commit(struct prb_reserved_entry *e, unsigned long state_val)
1577{
1578	struct prb_desc_ring *desc_ring = &e->rb->desc_ring;
1579	struct prb_desc *d = to_desc(desc_ring, e->id);
1580	unsigned long prev_state_val = DESC_SV(e->id, desc_reserved);
1581
1582	/* Now the writer has finished all writing: LMM(_prb_commit:A) */
1583
1584	/*
1585	 * Set the descriptor as committed. See "ABA Issues" about why
1586	 * cmpxchg() instead of set() is used.
1587	 *
1588	 * 1. Guarantee all record data is stored before the descriptor state
1589	 *    is stored as committed. A write memory barrier is sufficient
1590	 *    for this. This pairs with desc_read:B and desc_reopen_last:A.
1591	 *
1592	 * 2. Guarantee the descriptor state is stored as committed before
1593	 *    re-checking the head ID in order to possibly finalize this
1594	 *    descriptor. This pairs with desc_reserve:D.
1595	 *
1596	 *    Memory barrier involvement:
1597	 *
1598	 *    If prb_commit:A reads from desc_reserve:D, then
1599	 *    desc_make_final:A reads from _prb_commit:B.
1600	 *
1601	 *    Relies on:
1602	 *
1603	 *    MB _prb_commit:B to prb_commit:A
1604	 *       matching
1605	 *    MB desc_reserve:D to desc_make_final:A
1606	 */
1607	if (!atomic_long_try_cmpxchg(&d->state_var, &prev_state_val,
1608			DESC_SV(e->id, state_val))) { /* LMM(_prb_commit:B) */
1609		WARN_ON_ONCE(1);
1610	}
1611
1612	/* Restore interrupts, the reserve/commit window is finished. */
1613	local_irq_restore(e->irqflags);
1614}
1615
1616/**
1617 * prb_commit() - Commit (previously reserved) data to the ringbuffer.
1618 *
1619 * @e: The entry containing the reserved data information.
1620 *
1621 * This is the public function available to writers to commit data.
1622 *
1623 * Note that the data is not yet available to readers until it is finalized.
1624 * Finalizing happens automatically when space for the next record is
1625 * reserved.
1626 *
1627 * See prb_final_commit() for a version of this function that finalizes
1628 * immediately.
1629 *
1630 * Context: Any context. Enables local interrupts.
1631 */
1632void prb_commit(struct prb_reserved_entry *e)
1633{
1634	struct prb_desc_ring *desc_ring = &e->rb->desc_ring;
1635	unsigned long head_id;
1636
1637	_prb_commit(e, desc_committed);
1638
1639	/*
1640	 * If this descriptor is no longer the head (i.e. a new record has
1641	 * been allocated), extending the data for this record is no longer
1642	 * allowed and therefore it must be finalized.
1643	 */
1644	head_id = atomic_long_read(&desc_ring->head_id); /* LMM(prb_commit:A) */
1645	if (head_id != e->id)
1646		desc_make_final(desc_ring, e->id);
1647}
1648
1649/**
1650 * prb_final_commit() - Commit and finalize (previously reserved) data to
1651 *                      the ringbuffer.
1652 *
1653 * @e: The entry containing the reserved data information.
1654 *
1655 * This is the public function available to writers to commit+finalize data.
1656 *
1657 * By finalizing, the data is made immediately available to readers.
1658 *
1659 * This function should only be used if there are no intentions of extending
1660 * this data using prb_reserve_in_last().
1661 *
1662 * Context: Any context. Enables local interrupts.
1663 */
1664void prb_final_commit(struct prb_reserved_entry *e)
1665{
1666	struct prb_desc_ring *desc_ring = &e->rb->desc_ring;
1667
1668	_prb_commit(e, desc_finalized);
1669
1670	/* Best effort to remember the last finalized @id. */
1671	atomic_long_set(&desc_ring->last_finalized_id, e->id);
1672}
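/*
 * Editorial sketch (not part of the original source): the extend pattern.
 * A writer that may want to append more text later uses prb_commit()
 * rather than prb_final_commit(), then reopens the record with
 * prb_reserve_in_last(). @test_rb and @caller_id are placeholders;
 * @caller_id must match the id stored when the record was reserved::
 *
 *	prb_commit(&e);		// committed but still extendable
 *
 *	// ... later, request 5 more bytes, capped at 32 bytes total
 *	prb_rec_init_wr(&r, 5);
 *	if (prb_reserve_in_last(&e, &test_rb, &r, caller_id, 32)) {
 *		snprintf(&r.text_buf[r.info->text_len],
 *			 r.text_buf_size - r.info->text_len, "hello");
 *		r.info->text_len += 5;
 *		prb_final_commit(&e);
 *	}
 */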
1673
1674/*
1675 * Count the number of lines in provided text. All text has at least 1 line
1676 * (even if @text_size is 0). Each '\n' processed is counted as an additional
1677 * line.
1678 */
1679static unsigned int count_lines(const char *text, unsigned int text_size)
1680{
1681	unsigned int next_size = text_size;
1682	unsigned int line_count = 1;
1683	const char *next = text;
1684
1685	while (next_size) {
1686		next = memchr(next, '\n', next_size);
1687		if (!next)
1688			break;
1689		line_count++;
1690		next++;
1691		next_size = text_size - (next - text);
1692	}
1693
1694	return line_count;
1695}
1696
1697/*
1698 * Given @blk_lpos, copy an expected @len of data into the provided buffer.
1699 * If @line_count is provided, count the number of lines in the data.
1700 *
1701 * This function (used by readers) performs strict validation on the data
1702 * size to possibly detect bugs in the writer code. A WARN_ON_ONCE() is
1703 * triggered if an internal error is detected.
1704 */
1705static bool copy_data(struct prb_data_ring *data_ring,
1706		      struct prb_data_blk_lpos *blk_lpos, u16 len, char *buf,
1707		      unsigned int buf_size, unsigned int *line_count)
1708{
1709	unsigned int data_size;
1710	const char *data;
1711
1712	/* Caller might not want any data. */
1713	if ((!buf || !buf_size) && !line_count)
1714		return true;
1715
1716	data = get_data(data_ring, blk_lpos, &data_size);
1717	if (!data)
1718		return false;
1719
1720	/*
1721	 * Actual cannot be less than expected. It can be more than expected
1722	 * because of the trailing alignment padding.
1723	 *
1724	 * Note that invalid @len values can occur because the caller loads
1725	 * the value during an allowed data race.
1726	 */
1727	if (data_size < (unsigned int)len)
1728		return false;
1729
1730	/* Caller interested in the line count? */
1731	if (line_count)
1732		*line_count = count_lines(data, len);
1733
1734	/* Caller interested in the data content? */
1735	if (!buf || !buf_size)
1736		return true;
1737
1738	data_size = min_t(u16, buf_size, len);
1739
1740	memcpy(&buf[0], data, data_size); /* LMM(copy_data:A) */
1741	return true;
1742}
1743
1744/*
1745 * This is an extended version of desc_read(). It gets a copy of a specified
1746 * descriptor. However, it also verifies that the record is finalized and has
1747 * the sequence number @seq. On success, 0 is returned.
1748 *
1749 * Error return values:
1750 * -EINVAL: A finalized record with sequence number @seq does not exist.
1751 * -ENOENT: A finalized record with sequence number @seq exists, but its data
1752 *          is not available. This is a valid record, so readers should
1753 *          continue with the next record.
1754 */
1755static int desc_read_finalized_seq(struct prb_desc_ring *desc_ring,
1756				   unsigned long id, u64 seq,
1757				   struct prb_desc *desc_out)
1758{
1759	struct prb_data_blk_lpos *blk_lpos = &desc_out->text_blk_lpos;
1760	enum desc_state d_state;
1761	u64 s;
1762
1763	d_state = desc_read(desc_ring, id, desc_out, &s, NULL);
1764
1765	/*
1766	 * An unexpected @id (desc_miss) or @seq mismatch means the record
1767	 * does not exist. A descriptor in the reserved or committed state
1768	 * means the record does not yet exist for the reader.
1769	 */
1770	if (d_state == desc_miss ||
1771	    d_state == desc_reserved ||
1772	    d_state == desc_committed ||
1773	    s != seq) {
1774		return -EINVAL;
1775	}
1776
1777	/*
1778	 * A descriptor in the reusable state may no longer have its data
1779	 * available; report it as existing but with lost data. Or the record
1780	 * may actually be a record with lost data.
1781	 */
1782	if (d_state == desc_reusable ||
1783	    (blk_lpos->begin == FAILED_LPOS && blk_lpos->next == FAILED_LPOS)) {
1784		return -ENOENT;
1785	}
1786
1787	return 0;
1788}
1789
1790/*
1791 * Copy the ringbuffer data from the record with @seq to the provided
1792 * @r buffer. On success, 0 is returned.
1793 *
1794 * See desc_read_finalized_seq() for error return values.
1795 */
1796static int prb_read(struct printk_ringbuffer *rb, u64 seq,
1797		    struct printk_record *r, unsigned int *line_count)
1798{
1799	struct prb_desc_ring *desc_ring = &rb->desc_ring;
1800	struct printk_info *info = to_info(desc_ring, seq);
1801	struct prb_desc *rdesc = to_desc(desc_ring, seq);
1802	atomic_long_t *state_var = &rdesc->state_var;
1803	struct prb_desc desc;
1804	unsigned long id;
1805	int err;
1806
1807	/* Extract the ID, used to specify the descriptor to read. */
1808	id = DESC_ID(atomic_long_read(state_var));
1809
1810	/* Get a local copy of the correct descriptor (if available). */
1811	err = desc_read_finalized_seq(desc_ring, id, seq, &desc);
1812
1813	/*
1814	 * If @r is NULL, the caller is only interested in the availability
1815	 * of the record.
1816	 */
1817	if (err || !r)
1818		return err;
1819
1820	/* If requested, copy meta data. */
1821	if (r->info)
1822		memcpy(r->info, info, sizeof(*(r->info)));
1823
1824	/* Copy text data. If it fails, this is a data-less record. */
1825	if (!copy_data(&rb->text_data_ring, &desc.text_blk_lpos, info->text_len,
1826		       r->text_buf, r->text_buf_size, line_count)) {
1827		return -ENOENT;
1828	}
1829
1830	/* Ensure the record is still finalized and has the same @seq. */
1831	return desc_read_finalized_seq(desc_ring, id, seq, &desc);
1832}
1833
1834/* Get the sequence number of the tail descriptor. */
1835static u64 prb_first_seq(struct printk_ringbuffer *rb)
1836{
1837	struct prb_desc_ring *desc_ring = &rb->desc_ring;
1838	enum desc_state d_state;
1839	struct prb_desc desc;
1840	unsigned long id;
1841	u64 seq;
1842
1843	for (;;) {
1844		id = atomic_long_read(&rb->desc_ring.tail_id); /* LMM(prb_first_seq:A) */
1845
1846		d_state = desc_read(desc_ring, id, &desc, &seq, NULL); /* LMM(prb_first_seq:B) */
1847
1848		/*
1849		 * This loop will not be infinite because the tail is
1850		 * _always_ in the finalized or reusable state.
1851		 */
1852		if (d_state == desc_finalized || d_state == desc_reusable)
1853			break;
1854
1855		/*
1856		 * Guarantee the last state load from desc_read() is before
1857		 * reloading @tail_id in order to see a new tail in the case
1858		 * that the descriptor has been recycled. This pairs with
1859		 * desc_reserve:D.
1860		 *
1861		 * Memory barrier involvement:
1862		 *
1863		 * If prb_first_seq:B reads from desc_reserve:F, then
1864		 * prb_first_seq:A reads from desc_push_tail:B.
1865		 *
1866		 * Relies on:
1867		 *
1868		 * MB from desc_push_tail:B to desc_reserve:F
1869		 *    matching
1870		 * RMB prb_first_seq:B to prb_first_seq:A
1871		 */
1872		smp_rmb(); /* LMM(prb_first_seq:C) */
1873	}
1874
1875	return seq;
1876}
1877
1878/*
1879 * Non-blocking read of a record. Updates @seq to the last finalized record
1880 * (which may have no data available).
1881 *
1882 * See the description of prb_read_valid() and prb_read_valid_info()
1883 * for details.
1884 */
1885static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq,
1886			    struct printk_record *r, unsigned int *line_count)
1887{
1888	u64 tail_seq;
1889	int err;
1890
1891	while ((err = prb_read(rb, *seq, r, line_count))) {
1892		tail_seq = prb_first_seq(rb);
1893
1894		if (*seq < tail_seq) {
1895			/*
1896			 * Behind the tail. Catch up and try again. This
1897			 * can happen for -ENOENT and -EINVAL cases.
1898			 */
1899			*seq = tail_seq;
1900
1901		} else if (err == -ENOENT) {
1902			/* Record exists, but no data available. Skip. */
1903			(*seq)++;
1904
1905		} else {
1906			/* Non-existent/non-finalized record. Must stop. */
1907			return false;
1908		}
1909	}
1910
1911	return true;
1912}
1913
1914/**
1915 * prb_read_valid() - Non-blocking read of a requested record or (if gone)
1916 *                    the next available record.
1917 *
1918 * @rb:  The ringbuffer to read from.
1919 * @seq: The sequence number of the record to read.
1920 * @r:   A record data buffer to store the read record to.
1921 *
1922 * This is the public function available to readers to read a record.
1923 *
1924 * The reader provides the @info and @text_buf buffers of @r to be
1925 * filled in. Any of the buffer pointers can be set to NULL if the reader
1926 * is not interested in that data. To ensure proper initialization of @r,
1927 * prb_rec_init_rd() should be used.
1928 *
1929 * Context: Any context.
1930 * Return: true if a record was read, otherwise false.
1931 *
1932 * On success, the reader must check r->info->seq to see which record was
1933 * actually read. This allows the reader to detect dropped records.
1934 *
1935 * Failure means @seq refers to a not yet written record.
1936 */
1937bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq,
1938		    struct printk_record *r)
1939{
1940	return _prb_read_valid(rb, &seq, r, NULL);
1941}
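/*
 * Editorial sketch (not part of the original source): a minimal reader
 * loop built directly on prb_read_valid(). @test_rb is hypothetical;
 * comparing info.seq against the requested seq detects dropped records::
 *
 *	struct printk_info info;
 *	struct printk_record r;
 *	char text[128];
 *	u64 seq;
 *
 *	prb_rec_init_rd(&r, &info, &text[0], sizeof(text));
 *
 *	for (seq = 0; prb_read_valid(&test_rb, seq, &r); seq = info.seq + 1) {
 *		if (info.seq != seq)
 *			pr_warn("lost %llu records\n", info.seq - seq);
 *
 *		text[min_t(unsigned int, info.text_len, sizeof(text) - 1)] = 0;
 *		pr_info("%llu: %s\n", info.seq, text);
 *	}
 */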
1942
1943/**
1944 * prb_read_valid_info() - Non-blocking read of meta data for a requested
1945 *                         record or (if gone) the next available record.
1946 *
1947 * @rb:         The ringbuffer to read from.
1948 * @seq:        The sequence number of the record to read.
1949 * @info:       A buffer to store the read record meta data to.
1950 * @line_count: A buffer to store the number of lines in the record text.
1951 *
1952 * This is the public function available to readers to read only the
1953 * meta data of a record.
1954 *
1955 * The reader provides the @info, @line_count buffers to be filled in.
1956 * Either of the buffer pointers can be set to NULL if the reader is not
1957 * interested in that data.
1958 *
1959 * Context: Any context.
1960 * Return: true if a record's meta data was read, otherwise false.
1961 *
1962 * On success, the reader must check info->seq to see which record meta data
1963 * was actually read. This allows the reader to detect dropped records.
1964 *
1965 * Failure means @seq refers to a not yet written record.
1966 */
1967bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq,
1968			 struct printk_info *info, unsigned int *line_count)
1969{
1970	struct printk_record r;
1971
1972	prb_rec_init_rd(&r, info, NULL, 0);
1973
1974	return _prb_read_valid(rb, &seq, &r, line_count);
1975}
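/*
 * Editorial sketch (not part of the original source): querying only the
 * meta data and line count of a record, without copying its text.
 * @test_rb and @seq are placeholders::
 *
 *	struct printk_info info;
 *	unsigned int line_count;
 *
 *	if (prb_read_valid_info(&test_rb, seq, &info, &line_count)) {
 *		pr_info("record %llu: %u byte(s), %u line(s)\n",
 *			info.seq, info.text_len, line_count);
 *	}
 */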
1976
1977/**
1978 * prb_first_valid_seq() - Get the sequence number of the oldest available
1979 *                         record.
1980 *
1981 * @rb: The ringbuffer to get the sequence number from.
1982 *
1983 * This is the public function available to readers to see what the
1984 * first/oldest valid sequence number is.
1985 *
1986 * This provides readers a starting point to begin iterating the ringbuffer.
1987 *
1988 * Context: Any context.
1989 * Return: The sequence number of the first/oldest record or, if the
1990 *         ringbuffer is empty, 0 is returned.
1991 */
1992u64 prb_first_valid_seq(struct printk_ringbuffer *rb)
1993{
1994	u64 seq = 0;
1995
1996	if (!_prb_read_valid(rb, &seq, NULL, NULL))
1997		return 0;
1998
1999	return seq;
2000}
2001
2002/**
2003 * prb_next_seq() - Get the sequence number after the last available record.
2004 *
2005 * @rb:  The ringbuffer to get the sequence number from.
2006 *
2007 * This is the public function available to readers to see what the next
2008 * newest sequence number available to readers will be.
2009 *
2010 * This provides readers a sequence number to jump to if all currently
2011 * available records should be skipped.
2012 *
2013 * Context: Any context.
2014 * Return: The sequence number of the next newest (not yet available) record
2015 *         for readers.
2016 */
2017u64 prb_next_seq(struct printk_ringbuffer *rb)
2018{
2019	struct prb_desc_ring *desc_ring = &rb->desc_ring;
2020	enum desc_state d_state;
2021	unsigned long id;
2022	u64 seq;
2023
2024	/* Check if the cached @id still points to a valid @seq. */
2025	id = atomic_long_read(&desc_ring->last_finalized_id);
2026	d_state = desc_read(desc_ring, id, NULL, &seq, NULL);
2027
2028	if (d_state == desc_finalized || d_state == desc_reusable) {
2029		/*
2030		 * Begin searching after the last finalized record.
2031		 *
2032		 * On 0, the search must begin at 0 because, due to hack#2
2033		 * of the bootstrapping phase, it is not known whether a
2034		 * record at index 0 exists.
2035		 */
2036		if (seq != 0)
2037			seq++;
2038	} else {
2039		/*
2040		 * The information about the last finalized sequence number
2041		 * is gone. This should only happen when there is a flood of
2042		 * new messages and the ringbuffer is rapidly recycled.
2043		 * Give up and start from the beginning.
2044		 */
2045		seq = 0;
2046	}
2047
2048	/*
2049	 * The information about the last finalized @seq might be inaccurate.
2050	 * Search forward to find the current one.
2051	 */
2052	while (_prb_read_valid(rb, &seq, NULL, NULL))
2053		seq++;
2054
2055	return seq;
2056}
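/*
 * Editorial sketch (not part of the original source): a reader that only
 * cares about future records can skip everything currently stored and
 * poll from the returned sequence number. @test_rb is hypothetical::
 *
 *	u64 seq = prb_next_seq(&test_rb);
 *
 *	// prb_read_valid(&test_rb, seq, &r) stays false until a record
 *	// with this (or a later) sequence number becomes available
 */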
2057
2058/**
2059 * prb_init() - Initialize a ringbuffer to use provided external buffers.
2060 *
2061 * @rb:       The ringbuffer to initialize.
2062 * @text_buf: The data buffer for text data.
2063 * @textbits: The size of @text_buf as a power-of-2 value.
2064 * @descs:    The descriptor buffer for ringbuffer records.
2065 * @descbits: The count of @descs items as a power-of-2 value.
2066 * @infos:    The printk_info buffer for ringbuffer records.
2067 *
2068 * This is the public function available to writers to set up a ringbuffer
2069 * during runtime using provided buffers.
2070 *
2071 * This must match the initialization of DEFINE_PRINTKRB().
2072 *
2073 * Context: Any context.
2074 */
2075void prb_init(struct printk_ringbuffer *rb,
2076	      char *text_buf, unsigned int textbits,
2077	      struct prb_desc *descs, unsigned int descbits,
2078	      struct printk_info *infos)
2079{
2080	memset(descs, 0, _DESCS_COUNT(descbits) * sizeof(descs[0]));
2081	memset(infos, 0, _DESCS_COUNT(descbits) * sizeof(infos[0]));
2082
2083	rb->desc_ring.count_bits = descbits;
2084	rb->desc_ring.descs = descs;
2085	rb->desc_ring.infos = infos;
2086	atomic_long_set(&rb->desc_ring.head_id, DESC0_ID(descbits));
2087	atomic_long_set(&rb->desc_ring.tail_id, DESC0_ID(descbits));
2088	atomic_long_set(&rb->desc_ring.last_finalized_id, DESC0_ID(descbits));
2089
2090	rb->text_data_ring.size_bits = textbits;
2091	rb->text_data_ring.data = text_buf;
2092	atomic_long_set(&rb->text_data_ring.head_lpos, BLK0_LPOS(textbits));
2093	atomic_long_set(&rb->text_data_ring.tail_lpos, BLK0_LPOS(textbits));
2094
2095	atomic_long_set(&rb->fail, 0);
2096
2097	atomic_long_set(&(descs[_DESCS_COUNT(descbits) - 1].state_var), DESC0_SV(descbits));
2098	descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.begin = FAILED_LPOS;
2099	descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.next = FAILED_LPOS;
2100
2101	infos[0].seq = -(u64)_DESCS_COUNT(descbits);
2102	infos[_DESCS_COUNT(descbits) - 1].seq = 0;
2103}
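/*
 * Editorial sketch (not part of the original source): runtime setup with
 * caller-provided buffers, mirroring what DEFINE_PRINTKRB() does
 * statically. The sizes are arbitrary: 2^4 = 16 descriptors/infos and
 * 2^12 = 4 KiB of text data::
 *
 *	static char my_text[1 << 12] __aligned(__alignof__(unsigned long));
 *	static struct prb_desc my_descs[1 << 4];
 *	static struct printk_info my_infos[1 << 4];
 *	static struct printk_ringbuffer my_rb;
 *
 *	prb_init(&my_rb, my_text, 12, my_descs, 4, my_infos);
 */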
2104
2105/**
2106 * prb_record_text_space() - Query the full actual used ringbuffer space for
2107 *                           the text data of a reserved entry.
2108 *
2109 * @e: The successfully reserved entry to query.
2110 *
2111 * This is the public function available to writers to see how much actual
2112 * space is used in the ringbuffer to store the text data of the specified
2113 * entry.
2114 *
2115 * This function is only valid if @e has been successfully reserved using
2116 * prb_reserve().
2117 *
2118 * Context: Any context.
2119 * Return: The size in bytes used by the text data of the associated record.
2120 */
2121unsigned int prb_record_text_space(struct prb_reserved_entry *e)
2122{
2123	return e->text_space;
2124}
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2
   3#include <linux/kernel.h>
   4#include <linux/irqflags.h>
   5#include <linux/string.h>
   6#include <linux/errno.h>
   7#include <linux/bug.h>
   8#include "printk_ringbuffer.h"
   9
  10/**
  11 * DOC: printk_ringbuffer overview
  12 *
  13 * Data Structure
  14 * --------------
  15 * The printk_ringbuffer is made up of 3 internal ringbuffers:
  16 *
  17 *   desc_ring
  18 *     A ring of descriptors and their meta data (such as sequence number,
  19 *     timestamp, loglevel, etc.) as well as internal state information about
  20 *     the record and logical positions specifying where in the other
  21 *     ringbuffer the text strings are located.
  22 *
  23 *   text_data_ring
  24 *     A ring of data blocks. A data block consists of an unsigned long
  25 *     integer (ID) that maps to a desc_ring index followed by the text
  26 *     string of the record.
  27 *
  28 * The internal state information of a descriptor is the key element to allow
  29 * readers and writers to locklessly synchronize access to the data.
  30 *
  31 * Implementation
  32 * --------------
  33 *
  34 * Descriptor Ring
  35 * ~~~~~~~~~~~~~~~
  36 * The descriptor ring is an array of descriptors. A descriptor contains
  37 * essential meta data to track the data of a printk record using
  38 * blk_lpos structs pointing to associated text data blocks (see
  39 * "Data Rings" below). Each descriptor is assigned an ID that maps
  40 * directly to index values of the descriptor array and has a state. The ID
  41 * and the state are bitwise combined into a single descriptor field named
  42 * @state_var, allowing ID and state to be synchronously and atomically
  43 * updated.
  44 *
  45 * Descriptors have four states:
  46 *
  47 *   reserved
  48 *     A writer is modifying the record.
  49 *
  50 *   committed
  51 *     The record and all its data are written. A writer can reopen the
  52 *     descriptor (transitioning it back to reserved), but in the committed
  53 *     state the data is consistent.
  54 *
  55 *   finalized
  56 *     The record and all its data are complete and available for reading. A
  57 *     writer cannot reopen the descriptor.
  58 *
  59 *   reusable
  60 *     The record exists, but its text and/or meta data may no longer be
  61 *     available.
  62 *
  63 * Querying the @state_var of a record requires providing the ID of the
  64 * descriptor to query. This can yield a possible fifth (pseudo) state:
  65 *
  66 *   miss
  67 *     The descriptor being queried has an unexpected ID.
  68 *
  69 * The descriptor ring has a @tail_id that contains the ID of the oldest
  70 * descriptor and @head_id that contains the ID of the newest descriptor.
  71 *
  72 * When a new descriptor should be created (and the ring is full), the tail
  73 * descriptor is invalidated by first transitioning to the reusable state and
  74 * then invalidating all tail data blocks up to and including the data blocks
  75 * associated with the tail descriptor (for the text ring). Then
  76 * @tail_id is advanced, followed by advancing @head_id. And finally the
  77 * @state_var of the new descriptor is initialized to the new ID and reserved
  78 * state.
  79 *
  80 * The @tail_id can only be advanced if the new @tail_id would be in the
  81 * committed or reusable queried state. This makes it possible that a valid
  82 * sequence number of the tail is always available.
  83 *
  84 * Descriptor Finalization
  85 * ~~~~~~~~~~~~~~~~~~~~~~~
  86 * When a writer calls the commit function prb_commit(), record data is
  87 * fully stored and is consistent within the ringbuffer. However, a writer can
  88 * reopen that record, claiming exclusive access (as with prb_reserve()), and
  89 * modify that record. When finished, the writer must again commit the record.
  90 *
  91 * In order for a record to be made available to readers (and also become
  92 * recyclable for writers), it must be finalized. A finalized record cannot be
  93 * reopened and can never become "unfinalized". Record finalization can occur
  94 * in three different scenarios:
  95 *
  96 *   1) A writer can simultaneously commit and finalize its record by calling
  97 *      prb_final_commit() instead of prb_commit().
  98 *
  99 *   2) When a new record is reserved and the previous record has been
 100 *      committed via prb_commit(), that previous record is automatically
 101 *      finalized.
 102 *
 103 *   3) When a record is committed via prb_commit() and a newer record
 104 *      already exists, the record being committed is automatically finalized.
 105 *
 106 * Data Ring
 107 * ~~~~~~~~~
 108 * The text data ring is a byte array composed of data blocks. Data blocks are
 109 * referenced by blk_lpos structs that point to the logical position of the
 110 * beginning of a data block and the beginning of the next adjacent data
 111 * block. Logical positions are mapped directly to index values of the byte
 112 * array ringbuffer.
 113 *
 114 * Each data block consists of an ID followed by the writer data. The ID is
 115 * the identifier of a descriptor that is associated with the data block. A
 116 * given data block is considered valid if all of the following conditions
 117 * are met:
 118 *
 119 *   1) The descriptor associated with the data block is in the committed
 120 *      or finalized queried state.
 121 *
 122 *   2) The blk_lpos struct within the descriptor associated with the data
 123 *      block references back to the same data block.
 124 *
 125 *   3) The data block is within the head/tail logical position range.
 126 *
 127 * If the writer data of a data block would extend beyond the end of the
 128 * byte array, only the ID of the data block is stored at the logical
 129 * position and the full data block (ID and writer data) is stored at the
 130 * beginning of the byte array. The referencing blk_lpos will point to the
 131 * ID before the wrap and the next data block will be at the logical
 132 * position adjacent the full data block after the wrap.
 133 *
 134 * Data rings have a @tail_lpos that points to the beginning of the oldest
 135 * data block and a @head_lpos that points to the logical position of the
 136 * next (not yet existing) data block.
 137 *
 138 * When a new data block should be created (and the ring is full), tail data
 139 * blocks will first be invalidated by putting their associated descriptors
 140 * into the reusable state and then pushing the @tail_lpos forward beyond
 141 * them. Then the @head_lpos is pushed forward and is associated with a new
 142 * descriptor. If a data block is not valid, the @tail_lpos cannot be
 143 * advanced beyond it.
 144 *
 145 * Info Array
 146 * ~~~~~~~~~~
 147 * The general meta data of printk records are stored in printk_info structs,
 148 * stored in an array with the same number of elements as the descriptor ring.
 149 * Each info corresponds to the descriptor of the same index in the
 150 * descriptor ring. Info validity is confirmed by evaluating the corresponding
 151 * descriptor before and after loading the info.
 152 *
 153 * Usage
 154 * -----
 155 * Here are some simple examples demonstrating writers and readers. For the
 156 * examples a global ringbuffer (test_rb) is available (which is not the
 157 * actual ringbuffer used by printk)::
 158 *
 159 *	DEFINE_PRINTKRB(test_rb, 15, 5);
 160 *
 161 * This ringbuffer allows up to 32768 records (2 ^ 15) and has a size of
 162 * 1 MiB (2 ^ (15 + 5)) for text data.
 163 *
 164 * Sample writer code::
 165 *
 166 *	const char *textstr = "message text";
 167 *	struct prb_reserved_entry e;
 168 *	struct printk_record r;
 169 *
 170 *	// specify how much to allocate
 171 *	prb_rec_init_wr(&r, strlen(textstr) + 1);
 172 *
 173 *	if (prb_reserve(&e, &test_rb, &r)) {
 174 *		snprintf(r.text_buf, r.text_buf_size, "%s", textstr);
 175 *
 176 *		r.info->text_len = strlen(textstr);
 177 *		r.info->ts_nsec = local_clock();
 178 *		r.info->caller_id = printk_caller_id();
 179 *
 180 *		// commit and finalize the record
 181 *		prb_final_commit(&e);
 182 *	}
 183 *
 184 * Note that additional writer functions are available to extend a record
 185 * after it has been committed but not yet finalized. This can be done as
 186 * long as no new records have been reserved and the caller is the same.
 187 *
 188 * Sample writer code (record extending)::
 189 *
 190 *		// alternate rest of previous example
 191 *
 192 *		r.info->text_len = strlen(textstr);
 193 *		r.info->ts_nsec = local_clock();
 194 *		r.info->caller_id = printk_caller_id();
 195 *
 196 *		// commit the record (but do not finalize yet)
 197 *		prb_commit(&e);
 198 *	}
 199 *
 200 *	...
 201 *
 202 *	// specify additional 5 bytes text space to extend
 203 *	prb_rec_init_wr(&r, 5);
 204 *
 205 *	// try to extend, but only if it does not exceed 32 bytes
 206 *	if (prb_reserve_in_last(&e, &test_rb, &r, printk_caller_id()), 32) {
 207 *		snprintf(&r.text_buf[r.info->text_len],
 208 *			 r.text_buf_size - r.info->text_len, "hello");
 209 *
 210 *		r.info->text_len += 5;
 211 *
 212 *		// commit and finalize the record
 213 *		prb_final_commit(&e);
 214 *	}
 215 *
 216 * Sample reader code::
 217 *
 218 *	struct printk_info info;
 219 *	struct printk_record r;
 220 *	char text_buf[32];
 221 *	u64 seq;
 222 *
 223 *	prb_rec_init_rd(&r, &info, &text_buf[0], sizeof(text_buf));
 224 *
 225 *	prb_for_each_record(0, &test_rb, &seq, &r) {
 226 *		if (info.seq != seq)
 227 *			pr_warn("lost %llu records\n", info.seq - seq);
 228 *
 229 *		if (info.text_len > r.text_buf_size) {
 230 *			pr_warn("record %llu text truncated\n", info.seq);
 231 *			text_buf[r.text_buf_size - 1] = 0;
 232 *		}
 233 *
 234 *		pr_info("%llu: %llu: %s\n", info.seq, info.ts_nsec,
 235 *			&text_buf[0]);
 236 *	}
 237 *
 238 * Note that additional less convenient reader functions are available to
 239 * allow complex record access.
 240 *
 241 * ABA Issues
 242 * ~~~~~~~~~~
 243 * To help avoid ABA issues, descriptors are referenced by IDs (array index
 244 * values combined with tagged bits counting array wraps) and data blocks are
 245 * referenced by logical positions (array index values combined with tagged
 246 * bits counting array wraps). However, on 32-bit systems the number of
 247 * tagged bits is relatively small such that an ABA incident is (at least
 248 * theoretically) possible. For example, if 4 million maximally sized (1KiB)
 249 * printk messages were to occur in NMI context on a 32-bit system, the
 250 * interrupted context would not be able to recognize that the 32-bit integer
 251 * completely wrapped and thus represents a different data block than the one
 252 * the interrupted context expects.
 253 *
 254 * To help combat this possibility, additional state checking is performed
 255 * (such as using cmpxchg() even though set() would suffice). These extra
 256 * checks are commented as such and will hopefully catch any ABA issue that
 257 * a 32-bit system might experience.
 258 *
 259 * Memory Barriers
 260 * ~~~~~~~~~~~~~~~
 261 * Multiple memory barriers are used. To simplify proving correctness and
 262 * generating litmus tests, lines of code related to memory barriers
 263 * (loads, stores, and the associated memory barriers) are labeled::
 264 *
 265 *	LMM(function:letter)
 266 *
 267 * Comments reference the labels using only the "function:letter" part.
 268 *
 269 * The memory barrier pairs and their ordering are:
 270 *
 271 *   desc_reserve:D / desc_reserve:B
 272 *     push descriptor tail (id), then push descriptor head (id)
 273 *
 274 *   desc_reserve:D / data_push_tail:B
 275 *     push data tail (lpos), then set new descriptor reserved (state)
 276 *
 277 *   desc_reserve:D / desc_push_tail:C
 278 *     push descriptor tail (id), then set new descriptor reserved (state)
 279 *
 280 *   desc_reserve:D / prb_first_seq:C
 281 *     push descriptor tail (id), then set new descriptor reserved (state)
 282 *
 283 *   desc_reserve:F / desc_read:D
 284 *     set new descriptor id and reserved (state), then allow writer changes
 285 *
 286 *   data_alloc:A (or data_realloc:A) / desc_read:D
 287 *     set old descriptor reusable (state), then modify new data block area
 288 *
 289 *   data_alloc:A (or data_realloc:A) / data_push_tail:B
 290 *     push data tail (lpos), then modify new data block area
 291 *
 292 *   _prb_commit:B / desc_read:B
 293 *     store writer changes, then set new descriptor committed (state)
 294 *
 295 *   desc_reopen_last:A / _prb_commit:B
 296 *     set descriptor reserved (state), then read descriptor data
 297 *
 298 *   _prb_commit:B / desc_reserve:D
 299 *     set new descriptor committed (state), then check descriptor head (id)
 300 *
 301 *   data_push_tail:D / data_push_tail:A
 302 *     set descriptor reusable (state), then push data tail (lpos)
 303 *
 304 *   desc_push_tail:B / desc_reserve:D
 305 *     set descriptor reusable (state), then push descriptor tail (id)
 306 */
 307
 308#define DATA_SIZE(data_ring)		_DATA_SIZE((data_ring)->size_bits)
 309#define DATA_SIZE_MASK(data_ring)	(DATA_SIZE(data_ring) - 1)
 310
 311#define DESCS_COUNT(desc_ring)		_DESCS_COUNT((desc_ring)->count_bits)
 312#define DESCS_COUNT_MASK(desc_ring)	(DESCS_COUNT(desc_ring) - 1)
 313
 314/* Determine the data array index from a logical position. */
 315#define DATA_INDEX(data_ring, lpos)	((lpos) & DATA_SIZE_MASK(data_ring))
 316
 317/* Determine the desc array index from an ID or sequence number. */
 318#define DESC_INDEX(desc_ring, n)	((n) & DESCS_COUNT_MASK(desc_ring))
 319
 320/* Determine how many times the data array has wrapped. */
 321#define DATA_WRAPS(data_ring, lpos)	((lpos) >> (data_ring)->size_bits)
 322
 323/* Determine if a logical position refers to a data-less block. */
 324#define LPOS_DATALESS(lpos)		((lpos) & 1UL)
 325#define BLK_DATALESS(blk)		(LPOS_DATALESS((blk)->begin) && \
 326					 LPOS_DATALESS((blk)->next))
 327
 328/* Get the logical position at index 0 of the current wrap. */
 329#define DATA_THIS_WRAP_START_LPOS(data_ring, lpos) \
 330((lpos) & ~DATA_SIZE_MASK(data_ring))
 331
 332/* Get the ID for the same index of the previous wrap as the given ID. */
 333#define DESC_ID_PREV_WRAP(desc_ring, id) \
 334DESC_ID((id) - DESCS_COUNT(desc_ring))
 335
 336/*
 337 * A data block: mapped directly to the beginning of the data block area
 338 * specified as a logical position within the data ring.
 339 *
 340 * @id:   the ID of the associated descriptor
 341 * @data: the writer data
 342 *
 343 * Note that the size of a data block is only known by its associated
 344 * descriptor.
 345 */
 346struct prb_data_block {
 347	unsigned long	id;
 348	char		data[];
 349};
 350
 351/*
 352 * Return the descriptor associated with @n. @n can be either a
 353 * descriptor ID or a sequence number.
 354 */
 355static struct prb_desc *to_desc(struct prb_desc_ring *desc_ring, u64 n)
 356{
 357	return &desc_ring->descs[DESC_INDEX(desc_ring, n)];
 358}
 359
 360/*
 361 * Return the printk_info associated with @n. @n can be either a
 362 * descriptor ID or a sequence number.
 363 */
 364static struct printk_info *to_info(struct prb_desc_ring *desc_ring, u64 n)
 365{
 366	return &desc_ring->infos[DESC_INDEX(desc_ring, n)];
 367}
 368
 369static struct prb_data_block *to_block(struct prb_data_ring *data_ring,
 370				       unsigned long begin_lpos)
 371{
 372	return (void *)&data_ring->data[DATA_INDEX(data_ring, begin_lpos)];
 373}
 374
 375/*
 376 * Increase the data size to account for data block meta data plus any
 377 * padding so that the adjacent data block is aligned on the ID size.
 378 */
 379static unsigned int to_blk_size(unsigned int size)
 380{
 381	struct prb_data_block *db = NULL;
 382
 383	size += sizeof(*db);
 384	size = ALIGN(size, sizeof(db->id));
 385	return size;
 386}
 387
 388/*
 389 * Sanity checker for reserve size. The ringbuffer code assumes that a data
 390 * block does not exceed the maximum possible size that could fit within the
 391 * ringbuffer. This function provides that basic size check so that the
 392 * assumption is safe.
 393 */
 394static bool data_check_size(struct prb_data_ring *data_ring, unsigned int size)
 395{
 396	struct prb_data_block *db = NULL;
 397
 398	if (size == 0)
 399		return true;
 400
 401	/*
 402	 * Ensure the alignment padded size could possibly fit in the data
 403	 * array. The largest possible data block must still leave room for
 404	 * at least the ID of the next block.
 405	 */
 406	size = to_blk_size(size);
 407	if (size > DATA_SIZE(data_ring) - sizeof(db->id))
 408		return false;
 409
 410	return true;
 411}
 412
 413/* Query the state of a descriptor. */
 414static enum desc_state get_desc_state(unsigned long id,
 415				      unsigned long state_val)
 416{
 417	if (id != DESC_ID(state_val))
 418		return desc_miss;
 419
 420	return DESC_STATE(state_val);
 421}
 422
 423/*
 424 * Get a copy of a specified descriptor and return its queried state. If the
 425 * descriptor is in an inconsistent state (miss or reserved), the caller can
 426 * only expect the descriptor's @state_var field to be valid.
 427 *
 428 * The sequence number and caller_id can be optionally retrieved. Like all
 429 * non-state_var data, they are only valid if the descriptor is in a
 430 * consistent state.
 431 */
 432static enum desc_state desc_read(struct prb_desc_ring *desc_ring,
 433				 unsigned long id, struct prb_desc *desc_out,
 434				 u64 *seq_out, u32 *caller_id_out)
 435{
 436	struct printk_info *info = to_info(desc_ring, id);
 437	struct prb_desc *desc = to_desc(desc_ring, id);
 438	atomic_long_t *state_var = &desc->state_var;
 439	enum desc_state d_state;
 440	unsigned long state_val;
 441
 442	/* Check the descriptor state. */
 443	state_val = atomic_long_read(state_var); /* LMM(desc_read:A) */
 444	d_state = get_desc_state(id, state_val);
 445	if (d_state == desc_miss || d_state == desc_reserved) {
 446		/*
 447		 * The descriptor is in an inconsistent state. Set at least
 448		 * @state_var so that the caller can see the details of
 449		 * the inconsistent state.
 450		 */
 451		goto out;
 452	}
 453
 454	/*
 455	 * Guarantee the state is loaded before copying the descriptor
 456	 * content. This avoids copying obsolete descriptor content that might
 457	 * not apply to the descriptor state. This pairs with _prb_commit:B.
 458	 *
 459	 * Memory barrier involvement:
 460	 *
 461	 * If desc_read:A reads from _prb_commit:B, then desc_read:C reads
 462	 * from _prb_commit:A.
 463	 *
 464	 * Relies on:
 465	 *
 466	 * WMB from _prb_commit:A to _prb_commit:B
 467	 *    matching
 468	 * RMB from desc_read:A to desc_read:C
 469	 */
 470	smp_rmb(); /* LMM(desc_read:B) */
 471
 472	/*
 473	 * Copy the descriptor data. The data is not valid until the
 474	 * state has been re-checked. A memcpy() for all of @desc
 475	 * cannot be used because of the atomic_t @state_var field.
 476	 */
 477	memcpy(&desc_out->text_blk_lpos, &desc->text_blk_lpos,
 478	       sizeof(desc_out->text_blk_lpos)); /* LMM(desc_read:C) */
 
 
 479	if (seq_out)
 480		*seq_out = info->seq; /* also part of desc_read:C */
 481	if (caller_id_out)
 482		*caller_id_out = info->caller_id; /* also part of desc_read:C */
 483
 484	/*
 485	 * 1. Guarantee the descriptor content is loaded before re-checking
 486	 *    the state. This avoids reading an obsolete descriptor state
 487	 *    that may not apply to the copied content. This pairs with
 488	 *    desc_reserve:F.
 489	 *
 490	 *    Memory barrier involvement:
 491	 *
 492	 *    If desc_read:C reads from desc_reserve:G, then desc_read:E
 493	 *    reads from desc_reserve:F.
 494	 *
 495	 *    Relies on:
 496	 *
 497	 *    WMB from desc_reserve:F to desc_reserve:G
 498	 *       matching
 499	 *    RMB from desc_read:C to desc_read:E
 500	 *
 501	 * 2. Guarantee the record data is loaded before re-checking the
 502	 *    state. This avoids reading an obsolete descriptor state that may
 503	 *    not apply to the copied data. This pairs with data_alloc:A and
 504	 *    data_realloc:A.
 505	 *
 506	 *    Memory barrier involvement:
 507	 *
 508	 *    If copy_data:A reads from data_alloc:B, then desc_read:E
 509	 *    reads from desc_make_reusable:A.
 510	 *
 511	 *    Relies on:
 512	 *
 513	 *    MB from desc_make_reusable:A to data_alloc:B
 514	 *       matching
 515	 *    RMB from desc_read:C to desc_read:E
 516	 *
 517	 *    Note: desc_make_reusable:A and data_alloc:B can be different
 518	 *          CPUs. However, the data_alloc:B CPU (which performs the
 519	 *          full memory barrier) must have previously seen
 520	 *          desc_make_reusable:A.
 521	 */
 522	smp_rmb(); /* LMM(desc_read:D) */
 523
 524	/*
 525	 * The data has been copied. Return the current descriptor state,
 526	 * which may have changed since the load above.
 527	 */
 528	state_val = atomic_long_read(state_var); /* LMM(desc_read:E) */
 529	d_state = get_desc_state(id, state_val);
 530out:
 531	atomic_long_set(&desc_out->state_var, state_val);
 
 532	return d_state;
 533}
 534
 535/*
 536 * Take a specified descriptor out of the finalized state by attempting
 537 * the transition from finalized to reusable. Either this context or some
 538 * other context will have been successful.
 539 */
 540static void desc_make_reusable(struct prb_desc_ring *desc_ring,
 541			       unsigned long id)
 542{
 543	unsigned long val_finalized = DESC_SV(id, desc_finalized);
 544	unsigned long val_reusable = DESC_SV(id, desc_reusable);
 545	struct prb_desc *desc = to_desc(desc_ring, id);
 546	atomic_long_t *state_var = &desc->state_var;
 547
 548	atomic_long_cmpxchg_relaxed(state_var, val_finalized,
 549				    val_reusable); /* LMM(desc_make_reusable:A) */
 550}
 551
 552/*
 553 * Given the text data ring, put the associated descriptor of each
 554 * data block from @lpos_begin until @lpos_end into the reusable state.
 555 *
 556 * If there is any problem making the associated descriptor reusable, either
 557 * the descriptor has not yet been finalized or another writer context has
 558 * already pushed the tail lpos past the problematic data block. Regardless,
 559 * on error the caller can re-load the tail lpos to determine the situation.
 560 */
 561static bool data_make_reusable(struct printk_ringbuffer *rb,
 562			       unsigned long lpos_begin,
 563			       unsigned long lpos_end,
 564			       unsigned long *lpos_out)
 565{
 566
 567	struct prb_data_ring *data_ring = &rb->text_data_ring;
 568	struct prb_desc_ring *desc_ring = &rb->desc_ring;
 569	struct prb_data_block *blk;
 570	enum desc_state d_state;
 571	struct prb_desc desc;
 572	struct prb_data_blk_lpos *blk_lpos = &desc.text_blk_lpos;
 573	unsigned long id;
 574
 575	/* Loop until @lpos_begin has advanced to or beyond @lpos_end. */
 576	while ((lpos_end - lpos_begin) - 1 < DATA_SIZE(data_ring)) {
 577		blk = to_block(data_ring, lpos_begin);
 578
 579		/*
 580		 * Load the block ID from the data block. This is a data race
 581		 * against a writer that may have newly reserved this data
 582		 * area. If the loaded value matches a valid descriptor ID,
 583		 * the blk_lpos of that descriptor will be checked to make
 584		 * sure it points back to this data block. If the check fails,
 585		 * the data area has been recycled by another writer.
 586		 */
 587		id = blk->id; /* LMM(data_make_reusable:A) */
 588
 589		d_state = desc_read(desc_ring, id, &desc,
 590				    NULL, NULL); /* LMM(data_make_reusable:B) */
 591
 592		switch (d_state) {
 593		case desc_miss:
 594		case desc_reserved:
 595		case desc_committed:
 596			return false;
 597		case desc_finalized:
 598			/*
 599			 * This data block is invalid if the descriptor
 600			 * does not point back to it.
 601			 */
 602			if (blk_lpos->begin != lpos_begin)
 603				return false;
 604			desc_make_reusable(desc_ring, id);
 605			break;
 606		case desc_reusable:
 607			/*
 608			 * This data block is invalid if the descriptor
 609			 * does not point back to it.
 610			 */
 611			if (blk_lpos->begin != lpos_begin)
 612				return false;
 613			break;
 614		}
 615
 616		/* Advance @lpos_begin to the next data block. */
 617		lpos_begin = blk_lpos->next;
 618	}
 619
 620	*lpos_out = lpos_begin;
 621	return true;
 622}
 623
 624/*
 625 * Advance the data ring tail to at least @lpos. This function puts
 626 * descriptors into the reusable state if the tail is pushed beyond
 627 * their associated data block.
 628 */
 629static bool data_push_tail(struct printk_ringbuffer *rb, unsigned long lpos)
 630{
 631	struct prb_data_ring *data_ring = &rb->text_data_ring;
 632	unsigned long tail_lpos_new;
 633	unsigned long tail_lpos;
 634	unsigned long next_lpos;
 635
 636	/* If @lpos is from a data-less block, there is nothing to do. */
 637	if (LPOS_DATALESS(lpos))
 638		return true;
 639
 640	/*
 641	 * Any descriptor states that have transitioned to reusable due to the
 642	 * data tail being pushed to this loaded value will be visible to this
 643	 * CPU. This pairs with data_push_tail:D.
 644	 *
 645	 * Memory barrier involvement:
 646	 *
 647	 * If data_push_tail:A reads from data_push_tail:D, then this CPU can
 648	 * see desc_make_reusable:A.
 649	 *
 650	 * Relies on:
 651	 *
 652	 * MB from desc_make_reusable:A to data_push_tail:D
 653	 *    matches
 654	 * READFROM from data_push_tail:D to data_push_tail:A
 655	 *    thus
 656	 * READFROM from desc_make_reusable:A to this CPU
 657	 */
 658	tail_lpos = atomic_long_read(&data_ring->tail_lpos); /* LMM(data_push_tail:A) */
 659
 660	/*
 661	 * Loop until the tail lpos is at or beyond @lpos. This condition
 662	 * may already be satisfied, resulting in no full memory barrier
 663	 * from data_push_tail:D being performed. However, since this CPU
 664	 * sees the new tail lpos, any descriptor states that transitioned to
 665	 * the reusable state must already be visible.
 666	 */
 667	while ((lpos - tail_lpos) - 1 < DATA_SIZE(data_ring)) {
 668		/*
 669		 * Make all descriptors reusable that are associated with
 670		 * data blocks before @lpos.
 671		 */
 672		if (!data_make_reusable(rb, tail_lpos, lpos, &next_lpos)) {
 673			/*
 674			 * 1. Guarantee the block ID loaded in
 675			 *    data_make_reusable() is performed before
 676			 *    reloading the tail lpos. The failed
 677			 *    data_make_reusable() may be due to a newly
 678			 *    recycled data area causing the tail lpos to
 679			 *    have been previously pushed. This pairs with
 680			 *    data_alloc:A and data_realloc:A.
 681			 *
 682			 *    Memory barrier involvement:
 683			 *
 684			 *    If data_make_reusable:A reads from data_alloc:B,
 685			 *    then data_push_tail:C reads from
 686			 *    data_push_tail:D.
 687			 *
 688			 *    Relies on:
 689			 *
 690			 *    MB from data_push_tail:D to data_alloc:B
 691			 *       matching
 692			 *    RMB from data_make_reusable:A to
 693			 *    data_push_tail:C
 694			 *
 695			 *    Note: data_push_tail:D and data_alloc:B can be
 696			 *          different CPUs. However, the data_alloc:B
 697			 *          CPU (which performs the full memory
 698			 *          barrier) must have previously seen
 699			 *          data_push_tail:D.
 700			 *
 701			 * 2. Guarantee the descriptor state loaded in
 702			 *    data_make_reusable() is performed before
 703			 *    reloading the tail lpos. The failed
 704			 *    data_make_reusable() may be due to a newly
 705			 *    recycled descriptor causing the tail lpos to
 706			 *    have been previously pushed. This pairs with
 707			 *    desc_reserve:D.
 708			 *
 709			 *    Memory barrier involvement:
 710			 *
 711			 *    If data_make_reusable:B reads from
 712			 *    desc_reserve:F, then data_push_tail:C reads
 713			 *    from data_push_tail:D.
 714			 *
 715			 *    Relies on:
 716			 *
 717			 *    MB from data_push_tail:D to desc_reserve:F
 718			 *       matching
 719			 *    RMB from data_make_reusable:B to
 720			 *    data_push_tail:C
 721			 *
 722			 *    Note: data_push_tail:D and desc_reserve:F can
 723			 *          be different CPUs. However, the
 724			 *          desc_reserve:F CPU (which performs the
 725			 *          full memory barrier) must have previously
 726			 *          seen data_push_tail:D.
 727			 */
 728			smp_rmb(); /* LMM(data_push_tail:B) */
 729
 730			tail_lpos_new = atomic_long_read(&data_ring->tail_lpos
 731							); /* LMM(data_push_tail:C) */
 732			if (tail_lpos_new == tail_lpos)
 733				return false;
 734
 735			/* Another CPU pushed the tail. Try again. */
 736			tail_lpos = tail_lpos_new;
 737			continue;
 738		}
 739
 740		/*
 741		 * Guarantee any descriptor states that have transitioned to
 742		 * reusable are stored before pushing the tail lpos. A full
 743		 * memory barrier is needed since other CPUs may have made
 744		 * the descriptor states reusable. This pairs with
 745		 * data_push_tail:A.
 746		 */
 747		if (atomic_long_try_cmpxchg(&data_ring->tail_lpos, &tail_lpos,
 748					    next_lpos)) { /* LMM(data_push_tail:D) */
 749			break;
 750		}
 751	}
 752
 753	return true;
 754}
 755
 756/*
 757 * Advance the desc ring tail. This function advances the tail by one
 758 * descriptor, thus invalidating the oldest descriptor. Before advancing
 759 * the tail, the tail descriptor is made reusable and all data blocks up to
 760 * and including the descriptor's data block are invalidated (i.e. the data
 761 * ring tail is pushed past the data block of the descriptor being made
 762 * reusable).
 763 */
 764static bool desc_push_tail(struct printk_ringbuffer *rb,
 765			   unsigned long tail_id)
 766{
 767	struct prb_desc_ring *desc_ring = &rb->desc_ring;
 768	enum desc_state d_state;
 769	struct prb_desc desc;
 770
 771	d_state = desc_read(desc_ring, tail_id, &desc, NULL, NULL);
 772
 773	switch (d_state) {
 774	case desc_miss:
 775		/*
 776		 * If the ID is exactly 1 wrap behind the expected, it is
 777		 * in the process of being reserved by another writer and
 778		 * must be considered reserved.
 779		 */
 780		if (DESC_ID(atomic_long_read(&desc.state_var)) ==
 781		    DESC_ID_PREV_WRAP(desc_ring, tail_id)) {
 782			return false;
 783		}
 784
 785		/*
 786		 * The ID has changed. Another writer must have pushed the
 787		 * tail and recycled the descriptor already. Success is
 788		 * returned because the caller is only interested in the
 789		 * specified tail being pushed, which it was.
 790		 */
 791		return true;
 792	case desc_reserved:
 793	case desc_committed:
 794		return false;
 795	case desc_finalized:
 796		desc_make_reusable(desc_ring, tail_id);
 797		break;
 798	case desc_reusable:
 799		break;
 800	}
 801
 802	/*
 803	 * Data blocks must be invalidated before their associated
 804	 * descriptor can be made available for recycling. Invalidating
 805	 * them later is not possible because there is no way to trust
 806	 * data blocks once their associated descriptor is gone.
 807	 */
 808
 809	if (!data_push_tail(rb, desc.text_blk_lpos.next))
 810		return false;
 811
 812	/*
 813	 * Check the next descriptor after @tail_id before pushing the tail
 814	 * to it because the tail must always be in a finalized or reusable
 815	 * state. The implementation of prb_first_seq() relies on this.
 816	 *
 817	 * A successful read implies that the next descriptor is less than or
 818	 * equal to @head_id so there is no risk of pushing the tail past the
 819	 * head.
 820	 */
 821	d_state = desc_read(desc_ring, DESC_ID(tail_id + 1), &desc,
 822			    NULL, NULL); /* LMM(desc_push_tail:A) */
 823
 824	if (d_state == desc_finalized || d_state == desc_reusable) {
 825		/*
 826		 * Guarantee any descriptor states that have transitioned to
 827		 * reusable are stored before pushing the tail ID. This allows
 828		 * verifying the recycled descriptor state. A full memory
 829		 * barrier is needed since other CPUs may have made the
 830		 * descriptor states reusable. This pairs with desc_reserve:D.
 831		 */
 832		atomic_long_cmpxchg(&desc_ring->tail_id, tail_id,
 833				    DESC_ID(tail_id + 1)); /* LMM(desc_push_tail:B) */
 834	} else {
 835		/*
 836		 * Guarantee the last state load from desc_read() is before
 837		 * reloading @tail_id in order to see a new tail ID in the
 838		 * case that the descriptor has been recycled. This pairs
 839		 * with desc_reserve:D.
 840		 *
 841		 * Memory barrier involvement:
 842		 *
 843		 * If desc_push_tail:A reads from desc_reserve:F, then
 844		 * desc_push_tail:D reads from desc_push_tail:B.
 845		 *
 846		 * Relies on:
 847		 *
 848		 * MB from desc_push_tail:B to desc_reserve:F
 849		 *    matching
 850		 * RMB from desc_push_tail:A to desc_push_tail:D
 851		 *
 852		 * Note: desc_push_tail:B and desc_reserve:F can be different
 853		 *       CPUs. However, the desc_reserve:F CPU (which performs
 854		 *       the full memory barrier) must have previously seen
 855		 *       desc_push_tail:B.
 856		 */
 857		smp_rmb(); /* LMM(desc_push_tail:C) */
 858
 859		/*
 860		 * Re-check the tail ID. The descriptor following @tail_id is
 861		 * not in an allowed tail state. But if the tail has since
 862		 * been moved by another CPU, then it does not matter.
 863		 */
 864		if (atomic_long_read(&desc_ring->tail_id) == tail_id) /* LMM(desc_push_tail:D) */
 865			return false;
 866	}
 867
 868	return true;
 869}
 870
 871/* Reserve a new descriptor, invalidating the oldest if necessary. */
 872static bool desc_reserve(struct printk_ringbuffer *rb, unsigned long *id_out)
 873{
 874	struct prb_desc_ring *desc_ring = &rb->desc_ring;
 875	unsigned long prev_state_val;
 876	unsigned long id_prev_wrap;
 877	struct prb_desc *desc;
 878	unsigned long head_id;
 879	unsigned long id;
 880
 881	head_id = atomic_long_read(&desc_ring->head_id); /* LMM(desc_reserve:A) */
 882
 883	do {
 884		id = DESC_ID(head_id + 1);
 885		id_prev_wrap = DESC_ID_PREV_WRAP(desc_ring, id);
 886
 887		/*
 888		 * Guarantee the head ID is read before reading the tail ID.
 889		 * Since the tail ID is updated before the head ID, this
 890		 * guarantees that @id_prev_wrap is never ahead of the tail
 891		 * ID. This pairs with desc_reserve:D.
 892		 *
 893		 * Memory barrier involvement:
 894		 *
 895		 * If desc_reserve:A reads from desc_reserve:D, then
 896		 * desc_reserve:C reads from desc_push_tail:B.
 897		 *
 898		 * Relies on:
 899		 *
 900		 * MB from desc_push_tail:B to desc_reserve:D
 901		 *    matching
 902		 * RMB from desc_reserve:A to desc_reserve:C
 903		 *
 904		 * Note: desc_push_tail:B and desc_reserve:D can be different
 905		 *       CPUs. However, the desc_reserve:D CPU (which performs
 906		 *       the full memory barrier) must have previously seen
 907		 *       desc_push_tail:B.
 908		 */
 909		smp_rmb(); /* LMM(desc_reserve:B) */
 910
 911		if (id_prev_wrap == atomic_long_read(&desc_ring->tail_id
 912						    )) { /* LMM(desc_reserve:C) */
 913			/*
 914			 * Make space for the new descriptor by
 915			 * advancing the tail.
 916			 */
 917			if (!desc_push_tail(rb, id_prev_wrap))
 918				return false;
 919		}
 920
 921		/*
 922		 * 1. Guarantee the tail ID is read before validating the
 923		 *    recycled descriptor state. A read memory barrier is
 924		 *    sufficient for this. This pairs with desc_push_tail:B.
 925		 *
 926		 *    Memory barrier involvement:
 927		 *
 928		 *    If desc_reserve:C reads from desc_push_tail:B, then
 929		 *    desc_reserve:E reads from desc_make_reusable:A.
 930		 *
 931		 *    Relies on:
 932		 *
 933		 *    MB from desc_make_reusable:A to desc_push_tail:B
 934		 *       matching
 935		 *    RMB from desc_reserve:C to desc_reserve:E
 936		 *
 937		 *    Note: desc_make_reusable:A and desc_push_tail:B can be
 938		 *          different CPUs. However, the desc_push_tail:B CPU
 939		 *          (which performs the full memory barrier) must have
 940		 *          previously seen desc_make_reusable:A.
 941		 *
 942		 * 2. Guarantee the tail ID is stored before storing the head
 943		 *    ID. This pairs with desc_reserve:B.
 944		 *
 945		 * 3. Guarantee any data ring tail changes are stored before
 946		 *    recycling the descriptor. Data ring tail changes can
 947		 *    happen via desc_push_tail()->data_push_tail(). A full
 948		 *    memory barrier is needed since another CPU may have
 949		 *    pushed the data ring tails. This pairs with
 950		 *    data_push_tail:B.
 951		 *
 952		 * 4. Guarantee a new tail ID is stored before recycling the
 953		 *    descriptor. A full memory barrier is needed since
 954		 *    another CPU may have pushed the tail ID. This pairs
 955		 *    with desc_push_tail:C and this also pairs with
 956		 *    prb_first_seq:C.
 957		 *
 958		 * 5. Guarantee the head ID is stored before trying to
 959		 *    finalize the previous descriptor. This pairs with
 960		 *    _prb_commit:B.
 961		 */
 962	} while (!atomic_long_try_cmpxchg(&desc_ring->head_id, &head_id,
 963					  id)); /* LMM(desc_reserve:D) */
 964
 965	desc = to_desc(desc_ring, id);
 966
 967	/*
 968	 * If the descriptor has been recycled, verify the old state val.
 969	 * See "ABA Issues" about why this verification is performed.
 970	 */
 971	prev_state_val = atomic_long_read(&desc->state_var); /* LMM(desc_reserve:E) */
 972	if (prev_state_val &&
 973	    get_desc_state(id_prev_wrap, prev_state_val) != desc_reusable) {
 974		WARN_ON_ONCE(1);
 975		return false;
 976	}
 977
 978	/*
 979	 * Assign the descriptor a new ID and set its state to reserved.
 980	 * See "ABA Issues" about why cmpxchg() instead of set() is used.
 981	 *
 982	 * Guarantee the new descriptor ID and state is stored before making
 983	 * any other changes. A write memory barrier is sufficient for this.
 984	 * This pairs with desc_read:D.
 985	 */
 986	if (!atomic_long_try_cmpxchg(&desc->state_var, &prev_state_val,
 987			DESC_SV(id, desc_reserved))) { /* LMM(desc_reserve:F) */
 988		WARN_ON_ONCE(1);
 989		return false;
 990	}
 991
 992	/* Now data in @desc can be modified: LMM(desc_reserve:G) */
 993
 994	*id_out = id;
 995	return true;
 996}
 997
 998/* Determine the end of a data block. */
 999static unsigned long get_next_lpos(struct prb_data_ring *data_ring,
1000				   unsigned long lpos, unsigned int size)
1001{
1002	unsigned long begin_lpos;
1003	unsigned long next_lpos;
1004
1005	begin_lpos = lpos;
1006	next_lpos = lpos + size;
1007
1008	/* First check if the data block does not wrap. */
1009	if (DATA_WRAPS(data_ring, begin_lpos) == DATA_WRAPS(data_ring, next_lpos))
1010		return next_lpos;
1011
1012	/* Wrapping data blocks store their data at the beginning. */
1013	return (DATA_THIS_WRAP_START_LPOS(data_ring, next_lpos) + size);
1014}
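
/*
 * Illustrative example (hypothetical sizes, not part of the implementation):
 * with a 4096-byte text data ring, @lpos == 4000 and @size == 200 fall into
 * different wraps (4000 and 4200), so the block is placed at the start of
 * the next wrap and this function returns 4096 + 200 == 4296. The 96 bytes
 * from 4000 to 4095 are left unused (see space_used()).
 */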
1015
1016/*
1017 * Allocate a new data block, invalidating the oldest data block(s)
1018 * if necessary. This function also associates the data block with
1019 * a specified descriptor.
1020 */
1021static char *data_alloc(struct printk_ringbuffer *rb, unsigned int size,
1022			struct prb_data_blk_lpos *blk_lpos, unsigned long id)
1023{
1024	struct prb_data_ring *data_ring = &rb->text_data_ring;
1025	struct prb_data_block *blk;
1026	unsigned long begin_lpos;
1027	unsigned long next_lpos;
1028
1029	if (size == 0) {
1030		/* Specify a data-less block. */
1031		blk_lpos->begin = NO_LPOS;
1032		blk_lpos->next = NO_LPOS;
1033		return NULL;
1034	}
1035
1036	size = to_blk_size(size);
1037
1038	begin_lpos = atomic_long_read(&data_ring->head_lpos);
1039
1040	do {
1041		next_lpos = get_next_lpos(data_ring, begin_lpos, size);
1042
1043		if (!data_push_tail(rb, next_lpos - DATA_SIZE(data_ring))) {
1044			/* Failed to allocate, specify a data-less block. */
1045			blk_lpos->begin = FAILED_LPOS;
1046			blk_lpos->next = FAILED_LPOS;
1047			return NULL;
1048		}
1049
1050		/*
1051		 * 1. Guarantee any descriptor states that have transitioned
1052		 *    to reusable are stored before modifying the newly
1053		 *    allocated data area. A full memory barrier is needed
1054		 *    since other CPUs may have made the descriptor states
1055		 *    reusable. See data_push_tail:A about why the reusable
1056		 *    states are visible. This pairs with desc_read:D.
1057		 *
1058		 * 2. Guarantee any updated tail lpos is stored before
1059		 *    modifying the newly allocated data area. Another CPU may
1060		 *    be in data_make_reusable() and is reading a block ID
1061		 *    from this area. data_make_reusable() can handle reading
1062		 *    a garbage block ID value, but then it must be able to
1063		 *    load a new tail lpos. A full memory barrier is needed
1064		 *    since other CPUs may have updated the tail lpos. This
1065		 *    pairs with data_push_tail:B.
1066		 */
1067	} while (!atomic_long_try_cmpxchg(&data_ring->head_lpos, &begin_lpos,
1068					  next_lpos)); /* LMM(data_alloc:A) */
1069
1070	blk = to_block(data_ring, begin_lpos);
1071	blk->id = id; /* LMM(data_alloc:B) */
1072
1073	if (DATA_WRAPS(data_ring, begin_lpos) != DATA_WRAPS(data_ring, next_lpos)) {
1074		/* Wrapping data blocks store their data at the beginning. */
1075		blk = to_block(data_ring, 0);
1076
1077		/*
1078		 * Store the ID on the wrapped block for consistency.
1079		 * The printk_ringbuffer does not actually use it.
1080		 */
1081		blk->id = id;
1082	}
1083
1084	blk_lpos->begin = begin_lpos;
1085	blk_lpos->next = next_lpos;
1086
1087	return &blk->data[0];
1088}
1089
1090/*
1091 * Try to resize an existing data block associated with the descriptor
1092 * specified by @id. If the resized data block should become wrapped, it
1093 * copies the old data to the new data block. If @size yields a data block
1094 * with the same or less size, the data block is left as is.
1095 *
1096 * Fail if this is not the last allocated data block or if there is not
1097 * enough space or it is not possible to make enough space.
1098 *
1099 * Return a pointer to the beginning of the entire data buffer or NULL on
1100 * failure.
1101 */
1102static char *data_realloc(struct printk_ringbuffer *rb, unsigned int size,
1103			  struct prb_data_blk_lpos *blk_lpos, unsigned long id)
1104{
1105	struct prb_data_ring *data_ring = &rb->text_data_ring;
1106	struct prb_data_block *blk;
1107	unsigned long head_lpos;
1108	unsigned long next_lpos;
1109	bool wrapped;
1110
1111	/* Reallocation only works if @blk_lpos is the newest data block. */
1112	head_lpos = atomic_long_read(&data_ring->head_lpos);
1113	if (head_lpos != blk_lpos->next)
1114		return NULL;
1115
1116	/* Keep track if @blk_lpos was a wrapping data block. */
1117	wrapped = (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, blk_lpos->next));
1118
1119	size = to_blk_size(size);
1120
1121	next_lpos = get_next_lpos(data_ring, blk_lpos->begin, size);
1122
1123	/* If the data block does not increase, there is nothing to do. */
1124	if (head_lpos - next_lpos < DATA_SIZE(data_ring)) {
1125		if (wrapped)
1126			blk = to_block(data_ring, 0);
1127		else
1128			blk = to_block(data_ring, blk_lpos->begin);
1129		return &blk->data[0];
1130	}
1131
1132	if (!data_push_tail(rb, next_lpos - DATA_SIZE(data_ring)))
1133		return NULL;
1134
1135	/* The memory barrier involvement is the same as data_alloc:A. */
1136	if (!atomic_long_try_cmpxchg(&data_ring->head_lpos, &head_lpos,
1137				     next_lpos)) { /* LMM(data_realloc:A) */
1138		return NULL;
1139	}
1140
1141	blk = to_block(data_ring, blk_lpos->begin);
1142
1143	if (DATA_WRAPS(data_ring, blk_lpos->begin) != DATA_WRAPS(data_ring, next_lpos)) {
1144		struct prb_data_block *old_blk = blk;
1145
1146		/* Wrapping data blocks store their data at the beginning. */
1147		blk = to_block(data_ring, 0);
1148
1149		/*
1150		 * Store the ID on the wrapped block for consistency.
1151		 * The printk_ringbuffer does not actually use it.
1152		 */
1153		blk->id = id;
1154
1155		if (!wrapped) {
1156			/*
1157			 * Since the allocated space is now in the newly
1158			 * created wrapping data block, copy the content
1159			 * from the old data block.
1160			 */
1161			memcpy(&blk->data[0], &old_blk->data[0],
1162			       (blk_lpos->next - blk_lpos->begin) - sizeof(blk->id));
1163		}
1164	}
1165
1166	blk_lpos->next = next_lpos;
1167
1168	return &blk->data[0];
1169}
1170
1171/* Return the number of bytes used by a data block. */
1172static unsigned int space_used(struct prb_data_ring *data_ring,
1173			       struct prb_data_blk_lpos *blk_lpos)
1174{
1175	/* Data-less blocks take no space. */
1176	if (BLK_DATALESS(blk_lpos))
1177		return 0;
1178
1179	if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next)) {
1180		/* Data block does not wrap. */
1181		return (DATA_INDEX(data_ring, blk_lpos->next) -
1182			DATA_INDEX(data_ring, blk_lpos->begin));
1183	}
1184
1185	/*
1186	 * For wrapping data blocks, the trailing (wasted) space is
1187	 * also counted.
1188	 */
1189	return (DATA_INDEX(data_ring, blk_lpos->next) +
1190		DATA_SIZE(data_ring) - DATA_INDEX(data_ring, blk_lpos->begin));
1191}
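
/*
 * Illustrative example (continuing the hypothetical 4096-byte ring from the
 * get_next_lpos() example above): a wrapping block with @begin == 4000 and
 * @next == 4296 uses DATA_INDEX(4296) + 4096 - DATA_INDEX(4000)
 * == 200 + 4096 - 4000 == 296 bytes, i.e. the 200 bytes stored at the start
 * of the new wrap plus the 96 wasted trailing bytes of the old wrap.
 */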
1192
1193/*
1194 * Given @blk_lpos, return a pointer to the writer data from the data block
1195 * and calculate the size of the data part. A NULL pointer is returned if
1196 * @blk_lpos specifies values that could never be legal.
1197 *
1198 * This function (used by readers) performs strict validation on the lpos
1199 * values to possibly detect bugs in the writer code. A WARN_ON_ONCE() is
1200 * triggered if an internal error is detected.
1201 */
1202static const char *get_data(struct prb_data_ring *data_ring,
1203			    struct prb_data_blk_lpos *blk_lpos,
1204			    unsigned int *data_size)
1205{
1206	struct prb_data_block *db;
1207
1208	/* Data-less data block description. */
1209	if (BLK_DATALESS(blk_lpos)) {
1210		if (blk_lpos->begin == NO_LPOS && blk_lpos->next == NO_LPOS) {
1211			*data_size = 0;
1212			return "";
1213		}
1214		return NULL;
1215	}
1216
1217	/* Regular data block: @begin less than @next and in same wrap. */
1218	if (DATA_WRAPS(data_ring, blk_lpos->begin) == DATA_WRAPS(data_ring, blk_lpos->next) &&
1219	    blk_lpos->begin < blk_lpos->next) {
1220		db = to_block(data_ring, blk_lpos->begin);
1221		*data_size = blk_lpos->next - blk_lpos->begin;
1222
1223	/* Wrapping data block: @begin is one wrap behind @next. */
1224	} else if (DATA_WRAPS(data_ring, blk_lpos->begin + DATA_SIZE(data_ring)) ==
1225		   DATA_WRAPS(data_ring, blk_lpos->next)) {
1226		db = to_block(data_ring, 0);
1227		*data_size = DATA_INDEX(data_ring, blk_lpos->next);
1228
1229	/* Illegal block description. */
1230	} else {
1231		WARN_ON_ONCE(1);
1232		return NULL;
1233	}
1234
1235	/* A valid data block will always be aligned to the ID size. */
1236	if (WARN_ON_ONCE(blk_lpos->begin != ALIGN(blk_lpos->begin, sizeof(db->id))) ||
1237	    WARN_ON_ONCE(blk_lpos->next != ALIGN(blk_lpos->next, sizeof(db->id)))) {
1238		return NULL;
1239	}
1240
1241	/* A valid data block will always have at least an ID. */
1242	if (WARN_ON_ONCE(*data_size < sizeof(db->id)))
1243		return NULL;
1244
1245	/* Subtract block ID space from size to reflect data size. */
1246	*data_size -= sizeof(db->id);
1247
1248	return &db->data[0];
1249}
1250
1251/*
1252 * Attempt to transition the newest descriptor from committed back to reserved
1253 * so that the record can be modified by a writer again. This is only possible
1254 * if the descriptor is not yet finalized and the provided @caller_id matches.
1255 */
1256static struct prb_desc *desc_reopen_last(struct prb_desc_ring *desc_ring,
1257					 u32 caller_id, unsigned long *id_out)
1258{
1259	unsigned long prev_state_val;
1260	enum desc_state d_state;
1261	struct prb_desc desc;
1262	struct prb_desc *d;
1263	unsigned long id;
1264	u32 cid;
1265
1266	id = atomic_long_read(&desc_ring->head_id);
1267
1268	/*
1269	 * To reduce unnecessary reopening, first check if the descriptor
1270	 * state and caller ID are correct.
1271	 */
1272	d_state = desc_read(desc_ring, id, &desc, NULL, &cid);
1273	if (d_state != desc_committed || cid != caller_id)
1274		return NULL;
1275
1276	d = to_desc(desc_ring, id);
1277
1278	prev_state_val = DESC_SV(id, desc_committed);
1279
1280	/*
1281	 * Guarantee the reserved state is stored before reading any
1282	 * record data. A full memory barrier is needed because @state_var
1283	 * modification is followed by reading. This pairs with _prb_commit:B.
1284	 *
1285	 * Memory barrier involvement:
1286	 *
1287	 * If desc_reopen_last:A reads from _prb_commit:B, then
1288	 * prb_reserve_in_last:A reads from _prb_commit:A.
1289	 *
1290	 * Relies on:
1291	 *
1292	 * WMB from _prb_commit:A to _prb_commit:B
1293	 *    matching
1294	 * MB from desc_reopen_last:A to prb_reserve_in_last:A
1295	 */
1296	if (!atomic_long_try_cmpxchg(&d->state_var, &prev_state_val,
1297			DESC_SV(id, desc_reserved))) { /* LMM(desc_reopen_last:A) */
1298		return NULL;
1299	}
1300
1301	*id_out = id;
1302	return d;
1303}
1304
1305/**
1306 * prb_reserve_in_last() - Re-reserve and extend the space in the ringbuffer
1307 *                         used by the newest record.
1308 *
1309 * @e:         The entry structure to setup.
1310 * @rb:        The ringbuffer to re-reserve and extend data in.
1311 * @r:         The record structure to allocate buffers for.
1312 * @caller_id: The caller ID of the caller (reserving writer).
1313 * @max_size:  Fail if the extended size would be greater than this.
1314 *
1315 * This is the public function available to writers to re-reserve and extend
1316 * data.
1317 *
1318 * The writer specifies the text size to extend (not the new total size) by
1319 * setting the @text_buf_size field of @r. To ensure proper initialization
1320 * of @r, prb_rec_init_wr() should be used.
1321 *
1322 * This function will fail if @caller_id does not match the caller ID of the
1323 * newest record. In that case the caller must reserve new data using
1324 * prb_reserve().
1325 *
1326 * Context: Any context. Disables local interrupts on success.
1327 * Return: true if text data could be extended, otherwise false.
1328 *
1329 * On success:
1330 *
1331 *   - @r->text_buf points to the beginning of the entire text buffer.
1332 *
1333 *   - @r->text_buf_size is set to the new total size of the buffer.
1334 *
1335 *   - @r->info is not touched so that @r->info->text_len could be used
1336 *     to append the text.
1337 *
1338 *   - prb_record_text_space() can be used on @e to query the new
1339 *     actually used space.
1340 *
1341 * Important: All @r->info fields will already be set with the current values
1342 *            for the record. I.e. @r->info->text_len will be less than
1343 *            @text_buf_size. Writers can use @r->info->text_len to know
1344 *            where concatenation begins and writers should update
1345 *            @r->info->text_len after concatenating.
1346 */
1347bool prb_reserve_in_last(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
1348			 struct printk_record *r, u32 caller_id, unsigned int max_size)
1349{
1350	struct prb_desc_ring *desc_ring = &rb->desc_ring;
1351	struct printk_info *info;
1352	unsigned int data_size;
1353	struct prb_desc *d;
1354	unsigned long id;
1355
1356	local_irq_save(e->irqflags);
1357
1358	/* Transition the newest descriptor back to the reserved state. */
1359	d = desc_reopen_last(desc_ring, caller_id, &id);
1360	if (!d) {
1361		local_irq_restore(e->irqflags);
1362		goto fail_reopen;
1363	}
1364
1365	/* Now the writer has exclusive access: LMM(prb_reserve_in_last:A) */
1366
1367	info = to_info(desc_ring, id);
1368
1369	/*
1370	 * Set the @e fields here so that prb_commit() can be used if
1371	 * anything fails from now on.
1372	 */
1373	e->rb = rb;
1374	e->id = id;
1375
1376	/*
1377	 * desc_reopen_last() checked the caller_id, but there was no
1378	 * exclusive access at that point. The descriptor may have
1379	 * changed since then.
1380	 */
1381	if (caller_id != info->caller_id)
1382		goto fail;
1383
1384	if (BLK_DATALESS(&d->text_blk_lpos)) {
1385		if (WARN_ON_ONCE(info->text_len != 0)) {
1386			pr_warn_once("wrong text_len value (%hu, expecting 0)\n",
1387				     info->text_len);
1388			info->text_len = 0;
1389		}
1390
1391		if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
1392			goto fail;
1393
1394		if (r->text_buf_size > max_size)
1395			goto fail;
1396
1397		r->text_buf = data_alloc(rb, r->text_buf_size,
1398					 &d->text_blk_lpos, id);
1399	} else {
1400		if (!get_data(&rb->text_data_ring, &d->text_blk_lpos, &data_size))
1401			goto fail;
1402
1403		/*
1404		 * Increase the buffer size to include the original size. If
1405		 * the meta data (@text_len) is not sane, use the full data
1406		 * block size.
1407		 */
1408		if (WARN_ON_ONCE(info->text_len > data_size)) {
1409			pr_warn_once("wrong text_len value (%hu, expecting <=%u)\n",
1410				     info->text_len, data_size);
1411			info->text_len = data_size;
1412		}
1413		r->text_buf_size += info->text_len;
1414
1415		if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
1416			goto fail;
1417
1418		if (r->text_buf_size > max_size)
1419			goto fail;
1420
1421		r->text_buf = data_realloc(rb, r->text_buf_size,
1422					   &d->text_blk_lpos, id);
1423	}
1424	if (r->text_buf_size && !r->text_buf)
1425		goto fail;
1426
1427	r->info = info;
1428
1429	e->text_space = space_used(&rb->text_data_ring, &d->text_blk_lpos);
1430
1431	return true;
1432fail:
1433	prb_commit(e);
1434	/* prb_commit() re-enabled interrupts. */
1435fail_reopen:
1436	/* Make it clear to the caller that the re-reserve failed. */
1437	memset(r, 0, sizeof(*r));
1438	return false;
1439}
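
/*
 * Illustrative writer sketch for extending the newest record. This is not
 * part of the implementation; the ringbuffer @my_rb and the caller ID
 * @my_caller_id (which must match the caller ID of the newest record) are
 * hypothetical:
 *
 *	struct prb_reserved_entry e;
 *	struct printk_record r;
 *
 *	// request 5 additional bytes of text space
 *	prb_rec_init_wr(&r, 5);
 *
 *	// extend only if the new total size stays within 32 bytes
 *	if (prb_reserve_in_last(&e, &my_rb, &r, my_caller_id, 32)) {
 *		snprintf(&r.text_buf[r.info->text_len],
 *			 r.text_buf_size - r.info->text_len, "hello");
 *
 *		r.info->text_len += 5;
 *
 *		// commit and finalize the extended record
 *		prb_final_commit(&e);
 *	}
 */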
1440
1441/*
1442 * Attempt to finalize a specified descriptor. If this fails, the descriptor
1443 * is either already final or it will finalize itself when the writer commits.
1444 */
1445static void desc_make_final(struct prb_desc_ring *desc_ring, unsigned long id)
1446{
1447	unsigned long prev_state_val = DESC_SV(id, desc_committed);
1448	struct prb_desc *d = to_desc(desc_ring, id);
1449
1450	atomic_long_cmpxchg_relaxed(&d->state_var, prev_state_val,
1451			DESC_SV(id, desc_finalized)); /* LMM(desc_make_final:A) */
1452}
1453
1454/**
1455 * prb_reserve() - Reserve space in the ringbuffer.
1456 *
1457 * @e:  The entry structure to setup.
1458 * @rb: The ringbuffer to reserve data in.
1459 * @r:  The record structure to allocate buffers for.
1460 *
1461 * This is the public function available to writers to reserve data.
1462 *
1463 * The writer specifies the text size to reserve by setting the
1464 * @text_buf_size field of @r. To ensure proper initialization of @r,
1465 * prb_rec_init_wr() should be used.
1466 *
1467 * Context: Any context. Disables local interrupts on success.
1468 * Return: true if at least text data could be allocated, otherwise false.
1469 *
1470 * On success, the fields @info and @text_buf of @r will be set by this
1471 * function and should be filled in by the writer before committing. Also
1472 * on success, prb_record_text_space() can be used on @e to query the actual
1473 * space used for the text data block.
1474 *
1475 * Important: @info->text_len needs to be set correctly by the writer in
1476 *            order for data to be readable and/or extended. Its value
1477 *            is initialized to 0.
1478 */
1479bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
1480		 struct printk_record *r)
1481{
1482	struct prb_desc_ring *desc_ring = &rb->desc_ring;
1483	struct printk_info *info;
1484	struct prb_desc *d;
1485	unsigned long id;
1486	u64 seq;
1487
1488	if (!data_check_size(&rb->text_data_ring, r->text_buf_size))
1489		goto fail;
1490
1491	/*
1492	 * Descriptors in the reserved state act as blockers to all further
1493	 * reservations once the desc_ring has fully wrapped. Disable
1494	 * interrupts during the reserve/commit window in order to minimize
1495	 * the likelihood of this happening.
1496	 */
1497	local_irq_save(e->irqflags);
1498
1499	if (!desc_reserve(rb, &id)) {
1500		/* Descriptor reservation failures are tracked. */
1501		atomic_long_inc(&rb->fail);
1502		local_irq_restore(e->irqflags);
1503		goto fail;
1504	}
1505
1506	d = to_desc(desc_ring, id);
1507	info = to_info(desc_ring, id);
1508
1509	/*
1510	 * All @info fields (except @seq) are cleared and must be filled in
1511	 * by the writer. Save @seq before clearing because it is used to
1512	 * determine the new sequence number.
1513	 */
1514	seq = info->seq;
1515	memset(info, 0, sizeof(*info));
1516
1517	/*
1518	 * Set the @e fields here so that prb_commit() can be used if
1519	 * text data allocation fails.
1520	 */
1521	e->rb = rb;
1522	e->id = id;
1523
1524	/*
1525	 * Initialize the sequence number if it has "never been set".
1526	 * Otherwise just increment it by a full wrap.
1527	 *
1528	 * @seq is considered "never been set" if it has a value of 0,
1529	 * _except_ for @infos[0], which was specially setup by the ringbuffer
1530	 * initializer and therefore is always considered as set.
1531	 *
1532	 * See the "Bootstrap" comment block in printk_ringbuffer.h for
1533	 * details about how the initializer bootstraps the descriptors.
1534	 */
1535	if (seq == 0 && DESC_INDEX(desc_ring, id) != 0)
1536		info->seq = DESC_INDEX(desc_ring, id);
1537	else
1538		info->seq = seq + DESCS_COUNT(desc_ring);
1539
1540	/*
1541	 * New data is about to be reserved. Once that happens, previous
1542	 * descriptors are no longer able to be extended. Finalize the
1543	 * previous descriptor now so that it can be made available to
1544	 * readers. (For seq==0 there is no previous descriptor.)
1545	 */
1546	if (info->seq > 0)
1547		desc_make_final(desc_ring, DESC_ID(id - 1));
1548
1549	r->text_buf = data_alloc(rb, r->text_buf_size, &d->text_blk_lpos, id);
1550	/* If text data allocation fails, a data-less record is committed. */
1551	if (r->text_buf_size && !r->text_buf) {
1552		prb_commit(e);
1553		/* prb_commit() re-enabled interrupts. */
1554		goto fail;
1555	}
1556
1557	r->info = info;
1558
1559	/* Record full text space used by record. */
1560	e->text_space = space_used(&rb->text_data_ring, &d->text_blk_lpos);
1561
1562	return true;
1563fail:
1564	/* Make it clear to the caller that the reserve failed. */
1565	memset(r, 0, sizeof(*r));
1566	return false;
1567}
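
/*
 * Illustrative writer sketch. This is not part of the implementation; the
 * ringbuffer @my_rb is hypothetical:
 *
 *	const char *textstr = "message text";
 *	struct prb_reserved_entry e;
 *	struct printk_record r;
 *
 *	// request space for the text plus terminator
 *	prb_rec_init_wr(&r, strlen(textstr) + 1);
 *
 *	if (prb_reserve(&e, &my_rb, &r)) {
 *		snprintf(r.text_buf, r.text_buf_size, "%s", textstr);
 *
 *		// meta data (at least @text_len) must be set by the writer
 *		r.info->text_len = strlen(textstr);
 *		r.info->ts_nsec = local_clock();
 *
 *		// commit and finalize so readers can see the record
 *		prb_final_commit(&e);
 *	}
 */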
1568
1569/* Commit the data (possibly finalizing it) and restore interrupts. */
1570static void _prb_commit(struct prb_reserved_entry *e, unsigned long state_val)
1571{
1572	struct prb_desc_ring *desc_ring = &e->rb->desc_ring;
1573	struct prb_desc *d = to_desc(desc_ring, e->id);
1574	unsigned long prev_state_val = DESC_SV(e->id, desc_reserved);
1575
1576	/* Now the writer has finished all writing: LMM(_prb_commit:A) */
1577
1578	/*
1579	 * Set the descriptor as committed. See "ABA Issues" about why
1580	 * cmpxchg() instead of set() is used.
1581	 *
1582	 * 1. Guarantee all record data is stored before the descriptor state
1583	 *    is stored as committed. A write memory barrier is sufficient
1584	 *    for this. This pairs with desc_read:B and desc_reopen_last:A.
1585	 *
1586	 * 2. Guarantee the descriptor state is stored as committed before
1587	 *    re-checking the head ID in order to possibly finalize this
1588	 *    descriptor. This pairs with desc_reserve:D.
1589	 *
1590	 *    Memory barrier involvement:
1591	 *
1592	 *    If prb_commit:A reads from desc_reserve:D, then
1593	 *    desc_make_final:A reads from _prb_commit:B.
1594	 *
1595	 *    Relies on:
1596	 *
1597	 *    MB _prb_commit:B to prb_commit:A
1598	 *       matching
1599	 *    MB desc_reserve:D to desc_make_final:A
1600	 */
1601	if (!atomic_long_try_cmpxchg(&d->state_var, &prev_state_val,
1602			DESC_SV(e->id, state_val))) { /* LMM(_prb_commit:B) */
1603		WARN_ON_ONCE(1);
1604	}
1605
1606	/* Restore interrupts, the reserve/commit window is finished. */
1607	local_irq_restore(e->irqflags);
1608}
1609
1610/**
1611 * prb_commit() - Commit (previously reserved) data to the ringbuffer.
1612 *
1613 * @e: The entry containing the reserved data information.
1614 *
1615 * This is the public function available to writers to commit data.
1616 *
1617 * Note that the data is not yet available to readers until it is finalized.
1618 * Finalizing happens automatically when space for the next record is
1619 * reserved.
1620 *
1621 * See prb_final_commit() for a version of this function that finalizes
1622 * immediately.
1623 *
1624 * Context: Any context. Enables local interrupts.
1625 */
1626void prb_commit(struct prb_reserved_entry *e)
1627{
1628	struct prb_desc_ring *desc_ring = &e->rb->desc_ring;
1629	unsigned long head_id;
1630
1631	_prb_commit(e, desc_committed);
1632
1633	/*
1634	 * If this descriptor is no longer the head (i.e. a new record has
1635	 * been allocated), extending the data for this record is no longer
1636	 * allowed and therefore it must be finalized.
1637	 */
1638	head_id = atomic_long_read(&desc_ring->head_id); /* LMM(prb_commit:A) */
1639	if (head_id != e->id)
1640		desc_make_final(desc_ring, e->id);
1641}
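
/*
 * Illustrative sketch: if the record may later be extended via
 * prb_reserve_in_last(), commit without finalizing (hypothetical entry @e
 * from a successful prb_reserve(); not part of the implementation):
 *
 *	// commit, but leave the record extendable by the same caller
 *	prb_commit(&e);
 *
 * The record is finalized automatically once a newer record is reserved.
 */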
1642
1643/**
1644 * prb_final_commit() - Commit and finalize (previously reserved) data to
1645 *                      the ringbuffer.
1646 *
1647 * @e: The entry containing the reserved data information.
1648 *
1649 * This is the public function available to writers to commit+finalize data.
1650 *
1651 * By finalizing, the data is made immediately available to readers.
1652 *
1653 * This function should only be used if there are no intentions of extending
1654 * this data using prb_reserve_in_last().
1655 *
1656 * Context: Any context. Enables local interrupts.
1657 */
1658void prb_final_commit(struct prb_reserved_entry *e)
1659{
1660	_prb_commit(e, desc_finalized);
1661}
1662
1663/*
1664 * Count the number of lines in provided text. All text has at least 1 line
1665 * (even if @text_size is 0). Each '\n' processed is counted as an additional
1666 * line.
1667 */
1668static unsigned int count_lines(const char *text, unsigned int text_size)
1669{
1670	unsigned int next_size = text_size;
1671	unsigned int line_count = 1;
1672	const char *next = text;
1673
1674	while (next_size) {
1675		next = memchr(next, '\n', next_size);
1676		if (!next)
1677			break;
1678		line_count++;
1679		next++;
1680		next_size = text_size - (next - text);
1681	}
1682
1683	return line_count;
1684}
1685
1686/*
1687 * Given @blk_lpos, copy an expected @len of data into the provided buffer.
1688 * If @line_count is provided, count the number of lines in the data.
1689 *
1690 * This function (used by readers) performs strict validation on the data
1691 * size to possibly detect bugs in the writer code. A WARN_ON_ONCE() is
1692 * triggered if an internal error is detected.
1693 */
1694static bool copy_data(struct prb_data_ring *data_ring,
1695		      struct prb_data_blk_lpos *blk_lpos, u16 len, char *buf,
1696		      unsigned int buf_size, unsigned int *line_count)
1697{
1698	unsigned int data_size;
1699	const char *data;
1700
1701	/* Caller might not want any data. */
1702	if ((!buf || !buf_size) && !line_count)
1703		return true;
1704
1705	data = get_data(data_ring, blk_lpos, &data_size);
1706	if (!data)
1707		return false;
1708
1709	/*
1710	 * Actual cannot be less than expected. It can be more than expected
1711	 * because of the trailing alignment padding.
1712	 *
1713	 * Note that invalid @len values can occur because the caller loads
1714	 * the value during an allowed data race.
1715	 */
1716	if (data_size < (unsigned int)len)
1717		return false;
1718
1719	/* Caller interested in the line count? */
1720	if (line_count)
1721		*line_count = count_lines(data, len);
1722
1723	/* Caller interested in the data content? */
1724	if (!buf || !buf_size)
1725		return true;
1726
1727	data_size = min_t(u16, buf_size, len);
1728
1729	memcpy(&buf[0], data, data_size); /* LMM(copy_data:A) */
1730	return true;
1731}
1732
1733/*
1734 * This is an extended version of desc_read(). It gets a copy of a specified
1735 * descriptor. However, it also verifies that the record is finalized and has
1736 * the sequence number @seq. On success, 0 is returned.
1737 *
1738 * Error return values:
1739 * -EINVAL: A finalized record with sequence number @seq does not exist.
1740 * -ENOENT: A finalized record with sequence number @seq exists, but its data
1741 *          is not available. This is a valid record, so readers should
1742 *          continue with the next record.
1743 */
1744static int desc_read_finalized_seq(struct prb_desc_ring *desc_ring,
1745				   unsigned long id, u64 seq,
1746				   struct prb_desc *desc_out)
1747{
1748	struct prb_data_blk_lpos *blk_lpos = &desc_out->text_blk_lpos;
1749	enum desc_state d_state;
1750	u64 s;
1751
1752	d_state = desc_read(desc_ring, id, desc_out, &s, NULL);
1753
1754	/*
1755	 * An unexpected @id (desc_miss) or @seq mismatch means the record
1756	 * does not exist. A descriptor in the reserved or committed state
1757	 * means the record does not yet exist for the reader.
1758	 */
1759	if (d_state == desc_miss ||
1760	    d_state == desc_reserved ||
1761	    d_state == desc_committed ||
1762	    s != seq) {
1763		return -EINVAL;
1764	}
1765
1766	/*
1767	 * A descriptor in the reusable state may no longer have its data
1768	 * available; report it as existing but with lost data. Or the record
1769	 * may actually be a record with lost data.
1770	 */
1771	if (d_state == desc_reusable ||
1772	    (blk_lpos->begin == FAILED_LPOS && blk_lpos->next == FAILED_LPOS)) {
1773		return -ENOENT;
1774	}
1775
1776	return 0;
1777}
1778
1779/*
1780 * Copy the ringbuffer data from the record with @seq to the provided
1781 * @r buffer. On success, 0 is returned.
1782 *
1783 * See desc_read_finalized_seq() for error return values.
1784 */
1785static int prb_read(struct printk_ringbuffer *rb, u64 seq,
1786		    struct printk_record *r, unsigned int *line_count)
1787{
1788	struct prb_desc_ring *desc_ring = &rb->desc_ring;
1789	struct printk_info *info = to_info(desc_ring, seq);
1790	struct prb_desc *rdesc = to_desc(desc_ring, seq);
1791	atomic_long_t *state_var = &rdesc->state_var;
1792	struct prb_desc desc;
1793	unsigned long id;
1794	int err;
1795
1796	/* Extract the ID, used to specify the descriptor to read. */
1797	id = DESC_ID(atomic_long_read(state_var));
1798
1799	/* Get a local copy of the correct descriptor (if available). */
1800	err = desc_read_finalized_seq(desc_ring, id, seq, &desc);
1801
1802	/*
1803	 * If @r is NULL, the caller is only interested in the availability
1804	 * of the record.
1805	 */
1806	if (err || !r)
1807		return err;
1808
1809	/* If requested, copy meta data. */
1810	if (r->info)
1811		memcpy(r->info, info, sizeof(*(r->info)));
1812
1813	/* Copy text data. If it fails, this is a data-less record. */
1814	if (!copy_data(&rb->text_data_ring, &desc.text_blk_lpos, info->text_len,
1815		       r->text_buf, r->text_buf_size, line_count)) {
1816		return -ENOENT;
1817	}
1818
1819	/* Ensure the record is still finalized and has the same @seq. */
1820	return desc_read_finalized_seq(desc_ring, id, seq, &desc);
1821}
1822
1823/* Get the sequence number of the tail descriptor. */
1824static u64 prb_first_seq(struct printk_ringbuffer *rb)
1825{
1826	struct prb_desc_ring *desc_ring = &rb->desc_ring;
1827	enum desc_state d_state;
1828	struct prb_desc desc;
1829	unsigned long id;
1830	u64 seq;
1831
1832	for (;;) {
1833		id = atomic_long_read(&rb->desc_ring.tail_id); /* LMM(prb_first_seq:A) */
1834
1835		d_state = desc_read(desc_ring, id, &desc, &seq, NULL); /* LMM(prb_first_seq:B) */
1836
1837		/*
1838		 * This loop will not be infinite because the tail is
1839		 * _always_ in the finalized or reusable state.
1840		 */
1841		if (d_state == desc_finalized || d_state == desc_reusable)
1842			break;
1843
1844		/*
1845		 * Guarantee the last state load from desc_read() is before
1846		 * reloading @tail_id in order to see a new tail in the case
1847		 * that the descriptor has been recycled. This pairs with
1848		 * desc_reserve:D.
1849		 *
1850		 * Memory barrier involvement:
1851		 *
1852		 * If prb_first_seq:B reads from desc_reserve:F, then
1853		 * prb_first_seq:A reads from desc_push_tail:B.
1854		 *
1855		 * Relies on:
1856		 *
1857		 * MB from desc_push_tail:B to desc_reserve:F
1858		 *    matching
1859		 * RMB prb_first_seq:B to prb_first_seq:A
1860		 */
1861		smp_rmb(); /* LMM(prb_first_seq:C) */
1862	}
1863
1864	return seq;
1865}
1866
1867/*
1868 * Non-blocking read of a record. Updates @seq to the last finalized record
1869 * (which may have no data available).
1870 *
1871 * See the description of prb_read_valid() and prb_read_valid_info()
1872 * for details.
1873 */
1874static bool _prb_read_valid(struct printk_ringbuffer *rb, u64 *seq,
1875			    struct printk_record *r, unsigned int *line_count)
1876{
1877	u64 tail_seq;
1878	int err;
1879
1880	while ((err = prb_read(rb, *seq, r, line_count))) {
1881		tail_seq = prb_first_seq(rb);
1882
1883		if (*seq < tail_seq) {
1884			/*
1885			 * Behind the tail. Catch up and try again. This
1886			 * can happen for -ENOENT and -EINVAL cases.
1887			 */
1888			*seq = tail_seq;
1889
1890		} else if (err == -ENOENT) {
1891			/* Record exists, but no data available. Skip. */
1892			(*seq)++;
1893
1894		} else {
1895			/* Non-existent/non-finalized record. Must stop. */
1896			return false;
1897		}
1898	}
1899
1900	return true;
1901}
1902
1903/**
1904 * prb_read_valid() - Non-blocking read of a requested record or (if gone)
1905 *                    the next available record.
1906 *
1907 * @rb:  The ringbuffer to read from.
1908 * @seq: The sequence number of the record to read.
1909 * @r:   A record data buffer to store the read record to.
1910 *
1911 * This is the public function available to readers to read a record.
1912 *
1913 * The reader provides the @info and @text_buf buffers of @r to be
1914 * filled in. Any of the buffer pointers can be set to NULL if the reader
1915 * is not interested in that data. To ensure proper initialization of @r,
1916 * prb_rec_init_rd() should be used.
1917 *
1918 * Context: Any context.
1919 * Return: true if a record was read, otherwise false.
1920 *
1921 * On success, the reader must check r->info->seq to see which record was
1922 * actually read. This allows the reader to detect dropped records.
1923 *
1924 * Failure means @seq refers to a not yet written record.
1925 */
1926bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq,
1927		    struct printk_record *r)
1928{
1929	return _prb_read_valid(rb, &seq, r, NULL);
1930}
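
/*
 * Illustrative reader sketch. This is not part of the implementation; the
 * ringbuffer @my_rb is hypothetical:
 *
 *	struct printk_info info;
 *	struct printk_record r;
 *	char text_buf[32];
 *	u64 seq = 0;
 *
 *	prb_rec_init_rd(&r, &info, &text_buf[0], sizeof(text_buf));
 *
 *	while (prb_read_valid(&my_rb, seq, &r)) {
 *		// detect dropped records
 *		if (info.seq != seq)
 *			pr_warn("lost %llu records\n", info.seq - seq);
 *
 *		// terminate the (possibly truncated) text for printing
 *		if (info.text_len < r.text_buf_size)
 *			text_buf[info.text_len] = 0;
 *		else
 *			text_buf[r.text_buf_size - 1] = 0;
 *
 *		pr_info("%llu: %s\n", info.seq, text_buf);
 *
 *		// continue after the record that was actually read
 *		seq = info.seq + 1;
 *	}
 */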
1931
1932/**
1933 * prb_read_valid_info() - Non-blocking read of meta data for a requested
1934 *                         record or (if gone) the next available record.
1935 *
1936 * @rb:         The ringbuffer to read from.
1937 * @seq:        The sequence number of the record to read.
1938 * @info:       A buffer to store the read record meta data to.
1939 * @line_count: A buffer to store the number of lines in the record text.
1940 *
1941 * This is the public function available to readers to read only the
1942 * meta data of a record.
1943 *
1944 * The reader provides the @info, @line_count buffers to be filled in.
1945 * Either of the buffer pointers can be set to NULL if the reader is not
1946 * interested in that data.
1947 *
1948 * Context: Any context.
1949 * Return: true if a record's meta data was read, otherwise false.
1950 *
1951 * On success, the reader must check info->seq to see which record meta data
1952 * was actually read. This allows the reader to detect dropped records.
1953 *
1954 * Failure means @seq refers to a not yet written record.
1955 */
1956bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq,
1957			 struct printk_info *info, unsigned int *line_count)
1958{
1959	struct printk_record r;
1960
1961	prb_rec_init_rd(&r, info, NULL, 0);
1962
1963	return _prb_read_valid(rb, &seq, &r, line_count);
1964}
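
/*
 * Illustrative meta-data-only sketch, e.g. to size a buffer before reading
 * the text. This is not part of the implementation; @my_rb, @seq and
 * @prefix_len are hypothetical:
 *
 *	struct printk_info info;
 *	unsigned int line_count;
 *	unsigned int len;
 *
 *	if (prb_read_valid_info(&my_rb, seq, &info, &line_count))
 *		len = info.text_len + line_count * prefix_len;
 */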
1965
1966/**
1967 * prb_first_valid_seq() - Get the sequence number of the oldest available
1968 *                         record.
1969 *
1970 * @rb: The ringbuffer to get the sequence number from.
1971 *
1972 * This is the public function available to readers to see what the
1973 * first/oldest valid sequence number is.
1974 *
1975 * This provides readers a starting point to begin iterating the ringbuffer.
1976 *
1977 * Context: Any context.
1978 * Return: The sequence number of the first/oldest record or, if the
1979 *         ringbuffer is empty, 0 is returned.
1980 */
1981u64 prb_first_valid_seq(struct printk_ringbuffer *rb)
1982{
1983	u64 seq = 0;
1984
1985	if (!_prb_read_valid(rb, &seq, NULL, NULL))
1986		return 0;
1987
1988	return seq;
1989}
1990
1991/**
1992 * prb_next_seq() - Get the sequence number after the last available record.
1993 *
1994 * @rb:  The ringbuffer to get the sequence number from.
1995 *
1996 * This is the public function available to readers to see what the next
1997 * newest sequence number available to readers will be.
1998 *
1999 * This provides readers a sequence number to jump to if all currently
2000 * available records should be skipped.
2001 *
2002 * Context: Any context.
2003 * Return: The sequence number of the next newest (not yet available) record
2004 *         for readers.
2005 */
2006u64 prb_next_seq(struct printk_ringbuffer *rb)
2007{
2008	u64 seq = 0;
2009
2010	/* Search forward from the oldest descriptor. */
2011	while (_prb_read_valid(rb, &seq, NULL, NULL))
2012		seq++;
2013
2014	return seq;
2015}
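
/*
 * Illustrative sketch of the two iteration starting points. This is not
 * part of the implementation; the ringbuffer @my_rb is hypothetical:
 *
 *	u64 seq;
 *
 *	// start with the oldest record still available ...
 *	seq = prb_first_valid_seq(&my_rb);
 *
 *	// ... or skip everything currently available and only handle
 *	// records reserved from now on
 *	seq = prb_next_seq(&my_rb);
 */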
2016
2017/**
2018 * prb_init() - Initialize a ringbuffer to use provided external buffers.
2019 *
2020 * @rb:       The ringbuffer to initialize.
2021 * @text_buf: The data buffer for text data.
2022 * @textbits: The size of @text_buf as a power-of-2 value.
2023 * @descs:    The descriptor buffer for ringbuffer records.
2024 * @descbits: The count of @descs items as a power-of-2 value.
2025 * @infos:    The printk_info buffer for ringbuffer records.
2026 *
2027 * This is the public function available to writers to setup a ringbuffer
2028 * during runtime using provided buffers.
2029 *
2030 * This must match the initialization of DEFINE_PRINTKRB().
2031 *
2032 * Context: Any context.
2033 */
2034void prb_init(struct printk_ringbuffer *rb,
2035	      char *text_buf, unsigned int textbits,
2036	      struct prb_desc *descs, unsigned int descbits,
2037	      struct printk_info *infos)
2038{
2039	memset(descs, 0, _DESCS_COUNT(descbits) * sizeof(descs[0]));
2040	memset(infos, 0, _DESCS_COUNT(descbits) * sizeof(infos[0]));
2041
2042	rb->desc_ring.count_bits = descbits;
2043	rb->desc_ring.descs = descs;
2044	rb->desc_ring.infos = infos;
2045	atomic_long_set(&rb->desc_ring.head_id, DESC0_ID(descbits));
2046	atomic_long_set(&rb->desc_ring.tail_id, DESC0_ID(descbits));
2047
2048	rb->text_data_ring.size_bits = textbits;
2049	rb->text_data_ring.data = text_buf;
2050	atomic_long_set(&rb->text_data_ring.head_lpos, BLK0_LPOS(textbits));
2051	atomic_long_set(&rb->text_data_ring.tail_lpos, BLK0_LPOS(textbits));
2052
2053	atomic_long_set(&rb->fail, 0);
2054
2055	atomic_long_set(&(descs[_DESCS_COUNT(descbits) - 1].state_var), DESC0_SV(descbits));
2056	descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.begin = FAILED_LPOS;
2057	descs[_DESCS_COUNT(descbits) - 1].text_blk_lpos.next = FAILED_LPOS;
2058
2059	infos[0].seq = -(u64)_DESCS_COUNT(descbits);
2060	infos[_DESCS_COUNT(descbits) - 1].seq = 0;
2061}
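
/*
 * Illustrative setup sketch using provided buffers. This is not part of the
 * implementation; the names and sizes are hypothetical (a statically defined
 * ringbuffer would instead use DEFINE_PRINTKRB()):
 *
 *	#define MY_TEXT_BITS 12		// 4 KiB of text data
 *	#define MY_DESC_BITS 5		// 32 descriptors
 *
 *	static char my_text[1 << MY_TEXT_BITS];
 *	static struct prb_desc my_descs[1 << MY_DESC_BITS];
 *	static struct printk_info my_infos[1 << MY_DESC_BITS];
 *	static struct printk_ringbuffer my_rb;
 *
 *	prb_init(&my_rb, &my_text[0], MY_TEXT_BITS,
 *		 &my_descs[0], MY_DESC_BITS, &my_infos[0]);
 */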
2062
2063/**
2064 * prb_record_text_space() - Query the full actual used ringbuffer space for
2065 *                           the text data of a reserved entry.
2066 *
2067 * @e: The successfully reserved entry to query.
2068 *
2069 * This is the public function available to writers to see how much actual
2070 * space is used in the ringbuffer to store the text data of the specified
2071 * entry.
2072 *
2073 * This function is only valid if @e has been successfully reserved using
2074 * prb_reserve().
2075 *
2076 * Context: Any context.
2077 * Return: The size in bytes used by the text data of the associated record.
2078 */
2079unsigned int prb_record_text_space(struct prb_reserved_entry *e)
2080{
2081	return e->text_space;
2082}