   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/kernel/printk.c
   4 *
   5 *  Copyright (C) 1991, 1992  Linus Torvalds
   6 *
   7 * Modified to make sys_syslog() more flexible: added commands to
   8 * return the last 4k of kernel messages, regardless of whether
   9 * they've been read or not.  Added option to suppress kernel printk's
  10 * to the console.  Added hook for sending the console messages
  11 * elsewhere, in preparation for a serial line console (someday).
  12 * Ted Ts'o, 2/11/93.
  13 * Modified for sysctl support, 1/8/97, Chris Horn.
  14 * Fixed SMP synchronization, 08/08/99, Manfred Spraul
  15 *     manfred@colorfullife.com
  16 * Rewrote bits to get rid of console_lock
  17 *	01Mar01 Andrew Morton
  18 */
  19
  20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  21
  22#include <linux/kernel.h>
  23#include <linux/mm.h>
  24#include <linux/tty.h>
  25#include <linux/tty_driver.h>
  26#include <linux/console.h>
  27#include <linux/init.h>
  28#include <linux/jiffies.h>
  29#include <linux/nmi.h>
  30#include <linux/module.h>
  31#include <linux/moduleparam.h>
  32#include <linux/delay.h>
  33#include <linux/smp.h>
  34#include <linux/security.h>
  35#include <linux/memblock.h>
  36#include <linux/syscalls.h>
  37#include <linux/crash_core.h>
  38#include <linux/kdb.h>
  39#include <linux/ratelimit.h>
  40#include <linux/kmsg_dump.h>
  41#include <linux/syslog.h>
  42#include <linux/cpu.h>
  43#include <linux/rculist.h>
  44#include <linux/poll.h>
  45#include <linux/irq_work.h>
  46#include <linux/ctype.h>
  47#include <linux/uio.h>
  48#include <linux/sched/clock.h>
  49#include <linux/sched/debug.h>
  50#include <linux/sched/task_stack.h>
  51
  52#include <linux/uaccess.h>
  53#include <asm/sections.h>
  54
  55#include <trace/events/initcall.h>
  56#define CREATE_TRACE_POINTS
  57#include <trace/events/printk.h>
  58
  59#include "console_cmdline.h"
  60#include "braille.h"
  61#include "internal.h"
  62
  63int console_printk[4] = {
  64	CONSOLE_LOGLEVEL_DEFAULT,	/* console_loglevel */
  65	MESSAGE_LOGLEVEL_DEFAULT,	/* default_message_loglevel */
  66	CONSOLE_LOGLEVEL_MIN,		/* minimum_console_loglevel */
  67	CONSOLE_LOGLEVEL_DEFAULT,	/* default_console_loglevel */
  68};
  69EXPORT_SYMBOL_GPL(console_printk);
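/*
 * Illustrative note: with CONFIG_PRINTK these four values are what the
 * kernel.printk sysctl reports, so "cat /proc/sys/kernel/printk" printing
 * "7 4 1 7" would mean console_loglevel=7, default_message_loglevel=4,
 * minimum_console_loglevel=1 and default_console_loglevel=7.
 */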
  70
  71atomic_t ignore_console_lock_warning __read_mostly = ATOMIC_INIT(0);
  72EXPORT_SYMBOL(ignore_console_lock_warning);
  73
  74/*
  75 * Low level drivers may need that to know if they can schedule in
  76 * their unblank() callback or not. So let's export it.
  77 */
  78int oops_in_progress;
  79EXPORT_SYMBOL(oops_in_progress);
  80
  81/*
  82 * console_sem protects the console_drivers list, and also
  83 * provides serialisation for access to the entire console
  84 * driver system.
  85 */
  86static DEFINE_SEMAPHORE(console_sem);
  87struct console *console_drivers;
  88EXPORT_SYMBOL_GPL(console_drivers);
  89
  90/*
   91 * The system may need to suppress printk messages under certain
   92 * circumstances, like after a kernel panic happens.
  93 */
  94int __read_mostly suppress_printk;
  95
  96#ifdef CONFIG_LOCKDEP
  97static struct lockdep_map console_lock_dep_map = {
  98	.name = "console_lock"
  99};
 100#endif
 101
 102enum devkmsg_log_bits {
 103	__DEVKMSG_LOG_BIT_ON = 0,
 104	__DEVKMSG_LOG_BIT_OFF,
 105	__DEVKMSG_LOG_BIT_LOCK,
 106};
 107
 108enum devkmsg_log_masks {
 109	DEVKMSG_LOG_MASK_ON             = BIT(__DEVKMSG_LOG_BIT_ON),
 110	DEVKMSG_LOG_MASK_OFF            = BIT(__DEVKMSG_LOG_BIT_OFF),
 111	DEVKMSG_LOG_MASK_LOCK           = BIT(__DEVKMSG_LOG_BIT_LOCK),
 112};
 113
 114/* Keep both the 'on' and 'off' bits clear, i.e. ratelimit by default: */
 115#define DEVKMSG_LOG_MASK_DEFAULT	0
 116
 117static unsigned int __read_mostly devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;
 118
 119static int __control_devkmsg(char *str)
 120{
 121	size_t len;
 122
 123	if (!str)
 124		return -EINVAL;
 125
 126	len = str_has_prefix(str, "on");
 127	if (len) {
 128		devkmsg_log = DEVKMSG_LOG_MASK_ON;
 129		return len;
 130	}
 131
 132	len = str_has_prefix(str, "off");
 133	if (len) {
 134		devkmsg_log = DEVKMSG_LOG_MASK_OFF;
 135		return len;
 136	}
 137
 138	len = str_has_prefix(str, "ratelimit");
 139	if (len) {
 140		devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;
 141		return len;
 142	}
 143
 144	return -EINVAL;
 145}
 146
 147static int __init control_devkmsg(char *str)
 148{
 149	if (__control_devkmsg(str) < 0)
 150		return 1;
 151
 152	/*
 153	 * Set sysctl string accordingly:
 154	 */
 155	if (devkmsg_log == DEVKMSG_LOG_MASK_ON)
 156		strcpy(devkmsg_log_str, "on");
 157	else if (devkmsg_log == DEVKMSG_LOG_MASK_OFF)
 158		strcpy(devkmsg_log_str, "off");
 159	/* else "ratelimit" which is set by default. */
 160
 161	/*
 162	 * Sysctl cannot change it anymore. The kernel command line setting of
 163	 * this parameter is to force the setting to be permanent throughout the
 164	 * runtime of the system. This is a precautionary measure against userspace
 165	 * trying to be a smarta** and attempting to change it up on us.
 166	 */
 167	devkmsg_log |= DEVKMSG_LOG_MASK_LOCK;
 168
 169	return 0;
 170}
 171__setup("printk.devkmsg=", control_devkmsg);
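/*
 * Usage sketch for the parameter registered above: booting with
 * "printk.devkmsg=on" removes the ratelimit on writes to /dev/kmsg,
 * "printk.devkmsg=off" rejects such writes, and "printk.devkmsg=ratelimit"
 * keeps the default. Any of the three also sets DEVKMSG_LOG_MASK_LOCK, so
 * the value can no longer be changed through the corresponding sysctl at
 * runtime.
 */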
 172
 173char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE] = "ratelimit";
 174
 175int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write,
 176			      void __user *buffer, size_t *lenp, loff_t *ppos)
 177{
 178	char old_str[DEVKMSG_STR_MAX_SIZE];
 179	unsigned int old;
 180	int err;
 181
 182	if (write) {
 183		if (devkmsg_log & DEVKMSG_LOG_MASK_LOCK)
 184			return -EINVAL;
 185
 186		old = devkmsg_log;
 187		strncpy(old_str, devkmsg_log_str, DEVKMSG_STR_MAX_SIZE);
 188	}
 189
 190	err = proc_dostring(table, write, buffer, lenp, ppos);
 191	if (err)
 192		return err;
 193
 194	if (write) {
 195		err = __control_devkmsg(devkmsg_log_str);
 196
 197		/*
 198		 * Do not accept an unknown string OR a known string with
 199		 * trailing crap...
 200		 */
 201		if (err < 0 || (err + 1 != *lenp)) {
 202
 203			/* ... and restore old setting. */
 204			devkmsg_log = old;
 205			strncpy(devkmsg_log_str, old_str, DEVKMSG_STR_MAX_SIZE);
 206
 207			return -EINVAL;
 208		}
 209	}
 210
 211	return 0;
 212}
 213
 214/* Number of registered extended console drivers. */
 215static int nr_ext_console_drivers;
 216
 217/*
 218 * Helper macros to handle lockdep when locking/unlocking console_sem. We use
 219 * macros instead of functions so that _RET_IP_ contains useful information.
 220 */
 221#define down_console_sem() do { \
 222	down(&console_sem);\
 223	mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);\
 224} while (0)
 225
 226static int __down_trylock_console_sem(unsigned long ip)
 227{
 228	int lock_failed;
 229	unsigned long flags;
 230
 231	/*
 232	 * Here and in __up_console_sem() we need to be in safe mode,
 233	 * because spindump/WARN/etc from under console ->lock will
 234	 * deadlock in printk()->down_trylock_console_sem() otherwise.
 235	 */
 236	printk_safe_enter_irqsave(flags);
 237	lock_failed = down_trylock(&console_sem);
 238	printk_safe_exit_irqrestore(flags);
 239
 240	if (lock_failed)
 241		return 1;
 242	mutex_acquire(&console_lock_dep_map, 0, 1, ip);
 243	return 0;
 244}
 245#define down_trylock_console_sem() __down_trylock_console_sem(_RET_IP_)
 246
 247static void __up_console_sem(unsigned long ip)
 248{
 249	unsigned long flags;
 250
 251	mutex_release(&console_lock_dep_map, 1, ip);
 252
 253	printk_safe_enter_irqsave(flags);
 254	up(&console_sem);
 255	printk_safe_exit_irqrestore(flags);
 256}
 257#define up_console_sem() __up_console_sem(_RET_IP_)
 258
 259/*
 260 * This is used for debugging the mess that is the VT code by
 261 * keeping track if we have the console semaphore held. It's
 262 * definitely not the perfect debug tool (we don't know if _WE_
 263 * hold it and are racing, but it helps track those weird code
 264 * paths in the console code where we end up in places I want
 265 * locked without the console semaphore held).
 266 */
 267static int console_locked, console_suspended;
 268
 269/*
 270 * If exclusive_console is non-NULL then only this console is to be printed to.
 271 */
 272static struct console *exclusive_console;
 273
 274/*
 275 *	Array of consoles built from command line options (console=)
 276 */
 277
 278#define MAX_CMDLINECONSOLES 8
 279
 280static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
 281
 282static int preferred_console = -1;
 283int console_set_on_cmdline;
 284EXPORT_SYMBOL(console_set_on_cmdline);
 285
 286/* Flag: console code may call schedule() */
 287static int console_may_schedule;
 288
 289enum con_msg_format_flags {
 290	MSG_FORMAT_DEFAULT	= 0,
 291	MSG_FORMAT_SYSLOG	= (1 << 0),
 292};
 293
 294static int console_msg_format = MSG_FORMAT_DEFAULT;
 295
 296/*
 297 * The printk log buffer consists of a chain of concatenated variable
 298 * length records. Every record starts with a record header, containing
 299 * the overall length of the record.
 300 *
 301 * The heads to the first and last entry in the buffer, as well as the
 302 * sequence numbers of these entries are maintained when messages are
 303 * stored.
 304 *
 305 * If the heads indicate available messages, the length in the header
 306 * tells the start of the next message. A length == 0 for the next message
 307 * indicates a wrap-around to the beginning of the buffer.
 308 *
 309 * Every record carries the monotonic timestamp in microseconds, as well as
 310 * the standard userspace syslog level and syslog facility. The usual
 311 * kernel messages use LOG_KERN; userspace-injected messages always carry
 312 * a matching syslog facility, by default LOG_USER. The origin of every
 313 * message can be reliably determined that way.
 314 *
 315 * The human readable log message directly follows the message header. The
 316 * length of the message text is stored in the header, the stored message
 317 * is not terminated.
 318 *
 319 * Optionally, a message can carry a dictionary of properties (key/value pairs),
 320 * to provide userspace with a machine-readable message context.
 321 *
 322 * Examples for well-defined, commonly used property names are:
 323 *   DEVICE=b12:8               device identifier
 324 *                                b12:8         block dev_t
 325 *                                c127:3        char dev_t
 326 *                                n8            netdev ifindex
 327 *                                +sound:card0  subsystem:devname
 328 *   SUBSYSTEM=pci              driver-core subsystem name
 329 *
 330 * Valid characters in property names are [a-zA-Z0-9.-_]. The plain text value
 331 * follows directly after a '=' character. Every property is terminated by
 332 * a '\0' character. The last property is not terminated.
 333 *
 334 * Example of a message structure:
 335 *   0000  ff 8f 00 00 00 00 00 00      monotonic time in nsec
 336 *   0008  34 00                        record is 52 bytes long
 337 *   000a        0b 00                  text is 11 bytes long
 338 *   000c              1f 00            dictionary is 23 bytes long
 339 *   000e                    03 00      LOG_KERN (facility) LOG_ERR (level)
 340 *   0010  69 74 27 73 20 61 20 6c      "it's a l"
 341 *         69 6e 65                     "ine"
 342 *   001b           44 45 56 49 43      "DEVIC"
 343 *         45 3d 62 38 3a 32 00 44      "E=b8:2\0D"
 344 *         52 49 56 45 52 3d 62 75      "RIVER=bu"
 345 *         67                           "g"
 346 *   0032     00 00 00                  padding to next message header
 347 *
 348 * The 'struct printk_log' buffer header must never be directly exported to
 349 * userspace, it is a kernel-private implementation detail that might
 350 * need to be changed in the future, when the requirements change.
 351 *
 352 * /dev/kmsg exports the structured data in the following line format:
 353 *   "<level>,<seqnum>,<timestamp>,<contflag>[,additional_values, ... ];<message text>\n"
 354 *
 355 * Users of the export format should ignore possible additional values
 356 * separated by ',', and find the message after the ';' character.
 357 *
 358 * The optional key/value pairs are attached as continuation lines starting
 359 * with a space character and terminated by a newline. All possible
 360 * non-printable characters are escaped in the "\xff" notation.
 361 */
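/*
 * Illustrative sketch (userspace code, not part of this file): a minimal
 * /dev/kmsg reader that follows the export format described above. It only
 * relies on the documented rules that the message text follows the first ';'
 * and that extra ','-separated fields may be ignored; -EPIPE handling for
 * overwritten records is omitted.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char rec[8192 + 1];
	ssize_t n;
	int fd = open("/dev/kmsg", O_RDONLY | O_NONBLOCK);

	if (fd < 0)
		return 1;

	/* each read() returns exactly one record */
	while ((n = read(fd, rec, sizeof(rec) - 1)) > 0) {
		char *text;

		rec[n] = '\0';
		text = strchr(rec, ';');
		if (text)
			fputs(text + 1, stdout);	/* message plus key/value continuation lines */
	}
	close(fd);
	return 0;
}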
 362
 363enum log_flags {
 364	LOG_NEWLINE	= 2,	/* text ended with a newline */
 365	LOG_CONT	= 8,	/* text is a fragment of a continuation line */
 366};
 367
 368struct printk_log {
 369	u64 ts_nsec;		/* timestamp in nanoseconds */
 370	u16 len;		/* length of entire record */
 371	u16 text_len;		/* length of text buffer */
 372	u16 dict_len;		/* length of dictionary buffer */
 373	u8 facility;		/* syslog facility */
 374	u8 flags:5;		/* internal record flags */
 375	u8 level:3;		/* syslog level */
 376#ifdef CONFIG_PRINTK_CALLER
 377	u32 caller_id;            /* thread id or processor id */
 378#endif
 379}
 380#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 381__packed __aligned(4)
 382#endif
 383;
 384
 385/*
 386 * The logbuf_lock protects kmsg buffer, indices, counters.  This can be taken
 387 * within the scheduler's rq lock. It must be released before calling
 388 * console_unlock() or anything else that might wake up a process.
 389 */
 390DEFINE_RAW_SPINLOCK(logbuf_lock);
 391
 392/*
 393 * Helper macros to lock/unlock logbuf_lock and switch between
 394 * printk-safe/unsafe modes.
 395 */
 396#define logbuf_lock_irq()				\
 397	do {						\
 398		printk_safe_enter_irq();		\
 399		raw_spin_lock(&logbuf_lock);		\
 400	} while (0)
 401
 402#define logbuf_unlock_irq()				\
 403	do {						\
 404		raw_spin_unlock(&logbuf_lock);		\
 405		printk_safe_exit_irq();			\
 406	} while (0)
 407
 408#define logbuf_lock_irqsave(flags)			\
 409	do {						\
 410		printk_safe_enter_irqsave(flags);	\
 411		raw_spin_lock(&logbuf_lock);		\
 412	} while (0)
 413
 414#define logbuf_unlock_irqrestore(flags)		\
 415	do {						\
 416		raw_spin_unlock(&logbuf_lock);		\
 417		printk_safe_exit_irqrestore(flags);	\
 418	} while (0)
 419
 420#ifdef CONFIG_PRINTK
 421DECLARE_WAIT_QUEUE_HEAD(log_wait);
 422/* the next printk record to read by syslog(READ) or /proc/kmsg */
 423static u64 syslog_seq;
 424static u32 syslog_idx;
 425static size_t syslog_partial;
 426static bool syslog_time;
 427
 428/* index and sequence number of the first record stored in the buffer */
 429static u64 log_first_seq;
 430static u32 log_first_idx;
 431
 432/* index and sequence number of the next record to store in the buffer */
 433static u64 log_next_seq;
 434static u32 log_next_idx;
 435
 436/* the next printk record to write to the console */
 437static u64 console_seq;
 438static u32 console_idx;
 439static u64 exclusive_console_stop_seq;
 440
 441/* the next printk record to read after the last 'clear' command */
 442static u64 clear_seq;
 443static u32 clear_idx;
 444
 445#ifdef CONFIG_PRINTK_CALLER
 446#define PREFIX_MAX		48
 447#else
 448#define PREFIX_MAX		32
 449#endif
 450#define LOG_LINE_MAX		(1024 - PREFIX_MAX)
 451
 452#define LOG_LEVEL(v)		((v) & 0x07)
 453#define LOG_FACILITY(v)		((v) >> 3 & 0xff)
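/*
 * Worked example for the two macros above: a userspace prefix "<30>" yields
 * LOG_LEVEL(30) = 30 & 0x07 = 6 (info) and LOG_FACILITY(30) = 30 >> 3 = 3
 * (daemon), i.e. the value encodes facility * 8 + level, just like a
 * syslog(3) priority.
 */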
 454
 455/* record buffer */
 456#define LOG_ALIGN __alignof__(struct printk_log)
 457#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
 458#define LOG_BUF_LEN_MAX (u32)(1 << 31)
 459static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
 460static char *log_buf = __log_buf;
 461static u32 log_buf_len = __LOG_BUF_LEN;
 462
 463/* Return log buffer address */
 464char *log_buf_addr_get(void)
 465{
 466	return log_buf;
 467}
 468
 469/* Return log buffer size */
 470u32 log_buf_len_get(void)
 471{
 472	return log_buf_len;
 473}
 474
 475/* human readable text of the record */
 476static char *log_text(const struct printk_log *msg)
 477{
 478	return (char *)msg + sizeof(struct printk_log);
 479}
 480
 481/* optional key/value pair dictionary attached to the record */
 482static char *log_dict(const struct printk_log *msg)
 483{
 484	return (char *)msg + sizeof(struct printk_log) + msg->text_len;
 485}
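/*
 * Layout sketch implied by the two helpers above: a stored record is
 *
 *   [struct printk_log][text: text_len][dict: dict_len][padding to LOG_ALIGN]
 *
 * log_text() points just past the header, log_dict() just past the text,
 * and msg->len covers the whole record including the padding.
 */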
 486
 487/* get record by index; idx must point to valid msg */
 488static struct printk_log *log_from_idx(u32 idx)
 489{
 490	struct printk_log *msg = (struct printk_log *)(log_buf + idx);
 491
 492	/*
 493	 * A length == 0 record is the end of buffer marker. Wrap around and
 494	 * read the message at the start of the buffer.
 495	 */
 496	if (!msg->len)
 497		return (struct printk_log *)log_buf;
 498	return msg;
 499}
 500
 501/* get next record; idx must point to valid msg */
 502static u32 log_next(u32 idx)
 503{
 504	struct printk_log *msg = (struct printk_log *)(log_buf + idx);
 505
 506	/* length == 0 indicates the end of the buffer; wrap */
 507	/*
 508	 * A length == 0 record is the end of buffer marker. Wrap around and
 509	 * read the message at the start of the buffer as *this* one, and
 510	 * return the one after that.
 511	 */
 512	if (!msg->len) {
 513		msg = (struct printk_log *)log_buf;
 514		return msg->len;
 515	}
 516	return idx + msg->len;
 517}
 518
 519/*
 520 * Check whether there is enough free space for the given message.
 521 *
 522 * The same values of first_idx and next_idx mean that the buffer
 523 * is either empty or full.
 524 *
 525 * If the buffer is empty, we must respect the position of the indexes.
 526 * They cannot be reset to the beginning of the buffer.
 527 */
 528static int logbuf_has_space(u32 msg_size, bool empty)
 529{
 530	u32 free;
 531
 532	if (log_next_idx > log_first_idx || empty)
 533		free = max(log_buf_len - log_next_idx, log_first_idx);
 534	else
 535		free = log_first_idx - log_next_idx;
 536
 537	/*
 538	 * We also need space for an empty header that signals wrapping
 539	 * of the buffer.
 540	 */
 541	return free >= msg_size + sizeof(struct printk_log);
 542}
 543
 544static int log_make_free_space(u32 msg_size)
 545{
 546	while (log_first_seq < log_next_seq &&
 547	       !logbuf_has_space(msg_size, false)) {
 548		/* drop old messages until we have enough contiguous space */
 549		log_first_idx = log_next(log_first_idx);
 550		log_first_seq++;
 551	}
 552
 553	if (clear_seq < log_first_seq) {
 554		clear_seq = log_first_seq;
 555		clear_idx = log_first_idx;
 556	}
 557
 558	/* sequence numbers are equal, so the log buffer is empty */
 559	if (logbuf_has_space(msg_size, log_first_seq == log_next_seq))
 560		return 0;
 561
 562	return -ENOMEM;
 563}
 564
 565/* compute the message size including the padding bytes */
 566static u32 msg_used_size(u16 text_len, u16 dict_len, u32 *pad_len)
 567{
 568	u32 size;
 569
 570	size = sizeof(struct printk_log) + text_len + dict_len;
 571	*pad_len = (-size) & (LOG_ALIGN - 1);
 572	size += *pad_len;
 573
 574	return size;
 575}
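/*
 * Worked example, assuming the 16-byte header and 4-byte alignment of the
 * sample record near the top of this file: text_len = 11 and dict_len = 23
 * give size = 16 + 11 + 23 = 50, *pad_len = (-50) & 3 = 2, so the record
 * occupies 52 bytes, matching the "record is 52 bytes long" example above.
 */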
 576
 577/*
 578 * Define how much of the log buffer we could take at maximum. The value
 579 * must be greater than two. Note that only half of the buffer is available
 580 * when the index points to the middle.
 581 */
 582#define MAX_LOG_TAKE_PART 4
 583static const char trunc_msg[] = "<truncated>";
 584
 585static u32 truncate_msg(u16 *text_len, u16 *trunc_msg_len,
 586			u16 *dict_len, u32 *pad_len)
 587{
 588	/*
 589	 * The message should not take the whole buffer. Otherwise, it might
 590	 * get removed too soon.
 591	 */
 592	u32 max_text_len = log_buf_len / MAX_LOG_TAKE_PART;
 593	if (*text_len > max_text_len)
 594		*text_len = max_text_len;
 595	/* enable the warning message */
 596	*trunc_msg_len = strlen(trunc_msg);
 597	/* disable the "dict" completely */
 598	*dict_len = 0;
 599	/* compute the size again, count also the warning message */
 600	return msg_used_size(*text_len + *trunc_msg_len, 0, pad_len);
 601}
 602
 603/* insert record into the buffer, discard old ones, update heads */
 604static int log_store(u32 caller_id, int facility, int level,
 605		     enum log_flags flags, u64 ts_nsec,
 606		     const char *dict, u16 dict_len,
 607		     const char *text, u16 text_len)
 608{
 609	struct printk_log *msg;
 610	u32 size, pad_len;
 611	u16 trunc_msg_len = 0;
 612
 613	/* number of '\0' padding bytes to next message */
 614	size = msg_used_size(text_len, dict_len, &pad_len);
 615
 616	if (log_make_free_space(size)) {
 617		/* truncate the message if it is too long for empty buffer */
 618		size = truncate_msg(&text_len, &trunc_msg_len,
 619				    &dict_len, &pad_len);
 620		/* survive when the log buffer is too small for trunc_msg */
 621		if (log_make_free_space(size))
 622			return 0;
 623	}
 624
 625	if (log_next_idx + size + sizeof(struct printk_log) > log_buf_len) {
 626		/*
 627		 * This message + an additional empty header does not fit
 628		 * at the end of the buffer. Add an empty header with len == 0
 629		 * to signify a wrap around.
 630		 */
 631		memset(log_buf + log_next_idx, 0, sizeof(struct printk_log));
 632		log_next_idx = 0;
 633	}
 634
 635	/* fill message */
 636	msg = (struct printk_log *)(log_buf + log_next_idx);
 637	memcpy(log_text(msg), text, text_len);
 638	msg->text_len = text_len;
 639	if (trunc_msg_len) {
 640		memcpy(log_text(msg) + text_len, trunc_msg, trunc_msg_len);
 641		msg->text_len += trunc_msg_len;
 642	}
 643	memcpy(log_dict(msg), dict, dict_len);
 644	msg->dict_len = dict_len;
 645	msg->facility = facility;
 646	msg->level = level & 7;
 647	msg->flags = flags & 0x1f;
 648	if (ts_nsec > 0)
 649		msg->ts_nsec = ts_nsec;
 650	else
 651		msg->ts_nsec = local_clock();
 652#ifdef CONFIG_PRINTK_CALLER
 653	msg->caller_id = caller_id;
 654#endif
 655	memset(log_dict(msg) + dict_len, 0, pad_len);
 656	msg->len = size;
 657
 658	/* insert message */
 659	log_next_idx += msg->len;
 660	log_next_seq++;
 661
 662	return msg->text_len;
 663}
 664
 665int dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT);
 666
 667static int syslog_action_restricted(int type)
 668{
 669	if (dmesg_restrict)
 670		return 1;
 671	/*
 672	 * Unless restricted, we allow "read all" and "get buffer size"
 673	 * for everybody.
 674	 */
 675	return type != SYSLOG_ACTION_READ_ALL &&
 676	       type != SYSLOG_ACTION_SIZE_BUFFER;
 677}
 678
 679static int check_syslog_permissions(int type, int source)
 680{
 681	/*
 682	 * If this is from /proc/kmsg and we've already opened it, then we've
 683	 * already done the capabilities checks at open time.
 684	 */
 685	if (source == SYSLOG_FROM_PROC && type != SYSLOG_ACTION_OPEN)
 686		goto ok;
 687
 688	if (syslog_action_restricted(type)) {
 689		if (capable(CAP_SYSLOG))
 690			goto ok;
 691		/*
 692		 * For historical reasons, accept CAP_SYS_ADMIN too, with
 693		 * a warning.
 694		 */
 695		if (capable(CAP_SYS_ADMIN)) {
 696			pr_warn_once("%s (%d): Attempt to access syslog with "
 697				     "CAP_SYS_ADMIN but no CAP_SYSLOG "
 698				     "(deprecated).\n",
 699				 current->comm, task_pid_nr(current));
 700			goto ok;
 701		}
 702		return -EPERM;
 703	}
 704ok:
 705	return security_syslog(type);
 706}
 707
 708static void append_char(char **pp, char *e, char c)
 709{
 710	if (*pp < e)
 711		*(*pp)++ = c;
 712}
 713
 714static ssize_t msg_print_ext_header(char *buf, size_t size,
 715				    struct printk_log *msg, u64 seq)
 716{
 717	u64 ts_usec = msg->ts_nsec;
 718	char caller[20];
 719#ifdef CONFIG_PRINTK_CALLER
 720	u32 id = msg->caller_id;
 721
 722	snprintf(caller, sizeof(caller), ",caller=%c%u",
 723		 id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
 724#else
 725	caller[0] = '\0';
 726#endif
 727
 728	do_div(ts_usec, 1000);
 729
 730	return scnprintf(buf, size, "%u,%llu,%llu,%c%s;",
 731			 (msg->facility << 3) | msg->level, seq, ts_usec,
 732			 msg->flags & LOG_CONT ? 'c' : '-', caller);
 733}
 734
 735static ssize_t msg_print_ext_body(char *buf, size_t size,
 736				  char *dict, size_t dict_len,
 737				  char *text, size_t text_len)
 738{
 739	char *p = buf, *e = buf + size;
 740	size_t i;
 741
 742	/* escape non-printable characters */
 743	for (i = 0; i < text_len; i++) {
 744		unsigned char c = text[i];
 745
 746		if (c < ' ' || c >= 127 || c == '\\')
 747			p += scnprintf(p, e - p, "\\x%02x", c);
 748		else
 749			append_char(&p, e, c);
 750	}
 751	append_char(&p, e, '\n');
 752
 753	if (dict_len) {
 754		bool line = true;
 755
 756		for (i = 0; i < dict_len; i++) {
 757			unsigned char c = dict[i];
 758
 759			if (line) {
 760				append_char(&p, e, ' ');
 761				line = false;
 762			}
 763
 764			if (c == '\0') {
 765				append_char(&p, e, '\n');
 766				line = true;
 767				continue;
 768			}
 769
 770			if (c < ' ' || c >= 127 || c == '\\') {
 771				p += scnprintf(p, e - p, "\\x%02x", c);
 772				continue;
 773			}
 774
 775			append_char(&p, e, c);
 776		}
 777		append_char(&p, e, '\n');
 778	}
 779
 780	return p - buf;
 781}
 782
 783/* /dev/kmsg - userspace message inject/listen interface */
 784struct devkmsg_user {
 785	u64 seq;
 786	u32 idx;
 787	struct ratelimit_state rs;
 788	struct mutex lock;
 789	char buf[CONSOLE_EXT_LOG_MAX];
 790};
 791
 792static __printf(3, 4) __cold
 793int devkmsg_emit(int facility, int level, const char *fmt, ...)
 794{
 795	va_list args;
 796	int r;
 797
 798	va_start(args, fmt);
 799	r = vprintk_emit(facility, level, NULL, 0, fmt, args);
 800	va_end(args);
 801
 802	return r;
 803}
 804
 805static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
 806{
 807	char *buf, *line;
 808	int level = default_message_loglevel;
 809	int facility = 1;	/* LOG_USER */
 810	struct file *file = iocb->ki_filp;
 811	struct devkmsg_user *user = file->private_data;
 812	size_t len = iov_iter_count(from);
 813	ssize_t ret = len;
 814
 815	if (!user || len > LOG_LINE_MAX)
 816		return -EINVAL;
 817
 818	/* Ignore when user logging is disabled. */
 819	if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
 820		return len;
 821
 822	/* Ratelimit when not explicitly enabled. */
 823	if (!(devkmsg_log & DEVKMSG_LOG_MASK_ON)) {
 824		if (!___ratelimit(&user->rs, current->comm))
 825			return ret;
 826	}
 827
 828	buf = kmalloc(len+1, GFP_KERNEL);
 829	if (buf == NULL)
 830		return -ENOMEM;
 831
 832	buf[len] = '\0';
 833	if (!copy_from_iter_full(buf, len, from)) {
 834		kfree(buf);
 835		return -EFAULT;
 836	}
 837
 838	/*
 839	 * Extract and skip the syslog prefix <[0-9]*>. Coming from userspace
 840	 * the decimal value represents 32bit, the lower 3 bit are the log
 841	 * level, the rest are the log facility.
 842	 *
 843	 * If no prefix or no userspace facility is specified, we
 844	 * enforce LOG_USER, to be able to reliably distinguish
 845	 * kernel-generated messages from userspace-injected ones.
 846	 */
 847	line = buf;
 848	if (line[0] == '<') {
 849		char *endp = NULL;
 850		unsigned int u;
 851
 852		u = simple_strtoul(line + 1, &endp, 10);
 853		if (endp && endp[0] == '>') {
 854			level = LOG_LEVEL(u);
 855			if (LOG_FACILITY(u) != 0)
 856				facility = LOG_FACILITY(u);
 857			endp++;
 858			len -= endp - line;
 859			line = endp;
 860		}
 861	}
 862
 863	devkmsg_emit(facility, level, "%s", line);
 864	kfree(buf);
 865	return ret;
 866}
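/*
 * Illustrative sketch (userspace code, not part of this file; "myprog" is a
 * made-up name): injecting a message through /dev/kmsg. The "<30>" prefix is
 * parsed by devkmsg_write() above into facility 3 and level 6; without a
 * prefix the record would get LOG_USER and the default message loglevel.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	static const char msg[] = "<30>myprog: service started\n";
	int fd = open("/dev/kmsg", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, msg, strlen(msg)) < 0)	/* one write() == one log record */
		return 1;
	close(fd);
	return 0;
}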
 867
 868static ssize_t devkmsg_read(struct file *file, char __user *buf,
 869			    size_t count, loff_t *ppos)
 870{
 871	struct devkmsg_user *user = file->private_data;
 872	struct printk_log *msg;
 873	size_t len;
 874	ssize_t ret;
 875
 876	if (!user)
 877		return -EBADF;
 878
 879	ret = mutex_lock_interruptible(&user->lock);
 880	if (ret)
 881		return ret;
 882
 883	logbuf_lock_irq();
 884	while (user->seq == log_next_seq) {
 885		if (file->f_flags & O_NONBLOCK) {
 886			ret = -EAGAIN;
 887			logbuf_unlock_irq();
 888			goto out;
 889		}
 890
 891		logbuf_unlock_irq();
 892		ret = wait_event_interruptible(log_wait,
 893					       user->seq != log_next_seq);
 894		if (ret)
 895			goto out;
 896		logbuf_lock_irq();
 897	}
 898
 899	if (user->seq < log_first_seq) {
 900		/* our last seen message is gone, return error and reset */
 901		user->idx = log_first_idx;
 902		user->seq = log_first_seq;
 903		ret = -EPIPE;
 904		logbuf_unlock_irq();
 905		goto out;
 906	}
 907
 908	msg = log_from_idx(user->idx);
 909	len = msg_print_ext_header(user->buf, sizeof(user->buf),
 910				   msg, user->seq);
 911	len += msg_print_ext_body(user->buf + len, sizeof(user->buf) - len,
 912				  log_dict(msg), msg->dict_len,
 913				  log_text(msg), msg->text_len);
 914
 915	user->idx = log_next(user->idx);
 916	user->seq++;
 917	logbuf_unlock_irq();
 918
 919	if (len > count) {
 920		ret = -EINVAL;
 921		goto out;
 922	}
 923
 924	if (copy_to_user(buf, user->buf, len)) {
 925		ret = -EFAULT;
 926		goto out;
 927	}
 928	ret = len;
 929out:
 930	mutex_unlock(&user->lock);
 931	return ret;
 932}
 933
 934static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
 935{
 936	struct devkmsg_user *user = file->private_data;
 937	loff_t ret = 0;
 938
 939	if (!user)
 940		return -EBADF;
 941	if (offset)
 942		return -ESPIPE;
 943
 944	logbuf_lock_irq();
 945	switch (whence) {
 946	case SEEK_SET:
 947		/* the first record */
 948		user->idx = log_first_idx;
 949		user->seq = log_first_seq;
 950		break;
 951	case SEEK_DATA:
 952		/*
 953		 * The first record after the last SYSLOG_ACTION_CLEAR,
 954		 * like issued by 'dmesg -c'. Reading /dev/kmsg itself
 955		 * changes no global state, and does not clear anything.
 956		 */
 957		user->idx = clear_idx;
 958		user->seq = clear_seq;
 959		break;
 960	case SEEK_END:
 961		/* after the last record */
 962		user->idx = log_next_idx;
 963		user->seq = log_next_seq;
 964		break;
 965	default:
 966		ret = -EINVAL;
 967	}
 968	logbuf_unlock_irq();
 969	return ret;
 970}
 971
 972static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
 973{
 974	struct devkmsg_user *user = file->private_data;
 975	__poll_t ret = 0;
 976
 977	if (!user)
 978		return EPOLLERR|EPOLLNVAL;
 979
 980	poll_wait(file, &log_wait, wait);
 981
 982	logbuf_lock_irq();
 983	if (user->seq < log_next_seq) {
 984		/* return error when data has vanished underneath us */
 985		if (user->seq < log_first_seq)
 986			ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
 987		else
 988			ret = EPOLLIN|EPOLLRDNORM;
 989	}
 990	logbuf_unlock_irq();
 991
 992	return ret;
 993}
 994
 995static int devkmsg_open(struct inode *inode, struct file *file)
 996{
 997	struct devkmsg_user *user;
 998	int err;
 999
1000	if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
1001		return -EPERM;
1002
1003	/* write-only does not need any file context */
1004	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
1005		err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
1006					       SYSLOG_FROM_READER);
1007		if (err)
1008			return err;
1009	}
1010
1011	user = kmalloc(sizeof(struct devkmsg_user), GFP_KERNEL);
1012	if (!user)
1013		return -ENOMEM;
1014
1015	ratelimit_default_init(&user->rs);
1016	ratelimit_set_flags(&user->rs, RATELIMIT_MSG_ON_RELEASE);
1017
1018	mutex_init(&user->lock);
1019
1020	logbuf_lock_irq();
1021	user->idx = log_first_idx;
1022	user->seq = log_first_seq;
1023	logbuf_unlock_irq();
1024
1025	file->private_data = user;
1026	return 0;
1027}
1028
1029static int devkmsg_release(struct inode *inode, struct file *file)
1030{
1031	struct devkmsg_user *user = file->private_data;
1032
1033	if (!user)
1034		return 0;
1035
1036	ratelimit_state_exit(&user->rs);
1037
1038	mutex_destroy(&user->lock);
1039	kfree(user);
1040	return 0;
1041}
1042
1043const struct file_operations kmsg_fops = {
1044	.open = devkmsg_open,
1045	.read = devkmsg_read,
1046	.write_iter = devkmsg_write,
1047	.llseek = devkmsg_llseek,
1048	.poll = devkmsg_poll,
1049	.release = devkmsg_release,
1050};
1051
1052#ifdef CONFIG_CRASH_CORE
1053/*
1054 * This appends the listed symbols to /proc/vmcore
1055 *
1056 * /proc/vmcore is used by various utilities, like crash and makedumpfile to
1057 * obtain access to symbols that are otherwise very difficult to locate.  These
1058 * symbols are specifically used so that utilities can access and extract the
1059 * dmesg log from a vmcore file after a crash.
1060 */
1061void log_buf_vmcoreinfo_setup(void)
1062{
1063	VMCOREINFO_SYMBOL(log_buf);
1064	VMCOREINFO_SYMBOL(log_buf_len);
1065	VMCOREINFO_SYMBOL(log_first_idx);
1066	VMCOREINFO_SYMBOL(clear_idx);
1067	VMCOREINFO_SYMBOL(log_next_idx);
1068	/*
1069	 * Export struct printk_log size and field offsets. User space tools can
1070	 * parse it and detect any changes to structure down the line.
1071	 */
1072	VMCOREINFO_STRUCT_SIZE(printk_log);
1073	VMCOREINFO_OFFSET(printk_log, ts_nsec);
1074	VMCOREINFO_OFFSET(printk_log, len);
1075	VMCOREINFO_OFFSET(printk_log, text_len);
1076	VMCOREINFO_OFFSET(printk_log, dict_len);
1077#ifdef CONFIG_PRINTK_CALLER
1078	VMCOREINFO_OFFSET(printk_log, caller_id);
1079#endif
1080}
1081#endif
1082
1083/* requested log_buf_len from kernel cmdline */
1084static unsigned long __initdata new_log_buf_len;
1085
1086/* we scale the ring buffer by powers of 2 */
1087static void __init log_buf_len_update(u64 size)
1088{
1089	if (size > (u64)LOG_BUF_LEN_MAX) {
1090		size = (u64)LOG_BUF_LEN_MAX;
1091		pr_err("log_buf over 2G is not supported.\n");
1092	}
1093
1094	if (size)
1095		size = roundup_pow_of_two(size);
1096	if (size > log_buf_len)
1097		new_log_buf_len = (unsigned long)size;
1098}
1099
1100/* save requested log_buf_len since it's too early to process it */
1101static int __init log_buf_len_setup(char *str)
1102{
1103	u64 size;
1104
1105	if (!str)
1106		return -EINVAL;
1107
1108	size = memparse(str, &str);
1109
1110	log_buf_len_update(size);
1111
1112	return 0;
1113}
1114early_param("log_buf_len", log_buf_len_setup);
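/*
 * Usage sketch for the parameter above: booting with "log_buf_len=3M" lands
 * in log_buf_len_update(), which rounds the request up to the next power of
 * two (4 MiB here) and caps it at 2 GiB; the replacement buffer is only
 * allocated later, in setup_log_buf().
 */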
1115
1116#ifdef CONFIG_SMP
1117#define __LOG_CPU_MAX_BUF_LEN (1 << CONFIG_LOG_CPU_MAX_BUF_SHIFT)
1118
1119static void __init log_buf_add_cpu(void)
1120{
1121	unsigned int cpu_extra;
1122
1123	/*
1124	 * archs should set up cpu_possible_bits properly with
1125	 * set_cpu_possible() after setup_arch() but just in
1126	 * case let's ensure this is valid.
1127	 */
1128	if (num_possible_cpus() == 1)
1129		return;
1130
1131	cpu_extra = (num_possible_cpus() - 1) * __LOG_CPU_MAX_BUF_LEN;
1132
1133	/* by default this will only continue through for large systems with > 64 CPUs */
1134	if (cpu_extra <= __LOG_BUF_LEN / 2)
1135		return;
1136
1137	pr_info("log_buf_len individual max cpu contribution: %d bytes\n",
1138		__LOG_CPU_MAX_BUF_LEN);
1139	pr_info("log_buf_len total cpu_extra contributions: %d bytes\n",
1140		cpu_extra);
1141	pr_info("log_buf_len min size: %d bytes\n", __LOG_BUF_LEN);
1142
1143	log_buf_len_update(cpu_extra + __LOG_BUF_LEN);
1144}
1145#else /* !CONFIG_SMP */
1146static inline void log_buf_add_cpu(void) {}
1147#endif /* CONFIG_SMP */
1148
1149void __init setup_log_buf(int early)
1150{
1151	unsigned long flags;
1152	char *new_log_buf;
1153	unsigned int free;
1154
1155	if (log_buf != __log_buf)
1156		return;
1157
1158	if (!early && !new_log_buf_len)
1159		log_buf_add_cpu();
1160
1161	if (!new_log_buf_len)
1162		return;
1163
1164	new_log_buf = memblock_alloc(new_log_buf_len, LOG_ALIGN);
1165	if (unlikely(!new_log_buf)) {
1166		pr_err("log_buf_len: %lu bytes not available\n",
1167			new_log_buf_len);
1168		return;
1169	}
1170
1171	logbuf_lock_irqsave(flags);
1172	log_buf_len = new_log_buf_len;
1173	log_buf = new_log_buf;
1174	new_log_buf_len = 0;
1175	free = __LOG_BUF_LEN - log_next_idx;
1176	memcpy(log_buf, __log_buf, __LOG_BUF_LEN);
1177	logbuf_unlock_irqrestore(flags);
1178
1179	pr_info("log_buf_len: %u bytes\n", log_buf_len);
1180	pr_info("early log buf free: %u(%u%%)\n",
1181		free, (free * 100) / __LOG_BUF_LEN);
1182}
1183
1184static bool __read_mostly ignore_loglevel;
1185
1186static int __init ignore_loglevel_setup(char *str)
1187{
1188	ignore_loglevel = true;
1189	pr_info("debug: ignoring loglevel setting.\n");
1190
1191	return 0;
1192}
1193
1194early_param("ignore_loglevel", ignore_loglevel_setup);
1195module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR);
1196MODULE_PARM_DESC(ignore_loglevel,
1197		 "ignore loglevel setting (prints all kernel messages to the console)");
1198
1199static bool suppress_message_printing(int level)
1200{
1201	return (level >= console_loglevel && !ignore_loglevel);
1202}
1203
1204#ifdef CONFIG_BOOT_PRINTK_DELAY
1205
1206static int boot_delay; /* msecs delay after each printk during bootup */
1207static unsigned long long loops_per_msec;	/* based on boot_delay */
1208
1209static int __init boot_delay_setup(char *str)
1210{
1211	unsigned long lpj;
1212
1213	lpj = preset_lpj ? preset_lpj : 1000000;	/* some guess */
1214	loops_per_msec = (unsigned long long)lpj / 1000 * HZ;
1215
1216	get_option(&str, &boot_delay);
1217	if (boot_delay > 10 * 1000)
1218		boot_delay = 0;
1219
1220	pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
1221		"HZ: %d, loops_per_msec: %llu\n",
1222		boot_delay, preset_lpj, lpj, HZ, loops_per_msec);
1223	return 0;
1224}
1225early_param("boot_delay", boot_delay_setup);
1226
1227static void boot_delay_msec(int level)
1228{
1229	unsigned long long k;
1230	unsigned long timeout;
1231
1232	if ((boot_delay == 0 || system_state >= SYSTEM_RUNNING)
1233		|| suppress_message_printing(level)) {
1234		return;
1235	}
1236
1237	k = (unsigned long long)loops_per_msec * boot_delay;
1238
1239	timeout = jiffies + msecs_to_jiffies(boot_delay);
1240	while (k) {
1241		k--;
1242		cpu_relax();
1243		/*
1244		 * use (volatile) jiffies to prevent
1245		 * compiler reduction; loop termination via jiffies
1246		 * is secondary and may or may not happen.
1247		 */
1248		if (time_after(jiffies, timeout))
1249			break;
1250		touch_nmi_watchdog();
1251	}
1252}
1253#else
1254static inline void boot_delay_msec(int level)
1255{
1256}
1257#endif
1258
1259static bool printk_time = IS_ENABLED(CONFIG_PRINTK_TIME);
1260module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
1261
1262static size_t print_syslog(unsigned int level, char *buf)
1263{
1264	return sprintf(buf, "<%u>", level);
1265}
1266
1267static size_t print_time(u64 ts, char *buf)
1268{
1269	unsigned long rem_nsec = do_div(ts, 1000000000);
1270
1271	return sprintf(buf, "[%5lu.%06lu]",
1272		       (unsigned long)ts, rem_nsec / 1000);
1273}
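/*
 * Worked example for print_time(): ts = 5123456789 ns leaves ts = 5 and
 * rem_nsec = 123456789 after do_div(), so the prefix comes out as
 * "[    5.123456]", i.e. seconds since boot with microsecond resolution.
 */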
1274
1275#ifdef CONFIG_PRINTK_CALLER
1276static size_t print_caller(u32 id, char *buf)
1277{
1278	char caller[12];
1279
1280	snprintf(caller, sizeof(caller), "%c%u",
1281		 id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
1282	return sprintf(buf, "[%6s]", caller);
1283}
1284#else
1285#define print_caller(id, buf) 0
1286#endif
1287
1288static size_t print_prefix(const struct printk_log *msg, bool syslog,
1289			   bool time, char *buf)
1290{
1291	size_t len = 0;
1292
1293	if (syslog)
1294		len = print_syslog((msg->facility << 3) | msg->level, buf);
1295
1296	if (time)
1297		len += print_time(msg->ts_nsec, buf + len);
1298
1299	len += print_caller(msg->caller_id, buf + len);
1300
1301	if (IS_ENABLED(CONFIG_PRINTK_CALLER) || time) {
1302		buf[len++] = ' ';
1303		buf[len] = '\0';
1304	}
1305
1306	return len;
1307}
1308
1309static size_t msg_print_text(const struct printk_log *msg, bool syslog,
1310			     bool time, char *buf, size_t size)
1311{
1312	const char *text = log_text(msg);
1313	size_t text_size = msg->text_len;
1314	size_t len = 0;
1315	char prefix[PREFIX_MAX];
1316	const size_t prefix_len = print_prefix(msg, syslog, time, prefix);
1317
1318	do {
1319		const char *next = memchr(text, '\n', text_size);
1320		size_t text_len;
1321
1322		if (next) {
1323			text_len = next - text;
1324			next++;
1325			text_size -= next - text;
1326		} else {
1327			text_len = text_size;
1328		}
1329
1330		if (buf) {
1331			if (prefix_len + text_len + 1 >= size - len)
1332				break;
1333
1334			memcpy(buf + len, prefix, prefix_len);
1335			len += prefix_len;
1336			memcpy(buf + len, text, text_len);
1337			len += text_len;
1338			buf[len++] = '\n';
1339		} else {
1340			/* SYSLOG_ACTION_* buffer size only calculation */
1341			len += prefix_len + text_len + 1;
1342		}
1343
1344		text = next;
1345	} while (text);
1346
1347	return len;
1348}
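/*
 * Illustrative example: a record whose text is "line one\nline two" is
 * rendered as two output lines, each with its own prefix, e.g. with
 * timestamps enabled:
 *
 *   [    5.123456] line one
 *   [    5.123456] line two
 *
 * With buf == NULL the function only returns the length such output would
 * need, which is how the SYSLOG_ACTION_* size calculations below use it.
 */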
1349
1350static int syslog_print(char __user *buf, int size)
1351{
1352	char *text;
1353	struct printk_log *msg;
1354	int len = 0;
1355
1356	text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
1357	if (!text)
1358		return -ENOMEM;
1359
1360	while (size > 0) {
1361		size_t n;
1362		size_t skip;
1363
1364		logbuf_lock_irq();
1365		if (syslog_seq < log_first_seq) {
1366			/* messages are gone, move to first one */
1367			syslog_seq = log_first_seq;
1368			syslog_idx = log_first_idx;
1369			syslog_partial = 0;
1370		}
1371		if (syslog_seq == log_next_seq) {
1372			logbuf_unlock_irq();
1373			break;
1374		}
1375
1376		/*
1377		 * To keep reading/counting partial line consistent,
1378		 * use printk_time value as of the beginning of a line.
1379		 */
1380		if (!syslog_partial)
1381			syslog_time = printk_time;
1382
1383		skip = syslog_partial;
1384		msg = log_from_idx(syslog_idx);
1385		n = msg_print_text(msg, true, syslog_time, text,
1386				   LOG_LINE_MAX + PREFIX_MAX);
1387		if (n - syslog_partial <= size) {
1388			/* message fits into buffer, move forward */
1389			syslog_idx = log_next(syslog_idx);
1390			syslog_seq++;
1391			n -= syslog_partial;
1392			syslog_partial = 0;
1393		} else if (!len){
1394			/* partial read(), remember position */
1395			n = size;
1396			syslog_partial += n;
1397		} else
1398			n = 0;
1399		logbuf_unlock_irq();
1400
1401		if (!n)
1402			break;
1403
1404		if (copy_to_user(buf, text + skip, n)) {
1405			if (!len)
1406				len = -EFAULT;
1407			break;
1408		}
1409
1410		len += n;
1411		size -= n;
1412		buf += n;
1413	}
1414
1415	kfree(text);
1416	return len;
1417}
1418
1419static int syslog_print_all(char __user *buf, int size, bool clear)
1420{
1421	char *text;
1422	int len = 0;
1423	u64 next_seq;
1424	u64 seq;
1425	u32 idx;
1426	bool time;
1427
1428	text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
1429	if (!text)
1430		return -ENOMEM;
1431
1432	time = printk_time;
1433	logbuf_lock_irq();
1434	/*
1435	 * Find first record that fits, including all following records,
1436	 * into the user-provided buffer for this dump.
1437	 */
1438	seq = clear_seq;
1439	idx = clear_idx;
1440	while (seq < log_next_seq) {
1441		struct printk_log *msg = log_from_idx(idx);
1442
1443		len += msg_print_text(msg, true, time, NULL, 0);
1444		idx = log_next(idx);
1445		seq++;
1446	}
1447
1448	/* move first record forward until length fits into the buffer */
1449	seq = clear_seq;
1450	idx = clear_idx;
1451	while (len > size && seq < log_next_seq) {
1452		struct printk_log *msg = log_from_idx(idx);
1453
1454		len -= msg_print_text(msg, true, time, NULL, 0);
1455		idx = log_next(idx);
1456		seq++;
1457	}
1458
1459	/* last message fitting into this dump */
1460	next_seq = log_next_seq;
1461
1462	len = 0;
1463	while (len >= 0 && seq < next_seq) {
1464		struct printk_log *msg = log_from_idx(idx);
1465		int textlen = msg_print_text(msg, true, time, text,
1466					     LOG_LINE_MAX + PREFIX_MAX);
1467
1468		idx = log_next(idx);
1469		seq++;
1470
1471		logbuf_unlock_irq();
1472		if (copy_to_user(buf + len, text, textlen))
1473			len = -EFAULT;
1474		else
1475			len += textlen;
1476		logbuf_lock_irq();
1477
1478		if (seq < log_first_seq) {
1479			/* messages are gone, move to next one */
1480			seq = log_first_seq;
1481			idx = log_first_idx;
1482		}
1483	}
1484
1485	if (clear) {
1486		clear_seq = log_next_seq;
1487		clear_idx = log_next_idx;
1488	}
1489	logbuf_unlock_irq();
1490
1491	kfree(text);
1492	return len;
1493}
1494
1495static void syslog_clear(void)
1496{
1497	logbuf_lock_irq();
1498	clear_seq = log_next_seq;
1499	clear_idx = log_next_idx;
1500	logbuf_unlock_irq();
1501}
1502
1503int do_syslog(int type, char __user *buf, int len, int source)
1504{
1505	bool clear = false;
1506	static int saved_console_loglevel = LOGLEVEL_DEFAULT;
1507	int error;
1508
1509	error = check_syslog_permissions(type, source);
1510	if (error)
1511		return error;
1512
1513	switch (type) {
1514	case SYSLOG_ACTION_CLOSE:	/* Close log */
1515		break;
1516	case SYSLOG_ACTION_OPEN:	/* Open log */
1517		break;
1518	case SYSLOG_ACTION_READ:	/* Read from log */
1519		if (!buf || len < 0)
1520			return -EINVAL;
1521		if (!len)
1522			return 0;
1523		if (!access_ok(buf, len))
1524			return -EFAULT;
1525		error = wait_event_interruptible(log_wait,
1526						 syslog_seq != log_next_seq);
1527		if (error)
1528			return error;
1529		error = syslog_print(buf, len);
1530		break;
1531	/* Read/clear last kernel messages */
1532	case SYSLOG_ACTION_READ_CLEAR:
1533		clear = true;
1534		/* FALL THRU */
1535	/* Read last kernel messages */
1536	case SYSLOG_ACTION_READ_ALL:
1537		if (!buf || len < 0)
1538			return -EINVAL;
1539		if (!len)
1540			return 0;
1541		if (!access_ok(buf, len))
1542			return -EFAULT;
1543		error = syslog_print_all(buf, len, clear);
1544		break;
1545	/* Clear ring buffer */
1546	case SYSLOG_ACTION_CLEAR:
1547		syslog_clear();
1548		break;
1549	/* Disable logging to console */
1550	case SYSLOG_ACTION_CONSOLE_OFF:
1551		if (saved_console_loglevel == LOGLEVEL_DEFAULT)
1552			saved_console_loglevel = console_loglevel;
1553		console_loglevel = minimum_console_loglevel;
1554		break;
1555	/* Enable logging to console */
1556	case SYSLOG_ACTION_CONSOLE_ON:
1557		if (saved_console_loglevel != LOGLEVEL_DEFAULT) {
1558			console_loglevel = saved_console_loglevel;
1559			saved_console_loglevel = LOGLEVEL_DEFAULT;
1560		}
1561		break;
1562	/* Set level of messages printed to console */
1563	case SYSLOG_ACTION_CONSOLE_LEVEL:
1564		if (len < 1 || len > 8)
1565			return -EINVAL;
1566		if (len < minimum_console_loglevel)
1567			len = minimum_console_loglevel;
1568		console_loglevel = len;
1569		/* Implicitly re-enable logging to console */
1570		saved_console_loglevel = LOGLEVEL_DEFAULT;
1571		break;
1572	/* Number of chars in the log buffer */
1573	case SYSLOG_ACTION_SIZE_UNREAD:
1574		logbuf_lock_irq();
1575		if (syslog_seq < log_first_seq) {
1576			/* messages are gone, move to first one */
1577			syslog_seq = log_first_seq;
1578			syslog_idx = log_first_idx;
1579			syslog_partial = 0;
1580		}
1581		if (source == SYSLOG_FROM_PROC) {
1582			/*
1583			 * Short-cut for poll(/proc/kmsg) which simply checks
1584			 * for pending data, not the size; return the count of
1585			 * records, not the length.
1586			 */
1587			error = log_next_seq - syslog_seq;
1588		} else {
1589			u64 seq = syslog_seq;
1590			u32 idx = syslog_idx;
1591			bool time = syslog_partial ? syslog_time : printk_time;
1592
1593			while (seq < log_next_seq) {
1594				struct printk_log *msg = log_from_idx(idx);
1595
1596				error += msg_print_text(msg, true, time, NULL,
1597							0);
1598				time = printk_time;
1599				idx = log_next(idx);
1600				seq++;
1601			}
1602			error -= syslog_partial;
1603		}
1604		logbuf_unlock_irq();
1605		break;
1606	/* Size of the log buffer */
1607	case SYSLOG_ACTION_SIZE_BUFFER:
1608		error = log_buf_len;
1609		break;
1610	default:
1611		error = -EINVAL;
1612		break;
1613	}
1614
1615	return error;
1616}
1617
1618SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
1619{
1620	return do_syslog(type, buf, len, SYSLOG_FROM_READER);
1621}
1622
1623/*
1624 * Special console_lock variants that help to reduce the risk of soft-lockups.
1625 * They allow passing console_lock to another printk() call using a busy wait.
1626 */
1627
1628#ifdef CONFIG_LOCKDEP
1629static struct lockdep_map console_owner_dep_map = {
1630	.name = "console_owner"
1631};
1632#endif
1633
1634static DEFINE_RAW_SPINLOCK(console_owner_lock);
1635static struct task_struct *console_owner;
1636static bool console_waiter;
1637
1638/**
1639 * console_lock_spinning_enable - mark beginning of code where another
1640 *	thread might safely busy wait
1641 *
1642 * This basically converts console_lock into a spinlock. This marks
1643 * the section where the console_lock owner can not sleep, because
1644 * there may be a waiter spinning (like a spinlock). Also it must be
1645 * ready to hand over the lock at the end of the section.
1646 */
1647static void console_lock_spinning_enable(void)
1648{
1649	raw_spin_lock(&console_owner_lock);
1650	console_owner = current;
1651	raw_spin_unlock(&console_owner_lock);
1652
1653	/* The waiter may spin on us after setting console_owner */
1654	spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
1655}
1656
1657/**
1658 * console_lock_spinning_disable_and_check - mark end of code where another
1659 *	thread was able to busy wait and check if there is a waiter
1660 *
1661 * This is called at the end of the section where spinning is allowed.
1662 * It has two functions. First, it is a signal that it is no longer
1663 * safe to start busy waiting for the lock. Second, it checks if
1664 * there is a busy waiter and passes the lock rights to her.
1665 *
1666 * Important: Callers lose the lock if there was a busy waiter.
1667 *	They must not touch items synchronized by console_lock
1668 *	in this case.
1669 *
1670 * Return: 1 if the lock rights were passed, 0 otherwise.
1671 */
1672static int console_lock_spinning_disable_and_check(void)
1673{
1674	int waiter;
1675
1676	raw_spin_lock(&console_owner_lock);
1677	waiter = READ_ONCE(console_waiter);
1678	console_owner = NULL;
1679	raw_spin_unlock(&console_owner_lock);
1680
1681	if (!waiter) {
1682		spin_release(&console_owner_dep_map, 1, _THIS_IP_);
1683		return 0;
1684	}
1685
1686	/* The waiter is now free to continue */
1687	WRITE_ONCE(console_waiter, false);
1688
1689	spin_release(&console_owner_dep_map, 1, _THIS_IP_);
1690
1691	/*
1692	 * Hand off console_lock to waiter. The waiter will perform
1693	 * the up(). After this, the waiter is the console_lock owner.
1694	 */
1695	mutex_release(&console_lock_dep_map, 1, _THIS_IP_);
1696	return 1;
1697}
1698
1699/**
1700 * console_trylock_spinning - try to get console_lock by busy waiting
1701 *
1702 * This allows busy waiting for the console_lock when the current
1703 * owner is running in specially marked sections. It means that
1704 * the current owner is running and cannot reschedule until it
1705 * is ready to lose the lock.
1706 *
1707 * Return: 1 if we got the lock, 0 otherwise
1708 */
1709static int console_trylock_spinning(void)
1710{
1711	struct task_struct *owner = NULL;
1712	bool waiter;
1713	bool spin = false;
1714	unsigned long flags;
1715
1716	if (console_trylock())
1717		return 1;
1718
1719	printk_safe_enter_irqsave(flags);
1720
1721	raw_spin_lock(&console_owner_lock);
1722	owner = READ_ONCE(console_owner);
1723	waiter = READ_ONCE(console_waiter);
1724	if (!waiter && owner && owner != current) {
1725		WRITE_ONCE(console_waiter, true);
1726		spin = true;
1727	}
1728	raw_spin_unlock(&console_owner_lock);
1729
1730	/*
1731	 * If there is an active printk() writing to the
1732	 * consoles, instead of having it write our data too,
1733	 * see if we can offload that load from the active
1734	 * printer, and do some printing ourselves.
1735	 * Go into a spin only if there isn't already a waiter
1736	 * spinning, and there is an active printer, and
1737	 * that active printer isn't us (recursive printk?).
1738	 */
1739	if (!spin) {
1740		printk_safe_exit_irqrestore(flags);
1741		return 0;
1742	}
1743
1744	/* We spin waiting for the owner to release us */
1745	spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
1746	/* Owner will clear console_waiter on hand off */
1747	while (READ_ONCE(console_waiter))
1748		cpu_relax();
1749	spin_release(&console_owner_dep_map, 1, _THIS_IP_);
1750
1751	printk_safe_exit_irqrestore(flags);
1752	/*
1753	 * The owner passed the console lock to us.
1754	 * Since we did not spin on console lock, annotate
1755	 * this as a trylock. Otherwise lockdep will
1756	 * complain.
1757	 */
1758	mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_);
1759
1760	return 1;
1761}
1762
1763/*
1764 * Call the console drivers, asking them to write out
1765 * log_buf[start] to log_buf[end - 1].
1766 * The console_lock must be held.
1767 */
1768static void call_console_drivers(const char *ext_text, size_t ext_len,
1769				 const char *text, size_t len)
1770{
1771	struct console *con;
1772
1773	trace_console_rcuidle(text, len);
1774
1775	if (!console_drivers)
1776		return;
1777
1778	for_each_console(con) {
1779		if (exclusive_console && con != exclusive_console)
1780			continue;
1781		if (!(con->flags & CON_ENABLED))
1782			continue;
1783		if (!con->write)
1784			continue;
1785		if (!cpu_online(smp_processor_id()) &&
1786		    !(con->flags & CON_ANYTIME))
1787			continue;
1788		if (con->flags & CON_EXTENDED)
1789			con->write(con, ext_text, ext_len);
1790		else
1791			con->write(con, text, len);
1792	}
1793}
1794
1795int printk_delay_msec __read_mostly;
1796
1797static inline void printk_delay(void)
1798{
1799	if (unlikely(printk_delay_msec)) {
1800		int m = printk_delay_msec;
1801
1802		while (m--) {
1803			mdelay(1);
1804			touch_nmi_watchdog();
1805		}
1806	}
1807}
1808
1809static inline u32 printk_caller_id(void)
1810{
1811	return in_task() ? task_pid_nr(current) :
1812		0x80000000 + raw_smp_processor_id();
1813}
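/*
 * Illustrative note: with CONFIG_PRINTK_CALLER this id is rendered by
 * print_caller() and msg_print_ext_header() above as "T<pid>" for task
 * context or "C<cpu>" for interrupt context, so a printk() from PID 123
 * shows up with a "[  T123]" field in the output.
 */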
1814
1815/*
1816 * Continuation lines are buffered, and not committed to the record buffer
1817 * until the line is complete, or a race forces it. The line fragments,
1818 * though, are printed immediately to the consoles to ensure everything has
1819 * reached the console in case of a kernel crash.
1820 */
1821static struct cont {
1822	char buf[LOG_LINE_MAX];
1823	size_t len;			/* length == 0 means unused buffer */
1824	u32 caller_id;			/* printk_caller_id() of first print */
1825	u64 ts_nsec;			/* time of first print */
1826	u8 level;			/* log level of first message */
1827	u8 facility;			/* log facility of first message */
1828	enum log_flags flags;		/* prefix, newline flags */
1829} cont;
1830
1831static void cont_flush(void)
1832{
1833	if (cont.len == 0)
1834		return;
1835
1836	log_store(cont.caller_id, cont.facility, cont.level, cont.flags,
1837		  cont.ts_nsec, NULL, 0, cont.buf, cont.len);
1838	cont.len = 0;
1839}
1840
1841static bool cont_add(u32 caller_id, int facility, int level,
1842		     enum log_flags flags, const char *text, size_t len)
1843{
1844	/* If the line gets too long, split it up in separate records. */
1845	if (cont.len + len > sizeof(cont.buf)) {
1846		cont_flush();
1847		return false;
1848	}
1849
1850	if (!cont.len) {
1851		cont.facility = facility;
1852		cont.level = level;
1853		cont.caller_id = caller_id;
1854		cont.ts_nsec = local_clock();
1855		cont.flags = flags;
1856	}
1857
1858	memcpy(cont.buf + cont.len, text, len);
1859	cont.len += len;
1860
1861	// The original flags come from the first line,
1862	// but later continuations can add a newline.
1863	if (flags & LOG_NEWLINE) {
1864		cont.flags |= LOG_NEWLINE;
1865		cont_flush();
1866	}
1867
1868	return true;
1869}
1870
1871static size_t log_output(int facility, int level, enum log_flags lflags, const char *dict, size_t dictlen, char *text, size_t text_len)
1872{
1873	const u32 caller_id = printk_caller_id();
1874
1875	/*
1876	 * If an earlier line was buffered, and we're a continuation
1877	 * write from the same context, try to add it to the buffer.
1878	 */
1879	if (cont.len) {
1880		if (cont.caller_id == caller_id && (lflags & LOG_CONT)) {
1881			if (cont_add(caller_id, facility, level, lflags, text, text_len))
1882				return text_len;
1883		}
1884		/* Otherwise, make sure it's flushed */
1885		cont_flush();
1886	}
1887
1888	/* Skip empty continuation lines that couldn't be added - they just flush */
1889	if (!text_len && (lflags & LOG_CONT))
1890		return 0;
1891
1892	/* If it doesn't end in a newline, try to buffer the current line */
1893	if (!(lflags & LOG_NEWLINE)) {
1894		if (cont_add(caller_id, facility, level, lflags, text, text_len))
1895			return text_len;
 
1896	}
1897
1898	/* Store it in the record log */
1899	return log_store(caller_id, facility, level, lflags, 0,
1900			 dict, dictlen, text, text_len);
1901}
1902
1903/* Must be called under logbuf_lock. */
1904int vprintk_store(int facility, int level,
1905		  const char *dict, size_t dictlen,
1906		  const char *fmt, va_list args)
1907{
1908	static char textbuf[LOG_LINE_MAX];
1909	char *text = textbuf;
1910	size_t text_len;
1911	enum log_flags lflags = 0;
 
 
 
 
 
 
 
 
 
 
 
 
1912
1913	/*
1914	 * The printf needs to come first; we need the syslog
1915	 * prefix, which might be passed in as a parameter.
 
 
1916	 */
1917	text_len = vscnprintf(text, sizeof(textbuf), fmt, args);
1918
1919	/* mark and strip a trailing newline */
1920	if (text_len && text[text_len-1] == '\n') {
1921		text_len--;
1922		lflags |= LOG_NEWLINE;
1923	}
1924
1925	/* strip kernel syslog prefix and extract log level or control flags */
1926	if (facility == 0) {
1927		int kern_level;
 
 
 
 
 
 
 
 
 
 
 
 
 
1928
1929		while ((kern_level = printk_get_level(text)) != 0) {
1930			switch (kern_level) {
1931			case '0' ... '7':
1932				if (level == LOGLEVEL_DEFAULT)
1933					level = kern_level - '0';
1934				break;
1935			case 'c':	/* KERN_CONT */
1936				lflags |= LOG_CONT;
 
 
 
 
 
 
 
 
 
 
1937			}
1938
1939			text_len -= 2;
1940			text += 2;
1941		}
1942	}
1943
1944	if (level == LOGLEVEL_DEFAULT)
1945		level = default_message_loglevel;
 
 
 
 
 
 
 
1946
1947	if (dict)
1948		lflags |= LOG_NEWLINE;
 
 
1949
1950	return log_output(facility, level, lflags,
1951			  dict, dictlen, text, text_len);
1952}
1953
1954asmlinkage int vprintk_emit(int facility, int level,
1955			    const char *dict, size_t dictlen,
1956			    const char *fmt, va_list args)
1957{
1958	int printed_len;
1959	bool in_sched = false, pending_output;
1960	unsigned long flags;
1961	u64 curr_log_seq;
1962
1963	/* Suppress unimportant messages after panic happens */
1964	if (unlikely(suppress_printk))
1965		return 0;
1966
 
 
 
 
 
 
 
 
1967	if (level == LOGLEVEL_SCHED) {
1968		level = LOGLEVEL_DEFAULT;
1969		in_sched = true;
1970	}
1971
1972	boot_delay_msec(level);
1973	printk_delay();
1974
1975	/* This stops the holder of console_sem just where we want him */
1976	logbuf_lock_irqsave(flags);
1977	curr_log_seq = log_next_seq;
1978	printed_len = vprintk_store(facility, level, dict, dictlen, fmt, args);
1979	pending_output = (curr_log_seq != log_next_seq);
1980	logbuf_unlock_irqrestore(flags);
1981
1982	/* If called from the scheduler, we can not call up(). */
1983	if (!in_sched && pending_output) {
1984		/*
1985		 * Disable preemption to avoid being preempted while holding
1986		 * console_sem which would prevent anyone from printing to
1987		 * the console.
 
 
1988		 */
1989		preempt_disable();
1990		/*
1991		 * Try to acquire and then immediately release the console
1992		 * semaphore.  The release will print out buffers and wake up
1993		 * /dev/kmsg and syslog() users.
 
1994		 */
1995		if (console_trylock_spinning())
1996			console_unlock();
1997		preempt_enable();
1998	}
1999
2000	if (pending_output)
 
 
2001		wake_up_klogd();
 
2002	return printed_len;
2003}
2004EXPORT_SYMBOL(vprintk_emit);
2005
2006asmlinkage int vprintk(const char *fmt, va_list args)
2007{
2008	return vprintk_func(fmt, args);
2009}
2010EXPORT_SYMBOL(vprintk);
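
/*
 * Example (illustrative only, not part of the original file): a caller can
 * build its own printf-style helper by forwarding a va_list to vprintk().
 * The mydrv_log() name below is hypothetical.
 *
 *	static __printf(1, 2) void mydrv_log(const char *fmt, ...)
 *	{
 *		va_list args;
 *
 *		va_start(args, fmt);
 *		vprintk(fmt, args);
 *		va_end(args);
 *	}
 */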
2011
2012int vprintk_default(const char *fmt, va_list args)
2013{
2014	int r;
2015
2016#ifdef CONFIG_KGDB_KDB
2017	/* Allow printk() to be passed to kdb, but avoid recursion. */
2018	if (unlikely(kdb_trap_printk && kdb_printf_cpu < 0)) {
2019		r = vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args);
2020		return r;
2021	}
2022#endif
2023	r = vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
2024
2025	return r;
2026}
2027EXPORT_SYMBOL_GPL(vprintk_default);
2028
2029/**
2030 * printk - print a kernel message
2031 * @fmt: format string
2032 *
2033 * This is printk(). It can be called from any context. We want it to work.
2034 *
2035 * We try to grab the console_lock. If we succeed, it's easy - we log the
2036 * output and call the console drivers.  If we fail to get the semaphore, we
2037 * place the output into the log buffer and return. The current holder of
2038 * the console_sem will notice the new output in console_unlock(); and will
2039 * send it to the consoles before releasing the lock.
2040 *
2041 * One effect of this deferred printing is that code which calls printk() and
2042 * then changes console_loglevel may break. This is because console_loglevel
2043 * is inspected when the actual printing occurs.
2044 *
2045 * See also:
2046 * printf(3)
2047 *
2048 * See the vsnprintf() documentation for format string extensions over C99.
2049 */
2050asmlinkage __visible int printk(const char *fmt, ...)
2051{
2052	va_list args;
2053	int r;
2054
2055	va_start(args, fmt);
2056	r = vprintk_func(fmt, args);
2057	va_end(args);
2058
2059	return r;
2060}
2061EXPORT_SYMBOL(printk);
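
/*
 * Example (illustrative): typical callers pass a KERN_* loglevel prefix and
 * use KERN_CONT for continuation fragments, which are merged through the
 * cont buffer handled above:
 *
 *	printk(KERN_INFO "probing device %d\n", id);
 *	printk(KERN_WARNING "checksum mismatch");
 *	printk(KERN_CONT " (got %#x, want %#x)\n", got, want);
 *
 * Most code uses the pr_info()/pr_warn() wrappers, which expand to printk()
 * with the matching prefix.
 */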
 
 
 
2062
2063#else /* CONFIG_PRINTK */
2064
2065#define LOG_LINE_MAX		0
2066#define PREFIX_MAX		0
2067#define printk_time		false
2068
 
 
 
 
2069static u64 syslog_seq;
2070static u32 syslog_idx;
2071static u64 console_seq;
2072static u32 console_idx;
2073static u64 exclusive_console_stop_seq;
2074static u64 log_first_seq;
2075static u32 log_first_idx;
2076static u64 log_next_seq;
2077static char *log_text(const struct printk_log *msg) { return NULL; }
2078static char *log_dict(const struct printk_log *msg) { return NULL; }
2079static struct printk_log *log_from_idx(u32 idx) { return NULL; }
2080static u32 log_next(u32 idx) { return 0; }
2081static ssize_t msg_print_ext_header(char *buf, size_t size,
2082				    struct printk_log *msg,
2083				    u64 seq) { return 0; }
2084static ssize_t msg_print_ext_body(char *buf, size_t size,
2085				  char *dict, size_t dict_len,
2086				  char *text, size_t text_len) { return 0; }
2087static void console_lock_spinning_enable(void) { }
2088static int console_lock_spinning_disable_and_check(void) { return 0; }
2089static void call_console_drivers(const char *ext_text, size_t ext_len,
2090				 const char *text, size_t len) {}
2091static size_t msg_print_text(const struct printk_log *msg, bool syslog,
2092			     bool time, char *buf, size_t size) { return 0; }
2093static bool suppress_message_printing(int level) { return false; }
2094
2095#endif /* CONFIG_PRINTK */
2096
2097#ifdef CONFIG_EARLY_PRINTK
2098struct console *early_console;
2099
2100asmlinkage __visible void early_printk(const char *fmt, ...)
2101{
2102	va_list ap;
2103	char buf[512];
2104	int n;
2105
2106	if (!early_console)
2107		return;
2108
2109	va_start(ap, fmt);
2110	n = vscnprintf(buf, sizeof(buf), fmt, ap);
2111	va_end(ap);
2112
2113	early_console->write(early_console, buf, n);
2114}
2115#endif
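
/*
 * Example (illustrative): with CONFIG_EARLY_PRINTK enabled and an early
 * console set up (e.g. "earlyprintk=serial,ttyS0,115200" on x86),
 * early_printk() writes straight to that console:
 *
 *	early_printk("early: command line parsed\n");
 */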
2116
2117static int __add_preferred_console(char *name, int idx, char *options,
2118				   char *brl_options)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2119{
2120	struct console_cmdline *c;
2121	int i;
2122
2123	/*
 
 
 
 
 
 
 
 
2124	 *	See if this tty is not yet registered, and
2125	 *	if we have a slot free.
2126	 */
2127	for (i = 0, c = console_cmdline;
2128	     i < MAX_CMDLINECONSOLES && c->name[0];
2129	     i++, c++) {
2130		if (strcmp(c->name, name) == 0 && c->index == idx) {
2131			if (!brl_options)
2132				preferred_console = i;
 
2133			return 0;
2134		}
2135	}
2136	if (i == MAX_CMDLINECONSOLES)
2137		return -E2BIG;
2138	if (!brl_options)
2139		preferred_console = i;
2140	strlcpy(c->name, name, sizeof(c->name));
2141	c->options = options;
 
2142	braille_set_options(c, brl_options);
2143
2144	c->index = idx;
2145	return 0;
2146}
2147
2148static int __init console_msg_format_setup(char *str)
2149{
2150	if (!strcmp(str, "syslog"))
2151		console_msg_format = MSG_FORMAT_SYSLOG;
2152	if (!strcmp(str, "default"))
2153		console_msg_format = MSG_FORMAT_DEFAULT;
2154	return 1;
2155}
2156__setup("console_msg_format=", console_msg_format_setup);
2157
2158/*
2159 * Set up a console.  Called via do_early_param() in init/main.c
2160 * for each "console=" parameter in the boot command line.
2161 */
2162static int __init console_setup(char *str)
2163{
2164	char buf[sizeof(console_cmdline[0].name) + 4]; /* 4 for "ttyS" */
2165	char *s, *options, *brl_options = NULL;
2166	int idx;
2167
 
 
 
 
 
 
 
 
 
 
2168	if (_braille_console_setup(&str, &brl_options))
2169		return 1;
2170
2171	/*
2172	 * Decode str into name, index, options.
2173	 */
2174	if (str[0] >= '0' && str[0] <= '9') {
2175		strcpy(buf, "ttyS");
2176		strncpy(buf + 4, str, sizeof(buf) - 5);
2177	} else {
2178		strncpy(buf, str, sizeof(buf) - 1);
2179	}
2180	buf[sizeof(buf) - 1] = 0;
2181	options = strchr(str, ',');
2182	if (options)
2183		*(options++) = 0;
2184#ifdef __sparc__
2185	if (!strcmp(str, "ttya"))
2186		strcpy(buf, "ttyS0");
2187	if (!strcmp(str, "ttyb"))
2188		strcpy(buf, "ttyS1");
2189#endif
2190	for (s = buf; *s; s++)
2191		if (isdigit(*s) || *s == ',')
2192			break;
2193	idx = simple_strtoul(s, NULL, 10);
2194	*s = 0;
2195
2196	__add_preferred_console(buf, idx, options, brl_options);
2197	console_set_on_cmdline = 1;
2198	return 1;
2199}
2200__setup("console=", console_setup);
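
/*
 * Example (illustrative): "console=ttyS0,115200n8" is decoded by
 * console_setup() into name "ttyS", index 0 and options "115200n8".
 * A bare "console=1" is shorthand for ttyS1.
 */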
2201
2202/**
2203 * add_preferred_console - add a device to the list of preferred consoles.
2204 * @name: device name
2205 * @idx: device index
2206 * @options: options for this console
2207 *
2208 * The last preferred console added will be used for kernel messages
2209 * and stdin/out/err for init.  Normally this is used by console_setup
2210 * above to handle user-supplied console arguments; however it can also
2211 * be used by arch-specific code either to override the user or more
2212 * commonly to provide a default console (e.g. from PROM variables) when
2213 * the user has not supplied one.
2214 */
2215int add_preferred_console(char *name, int idx, char *options)
2216{
2217	return __add_preferred_console(name, idx, options, NULL);
2218}
2219
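/*
 * Example (illustrative): architecture setup code can install a firmware-
 * provided default when the user passed no "console=" option, e.g.:
 *
 *	add_preferred_console("ttyS", 0, "9600n8");
 *
 * The surrounding firmware query is platform specific and omitted here.
 */
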
2220bool console_suspend_enabled = true;
2221EXPORT_SYMBOL(console_suspend_enabled);
2222
2223static int __init console_suspend_disable(char *str)
2224{
2225	console_suspend_enabled = false;
2226	return 1;
2227}
2228__setup("no_console_suspend", console_suspend_disable);
2229module_param_named(console_suspend, console_suspend_enabled,
2230		bool, S_IRUGO | S_IWUSR);
2231MODULE_PARM_DESC(console_suspend, "suspend console during suspend"
2232	" and hibernate operations");
2233
 
 
 
 
 
 
 
 
 
 
 
 
2234/**
2235 * suspend_console - suspend the console subsystem
2236 *
2237 * This disables printk() while we go into suspend states
2238 */
2239void suspend_console(void)
2240{
 
 
2241	if (!console_suspend_enabled)
2242		return;
2243	pr_info("Suspending console(s) (use no_console_suspend to debug)\n");
2244	console_lock();
2245	console_suspended = 1;
2246	up_console_sem();
 
 
 
 
 
 
 
 
 
 
 
2247}
2248
2249void resume_console(void)
2250{
 
 
2251	if (!console_suspend_enabled)
2252		return;
2253	down_console_sem();
2254	console_suspended = 0;
2255	console_unlock();
 
 
 
 
 
 
 
 
 
 
 
2256}
2257
2258/**
2259 * console_cpu_notify - print deferred console messages after CPU hotplug
2260 * @cpu: unused
2261 *
2262 * If printk() is called from a CPU that is not online yet, the messages
2263 * will be printed on the console only if there are CON_ANYTIME consoles.
2264 * This function is called when a new CPU comes online (or fails to come
2265 * up) or goes offline.
2266 */
2267static int console_cpu_notify(unsigned int cpu)
2268{
2269	if (!cpuhp_tasks_frozen) {
2270		/* If trylock fails, someone else is doing the printing */
2271		if (console_trylock())
2272			console_unlock();
2273	}
2274	return 0;
2275}
2276
2277/**
2278 * console_lock - lock the console system for exclusive use.
2279 *
2280 * Acquires a lock which guarantees that the caller has
2281 * exclusive access to the console system and the console_drivers list.
2282 *
2283 * Can sleep, returns nothing.
2284 */
2285void console_lock(void)
2286{
2287	might_sleep();
2288
 
 
 
 
2289	down_console_sem();
2290	if (console_suspended)
2291		return;
2292	console_locked = 1;
2293	console_may_schedule = 1;
2294}
2295EXPORT_SYMBOL(console_lock);
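
/*
 * Example (illustrative): code that walks console_drivers or changes
 * console flags brackets the access with console_lock()/console_unlock(),
 * as console_stop() and console_start() below do:
 *
 *	struct console *con;
 *
 *	console_lock();
 *	for_each_console(con)
 *		pr_info("console %s%d registered\n", con->name, con->index);
 *	console_unlock();
 */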
2296
2297/**
2298 * console_trylock - try to lock the console system for exclusive use.
2299 *
2300 * Try to acquire a lock which guarantees that the caller has exclusive
2301 * access to the console system and the console_drivers list.
2302 *
2303 * returns 1 on success, and 0 on failure to acquire the lock.
2304 */
2305int console_trylock(void)
2306{
2307	if (down_trylock_console_sem())
 
2308		return 0;
2309	if (console_suspended) {
2310		up_console_sem();
2311		return 0;
2312	}
2313	console_locked = 1;
2314	console_may_schedule = 0;
2315	return 1;
2316}
2317EXPORT_SYMBOL(console_trylock);
2318
2319int is_console_locked(void)
2320{
2321	return console_locked;
2322}
2323EXPORT_SYMBOL(is_console_locked);
2324
2325/*
2326 * Check if we have any console that is capable of printing while cpu is
2327 * booting or shutting down. Requires console_sem.
 
 
2328 */
2329static int have_callable_console(void)
2330{
2331	struct console *con;
2332
2333	for_each_console(con)
2334		if ((con->flags & CON_ENABLED) &&
2335				(con->flags & CON_ANYTIME))
2336			return 1;
2337
2338	return 0;
2339}
2340
 
 
2341/*
2342 * Can we actually use the console at this time on this cpu?
 
 
 
 
2343 *
2344 * Console drivers may assume that per-cpu resources have been allocated. So
2345 * unless they're explicitly marked as being able to cope (CON_ANYTIME) don't
2346 * call them until this CPU is officially up.
 
 
 
2347 */
2348static inline int can_use_console(void)
2349{
2350	return cpu_online(raw_smp_processor_id()) || have_callable_console();
2351}
2352
2353/**
2354 * console_unlock - unlock the console system
 
2355 *
2356 * Releases the console_lock which the caller holds on the console system
2357 * and the console driver list.
2358 *
2359 * While the console_lock was held, console output may have been buffered
2360 * by printk().  If this is the case, console_unlock() emits
2361 * the output prior to releasing the lock.
2362 *
2363 * If there is output waiting, we wake /dev/kmsg and syslog() users.
2364 *
2365 * console_unlock() may be called from any context.
 
 
 
 
 
 
 
 
 
2366 */
2367void console_unlock(void)
2368{
2369	static char ext_text[CONSOLE_EXT_LOG_MAX];
2370	static char text[LOG_LINE_MAX + PREFIX_MAX];
 
 
 
2371	unsigned long flags;
2372	bool do_cond_resched, retry;
2373
2374	if (console_suspended) {
2375		up_console_sem();
2376		return;
 
 
 
 
 
 
 
 
 
 
 
 
 
2377	}
2378
2379	/*
2380	 * Console drivers are called with interrupts disabled, so
2381	 * @console_may_schedule should be cleared before; however, we may
2382	 * end up dumping a lot of lines, for example, if called from
2383	 * console registration path, and should invoke cond_resched()
2384	 * between lines if allowable.  Not doing so can cause a very long
2385	 * scheduling stall on a slow console leading to RCU stall and
2386	 * softlockup warnings which exacerbate the issue with more
2387	 * messages practically incapacitating the system.
2388	 *
2389	 * console_trylock() is not able to detect the preemptive
2390	 * context reliably. Therefore the value must be stored before
2391	 * and cleared after the "again" goto label.
2392	 */
2393	do_cond_resched = console_may_schedule;
2394again:
2395	console_may_schedule = 0;
2396
2397	/*
2398	 * We released the console_sem lock, so we need to recheck if
2399	 * cpu is online and (if not) is there at least one CON_ANYTIME
2400	 * console.
2401	 */
2402	if (!can_use_console()) {
2403		console_locked = 0;
2404		up_console_sem();
2405		return;
2406	}
2407
2408	for (;;) {
2409		struct printk_log *msg;
2410		size_t ext_len = 0;
2411		size_t len;
2412
2413		printk_safe_enter_irqsave(flags);
2414		raw_spin_lock(&logbuf_lock);
2415		if (console_seq < log_first_seq) {
2416			len = sprintf(text,
2417				      "** %llu printk messages dropped **\n",
2418				      log_first_seq - console_seq);
2419
2420			/* messages are gone, move to first one */
2421			console_seq = log_first_seq;
2422			console_idx = log_first_idx;
2423		} else {
2424			len = 0;
2425		}
2426skip:
2427		if (console_seq == log_next_seq)
2428			break;
2429
2430		msg = log_from_idx(console_idx);
2431		if (suppress_message_printing(msg->level)) {
2432			/*
2433			 * Skip records that we have buffered and already printed
2434			 * directly to the console when we received them, and
2435			 * records whose level is above the console loglevel.
2436			 */
2437			console_idx = log_next(console_idx);
2438			console_seq++;
2439			goto skip;
2440		}
2441
2442		/* Output to all consoles once old messages are replayed. */
2443		if (unlikely(exclusive_console &&
2444			     console_seq >= exclusive_console_stop_seq)) {
2445			exclusive_console = NULL;
2446		}
2447
2448		len += msg_print_text(msg,
2449				console_msg_format & MSG_FORMAT_SYSLOG,
2450				printk_time, text + len, sizeof(text) - len);
2451		if (nr_ext_console_drivers) {
2452			ext_len = msg_print_ext_header(ext_text,
2453						sizeof(ext_text),
2454						msg, console_seq);
2455			ext_len += msg_print_ext_body(ext_text + ext_len,
2456						sizeof(ext_text) - ext_len,
2457						log_dict(msg), msg->dict_len,
2458						log_text(msg), msg->text_len);
2459		}
2460		console_idx = log_next(console_idx);
2461		console_seq++;
2462		raw_spin_unlock(&logbuf_lock);
2463
2464		/*
2465		 * While actively printing out messages, if another printk()
2466		 * were to occur on another CPU, it may wait for this one to
2467		 * finish. This task can not be preempted if there is a
2468		 * waiter waiting to take over.
2469		 */
2470		console_lock_spinning_enable();
2471
2472		stop_critical_timings();	/* don't trace print latency */
2473		call_console_drivers(ext_text, ext_len, text, len);
2474		start_critical_timings();
2475
2476		if (console_lock_spinning_disable_and_check()) {
2477			printk_safe_exit_irqrestore(flags);
2478			return;
 
 
 
2479		}
 
 
2480
2481		printk_safe_exit_irqrestore(flags);
2482
2483		if (do_cond_resched)
2484			cond_resched();
2485	}
 
2486
2487	console_locked = 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2488
2489	raw_spin_unlock(&logbuf_lock);
 
 
 
 
 
 
 
 
 
 
 
2490
2491	up_console_sem();
 
2492
2493	/*
2494	 * Someone could have filled up the buffer again, so re-check if there's
2495	 * something to flush. In case we cannot trylock the console_sem again,
2496	 * there's a new owner and the console_unlock() from them will do the
2497	 * flush, no worries.
2498	 */
2499	raw_spin_lock(&logbuf_lock);
2500	retry = console_seq != log_next_seq;
2501	raw_spin_unlock(&logbuf_lock);
2502	printk_safe_exit_irqrestore(flags);
 
 
2503
2504	if (retry && console_trylock())
2505		goto again;
 
 
 
 
 
2506}
2507EXPORT_SYMBOL(console_unlock);
2508
2509/**
2510 * console_conditional_schedule - yield the CPU if required
2511 *
2512 * If the console code is currently allowed to sleep, and
2513 * if this CPU should yield the CPU to another task, do
2514 * so here.
2515 *
2516 * Must be called with console_lock() held.
2517 */
2518void __sched console_conditional_schedule(void)
2519{
2520	if (console_may_schedule)
2521		cond_resched();
2522}
2523EXPORT_SYMBOL(console_conditional_schedule);
2524
2525void console_unblank(void)
2526{
 
2527	struct console *c;
 
2528
2529	/*
2530	 * console_unblank can no longer be called in interrupt context unless
2531	 * oops_in_progress is set to 1.
2532	 */
2533	if (oops_in_progress) {
 
 
 
 
 
 
 
 
 
 
2534		if (down_trylock_console_sem() != 0)
2535			return;
2536	} else
2537		console_lock();
2538
2539	console_locked = 1;
2540	console_may_schedule = 0;
2541	for_each_console(c)
2542		if ((c->flags & CON_ENABLED) && c->unblank)
 
 
2543			c->unblank();
 
 
 
2544	console_unlock();
 
 
 
2545}
2546
2547/**
2548 * console_flush_on_panic - flush console content on panic
2549 * @mode: flush all messages in buffer or just the pending ones
2550 *
2551 * Immediately output all pending messages no matter what.
2552 */
2553void console_flush_on_panic(enum con_flush_mode mode)
2554{
 
 
 
 
 
 
 
 
 
 
 
 
 
2555	/*
2556	 * If someone else is holding the console lock, trylock will fail
2557	 * and may_schedule may be set.  Ignore and proceed to unlock so
2558	 * that messages are flushed out.  As this can be called from any
2559	 * context and we don't want to get preempted while flushing,
2560	 * ensure may_schedule is cleared.
2561	 */
2562	console_trylock();
2563	console_may_schedule = 0;
2564
2565	if (mode == CONSOLE_REPLAY_ALL) {
2566		unsigned long flags;
2567
2568		logbuf_lock_irqsave(flags);
2569		console_seq = log_first_seq;
2570		console_idx = log_first_idx;
2571		logbuf_unlock_irqrestore(flags);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2572	}
2573	console_unlock();
 
2574}
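
/*
 * Example (illustrative): panic() typically calls
 *
 *	console_flush_on_panic(CONSOLE_FLUSH_PENDING);
 *
 * while CONSOLE_REPLAY_ALL asks for the whole log buffer to be re-emitted,
 * which the "panic_print" facility can request.
 */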
2575
2576/*
2577 * Return the console tty driver structure and its associated index
2578 */
2579struct tty_driver *console_device(int *index)
2580{
2581	struct console *c;
2582	struct tty_driver *driver = NULL;
 
2583
 
 
 
 
 
2584	console_lock();
2585	for_each_console(c) {
 
 
2586		if (!c->device)
2587			continue;
2588		driver = c->device(c, index);
2589		if (driver)
2590			break;
2591	}
 
 
2592	console_unlock();
2593	return driver;
2594}
2595
2596/*
2597 * Prevent further output on the passed console device so that (for example)
2598 * serial drivers can disable console output before suspending a port, and can
2599 * re-enable output afterwards.
2600 */
2601void console_stop(struct console *console)
2602{
2603	console_lock();
2604	console->flags &= ~CON_ENABLED;
2605	console_unlock();
 
 
 
 
 
 
 
 
 
2606}
2607EXPORT_SYMBOL(console_stop);
2608
2609void console_start(struct console *console)
2610{
2611	console_lock();
2612	console->flags |= CON_ENABLED;
2613	console_unlock();
 
2614}
2615EXPORT_SYMBOL(console_start);
2616
2617static int __read_mostly keep_bootcon;
2618
2619static int __init keep_bootcon_setup(char *str)
2620{
2621	keep_bootcon = 1;
2622	pr_info("debug: skip boot console de-registration.\n");
2623
2624	return 0;
2625}
2626
2627early_param("keep_bootcon", keep_bootcon_setup);
2628
2629/*
2630 * The console driver calls this routine during kernel initialization
2631 * to register the console printing procedure with printk() and to
2632 * print any messages that were printed by the kernel before the
2633 * console driver was initialized.
2634 *
2635 * This can happen pretty early during the boot process (because of
2636 * early_printk) - sometimes before setup_arch() completes - be careful
2637 * of what kernel features are used - they may not be initialised yet.
2638 *
2639 * There are two types of consoles - bootconsoles (early_printk) and
2640 * "real" consoles (everything which is not a bootconsole) which are
2641 * handled differently.
2642 *  - Any number of bootconsoles can be registered at any time.
2643 *  - As soon as a "real" console is registered, all bootconsoles
2644 *    will be unregistered automatically.
2645 *  - Once a "real" console is registered, any attempt to register a
2646 *    bootconsole will be rejected.
2647 */
2648void register_console(struct console *newcon)
2649{
2650	int i;
2651	unsigned long flags;
2652	struct console *bcon = NULL;
2653	struct console_cmdline *c;
2654	static bool has_preferred;
2655
2656	if (console_drivers)
2657		for_each_console(bcon)
2658			if (WARN(bcon == newcon,
2659					"console '%s%d' already registered\n",
2660					bcon->name, bcon->index))
2661				return;
2662
2663	/*
2664	 * before we register a new CON_BOOT console, make sure we don't
2665	 * already have a valid console
2666	 */
2667	if (console_drivers && newcon->flags & CON_BOOT) {
2668		/* find the last or real console */
2669		for_each_console(bcon) {
2670			if (!(bcon->flags & CON_BOOT)) {
2671				pr_info("Too late to register bootconsole %s%d\n",
2672					newcon->name, newcon->index);
2673				return;
2674			}
2675		}
2676	}
2677
2678	if (console_drivers && console_drivers->flags & CON_BOOT)
2679		bcon = console_drivers;
 
 
 
2680
2681	if (!has_preferred || bcon || !console_drivers)
2682		has_preferred = preferred_console >= 0;
 
 
 
 
2683
2684	/*
2685	 *	See if we want to use this console driver. If we
2686	 *	didn't select a console we take the first one
2687	 *	that registers here.
2688	 */
2689	if (!has_preferred) {
2690		if (newcon->index < 0)
2691			newcon->index = 0;
2692		if (newcon->setup == NULL ||
2693		    newcon->setup(newcon, NULL) == 0) {
2694			newcon->flags |= CON_ENABLED;
2695			if (newcon->device) {
2696				newcon->flags |= CON_CONSDEV;
2697				has_preferred = true;
2698			}
2699		}
2700	}
2701
2702	/*
2703	 *	See if this console matches one we selected on
2704	 *	the command line.
 
 
 
 
 
 
 
 
2705	 */
2706	for (i = 0, c = console_cmdline;
2707	     i < MAX_CMDLINECONSOLES && c->name[0];
2708	     i++, c++) {
2709		if (!newcon->match ||
2710		    newcon->match(newcon, c->name, c->index, c->options) != 0) {
2711			/* default matching */
2712			BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
2713			if (strcmp(c->name, newcon->name) != 0)
2714				continue;
2715			if (newcon->index >= 0 &&
2716			    newcon->index != c->index)
2717				continue;
2718			if (newcon->index < 0)
2719				newcon->index = c->index;
2720
2721			if (_braille_register_console(newcon, c))
2722				return;
2723
2724			if (newcon->setup &&
2725			    newcon->setup(newcon, c->options) != 0)
2726				break;
2727		}
2728
2729		newcon->flags |= CON_ENABLED;
2730		if (i == preferred_console) {
2731			newcon->flags |= CON_CONSDEV;
2732			has_preferred = true;
2733		}
2734		break;
2735	}
2736
2737	if (!(newcon->flags & CON_ENABLED))
2738		return;
 
 
 
 
 
 
 
 
 
 
 
2739
2740	/*
2741	 * If we have a bootconsole, and are switching to a real console,
2742	 * don't print everything out again, since when the boot console and
2743	 * the real console are the same physical device, it's annoying to
2744	 * see the early boot messages twice.
2745	 */
2746	if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV))
 
2747		newcon->flags &= ~CON_PRINTBUFFER;
 
 
 
 
 
 
 
2748
2749	/*
2750	 *	Put this console in the list - keep the
2751	 *	preferred driver at the head of the list.
2752	 */
2753	console_lock();
2754	if ((newcon->flags & CON_CONSDEV) || console_drivers == NULL) {
2755		newcon->next = console_drivers;
2756		console_drivers = newcon;
2757		if (newcon->next)
2758			newcon->next->flags &= ~CON_CONSDEV;
 
 
 
 
2759	} else {
2760		newcon->next = console_drivers->next;
2761		console_drivers->next = newcon;
2762	}
2763
2764	if (newcon->flags & CON_EXTENDED)
2765		nr_ext_console_drivers++;
 
 
 
2766
2767	if (newcon->flags & CON_PRINTBUFFER) {
2768		/*
2769		 * console_unlock() will print out the buffered messages
2770		 * for us.
2771		 */
2772		logbuf_lock_irqsave(flags);
2773		console_seq = syslog_seq;
2774		console_idx = syslog_idx;
2775		/*
2776		 * We're about to replay the log buffer.  Only do this to the
2777		 * just-registered console to avoid excessive message spam to
2778		 * the already-registered consoles.
2779		 *
2780		 * Set exclusive_console with disabled interrupts to reduce
2781		 * race window with eventual console_flush_on_panic() that
2782		 * ignores console_lock.
2783		 */
2784		exclusive_console = newcon;
2785		exclusive_console_stop_seq = console_seq;
2786		logbuf_unlock_irqrestore(flags);
2787	}
2788	console_unlock();
2789	console_sysfs_notify();
2790
2791	/*
2792	 * By unregistering the bootconsoles after we enable the real console
2793	 * we get the "console xxx enabled" message on all the consoles -
2794	 * boot consoles, real consoles, etc - this is to ensure that end
2795	 * users know there might be something in the kernel's log buffer that
2796	 * went to the bootconsole (that they do not see on the real console)
2797	 */
2798	pr_info("%sconsole [%s%d] enabled\n",
2799		(newcon->flags & CON_BOOT) ? "boot" : "" ,
2800		newcon->name, newcon->index);
2801	if (bcon &&
2802	    ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) &&
2803	    !keep_bootcon) {
2804		/* We need to iterate through all boot consoles, to make
2805		 * sure we print everything out, before we unregister them.
2806		 */
2807		for_each_console(bcon)
2808			if (bcon->flags & CON_BOOT)
2809				unregister_console(bcon);
2810	}
 
 
2811}
2812EXPORT_SYMBOL(register_console);
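
/*
 * Example (illustrative sketch): a minimal console driver fills in a
 * struct console and registers it. The mycon and hw_putc() names below are
 * hypothetical; a real driver also wires up ->device for tty access.
 *
 *	static void mycon_write(struct console *con, const char *s, unsigned int n)
 *	{
 *		while (n--)
 *			hw_putc(*s++);
 *	}
 *
 *	static struct console mycon = {
 *		.name	= "mycon",
 *		.write	= mycon_write,
 *		.flags	= CON_PRINTBUFFER,
 *		.index	= -1,
 *	};
 *
 *	register_console(&mycon);
 */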
2813
2814int unregister_console(struct console *console)
 
2815{
2816	struct console *a, *b;
2817	int res;
2818
2819	pr_info("%sconsole [%s%d] disabled\n",
2820		(console->flags & CON_BOOT) ? "boot" : "" ,
2821		console->name, console->index);
2822
2823	res = _braille_unregister_console(console);
2824	if (res)
2825		return res;
 
 
2826
2827	res = 1;
2828	console_lock();
2829	if (console_drivers == console) {
2830		console_drivers = console->next;
2831		res = 0;
2832	} else if (console_drivers) {
2833		for (a = console_drivers->next, b = console_drivers;
2834		     a; b = a, a = b->next) {
2835			if (a == console) {
2836				b->next = a->next;
2837				res = 0;
2838				break;
2839			}
2840		}
2841	}
2842
2843	if (!res && (console->flags & CON_EXTENDED))
2844		nr_ext_console_drivers--;
 
 
2845
2846	/*
 
2847	 * If this isn't the last console and it has CON_CONSDEV set, we
2848	 * need to set it on the next preferred console.
 
 
 
 
2849	 */
2850	if (console_drivers != NULL && console->flags & CON_CONSDEV)
2851		console_drivers->flags |= CON_CONSDEV;
 
 
 
 
 
 
 
 
 
 
2852
2853	console->flags &= ~CON_ENABLED;
2854	console_unlock();
2855	console_sysfs_notify();
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2856	return res;
2857}
2858EXPORT_SYMBOL(unregister_console);
2859
2860/*
2861 * Initialize the console device. This is called *early*, so
2862 * we can't necessarily depend on lots of kernel help here.
2863 * Just do some early initializations, and do the complex setup
2864 * later.
2865 */
2866void __init console_init(void)
2867{
2868	int ret;
2869	initcall_t call;
2870	initcall_entry_t *ce;
2871
2872	/* Set up the default TTY line discipline. */
2873	n_tty_init();
2874
2875	/*
2876	 * set up the console device so that later boot sequences can
2877	 * inform about problems etc..
2878	 */
2879	ce = __con_initcall_start;
2880	trace_initcall_level("console");
2881	while (ce < __con_initcall_end) {
2882		call = initcall_from_entry(ce);
2883		trace_initcall_start(call);
2884		ret = call();
2885		trace_initcall_finish(call, ret);
2886		ce++;
2887	}
2888}
2889
2890/*
2891 * Some boot consoles access data that is in the init section and which will
2892 * be discarded after the initcalls have been run. To make sure that no code
2893 * will access this data, unregister the boot consoles in a late initcall.
2894 *
2895 * If for some reason, such as deferred probe or the driver being a loadable
2896 * module, the real console hasn't registered yet at this point, there will
2897 * be a brief interval in which no messages are logged to the console, which
2898 * makes it difficult to diagnose problems that occur during this time.
2899 *
2900 * To mitigate this problem somewhat, only unregister consoles whose memory
2901 * intersects with the init section. Note that all other boot consoles will
2902 * get unregistered when the real preferred console is registered.
2903 */
2904static int __init printk_late_init(void)
2905{
 
2906	struct console *con;
2907	int ret;
2908
2909	for_each_console(con) {
 
2910		if (!(con->flags & CON_BOOT))
2911			continue;
2912
2913		/* Check addresses that might be used for enabled consoles. */
2914		if (init_section_intersects(con, sizeof(*con)) ||
2915		    init_section_contains(con->write, 0) ||
2916		    init_section_contains(con->read, 0) ||
2917		    init_section_contains(con->device, 0) ||
2918		    init_section_contains(con->unblank, 0) ||
2919		    init_section_contains(con->data, 0)) {
2920			/*
2921			 * Please, consider moving the reported consoles out
2922			 * of the init section.
2923			 */
2924			pr_warn("bootconsole [%s%d] uses init memory and must be disabled even before the real one is ready\n",
2925				con->name, con->index);
2926			unregister_console(con);
2927		}
2928	}
 
 
2929	ret = cpuhp_setup_state_nocalls(CPUHP_PRINTK_DEAD, "printk:dead", NULL,
2930					console_cpu_notify);
2931	WARN_ON(ret < 0);
2932	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "printk:online",
2933					console_cpu_notify, NULL);
2934	WARN_ON(ret < 0);
 
2935	return 0;
2936}
2937late_initcall(printk_late_init);
2938
2939#if defined CONFIG_PRINTK
2940/*
2941 * Delayed printk version, for scheduler-internal messages:
2942 */
2943#define PRINTK_PENDING_WAKEUP	0x01
2944#define PRINTK_PENDING_OUTPUT	0x02
2945
2946static DEFINE_PER_CPU(int, printk_pending);
2947
2948static void wake_up_klogd_work_func(struct irq_work *irq_work)
2949{
2950	int pending = __this_cpu_xchg(printk_pending, 0);
2951
2952	if (pending & PRINTK_PENDING_OUTPUT) {
2953		/* If trylock fails, someone else is doing the printing */
2954		if (console_trylock())
2955			console_unlock();
2956	}
2957
2958	if (pending & PRINTK_PENDING_WAKEUP)
2959		wake_up_interruptible(&log_wait);
2960}
2961
2962static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
2963	.func = wake_up_klogd_work_func,
2964	.flags = IRQ_WORK_LAZY,
2965};
2966
2967void wake_up_klogd(void)
2968{
 
 
 
2969	preempt_disable();
2970	if (waitqueue_active(&log_wait)) {
2971		this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
 
 
 
 
 
 
 
 
 
 
 
 
2972		irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
2973	}
2974	preempt_enable();
2975}
2976
2977void defer_console_output(void)
 
 
 
 
 
 
 
 
 
 
2978{
2979	preempt_disable();
2980	__this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
2981	irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
2982	preempt_enable();
2983}
2984
2985int vprintk_deferred(const char *fmt, va_list args)
 
 
 
 
 
 
 
 
 
 
 
 
2986{
2987	int r;
 
 
 
 
 
2988
2989	r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args);
 
2990	defer_console_output();
 
2991
2992	return r;
 
 
2993}
2994
2995int printk_deferred(const char *fmt, ...)
2996{
2997	va_list args;
2998	int r;
2999
3000	va_start(args, fmt);
3001	r = vprintk_deferred(fmt, args);
3002	va_end(args);
3003
3004	return r;
3005}
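
/*
 * Example (illustrative): code holding scheduler or timekeeping locks, where
 * a synchronous console flush or wakeup could recurse, uses the deferred
 * variant:
 *
 *	printk_deferred(KERN_WARNING "clock went backwards on CPU %d\n", cpu);
 *
 * The record is stored immediately; the console flush happens later from
 * irq_work via wake_up_klogd_work_func().
 */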
3006
3007/*
3008 * printk rate limiting, lifted from the networking subsystem.
3009 *
3010 * This enforces a rate limit: not more than 10 kernel messages
3011 * every 5s to make a denial-of-service attack impossible.
3012 */
3013DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);
3014
3015int __printk_ratelimit(const char *func)
3016{
3017	return ___ratelimit(&printk_ratelimit_state, func);
3018}
3019EXPORT_SYMBOL(__printk_ratelimit);
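
/*
 * Example (illustrative): callers use the printk_ratelimit() macro, which
 * passes __func__ to __printk_ratelimit():
 *
 *	if (printk_ratelimit())
 *		pr_warn("dropping malformed packet\n");
 */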
3020
3021/**
3022 * printk_timed_ratelimit - caller-controlled printk ratelimiting
3023 * @caller_jiffies: pointer to caller's state
3024 * @interval_msecs: minimum interval between prints
3025 *
3026 * printk_timed_ratelimit() returns true if more than @interval_msecs
3027 * milliseconds have elapsed since the last time printk_timed_ratelimit()
3028 * returned true.
3029 */
3030bool printk_timed_ratelimit(unsigned long *caller_jiffies,
3031			unsigned int interval_msecs)
3032{
3033	unsigned long elapsed = jiffies - *caller_jiffies;
3034
3035	if (*caller_jiffies && elapsed <= msecs_to_jiffies(interval_msecs))
3036		return false;
3037
3038	*caller_jiffies = jiffies;
3039	return true;
3040}
3041EXPORT_SYMBOL(printk_timed_ratelimit);
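
/*
 * Example (illustrative): each call site keeps its own timestamp, so the
 * limit is per caller rather than global:
 *
 *	static unsigned long last_warn;
 *
 *	if (printk_timed_ratelimit(&last_warn, 5000))
 *		pr_info("still waiting for firmware\n");
 */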
3042
3043static DEFINE_SPINLOCK(dump_list_lock);
3044static LIST_HEAD(dump_list);
3045
3046/**
3047 * kmsg_dump_register - register a kernel log dumper.
3048 * @dumper: pointer to the kmsg_dumper structure
3049 *
3050 * Adds a kernel log dumper to the system. The dump callback in the
3051 * structure will be called when the kernel oopses or panics and must be
3052 * set. Returns zero on success and %-EINVAL or %-EBUSY otherwise.
3053 */
3054int kmsg_dump_register(struct kmsg_dumper *dumper)
3055{
3056	unsigned long flags;
3057	int err = -EBUSY;
3058
3059	/* The dump callback needs to be set */
3060	if (!dumper->dump)
3061		return -EINVAL;
3062
3063	spin_lock_irqsave(&dump_list_lock, flags);
3064	/* Don't allow registering multiple times */
3065	if (!dumper->registered) {
3066		dumper->registered = 1;
3067		list_add_tail_rcu(&dumper->list, &dump_list);
3068		err = 0;
3069	}
3070	spin_unlock_irqrestore(&dump_list_lock, flags);
3071
3072	return err;
3073}
3074EXPORT_SYMBOL_GPL(kmsg_dump_register);
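
/*
 * Example (illustrative sketch): a dumper supplies a dump() callback and can
 * restrict itself to panics via max_reason. save_line() is a hypothetical
 * stand-in for writing to persistent storage.
 *
 *	static void mydump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason)
 *	{
 *		static char line[1024];
 *		size_t len;
 *
 *		while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len))
 *			save_line(line, len);
 *	}
 *
 *	static struct kmsg_dumper my_dumper = {
 *		.dump		= mydump,
 *		.max_reason	= KMSG_DUMP_PANIC,
 *	};
 *
 *	kmsg_dump_register(&my_dumper);
 */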
3075
3076/**
3077 * kmsg_dump_unregister - unregister a kmsg dumper.
3078 * @dumper: pointer to the kmsg_dumper structure
3079 *
3080 * Removes a dump device from the system. Returns zero on success and
3081 * %-EINVAL otherwise.
3082 */
3083int kmsg_dump_unregister(struct kmsg_dumper *dumper)
3084{
3085	unsigned long flags;
3086	int err = -EINVAL;
3087
3088	spin_lock_irqsave(&dump_list_lock, flags);
3089	if (dumper->registered) {
3090		dumper->registered = 0;
3091		list_del_rcu(&dumper->list);
3092		err = 0;
3093	}
3094	spin_unlock_irqrestore(&dump_list_lock, flags);
3095	synchronize_rcu();
3096
3097	return err;
3098}
3099EXPORT_SYMBOL_GPL(kmsg_dump_unregister);
3100
3101static bool always_kmsg_dump;
3102module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);
3103
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3104/**
3105 * kmsg_dump - dump kernel log to kernel message dumpers.
3106 * @reason: the reason (oops, panic etc) for dumping
3107 *
3108 * Call each of the registered dumper's dump() callback, which can
3109 * retrieve the kmsg records with kmsg_dump_get_line() or
3110 * kmsg_dump_get_buffer().
3111 */
3112void kmsg_dump(enum kmsg_dump_reason reason)
3113{
3114	struct kmsg_dumper *dumper;
3115	unsigned long flags;
3116
3117	if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump)
3118		return;
3119
3120	rcu_read_lock();
3121	list_for_each_entry_rcu(dumper, &dump_list, list) {
3122		if (dumper->max_reason && reason > dumper->max_reason)
3123			continue;
3124
3125		/* initialize iterator with data about the stored records */
3126		dumper->active = true;
3127
3128		logbuf_lock_irqsave(flags);
3129		dumper->cur_seq = clear_seq;
3130		dumper->cur_idx = clear_idx;
3131		dumper->next_seq = log_next_seq;
3132		dumper->next_idx = log_next_idx;
3133		logbuf_unlock_irqrestore(flags);
 
 
 
 
3134
3135		/* invoke dumper which will iterate over records */
3136		dumper->dump(dumper, reason);
3137
3138		/* reset iterator */
3139		dumper->active = false;
3140	}
3141	rcu_read_unlock();
3142}
3143
3144/**
3145 * kmsg_dump_get_line_nolock - retrieve one kmsg log line (unlocked version)
3146 * @dumper: registered kmsg dumper
3147 * @syslog: include the "<4>" prefixes
3148 * @line: buffer to copy the line to
3149 * @size: maximum size of the buffer
3150 * @len: length of line placed into buffer
3151 *
3152 * Start at the beginning of the kmsg buffer, with the oldest kmsg
3153 * record, and copy one record into the provided buffer.
3154 *
3155 * Consecutive calls will return the next available record moving
3156 * towards the end of the buffer with the youngest messages.
3157 *
3158 * A return value of FALSE indicates that there are no more records to
3159 * read.
3160 *
3161 * The function is similar to kmsg_dump_get_line(), but grabs no locks.
3162 */
3163bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
3164			       char *line, size_t size, size_t *len)
3165{
3166	struct printk_log *msg;
 
 
 
3167	size_t l = 0;
3168	bool ret = false;
3169
3170	if (!dumper->active)
3171		goto out;
3172
3173	if (dumper->cur_seq < log_first_seq) {
3174		/* messages are gone, move to first available one */
3175		dumper->cur_seq = log_first_seq;
3176		dumper->cur_idx = log_first_idx;
3177	}
3178
3179	/* last entry */
3180	if (dumper->cur_seq >= log_next_seq)
3181		goto out;
 
 
 
 
 
 
 
 
 
3182
3183	msg = log_from_idx(dumper->cur_idx);
3184	l = msg_print_text(msg, syslog, printk_time, line, size);
3185
3186	dumper->cur_idx = log_next(dumper->cur_idx);
3187	dumper->cur_seq++;
3188	ret = true;
3189out:
3190	if (len)
3191		*len = l;
3192	return ret;
3193}
3194
3195/**
3196 * kmsg_dump_get_line - retrieve one kmsg log line
3197 * @dumper: registered kmsg dumper
3198 * @syslog: include the "<4>" prefixes
3199 * @line: buffer to copy the line to
3200 * @size: maximum size of the buffer
3201 * @len: length of line placed into buffer
3202 *
3203 * Start at the beginning of the kmsg buffer, with the oldest kmsg
3204 * record, and copy one record into the provided buffer.
3205 *
3206 * Consecutive calls will return the next available record moving
3207 * towards the end of the buffer with the youngest messages.
3208 *
3209 * A return value of FALSE indicates that there are no more records to
3210 * read.
3211 */
3212bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
3213			char *line, size_t size, size_t *len)
3214{
3215	unsigned long flags;
3216	bool ret;
3217
3218	logbuf_lock_irqsave(flags);
3219	ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len);
3220	logbuf_unlock_irqrestore(flags);
3221
3222	return ret;
3223}
3224EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
3225
3226/**
3227 * kmsg_dump_get_buffer - copy kmsg log lines
3228 * @dumper: registered kmsg dumper
3229 * @syslog: include the "<4>" prefixes
3230 * @buf: buffer to copy the line to
3231 * @size: maximum size of the buffer
3232 * @len: length of line placed into buffer
3233 *
3234 * Start at the end of the kmsg buffer and fill the provided buffer
3235 * with as many of the *youngest* kmsg records as fit into it.
3236 * If the buffer is large enough, all available kmsg records will be
3237 * copied with a single call.
3238 *
3239 * Consecutive calls will fill the buffer with the next block of
3240 * available older records, not including the earlier retrieved ones.
3241 *
3242 * A return value of FALSE indicates that there are no more records to
3243 * read.
3244 */
3245bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
3246			  char *buf, size_t size, size_t *len)
3247{
3248	unsigned long flags;
 
 
3249	u64 seq;
3250	u32 idx;
3251	u64 next_seq;
3252	u32 next_idx;
3253	size_t l = 0;
3254	bool ret = false;
3255	bool time = printk_time;
3256
3257	if (!dumper->active)
3258		goto out;
3259
3260	logbuf_lock_irqsave(flags);
3261	if (dumper->cur_seq < log_first_seq) {
3262		/* messages are gone, move to first available one */
3263		dumper->cur_seq = log_first_seq;
3264		dumper->cur_idx = log_first_idx;
 
 
 
3265	}
3266
3267	/* last entry */
3268	if (dumper->cur_seq >= dumper->next_seq) {
3269		logbuf_unlock_irqrestore(flags);
3270		goto out;
3271	}
3272
3273	/* calculate length of entire buffer */
3274	seq = dumper->cur_seq;
3275	idx = dumper->cur_idx;
3276	while (seq < dumper->next_seq) {
3277		struct printk_log *msg = log_from_idx(idx);
3278
3279		l += msg_print_text(msg, true, time, NULL, 0);
3280		idx = log_next(idx);
3281		seq++;
3282	}
 
 
 
 
3283
3284	/* move first record forward until length fits into the buffer */
3285	seq = dumper->cur_seq;
3286	idx = dumper->cur_idx;
3287	while (l >= size && seq < dumper->next_seq) {
3288		struct printk_log *msg = log_from_idx(idx);
3289
3290		l -= msg_print_text(msg, true, time, NULL, 0);
3291		idx = log_next(idx);
3292		seq++;
3293	}
3294
3295	/* last message in next iteration */
3296	next_seq = seq;
3297	next_idx = idx;
3298
3299	l = 0;
3300	while (seq < dumper->next_seq) {
3301		struct printk_log *msg = log_from_idx(idx);
3302
3303		l += msg_print_text(msg, syslog, time, buf + l, size - l);
3304		idx = log_next(idx);
3305		seq++;
3306	}
3307
3308	dumper->next_seq = next_seq;
3309	dumper->next_idx = next_idx;
3310	ret = true;
3311	logbuf_unlock_irqrestore(flags);
3312out:
3313	if (len)
3314		*len = l;
3315	return ret;
3316}
3317EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
3318
3319/**
3320 * kmsg_dump_rewind_nolock - reset the iterator (unlocked version)
3321 * @dumper: registered kmsg dumper
3322 *
3323 * Reset the dumper's iterator so that kmsg_dump_get_line() and
3324 * kmsg_dump_get_buffer() can be called again and used multiple
3325 * times within the same dumper.dump() callback.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3326 *
3327 * The function is similar to kmsg_dump_rewind(), but grabs no locks.
3328 */
3329void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
3330{
3331	dumper->cur_seq = clear_seq;
3332	dumper->cur_idx = clear_idx;
3333	dumper->next_seq = log_next_seq;
3334	dumper->next_idx = log_next_idx;
3335}
 
3336
3337/**
3338 * kmsg_dump_rewind - reset the iterator
3339 * @dumper: registered kmsg dumper
3340 *
3341 * Reset the dumper's iterator so that kmsg_dump_get_line() and
3342 * kmsg_dump_get_buffer() can be called again and used multiple
3343 * times within the same dumper.dump() callback.
 
 
 
3344 */
3345void kmsg_dump_rewind(struct kmsg_dumper *dumper)
3346{
3347	unsigned long flags;
3348
3349	logbuf_lock_irqsave(flags);
3350	kmsg_dump_rewind_nolock(dumper);
3351	logbuf_unlock_irqrestore(flags);
 
 
 
 
3352}
3353EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
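
/*
 * Example (illustrative): a dump() callback that needs several passes over
 * the same records rewinds the iterator between them:
 *
 *	kmsg_dump_get_buffer(dumper, false, buf, sizeof(buf), &len);
 *	kmsg_dump_rewind(dumper);
 *	kmsg_dump_get_buffer(dumper, true, buf, sizeof(buf), &len);
 */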
3354
3355#endif
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/kernel/printk.c
   4 *
   5 *  Copyright (C) 1991, 1992  Linus Torvalds
   6 *
   7 * Modified to make sys_syslog() more flexible: added commands to
   8 * return the last 4k of kernel messages, regardless of whether
   9 * they've been read or not.  Added option to suppress kernel printk's
  10 * to the console.  Added hook for sending the console messages
  11 * elsewhere, in preparation for a serial line console (someday).
  12 * Ted Ts'o, 2/11/93.
  13 * Modified for sysctl support, 1/8/97, Chris Horn.
  14 * Fixed SMP synchronization, 08/08/99, Manfred Spraul
  15 *     manfred@colorfullife.com
  16 * Rewrote bits to get rid of console_lock
  17 *	01Mar01 Andrew Morton
  18 */
  19
  20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  21
  22#include <linux/kernel.h>
  23#include <linux/mm.h>
  24#include <linux/tty.h>
  25#include <linux/tty_driver.h>
  26#include <linux/console.h>
  27#include <linux/init.h>
  28#include <linux/jiffies.h>
  29#include <linux/nmi.h>
  30#include <linux/module.h>
  31#include <linux/moduleparam.h>
  32#include <linux/delay.h>
  33#include <linux/smp.h>
  34#include <linux/security.h>
  35#include <linux/memblock.h>
  36#include <linux/syscalls.h>
  37#include <linux/vmcore_info.h>
 
  38#include <linux/ratelimit.h>
  39#include <linux/kmsg_dump.h>
  40#include <linux/syslog.h>
  41#include <linux/cpu.h>
  42#include <linux/rculist.h>
  43#include <linux/poll.h>
  44#include <linux/irq_work.h>
  45#include <linux/ctype.h>
  46#include <linux/uio.h>
  47#include <linux/sched/clock.h>
  48#include <linux/sched/debug.h>
  49#include <linux/sched/task_stack.h>
  50
  51#include <linux/uaccess.h>
  52#include <asm/sections.h>
  53
  54#include <trace/events/initcall.h>
  55#define CREATE_TRACE_POINTS
  56#include <trace/events/printk.h>
  57
  58#include "printk_ringbuffer.h"
  59#include "console_cmdline.h"
  60#include "braille.h"
  61#include "internal.h"
  62
  63int console_printk[4] = {
  64	CONSOLE_LOGLEVEL_DEFAULT,	/* console_loglevel */
  65	MESSAGE_LOGLEVEL_DEFAULT,	/* default_message_loglevel */
  66	CONSOLE_LOGLEVEL_MIN,		/* minimum_console_loglevel */
  67	CONSOLE_LOGLEVEL_DEFAULT,	/* default_console_loglevel */
  68};
  69EXPORT_SYMBOL_GPL(console_printk);
  70
  71atomic_t ignore_console_lock_warning __read_mostly = ATOMIC_INIT(0);
  72EXPORT_SYMBOL(ignore_console_lock_warning);
  73
  74EXPORT_TRACEPOINT_SYMBOL_GPL(console);
  75
  76/*
  77 * Low level drivers may need that to know if they can schedule in
  78 * their unblank() callback or not. So let's export it.
  79 */
  80int oops_in_progress;
  81EXPORT_SYMBOL(oops_in_progress);
  82
  83/*
  84 * console_mutex protects console_list updates and console->flags updates.
  85 * The flags are synchronized only for consoles that are registered, i.e.
  86 * accessible via the console list.
  87 */
  88static DEFINE_MUTEX(console_mutex);
  89
  90/*
  91 * console_sem protects updates to console->seq
  92 * and also provides serialization for console printing.
  93 */
  94static DEFINE_SEMAPHORE(console_sem, 1);
  95HLIST_HEAD(console_list);
  96EXPORT_SYMBOL_GPL(console_list);
  97DEFINE_STATIC_SRCU(console_srcu);
  98
  99/*
 100 * System may need to suppress printk message under certain
 101 * circumstances, like after kernel panic happens.
 102 */
 103int __read_mostly suppress_printk;
 104
 105#ifdef CONFIG_LOCKDEP
 106static struct lockdep_map console_lock_dep_map = {
 107	.name = "console_lock"
 108};
 109
 110void lockdep_assert_console_list_lock_held(void)
 111{
 112	lockdep_assert_held(&console_mutex);
 113}
 114EXPORT_SYMBOL(lockdep_assert_console_list_lock_held);
 115#endif
 116
 117#ifdef CONFIG_DEBUG_LOCK_ALLOC
 118bool console_srcu_read_lock_is_held(void)
 119{
 120	return srcu_read_lock_held(&console_srcu);
 121}
 122EXPORT_SYMBOL(console_srcu_read_lock_is_held);
 123#endif
 124
 125enum devkmsg_log_bits {
 126	__DEVKMSG_LOG_BIT_ON = 0,
 127	__DEVKMSG_LOG_BIT_OFF,
 128	__DEVKMSG_LOG_BIT_LOCK,
 129};
 130
 131enum devkmsg_log_masks {
 132	DEVKMSG_LOG_MASK_ON             = BIT(__DEVKMSG_LOG_BIT_ON),
 133	DEVKMSG_LOG_MASK_OFF            = BIT(__DEVKMSG_LOG_BIT_OFF),
 134	DEVKMSG_LOG_MASK_LOCK           = BIT(__DEVKMSG_LOG_BIT_LOCK),
 135};
 136
 137/* Keep both the 'on' and 'off' bits clear, i.e. ratelimit by default: */
 138#define DEVKMSG_LOG_MASK_DEFAULT	0
 139
 140static unsigned int __read_mostly devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;
 141
 142static int __control_devkmsg(char *str)
 143{
 144	size_t len;
 145
 146	if (!str)
 147		return -EINVAL;
 148
 149	len = str_has_prefix(str, "on");
 150	if (len) {
 151		devkmsg_log = DEVKMSG_LOG_MASK_ON;
 152		return len;
 153	}
 154
 155	len = str_has_prefix(str, "off");
 156	if (len) {
 157		devkmsg_log = DEVKMSG_LOG_MASK_OFF;
 158		return len;
 159	}
 160
 161	len = str_has_prefix(str, "ratelimit");
 162	if (len) {
 163		devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;
 164		return len;
 165	}
 166
 167	return -EINVAL;
 168}
 169
 170static int __init control_devkmsg(char *str)
 171{
 172	if (__control_devkmsg(str) < 0) {
 173		pr_warn("printk.devkmsg: bad option string '%s'\n", str);
 174		return 1;
 175	}
 176
 177	/*
 178	 * Set sysctl string accordingly:
 179	 */
 180	if (devkmsg_log == DEVKMSG_LOG_MASK_ON)
 181		strcpy(devkmsg_log_str, "on");
 182	else if (devkmsg_log == DEVKMSG_LOG_MASK_OFF)
 183		strcpy(devkmsg_log_str, "off");
 184	/* else "ratelimit" which is set by default. */
 185
 186	/*
 187	 * Sysctl cannot change it anymore. The kernel command line setting of
 188	 * this parameter is to force the setting to be permanent throughout the
 189	 * runtime of the system. This is a precation measure against userspace
 190	 * trying to be a smarta** and attempting to change it up on us.
 191	 */
 192	devkmsg_log |= DEVKMSG_LOG_MASK_LOCK;
 193
 194	return 1;
 195}
 196__setup("printk.devkmsg=", control_devkmsg);
 197
 198char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE] = "ratelimit";
 199#if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
 200int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write,
 201			      void *buffer, size_t *lenp, loff_t *ppos)
 202{
 203	char old_str[DEVKMSG_STR_MAX_SIZE];
 204	unsigned int old;
 205	int err;
 206
 207	if (write) {
 208		if (devkmsg_log & DEVKMSG_LOG_MASK_LOCK)
 209			return -EINVAL;
 210
 211		old = devkmsg_log;
 212		strncpy(old_str, devkmsg_log_str, DEVKMSG_STR_MAX_SIZE);
 213	}
 214
 215	err = proc_dostring(table, write, buffer, lenp, ppos);
 216	if (err)
 217		return err;
 218
 219	if (write) {
 220		err = __control_devkmsg(devkmsg_log_str);
 221
 222		/*
 223		 * Do not accept an unknown string OR a known string with
 224		 * trailing crap...
 225		 */
 226		if (err < 0 || (err + 1 != *lenp)) {
 227
 228			/* ... and restore old setting. */
 229			devkmsg_log = old;
 230			strncpy(devkmsg_log_str, old_str, DEVKMSG_STR_MAX_SIZE);
 231
 232			return -EINVAL;
 233		}
 234	}
 235
 236	return 0;
 237}
 238#endif /* CONFIG_PRINTK && CONFIG_SYSCTL */
 239
 240/**
 241 * console_list_lock - Lock the console list
 242 *
 243 * For console list or console->flags updates
 244 */
 245void console_list_lock(void)
 246{
 247	/*
 248	 * In unregister_console() and console_force_preferred_locked(),
 249	 * synchronize_srcu() is called with the console_list_lock held.
 250	 * Therefore it is not allowed that the console_list_lock is taken
 251	 * with the srcu_lock held.
 252	 *
 253	 * Detecting if this context is really in the read-side critical
 254	 * section is only possible if the appropriate debug options are
 255	 * enabled.
 256	 */
 257	WARN_ON_ONCE(debug_lockdep_rcu_enabled() &&
 258		     srcu_read_lock_held(&console_srcu));
 259
 260	mutex_lock(&console_mutex);
 261}
 262EXPORT_SYMBOL(console_list_lock);
 263
 264/**
 265 * console_list_unlock - Unlock the console list
 266 *
 267 * Counterpart to console_list_lock()
 268 */
 269void console_list_unlock(void)
 270{
 271	mutex_unlock(&console_mutex);
 272}
 273EXPORT_SYMBOL(console_list_unlock);
 274
 275/**
 276 * console_srcu_read_lock - Register a new reader for the
 277 *	SRCU-protected console list
 278 *
 279 * Use for_each_console_srcu() to iterate the console list
 280 *
 281 * Context: Any context.
 282 * Return: A cookie to pass to console_srcu_read_unlock().
 283 */
 284int console_srcu_read_lock(void)
 285{
 286	return srcu_read_lock_nmisafe(&console_srcu);
 287}
 288EXPORT_SYMBOL(console_srcu_read_lock);
 289
 290/**
 291 * console_srcu_read_unlock - Unregister an old reader from
 292 *	the SRCU-protected console list
 293 * @cookie: cookie returned from console_srcu_read_lock()
 294 *
 295 * Counterpart to console_srcu_read_lock()
 296 */
 297void console_srcu_read_unlock(int cookie)
 298{
 299	srcu_read_unlock_nmisafe(&console_srcu, cookie);
 300}
 301EXPORT_SYMBOL(console_srcu_read_unlock);
 302
 303/*
 304 * Helper macros to handle lockdep when locking/unlocking console_sem. We use
 305 * macros instead of functions so that _RET_IP_ contains useful information.
 306 */
 307#define down_console_sem() do { \
 308	down(&console_sem);\
 309	mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);\
 310} while (0)
 311
 312static int __down_trylock_console_sem(unsigned long ip)
 313{
 314	int lock_failed;
 315	unsigned long flags;
 316
 317	/*
 318	 * Here and in __up_console_sem() we need to be in safe mode,
 319	 * because spindump/WARN/etc from under console ->lock will
 320	 * deadlock in printk()->down_trylock_console_sem() otherwise.
 321	 */
 322	printk_safe_enter_irqsave(flags);
 323	lock_failed = down_trylock(&console_sem);
 324	printk_safe_exit_irqrestore(flags);
 325
 326	if (lock_failed)
 327		return 1;
 328	mutex_acquire(&console_lock_dep_map, 0, 1, ip);
 329	return 0;
 330}
 331#define down_trylock_console_sem() __down_trylock_console_sem(_RET_IP_)
 332
 333static void __up_console_sem(unsigned long ip)
 334{
 335	unsigned long flags;
 336
 337	mutex_release(&console_lock_dep_map, ip);
 338
 339	printk_safe_enter_irqsave(flags);
 340	up(&console_sem);
 341	printk_safe_exit_irqrestore(flags);
 342}
 343#define up_console_sem() __up_console_sem(_RET_IP_)
 344
 345static bool panic_in_progress(void)
 346{
 347	return unlikely(atomic_read(&panic_cpu) != PANIC_CPU_INVALID);
 348}
 349
 350/* Return true if a panic is in progress on the current CPU. */
 351bool this_cpu_in_panic(void)
 352{
 353	/*
 354	 * We can use raw_smp_processor_id() here because it is impossible for
 355	 * the task to be migrated to the panic_cpu, or away from it. If
 356	 * panic_cpu has already been set, and we're not currently executing on
 357	 * that CPU, then we never will be.
 358	 */
 359	return unlikely(atomic_read(&panic_cpu) == raw_smp_processor_id());
 360}
 361
 362/*
 363 * Return true if a panic is in progress on a remote CPU.
 364 *
  365 * If true, the local CPU should immediately release any printing resources
 366 * that may be needed by the panic CPU.
 367 */
 368bool other_cpu_in_panic(void)
 369{
 370	return (panic_in_progress() && !this_cpu_in_panic());
 371}
 372
 373/*
 374 * This is used for debugging the mess that is the VT code by
  375 * keeping track of whether we hold the console semaphore. It's
  376 * definitely not the perfect debug tool (we don't know if _WE_
  377 * hold it and are racing), but it helps track those weird code
  378 * paths in the console code where we end up in places I want
  379 * locked without the console semaphore held.
 380 */
 381static int console_locked;
 382
 383/*
 384 *	Array of consoles built from command line options (console=)
 385 */
 386
 387#define MAX_CMDLINECONSOLES 8
 388
 389static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
 390
 391static int preferred_console = -1;
 392int console_set_on_cmdline;
 393EXPORT_SYMBOL(console_set_on_cmdline);
 394
 395/* Flag: console code may call schedule() */
 396static int console_may_schedule;
 397
 398enum con_msg_format_flags {
 399	MSG_FORMAT_DEFAULT	= 0,
 400	MSG_FORMAT_SYSLOG	= (1 << 0),
 401};
 402
 403static int console_msg_format = MSG_FORMAT_DEFAULT;
 404
 405/*
 406 * The printk log buffer consists of a sequenced collection of records, each
 407 * containing variable length message text. Every record also contains its
 408 * own meta-data (@info).
 409 *
 410 * Every record meta-data carries the timestamp in microseconds, as well as
 411 * the standard userspace syslog level and syslog facility. The usual kernel
 412 * messages use LOG_KERN; userspace-injected messages always carry a matching
 413 * syslog facility, by default LOG_USER. The origin of every message can be
 414 * reliably determined that way.
 415 *
 416 * The human readable log message of a record is available in @text, the
 417 * length of the message text in @text_len. The stored message is not
 418 * terminated.
 419 *
 420 * Optionally, a record can carry a dictionary of properties (key/value
 421 * pairs), to provide userspace with a machine-readable message context.
 422 *
 423 * Examples for well-defined, commonly used property names are:
 424 *   DEVICE=b12:8               device identifier
 425 *                                b12:8         block dev_t
 426 *                                c127:3        char dev_t
 427 *                                n8            netdev ifindex
 428 *                                +sound:card0  subsystem:devname
 429 *   SUBSYSTEM=pci              driver-core subsystem name
 430 *
 431 * Valid characters in property names are [a-zA-Z0-9.-_]. Property names
 432 * and values are terminated by a '\0' character.
 433 *
 434 * Example of record values:
 435 *   record.text_buf                = "it's a line" (unterminated)
 436 *   record.info.seq                = 56
 437 *   record.info.ts_nsec            = 36863
 438 *   record.info.text_len           = 11
 439 *   record.info.facility           = 0 (LOG_KERN)
 440 *   record.info.flags              = 0
 441 *   record.info.level              = 3 (LOG_ERR)
 442 *   record.info.caller_id          = 299 (task 299)
 443 *   record.info.dev_info.subsystem = "pci" (terminated)
 444 *   record.info.dev_info.device    = "+pci:0000:00:01.0" (terminated)
 445 *
 446 * The 'struct printk_info' buffer must never be directly exported to
 447 * userspace, it is a kernel-private implementation detail that might
 448 * need to be changed in the future, when the requirements change.
 449 *
 450 * /dev/kmsg exports the structured data in the following line format:
 451 *   "<level>,<sequnum>,<timestamp>,<contflag>[,additional_values, ... ];<message text>\n"
 452 *
 453 * Users of the export format should ignore possible additional values
 454 * separated by ',', and find the message after the ';' character.
 455 *
 456 * The optional key/value pairs are attached as continuation lines starting
 457 * with a space character and terminated by a newline. All possible
  458 * non-printable characters are escaped in the "\xff" notation.
 459 */
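/*
 * Example (illustrative, derived from the record values above): that record
 * would be exported on /dev/kmsg roughly as:
 *
 *	3,56,36,-;it's a line
 *	 SUBSYSTEM=pci
 *	 DEVICE=+pci:0000:00:01.0
 *
 * The first field is (facility << 3) | level, the timestamp is reported in
 * microseconds (36863 ns -> 36), and a ",caller=T299" field would follow the
 * continuation flag when CONFIG_PRINTK_CALLER is enabled.
 */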
 460
 461/* syslog_lock protects syslog_* variables and write access to clear_seq. */
 462static DEFINE_MUTEX(syslog_lock);
 463
 464#ifdef CONFIG_PRINTK
 465DECLARE_WAIT_QUEUE_HEAD(log_wait);
 466/* All 3 protected by @syslog_lock. */
 467/* the next printk record to read by syslog(READ) or /proc/kmsg */
 468static u64 syslog_seq;
 
 469static size_t syslog_partial;
 470static bool syslog_time;
 471
 472struct latched_seq {
 473	seqcount_latch_t	latch;
 474	u64			val[2];
 475};
 476
 477/*
 478 * The next printk record to read after the last 'clear' command. There are
 479 * two copies (updated with seqcount_latch) so that reads can locklessly
 480 * access a valid value. Writers are synchronized by @syslog_lock.
 481 */
 482static struct latched_seq clear_seq = {
 483	.latch		= SEQCNT_LATCH_ZERO(clear_seq.latch),
 484	.val[0]		= 0,
 485	.val[1]		= 0,
 486};
 487
 488#define LOG_LEVEL(v)		((v) & 0x07)
 489#define LOG_FACILITY(v)		((v) >> 3 & 0xff)
 490
 491/* record buffer */
 492#define LOG_ALIGN __alignof__(unsigned long)
 493#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
 494#define LOG_BUF_LEN_MAX (u32)(1 << 31)
 495static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
 496static char *log_buf = __log_buf;
 497static u32 log_buf_len = __LOG_BUF_LEN;
 498
 499/*
 500 * Define the average message size. This only affects the number of
 501 * descriptors that will be available. Underestimating is better than
 502 * overestimating (too many available descriptors is better than not enough).
 503 */
 504#define PRB_AVGBITS 5	/* 32 character average length */
 505
 506#if CONFIG_LOG_BUF_SHIFT <= PRB_AVGBITS
 507#error CONFIG_LOG_BUF_SHIFT value too small.
 508#endif
 509_DEFINE_PRINTKRB(printk_rb_static, CONFIG_LOG_BUF_SHIFT - PRB_AVGBITS,
 510		 PRB_AVGBITS, &__log_buf[0]);
 511
 512static struct printk_ringbuffer printk_rb_dynamic;
 513
 514struct printk_ringbuffer *prb = &printk_rb_static;
 515
 516/*
 517 * We cannot access per-CPU data (e.g. per-CPU flush irq_work) before
 518 * per_cpu_areas are initialised. This variable is set to true when
 519 * it's safe to access per-CPU data.
 520 */
 521static bool __printk_percpu_data_ready __ro_after_init;
 522
 523bool printk_percpu_data_ready(void)
 524{
 525	return __printk_percpu_data_ready;
 526}
 527
 528/* Must be called under syslog_lock. */
 529static void latched_seq_write(struct latched_seq *ls, u64 val)
 530{
 531	raw_write_seqcount_latch(&ls->latch);
 532	ls->val[0] = val;
 533	raw_write_seqcount_latch(&ls->latch);
 534	ls->val[1] = val;
 535}
 536
 537/* Can be called from any context. */
 538static u64 latched_seq_read_nolock(struct latched_seq *ls)
 539{
 540	unsigned int seq;
 541	unsigned int idx;
 542	u64 val;
 543
 544	do {
 545		seq = raw_read_seqcount_latch(&ls->latch);
 546		idx = seq & 0x1;
 547		val = ls->val[idx];
 548	} while (raw_read_seqcount_latch_retry(&ls->latch, seq));
 549
 550	return val;
 551}
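/*
 * Illustrative pairing of the two helpers above (a sketch, not called
 * anywhere): a writer updates @clear_seq under @syslog_lock while readers
 * may sample it locklessly from any context:
 *
 *	mutex_lock(&syslog_lock);
 *	latched_seq_write(&clear_seq, prb_next_seq(prb));
 *	mutex_unlock(&syslog_lock);
 *
 *	seq = latched_seq_read_nolock(&clear_seq);
 *
 * The read side retries if it raced with a writer, so it always returns
 * either the old or the new value, never a torn one.
 */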
 552
 553/* Return log buffer address */
 554char *log_buf_addr_get(void)
 555{
 556	return log_buf;
 557}
 558
 559/* Return log buffer size */
 560u32 log_buf_len_get(void)
 561{
 562	return log_buf_len;
 563}
 564
 565/*
  566 * Define the maximum part of the log buffer that a single message may
  567 * take. The value must be greater than two. Note that only half of the
  568 * buffer is available when the index points to the middle.
 569 */
 570#define MAX_LOG_TAKE_PART 4
 571static const char trunc_msg[] = "<truncated>";
 572
 573static void truncate_msg(u16 *text_len, u16 *trunc_msg_len)
 
 574{
 575	/*
 576	 * The message should not take the whole buffer. Otherwise, it might
 577	 * get removed too soon.
 578	 */
 579	u32 max_text_len = log_buf_len / MAX_LOG_TAKE_PART;
 580
 581	if (*text_len > max_text_len)
 582		*text_len = max_text_len;
 583
 584	/* enable the warning message (if there is room) */
 585	*trunc_msg_len = strlen(trunc_msg);
 586	if (*text_len >= *trunc_msg_len)
 587		*text_len -= *trunc_msg_len;
 588	else
 589		*trunc_msg_len = 0;
 590}
 591
 592int dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT);
 593
 594static int syslog_action_restricted(int type)
 595{
 596	if (dmesg_restrict)
 597		return 1;
 598	/*
 599	 * Unless restricted, we allow "read all" and "get buffer size"
 600	 * for everybody.
 601	 */
 602	return type != SYSLOG_ACTION_READ_ALL &&
 603	       type != SYSLOG_ACTION_SIZE_BUFFER;
 604}
 605
 606static int check_syslog_permissions(int type, int source)
 607{
 608	/*
 609	 * If this is from /proc/kmsg and we've already opened it, then we've
 610	 * already done the capabilities checks at open time.
 611	 */
 612	if (source == SYSLOG_FROM_PROC && type != SYSLOG_ACTION_OPEN)
 613		goto ok;
 614
 615	if (syslog_action_restricted(type)) {
 616		if (capable(CAP_SYSLOG))
 617			goto ok;
 618		return -EPERM;
 619	}
 620ok:
 621	return security_syslog(type);
 622}
 623
 624static void append_char(char **pp, char *e, char c)
 625{
 626	if (*pp < e)
 627		*(*pp)++ = c;
 628}
 629
 630static ssize_t info_print_ext_header(char *buf, size_t size,
 631				     struct printk_info *info)
 632{
 633	u64 ts_usec = info->ts_nsec;
 634	char caller[20];
 635#ifdef CONFIG_PRINTK_CALLER
 636	u32 id = info->caller_id;
 637
 638	snprintf(caller, sizeof(caller), ",caller=%c%u",
 639		 id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
 640#else
 641	caller[0] = '\0';
 642#endif
 643
 644	do_div(ts_usec, 1000);
 645
 646	return scnprintf(buf, size, "%u,%llu,%llu,%c%s;",
 647			 (info->facility << 3) | info->level, info->seq,
 648			 ts_usec, info->flags & LOG_CONT ? 'c' : '-', caller);
 649}
 650
 651static ssize_t msg_add_ext_text(char *buf, size_t size,
 652				const char *text, size_t text_len,
 653				unsigned char endc)
 654{
 655	char *p = buf, *e = buf + size;
 656	size_t i;
 657
 658	/* escape non-printable characters */
 659	for (i = 0; i < text_len; i++) {
 660		unsigned char c = text[i];
 661
 662		if (c < ' ' || c >= 127 || c == '\\')
 663			p += scnprintf(p, e - p, "\\x%02x", c);
 664		else
 665			append_char(&p, e, c);
 666	}
 667	append_char(&p, e, endc);
 668
 669	return p - buf;
 670}
 671
 672static ssize_t msg_add_dict_text(char *buf, size_t size,
 673				 const char *key, const char *val)
 674{
 675	size_t val_len = strlen(val);
 676	ssize_t len;
 677
 678	if (!val_len)
 679		return 0;
 
 
 680
 681	len = msg_add_ext_text(buf, size, "", 0, ' ');	/* dict prefix */
 682	len += msg_add_ext_text(buf + len, size - len, key, strlen(key), '=');
 683	len += msg_add_ext_text(buf + len, size - len, val, val_len, '\n');
 
 
 684
 685	return len;
 686}
 
 
 687
 688static ssize_t msg_print_ext_body(char *buf, size_t size,
 689				  char *text, size_t text_len,
 690				  struct dev_printk_info *dev_info)
 691{
 692	ssize_t len;
 693
 694	len = msg_add_ext_text(buf, size, text, text_len, '\n');
 695
 696	if (!dev_info)
 697		goto out;
 698
 699	len += msg_add_dict_text(buf + len, size - len, "SUBSYSTEM",
 700				 dev_info->subsystem);
 701	len += msg_add_dict_text(buf + len, size - len, "DEVICE",
 702				 dev_info->device);
 703out:
 704	return len;
 705}
 706
 707/* /dev/kmsg - userspace message inject/listen interface */
 708struct devkmsg_user {
 709	atomic64_t seq;
 
 710	struct ratelimit_state rs;
 711	struct mutex lock;
 712	struct printk_buffers pbufs;
 713};
 714
 715static __printf(3, 4) __cold
 716int devkmsg_emit(int facility, int level, const char *fmt, ...)
 717{
 718	va_list args;
 719	int r;
 720
 721	va_start(args, fmt);
 722	r = vprintk_emit(facility, level, NULL, fmt, args);
 723	va_end(args);
 724
 725	return r;
 726}
 727
 728static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
 729{
 730	char *buf, *line;
 731	int level = default_message_loglevel;
 732	int facility = 1;	/* LOG_USER */
 733	struct file *file = iocb->ki_filp;
 734	struct devkmsg_user *user = file->private_data;
 735	size_t len = iov_iter_count(from);
 736	ssize_t ret = len;
 737
 738	if (len > PRINTKRB_RECORD_MAX)
 739		return -EINVAL;
 740
 741	/* Ignore when user logging is disabled. */
 742	if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
 743		return len;
 744
 745	/* Ratelimit when not explicitly enabled. */
 746	if (!(devkmsg_log & DEVKMSG_LOG_MASK_ON)) {
 747		if (!___ratelimit(&user->rs, current->comm))
 748			return ret;
 749	}
 750
 751	buf = kmalloc(len+1, GFP_KERNEL);
 752	if (buf == NULL)
 753		return -ENOMEM;
 754
 755	buf[len] = '\0';
 756	if (!copy_from_iter_full(buf, len, from)) {
 757		kfree(buf);
 758		return -EFAULT;
 759	}
 760
 761	/*
 762	 * Extract and skip the syslog prefix <[0-9]*>. Coming from userspace
  763	 * the decimal value represents a 32-bit number; the lower 3 bits are
  764	 * the log level, the rest is the log facility.
 765	 *
 766	 * If no prefix or no userspace facility is specified, we
 767	 * enforce LOG_USER, to be able to reliably distinguish
 768	 * kernel-generated messages from userspace-injected ones.
 769	 */
 770	line = buf;
 771	if (line[0] == '<') {
 772		char *endp = NULL;
 773		unsigned int u;
 774
 775		u = simple_strtoul(line + 1, &endp, 10);
 776		if (endp && endp[0] == '>') {
 777			level = LOG_LEVEL(u);
 778			if (LOG_FACILITY(u) != 0)
 779				facility = LOG_FACILITY(u);
 780			endp++;
 
 781			line = endp;
 782		}
 783	}
 784
 785	devkmsg_emit(facility, level, "%s", line);
 786	kfree(buf);
 787	return ret;
 788}
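/*
 * Example (user-space side, illustrative only): a process can inject a
 * record by writing a syslog-style prefix followed by the message text:
 *
 *	int fd = open("/dev/kmsg", O_WRONLY);
 *	write(fd, "<12>hello from user space\n", 26);
 *
 * "<12>" decodes via LOG_LEVEL()/LOG_FACILITY() to level 4 and facility 1
 * (LOG_USER). Without a prefix, default_message_loglevel and LOG_USER are
 * used, so kernel and user-space records remain distinguishable.
 */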
 789
 790static ssize_t devkmsg_read(struct file *file, char __user *buf,
 791			    size_t count, loff_t *ppos)
 792{
 793	struct devkmsg_user *user = file->private_data;
 794	char *outbuf = &user->pbufs.outbuf[0];
 795	struct printk_message pmsg = {
 796		.pbufs = &user->pbufs,
 797	};
 798	ssize_t ret;
 799
 
 
 
 800	ret = mutex_lock_interruptible(&user->lock);
 801	if (ret)
 802		return ret;
 803
 804	if (!printk_get_next_message(&pmsg, atomic64_read(&user->seq), true, false)) {
 
 805		if (file->f_flags & O_NONBLOCK) {
 806			ret = -EAGAIN;
 
 807			goto out;
 808		}
 809
 810		/*
 811		 * Guarantee this task is visible on the waitqueue before
 812		 * checking the wake condition.
 813		 *
 814		 * The full memory barrier within set_current_state() of
 815		 * prepare_to_wait_event() pairs with the full memory barrier
 816		 * within wq_has_sleeper().
 817		 *
 818		 * This pairs with __wake_up_klogd:A.
 819		 */
 820		ret = wait_event_interruptible(log_wait,
 821				printk_get_next_message(&pmsg, atomic64_read(&user->seq), true,
 822							false)); /* LMM(devkmsg_read:A) */
 823		if (ret)
 824			goto out;
 
 825	}
 826
 827	if (pmsg.dropped) {
 828		/* our last seen message is gone, return error and reset */
 829		atomic64_set(&user->seq, pmsg.seq);
 
 830		ret = -EPIPE;
 
 831		goto out;
 832	}
 833
 834	atomic64_set(&user->seq, pmsg.seq + 1);
 835
 836	if (pmsg.outbuf_len > count) {
 837		ret = -EINVAL;
 838		goto out;
 839	}
 840
 841	if (copy_to_user(buf, outbuf, pmsg.outbuf_len)) {
 842		ret = -EFAULT;
 843		goto out;
 844	}
 845	ret = pmsg.outbuf_len;
 846out:
 847	mutex_unlock(&user->lock);
 848	return ret;
 849}
 850
 851/*
 852 * Be careful when modifying this function!!!
 853 *
  854	 * Only a few operations are supported because the device works only with
  855	 * entire variable-length messages (records). Non-standard values are
  856	 * returned in the other cases, and it has been this way for quite some time.
 857 * User space applications might depend on this behavior.
 858 */
 859static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
 860{
 861	struct devkmsg_user *user = file->private_data;
 862	loff_t ret = 0;
 863
 
 
 864	if (offset)
 865		return -ESPIPE;
 866
 
 867	switch (whence) {
 868	case SEEK_SET:
 869		/* the first record */
 870		atomic64_set(&user->seq, prb_first_valid_seq(prb));
 
 871		break;
 872	case SEEK_DATA:
 873		/*
 874		 * The first record after the last SYSLOG_ACTION_CLEAR,
 875		 * like issued by 'dmesg -c'. Reading /dev/kmsg itself
 876		 * changes no global state, and does not clear anything.
 877		 */
 878		atomic64_set(&user->seq, latched_seq_read_nolock(&clear_seq));
 
 879		break;
 880	case SEEK_END:
 881		/* after the last record */
 882		atomic64_set(&user->seq, prb_next_seq(prb));
 
 883		break;
 884	default:
 885		ret = -EINVAL;
 886	}
 
 887	return ret;
 888}
 889
 890static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
 891{
 892	struct devkmsg_user *user = file->private_data;
 893	struct printk_info info;
 894	__poll_t ret = 0;
 895
 
 
 
 896	poll_wait(file, &log_wait, wait);
 897
 898	if (prb_read_valid_info(prb, atomic64_read(&user->seq), &info, NULL)) {
 
 899		/* return error when data has vanished underneath us */
 900		if (info.seq != atomic64_read(&user->seq))
 901			ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
 902		else
 903			ret = EPOLLIN|EPOLLRDNORM;
 904	}
 
 905
 906	return ret;
 907}
 908
 909static int devkmsg_open(struct inode *inode, struct file *file)
 910{
 911	struct devkmsg_user *user;
 912	int err;
 913
 914	if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
 915		return -EPERM;
 916
 917	/* write-only does not need any file context */
 918	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
 919		err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
 920					       SYSLOG_FROM_READER);
 921		if (err)
 922			return err;
 923	}
 924
 925	user = kvmalloc(sizeof(struct devkmsg_user), GFP_KERNEL);
 926	if (!user)
 927		return -ENOMEM;
 928
 929	ratelimit_default_init(&user->rs);
 930	ratelimit_set_flags(&user->rs, RATELIMIT_MSG_ON_RELEASE);
 931
 932	mutex_init(&user->lock);
 933
 934	atomic64_set(&user->seq, prb_first_valid_seq(prb));
 
 
 
 935
 936	file->private_data = user;
 937	return 0;
 938}
 939
 940static int devkmsg_release(struct inode *inode, struct file *file)
 941{
 942	struct devkmsg_user *user = file->private_data;
 943
 
 
 
 944	ratelimit_state_exit(&user->rs);
 945
 946	mutex_destroy(&user->lock);
 947	kvfree(user);
 948	return 0;
 949}
 950
 951const struct file_operations kmsg_fops = {
 952	.open = devkmsg_open,
 953	.read = devkmsg_read,
 954	.write_iter = devkmsg_write,
 955	.llseek = devkmsg_llseek,
 956	.poll = devkmsg_poll,
 957	.release = devkmsg_release,
 958};
 959
 960#ifdef CONFIG_VMCORE_INFO
 961/*
 962 * This appends the listed symbols to /proc/vmcore
 963 *
 964 * /proc/vmcore is used by various utilities, like crash and makedumpfile to
 965 * obtain access to symbols that are otherwise very difficult to locate.  These
 966 * symbols are specifically used so that utilities can access and extract the
 967 * dmesg log from a vmcore file after a crash.
 968 */
 969void log_buf_vmcoreinfo_setup(void)
 970{
 971	struct dev_printk_info *dev_info = NULL;
 972
 973	VMCOREINFO_SYMBOL(prb);
 974	VMCOREINFO_SYMBOL(printk_rb_static);
 975	VMCOREINFO_SYMBOL(clear_seq);
 976
 977	/*
 978	 * Export struct size and field offsets. User space tools can
 979	 * parse it and detect any changes to structure down the line.
 980	 */
 981
 982	VMCOREINFO_STRUCT_SIZE(printk_ringbuffer);
 983	VMCOREINFO_OFFSET(printk_ringbuffer, desc_ring);
 984	VMCOREINFO_OFFSET(printk_ringbuffer, text_data_ring);
 985	VMCOREINFO_OFFSET(printk_ringbuffer, fail);
 986
 987	VMCOREINFO_STRUCT_SIZE(prb_desc_ring);
 988	VMCOREINFO_OFFSET(prb_desc_ring, count_bits);
 989	VMCOREINFO_OFFSET(prb_desc_ring, descs);
 990	VMCOREINFO_OFFSET(prb_desc_ring, infos);
 991	VMCOREINFO_OFFSET(prb_desc_ring, head_id);
 992	VMCOREINFO_OFFSET(prb_desc_ring, tail_id);
 993
 994	VMCOREINFO_STRUCT_SIZE(prb_desc);
 995	VMCOREINFO_OFFSET(prb_desc, state_var);
 996	VMCOREINFO_OFFSET(prb_desc, text_blk_lpos);
 997
 998	VMCOREINFO_STRUCT_SIZE(prb_data_blk_lpos);
 999	VMCOREINFO_OFFSET(prb_data_blk_lpos, begin);
1000	VMCOREINFO_OFFSET(prb_data_blk_lpos, next);
1001
1002	VMCOREINFO_STRUCT_SIZE(printk_info);
1003	VMCOREINFO_OFFSET(printk_info, seq);
1004	VMCOREINFO_OFFSET(printk_info, ts_nsec);
1005	VMCOREINFO_OFFSET(printk_info, text_len);
1006	VMCOREINFO_OFFSET(printk_info, caller_id);
1007	VMCOREINFO_OFFSET(printk_info, dev_info);
1008
1009	VMCOREINFO_STRUCT_SIZE(dev_printk_info);
1010	VMCOREINFO_OFFSET(dev_printk_info, subsystem);
1011	VMCOREINFO_LENGTH(printk_info_subsystem, sizeof(dev_info->subsystem));
1012	VMCOREINFO_OFFSET(dev_printk_info, device);
1013	VMCOREINFO_LENGTH(printk_info_device, sizeof(dev_info->device));
1014
1015	VMCOREINFO_STRUCT_SIZE(prb_data_ring);
1016	VMCOREINFO_OFFSET(prb_data_ring, size_bits);
1017	VMCOREINFO_OFFSET(prb_data_ring, data);
1018	VMCOREINFO_OFFSET(prb_data_ring, head_lpos);
1019	VMCOREINFO_OFFSET(prb_data_ring, tail_lpos);
1020
1021	VMCOREINFO_SIZE(atomic_long_t);
1022	VMCOREINFO_TYPE_OFFSET(atomic_long_t, counter);
1023
1024	VMCOREINFO_STRUCT_SIZE(latched_seq);
1025	VMCOREINFO_OFFSET(latched_seq, val);
1026}
1027#endif
1028
1029/* requested log_buf_len from kernel cmdline */
1030static unsigned long __initdata new_log_buf_len;
1031
1032/* we practice scaling the ring buffer by powers of 2 */
1033static void __init log_buf_len_update(u64 size)
1034{
1035	if (size > (u64)LOG_BUF_LEN_MAX) {
1036		size = (u64)LOG_BUF_LEN_MAX;
1037		pr_err("log_buf over 2G is not supported.\n");
1038	}
1039
1040	if (size)
1041		size = roundup_pow_of_two(size);
1042	if (size > log_buf_len)
1043		new_log_buf_len = (unsigned long)size;
1044}
1045
1046/* save requested log_buf_len since it's too early to process it */
1047static int __init log_buf_len_setup(char *str)
1048{
1049	u64 size;
1050
1051	if (!str)
1052		return -EINVAL;
1053
1054	size = memparse(str, &str);
1055
1056	log_buf_len_update(size);
1057
1058	return 0;
1059}
1060early_param("log_buf_len", log_buf_len_setup);
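/*
 * Example (illustrative): booting with "log_buf_len=1M" makes memparse()
 * return 1048576, which roundup_pow_of_two() leaves unchanged, so a 1 MiB
 * dynamic buffer is allocated later in setup_log_buf(). A value such as
 * "log_buf_len=5M" would be rounded up to 8 MiB. The request only takes
 * effect when it is larger than the built-in __LOG_BUF_LEN.
 */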
1061
1062#ifdef CONFIG_SMP
1063#define __LOG_CPU_MAX_BUF_LEN (1 << CONFIG_LOG_CPU_MAX_BUF_SHIFT)
1064
1065static void __init log_buf_add_cpu(void)
1066{
1067	unsigned int cpu_extra;
1068
1069	/*
1070	 * archs should set up cpu_possible_bits properly with
 1071	 * set_cpu_possible() after setup_arch(), but just in
 1072	 * case let's ensure this is valid.
1073	 */
1074	if (num_possible_cpus() == 1)
1075		return;
1076
1077	cpu_extra = (num_possible_cpus() - 1) * __LOG_CPU_MAX_BUF_LEN;
1078
 1079	/* by default this will only take effect on large systems (> 64 CPUs) */
1080	if (cpu_extra <= __LOG_BUF_LEN / 2)
1081		return;
1082
1083	pr_info("log_buf_len individual max cpu contribution: %d bytes\n",
1084		__LOG_CPU_MAX_BUF_LEN);
1085	pr_info("log_buf_len total cpu_extra contributions: %d bytes\n",
1086		cpu_extra);
1087	pr_info("log_buf_len min size: %d bytes\n", __LOG_BUF_LEN);
1088
1089	log_buf_len_update(cpu_extra + __LOG_BUF_LEN);
1090}
1091#else /* !CONFIG_SMP */
1092static inline void log_buf_add_cpu(void) {}
1093#endif /* CONFIG_SMP */
1094
1095static void __init set_percpu_data_ready(void)
1096{
1097	__printk_percpu_data_ready = true;
1098}
1099
1100static unsigned int __init add_to_rb(struct printk_ringbuffer *rb,
1101				     struct printk_record *r)
1102{
1103	struct prb_reserved_entry e;
1104	struct printk_record dest_r;
1105
1106	prb_rec_init_wr(&dest_r, r->info->text_len);
1107
1108	if (!prb_reserve(&e, rb, &dest_r))
1109		return 0;
1110
1111	memcpy(&dest_r.text_buf[0], &r->text_buf[0], r->info->text_len);
1112	dest_r.info->text_len = r->info->text_len;
1113	dest_r.info->facility = r->info->facility;
1114	dest_r.info->level = r->info->level;
1115	dest_r.info->flags = r->info->flags;
1116	dest_r.info->ts_nsec = r->info->ts_nsec;
1117	dest_r.info->caller_id = r->info->caller_id;
1118	memcpy(&dest_r.info->dev_info, &r->info->dev_info, sizeof(dest_r.info->dev_info));
1119
1120	prb_final_commit(&e);
1121
1122	return prb_record_text_space(&e);
1123}
1124
1125static char setup_text_buf[PRINTKRB_RECORD_MAX] __initdata;
1126
1127void __init setup_log_buf(int early)
1128{
1129	struct printk_info *new_infos;
1130	unsigned int new_descs_count;
1131	struct prb_desc *new_descs;
1132	struct printk_info info;
1133	struct printk_record r;
1134	unsigned int text_size;
1135	size_t new_descs_size;
1136	size_t new_infos_size;
1137	unsigned long flags;
1138	char *new_log_buf;
1139	unsigned int free;
1140	u64 seq;
1141
1142	/*
 1143	 * Some archs call setup_log_buf() multiple times - the first very
 1144	 * early, e.g. from setup_arch(), and the second when percpu_areas
 1145	 * are initialised.
1146	 */
1147	if (!early)
1148		set_percpu_data_ready();
1149
1150	if (log_buf != __log_buf)
1151		return;
1152
1153	if (!early && !new_log_buf_len)
1154		log_buf_add_cpu();
1155
1156	if (!new_log_buf_len)
1157		return;
1158
1159	new_descs_count = new_log_buf_len >> PRB_AVGBITS;
1160	if (new_descs_count == 0) {
1161		pr_err("new_log_buf_len: %lu too small\n", new_log_buf_len);
1162		return;
1163	}
1164
1165	new_log_buf = memblock_alloc(new_log_buf_len, LOG_ALIGN);
1166	if (unlikely(!new_log_buf)) {
1167		pr_err("log_buf_len: %lu text bytes not available\n",
1168		       new_log_buf_len);
1169		return;
1170	}
1171
1172	new_descs_size = new_descs_count * sizeof(struct prb_desc);
1173	new_descs = memblock_alloc(new_descs_size, LOG_ALIGN);
1174	if (unlikely(!new_descs)) {
1175		pr_err("log_buf_len: %zu desc bytes not available\n",
1176		       new_descs_size);
1177		goto err_free_log_buf;
1178	}
1179
1180	new_infos_size = new_descs_count * sizeof(struct printk_info);
1181	new_infos = memblock_alloc(new_infos_size, LOG_ALIGN);
1182	if (unlikely(!new_infos)) {
1183		pr_err("log_buf_len: %zu info bytes not available\n",
1184		       new_infos_size);
1185		goto err_free_descs;
1186	}
1187
1188	prb_rec_init_rd(&r, &info, &setup_text_buf[0], sizeof(setup_text_buf));
1189
1190	prb_init(&printk_rb_dynamic,
1191		 new_log_buf, ilog2(new_log_buf_len),
1192		 new_descs, ilog2(new_descs_count),
1193		 new_infos);
1194
1195	local_irq_save(flags);
1196
1197	log_buf_len = new_log_buf_len;
1198	log_buf = new_log_buf;
1199	new_log_buf_len = 0;
1200
1201	free = __LOG_BUF_LEN;
1202	prb_for_each_record(0, &printk_rb_static, seq, &r) {
1203		text_size = add_to_rb(&printk_rb_dynamic, &r);
1204		if (text_size > free)
1205			free = 0;
1206		else
1207			free -= text_size;
1208	}
1209
1210	prb = &printk_rb_dynamic;
1211
1212	local_irq_restore(flags);
1213
1214	/*
1215	 * Copy any remaining messages that might have appeared from
1216	 * NMI context after copying but before switching to the
1217	 * dynamic buffer.
1218	 */
1219	prb_for_each_record(seq, &printk_rb_static, seq, &r) {
1220		text_size = add_to_rb(&printk_rb_dynamic, &r);
1221		if (text_size > free)
1222			free = 0;
1223		else
1224			free -= text_size;
1225	}
1226
1227	if (seq != prb_next_seq(&printk_rb_static)) {
1228		pr_err("dropped %llu messages\n",
1229		       prb_next_seq(&printk_rb_static) - seq);
1230	}
1231
1232	pr_info("log_buf_len: %u bytes\n", log_buf_len);
1233	pr_info("early log buf free: %u(%u%%)\n",
1234		free, (free * 100) / __LOG_BUF_LEN);
1235	return;
1236
1237err_free_descs:
1238	memblock_free(new_descs, new_descs_size);
1239err_free_log_buf:
1240	memblock_free(new_log_buf, new_log_buf_len);
1241}
1242
1243static bool __read_mostly ignore_loglevel;
1244
1245static int __init ignore_loglevel_setup(char *str)
1246{
1247	ignore_loglevel = true;
1248	pr_info("debug: ignoring loglevel setting.\n");
1249
1250	return 0;
1251}
1252
1253early_param("ignore_loglevel", ignore_loglevel_setup);
1254module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR);
1255MODULE_PARM_DESC(ignore_loglevel,
1256		 "ignore loglevel setting (prints all kernel messages to the console)");
1257
1258static bool suppress_message_printing(int level)
1259{
1260	return (level >= console_loglevel && !ignore_loglevel);
1261}
1262
1263#ifdef CONFIG_BOOT_PRINTK_DELAY
1264
1265static int boot_delay; /* msecs delay after each printk during bootup */
1266static unsigned long long loops_per_msec;	/* based on boot_delay */
1267
1268static int __init boot_delay_setup(char *str)
1269{
1270	unsigned long lpj;
1271
1272	lpj = preset_lpj ? preset_lpj : 1000000;	/* some guess */
1273	loops_per_msec = (unsigned long long)lpj / 1000 * HZ;
1274
1275	get_option(&str, &boot_delay);
1276	if (boot_delay > 10 * 1000)
1277		boot_delay = 0;
1278
1279	pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
1280		"HZ: %d, loops_per_msec: %llu\n",
1281		boot_delay, preset_lpj, lpj, HZ, loops_per_msec);
1282	return 0;
1283}
1284early_param("boot_delay", boot_delay_setup);
1285
1286static void boot_delay_msec(int level)
1287{
1288	unsigned long long k;
1289	unsigned long timeout;
1290
1291	if ((boot_delay == 0 || system_state >= SYSTEM_RUNNING)
1292		|| suppress_message_printing(level)) {
1293		return;
1294	}
1295
1296	k = (unsigned long long)loops_per_msec * boot_delay;
1297
1298	timeout = jiffies + msecs_to_jiffies(boot_delay);
1299	while (k) {
1300		k--;
1301		cpu_relax();
1302		/*
1303		 * use (volatile) jiffies to prevent
1304		 * compiler reduction; loop termination via jiffies
1305		 * is secondary and may or may not happen.
1306		 */
1307		if (time_after(jiffies, timeout))
1308			break;
1309		touch_nmi_watchdog();
1310	}
1311}
1312#else
1313static inline void boot_delay_msec(int level)
1314{
1315}
1316#endif
1317
1318static bool printk_time = IS_ENABLED(CONFIG_PRINTK_TIME);
1319module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
1320
1321static size_t print_syslog(unsigned int level, char *buf)
1322{
1323	return sprintf(buf, "<%u>", level);
1324}
1325
1326static size_t print_time(u64 ts, char *buf)
1327{
1328	unsigned long rem_nsec = do_div(ts, 1000000000);
1329
1330	return sprintf(buf, "[%5lu.%06lu]",
1331		       (unsigned long)ts, rem_nsec / 1000);
1332}
1333
1334#ifdef CONFIG_PRINTK_CALLER
1335static size_t print_caller(u32 id, char *buf)
1336{
1337	char caller[12];
1338
1339	snprintf(caller, sizeof(caller), "%c%u",
1340		 id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
1341	return sprintf(buf, "[%6s]", caller);
1342}
1343#else
1344#define print_caller(id, buf) 0
1345#endif
1346
 1347static size_t info_print_prefix(const struct printk_info *info, bool syslog,
1348				bool time, char *buf)
1349{
1350	size_t len = 0;
1351
1352	if (syslog)
1353		len = print_syslog((info->facility << 3) | info->level, buf);
1354
1355	if (time)
1356		len += print_time(info->ts_nsec, buf + len);
1357
1358	len += print_caller(info->caller_id, buf + len);
1359
1360	if (IS_ENABLED(CONFIG_PRINTK_CALLER) || time) {
1361		buf[len++] = ' ';
1362		buf[len] = '\0';
1363	}
1364
1365	return len;
1366}
1367
1368/*
1369 * Prepare the record for printing. The text is shifted within the given
1370 * buffer to avoid a need for another one. The following operations are
1371 * done:
1372 *
1373 *   - Add prefix for each line.
1374 *   - Drop truncated lines that no longer fit into the buffer.
1375 *   - Add the trailing newline that has been removed in vprintk_store().
1376 *   - Add a string terminator.
1377 *
1378 * Since the produced string is always terminated, the maximum possible
1379 * return value is @r->text_buf_size - 1;
1380 *
1381 * Return: The length of the updated/prepared text, including the added
1382 * prefixes and the newline. The terminator is not counted. The dropped
1383 * line(s) are not counted.
1384 */
1385static size_t record_print_text(struct printk_record *r, bool syslog,
1386				bool time)
1387{
1388	size_t text_len = r->info->text_len;
1389	size_t buf_size = r->text_buf_size;
1390	char *text = r->text_buf;
1391	char prefix[PRINTK_PREFIX_MAX];
1392	bool truncated = false;
1393	size_t prefix_len;
1394	size_t line_len;
1395	size_t len = 0;
1396	char *next;
 
1397
1398	/*
1399	 * If the message was truncated because the buffer was not large
1400	 * enough, treat the available text as if it were the full text.
1401	 */
1402	if (text_len > buf_size)
1403		text_len = buf_size;
1404
1405	prefix_len = info_print_prefix(r->info, syslog, time, prefix);
1406
1407	/*
1408	 * @text_len: bytes of unprocessed text
1409	 * @line_len: bytes of current line _without_ newline
1410	 * @text:     pointer to beginning of current line
1411	 * @len:      number of bytes prepared in r->text_buf
1412	 */
1413	for (;;) {
1414		next = memchr(text, '\n', text_len);
1415		if (next) {
1416			line_len = next - text;
 
 
1417		} else {
1418			/* Drop truncated line(s). */
1419			if (truncated)
1420				break;
1421			line_len = text_len;
1422		}
1423
1424		/*
1425		 * Truncate the text if there is not enough space to add the
1426		 * prefix and a trailing newline and a terminator.
1427		 */
1428		if (len + prefix_len + text_len + 1 + 1 > buf_size) {
1429			/* Drop even the current line if no space. */
1430			if (len + prefix_len + line_len + 1 + 1 > buf_size)
1431				break;
1432
1433			text_len = buf_size - len - prefix_len - 1 - 1;
1434			truncated = true;
1435		}
1436
1437		memmove(text + prefix_len, text, text_len);
1438		memcpy(text, prefix, prefix_len);
1439
1440		/*
1441		 * Increment the prepared length to include the text and
1442		 * prefix that were just moved+copied. Also increment for the
1443		 * newline at the end of this line. If this is the last line,
1444		 * there is no newline, but it will be added immediately below.
1445		 */
1446		len += prefix_len + line_len + 1;
1447		if (text_len == line_len) {
1448			/*
1449			 * This is the last line. Add the trailing newline
1450			 * removed in vprintk_store().
1451			 */
1452			text[prefix_len + line_len] = '\n';
1453			break;
1454		}
1455
1456		/*
1457		 * Advance beyond the added prefix and the related line with
1458		 * its newline.
1459		 */
1460		text += prefix_len + line_len + 1;
1461
1462		/*
1463		 * The remaining text has only decreased by the line with its
1464		 * newline.
1465		 *
1466		 * Note that @text_len can become zero. It happens when @text
1467		 * ended with a newline (either due to truncation or the
1468		 * original string ending with "\n\n"). The loop is correctly
1469		 * repeated and (if not truncated) an empty line with a prefix
1470		 * will be prepared.
1471		 */
1472		text_len -= line_len + 1;
1473	}
1474
1475	/*
1476	 * If a buffer was provided, it will be terminated. Space for the
1477	 * string terminator is guaranteed to be available. The terminator is
1478	 * not counted in the return value.
1479	 */
1480	if (buf_size > 0)
1481		r->text_buf[len] = 0;
1482
1483	return len;
1484}
1485
1486static size_t get_record_print_text_size(struct printk_info *info,
1487					 unsigned int line_count,
1488					 bool syslog, bool time)
1489{
1490	char prefix[PRINTK_PREFIX_MAX];
1491	size_t prefix_len;
1492
1493	prefix_len = info_print_prefix(info, syslog, time, prefix);
1494
1495	/*
1496	 * Each line will be preceded with a prefix. The intermediate
1497	 * newlines are already within the text, but a final trailing
1498	 * newline will be added.
1499	 */
1500	return ((prefix_len * line_count) + info->text_len + 1);
1501}
1502
1503/*
1504 * Beginning with @start_seq, find the first record where it and all following
1505 * records up to (but not including) @max_seq fit into @size.
1506 *
1507 * @max_seq is simply an upper bound and does not need to exist. If the caller
1508 * does not require an upper bound, -1 can be used for @max_seq.
1509 */
1510static u64 find_first_fitting_seq(u64 start_seq, u64 max_seq, size_t size,
1511				  bool syslog, bool time)
1512{
1513	struct printk_info info;
1514	unsigned int line_count;
1515	size_t len = 0;
1516	u64 seq;
1517
1518	/* Determine the size of the records up to @max_seq. */
1519	prb_for_each_info(start_seq, prb, seq, &info, &line_count) {
1520		if (info.seq >= max_seq)
1521			break;
1522		len += get_record_print_text_size(&info, line_count, syslog, time);
1523	}
1524
1525	/*
1526	 * Adjust the upper bound for the next loop to avoid subtracting
1527	 * lengths that were never added.
1528	 */
1529	if (seq < max_seq)
1530		max_seq = seq;
1531
1532	/*
1533	 * Move first record forward until length fits into the buffer. Ignore
1534	 * newest messages that were not counted in the above cycle. Messages
1535	 * might appear and get lost in the meantime. This is a best effort
1536	 * that prevents an infinite loop that could occur with a retry.
1537	 */
1538	prb_for_each_info(start_seq, prb, seq, &info, &line_count) {
1539		if (len <= size || info.seq >= max_seq)
1540			break;
1541		len -= get_record_print_text_size(&info, line_count, syslog, time);
1542	}
1543
1544	return seq;
1545}
1546
1547/* The caller is responsible for making sure @size is greater than 0. */
1548static int syslog_print(char __user *buf, int size)
1549{
1550	struct printk_info info;
1551	struct printk_record r;
1552	char *text;
 
1553	int len = 0;
1554	u64 seq;
1555
1556	text = kmalloc(PRINTK_MESSAGE_MAX, GFP_KERNEL);
1557	if (!text)
1558		return -ENOMEM;
1559
1560	prb_rec_init_rd(&r, &info, text, PRINTK_MESSAGE_MAX);
1561
1562	mutex_lock(&syslog_lock);
1563
1564	/*
1565	 * Wait for the @syslog_seq record to be available. @syslog_seq may
1566	 * change while waiting.
1567	 */
1568	do {
1569		seq = syslog_seq;
1570
1571		mutex_unlock(&syslog_lock);
1572		/*
1573		 * Guarantee this task is visible on the waitqueue before
1574		 * checking the wake condition.
1575		 *
1576		 * The full memory barrier within set_current_state() of
1577		 * prepare_to_wait_event() pairs with the full memory barrier
1578		 * within wq_has_sleeper().
1579		 *
1580		 * This pairs with __wake_up_klogd:A.
1581		 */
1582		len = wait_event_interruptible(log_wait,
1583				prb_read_valid(prb, seq, NULL)); /* LMM(syslog_print:A) */
1584		mutex_lock(&syslog_lock);
1585
1586		if (len)
1587			goto out;
1588	} while (syslog_seq != seq);
1589
1590	/*
1591	 * Copy records that fit into the buffer. The above cycle makes sure
1592	 * that the first record is always available.
1593	 */
1594	do {
1595		size_t n;
1596		size_t skip;
1597		int err;
1598
1599		if (!prb_read_valid(prb, syslog_seq, &r))
1600			break;
1601
1602		if (r.info->seq != syslog_seq) {
1603			/* message is gone, move to next valid one */
1604			syslog_seq = r.info->seq;
1605			syslog_partial = 0;
1606		}
1607
1608		/*
1609		 * To keep reading/counting partial line consistent,
1610		 * use printk_time value as of the beginning of a line.
1611		 */
1612		if (!syslog_partial)
1613			syslog_time = printk_time;
1614
1615		skip = syslog_partial;
1616		n = record_print_text(&r, true, syslog_time);
 
 
1617		if (n - syslog_partial <= size) {
1618			/* message fits into buffer, move forward */
1619			syslog_seq = r.info->seq + 1;
 
1620			n -= syslog_partial;
1621			syslog_partial = 0;
 1622		} else if (!len) {
1623			/* partial read(), remember position */
1624			n = size;
1625			syslog_partial += n;
1626		} else
1627			n = 0;
 
1628
1629		if (!n)
1630			break;
1631
1632		mutex_unlock(&syslog_lock);
1633		err = copy_to_user(buf, text + skip, n);
1634		mutex_lock(&syslog_lock);
1635
1636		if (err) {
1637			if (!len)
1638				len = -EFAULT;
1639			break;
1640		}
1641
1642		len += n;
1643		size -= n;
1644		buf += n;
1645	} while (size);
1646out:
1647	mutex_unlock(&syslog_lock);
1648	kfree(text);
1649	return len;
1650}
1651
1652static int syslog_print_all(char __user *buf, int size, bool clear)
1653{
1654	struct printk_info info;
1655	struct printk_record r;
1656	char *text;
1657	int len = 0;
 
1658	u64 seq;
 
1659	bool time;
1660
1661	text = kmalloc(PRINTK_MESSAGE_MAX, GFP_KERNEL);
1662	if (!text)
1663		return -ENOMEM;
1664
1665	time = printk_time;
 
1666	/*
1667	 * Find first record that fits, including all following records,
1668	 * into the user-provided buffer for this dump.
1669	 */
1670	seq = find_first_fitting_seq(latched_seq_read_nolock(&clear_seq), -1,
1671				     size, true, time);
 
 
1672
1673	prb_rec_init_rd(&r, &info, text, PRINTK_MESSAGE_MAX);
1674
1675	prb_for_each_record(seq, prb, seq, &r) {
1676		int textlen;
1677
1678		textlen = record_print_text(&r, true, time);
1679
1680		if (len + textlen > size) {
1681			seq--;
1682			break;
1683		}
1684
 
1685		if (copy_to_user(buf + len, text, textlen))
1686			len = -EFAULT;
1687		else
1688			len += textlen;
 
1689
1690		if (len < 0)
1691			break;
 
 
 
1692	}
1693
1694	if (clear) {
1695		mutex_lock(&syslog_lock);
1696		latched_seq_write(&clear_seq, seq);
1697		mutex_unlock(&syslog_lock);
1698	}
 
1699
1700	kfree(text);
1701	return len;
1702}
1703
1704static void syslog_clear(void)
1705{
1706	mutex_lock(&syslog_lock);
1707	latched_seq_write(&clear_seq, prb_next_seq(prb));
1708	mutex_unlock(&syslog_lock);
 
1709}
1710
1711int do_syslog(int type, char __user *buf, int len, int source)
1712{
1713	struct printk_info info;
1714	bool clear = false;
1715	static int saved_console_loglevel = LOGLEVEL_DEFAULT;
1716	int error;
1717
1718	error = check_syslog_permissions(type, source);
1719	if (error)
1720		return error;
1721
1722	switch (type) {
1723	case SYSLOG_ACTION_CLOSE:	/* Close log */
1724		break;
1725	case SYSLOG_ACTION_OPEN:	/* Open log */
1726		break;
1727	case SYSLOG_ACTION_READ:	/* Read from log */
1728		if (!buf || len < 0)
1729			return -EINVAL;
1730		if (!len)
1731			return 0;
1732		if (!access_ok(buf, len))
1733			return -EFAULT;
1734		error = syslog_print(buf, len);
1735		break;
1736	/* Read/clear last kernel messages */
1737	case SYSLOG_ACTION_READ_CLEAR:
1738		clear = true;
1739		fallthrough;
1740	/* Read last kernel messages */
1741	case SYSLOG_ACTION_READ_ALL:
1742		if (!buf || len < 0)
1743			return -EINVAL;
1744		if (!len)
1745			return 0;
1746		if (!access_ok(buf, len))
1747			return -EFAULT;
1748		error = syslog_print_all(buf, len, clear);
1749		break;
1750	/* Clear ring buffer */
1751	case SYSLOG_ACTION_CLEAR:
1752		syslog_clear();
1753		break;
1754	/* Disable logging to console */
1755	case SYSLOG_ACTION_CONSOLE_OFF:
1756		if (saved_console_loglevel == LOGLEVEL_DEFAULT)
1757			saved_console_loglevel = console_loglevel;
1758		console_loglevel = minimum_console_loglevel;
1759		break;
1760	/* Enable logging to console */
1761	case SYSLOG_ACTION_CONSOLE_ON:
1762		if (saved_console_loglevel != LOGLEVEL_DEFAULT) {
1763			console_loglevel = saved_console_loglevel;
1764			saved_console_loglevel = LOGLEVEL_DEFAULT;
1765		}
1766		break;
1767	/* Set level of messages printed to console */
1768	case SYSLOG_ACTION_CONSOLE_LEVEL:
1769		if (len < 1 || len > 8)
1770			return -EINVAL;
1771		if (len < minimum_console_loglevel)
1772			len = minimum_console_loglevel;
1773		console_loglevel = len;
1774		/* Implicitly re-enable logging to console */
1775		saved_console_loglevel = LOGLEVEL_DEFAULT;
1776		break;
1777	/* Number of chars in the log buffer */
1778	case SYSLOG_ACTION_SIZE_UNREAD:
1779		mutex_lock(&syslog_lock);
1780		if (!prb_read_valid_info(prb, syslog_seq, &info, NULL)) {
1781			/* No unread messages. */
1782			mutex_unlock(&syslog_lock);
1783			return 0;
1784		}
1785		if (info.seq != syslog_seq) {
1786			/* messages are gone, move to first one */
1787			syslog_seq = info.seq;
 
1788			syslog_partial = 0;
1789		}
1790		if (source == SYSLOG_FROM_PROC) {
1791			/*
 1792			 * Short-cut for poll("/proc/kmsg") which simply checks
1793			 * for pending data, not the size; return the count of
1794			 * records, not the length.
1795			 */
1796			error = prb_next_seq(prb) - syslog_seq;
1797		} else {
 
 
1798			bool time = syslog_partial ? syslog_time : printk_time;
1799			unsigned int line_count;
1800			u64 seq;
1801
1802			prb_for_each_info(syslog_seq, prb, seq, &info,
1803					  &line_count) {
1804				error += get_record_print_text_size(&info, line_count,
1805								    true, time);
 
1806				time = printk_time;
 
 
1807			}
1808			error -= syslog_partial;
1809		}
1810		mutex_unlock(&syslog_lock);
1811		break;
1812	/* Size of the log buffer */
1813	case SYSLOG_ACTION_SIZE_BUFFER:
1814		error = log_buf_len;
1815		break;
1816	default:
1817		error = -EINVAL;
1818		break;
1819	}
1820
1821	return error;
1822}
1823
1824SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
1825{
1826	return do_syslog(type, buf, len, SYSLOG_FROM_READER);
1827}
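/*
 * Example (user-space side, illustrative only): glibc exposes this syscall
 * as klogctl(). For instance, klogctl(SYSLOG_ACTION_SIZE_BUFFER (10), NULL, 0)
 * returns log_buf_len, and a sufficiently privileged reader can dump the
 * whole buffer with klogctl(SYSLOG_ACTION_READ_ALL (3), buf, buf_len), which
 * is what older dmesg implementations did before switching to /dev/kmsg.
 */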
1828
1829/*
1830 * Special console_lock variants that help to reduce the risk of soft-lockups.
 1831 * They allow passing console_lock to another printk() call using a busy wait.
1832 */
1833
1834#ifdef CONFIG_LOCKDEP
1835static struct lockdep_map console_owner_dep_map = {
1836	.name = "console_owner"
1837};
1838#endif
1839
1840static DEFINE_RAW_SPINLOCK(console_owner_lock);
1841static struct task_struct *console_owner;
1842static bool console_waiter;
1843
1844/**
1845 * console_lock_spinning_enable - mark beginning of code where another
1846 *	thread might safely busy wait
1847 *
1848 * This basically converts console_lock into a spinlock. This marks
1849 * the section where the console_lock owner can not sleep, because
1850 * there may be a waiter spinning (like a spinlock). Also it must be
1851 * ready to hand over the lock at the end of the section.
1852 */
1853static void console_lock_spinning_enable(void)
1854{
1855	/*
1856	 * Do not use spinning in panic(). The panic CPU wants to keep the lock.
1857	 * Non-panic CPUs abandon the flush anyway.
1858	 *
1859	 * Just keep the lockdep annotation. The panic-CPU should avoid
1860	 * taking console_owner_lock because it might cause a deadlock.
 1861	 * This looks like the easiest way to prevent false lockdep
 1862	 * reports without handling the races in a lockless way.
1863	 */
1864	if (panic_in_progress())
1865		goto lockdep;
1866
1867	raw_spin_lock(&console_owner_lock);
1868	console_owner = current;
1869	raw_spin_unlock(&console_owner_lock);
1870
1871lockdep:
1872	/* The waiter may spin on us after setting console_owner */
1873	spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
1874}
1875
1876/**
1877 * console_lock_spinning_disable_and_check - mark end of code where another
1878 *	thread was able to busy wait and check if there is a waiter
1879 * @cookie: cookie returned from console_srcu_read_lock()
1880 *
1881 * This is called at the end of the section where spinning is allowed.
1882 * It has two functions. First, it is a signal that it is no longer
1883 * safe to start busy waiting for the lock. Second, it checks if
1884 * there is a busy waiter and passes the lock rights to her.
1885 *
1886 * Important: Callers lose both the console_lock and the SRCU read lock if
1887 *	there was a busy waiter. They must not touch items synchronized by
1888 *	console_lock or SRCU read lock in this case.
1889 *
1890 * Return: 1 if the lock rights were passed, 0 otherwise.
1891 */
1892static int console_lock_spinning_disable_and_check(int cookie)
1893{
1894	int waiter;
1895
1896	/*
1897	 * Ignore spinning waiters during panic() because they might get stopped
 1898	 * or blocked at any time.
1899	 *
1900	 * It is safe because nobody is allowed to start spinning during panic
 1901	 * in the first place. If there has been a waiter, then non-panic CPUs
1902	 * might stay spinning. They would get stopped anyway. The panic context
1903	 * will never start spinning and an interrupted spin on panic CPU will
1904	 * never continue.
1905	 */
1906	if (panic_in_progress()) {
1907		/* Keep lockdep happy. */
1908		spin_release(&console_owner_dep_map, _THIS_IP_);
1909		return 0;
1910	}
1911
1912	raw_spin_lock(&console_owner_lock);
1913	waiter = READ_ONCE(console_waiter);
1914	console_owner = NULL;
1915	raw_spin_unlock(&console_owner_lock);
1916
1917	if (!waiter) {
1918		spin_release(&console_owner_dep_map, _THIS_IP_);
1919		return 0;
1920	}
1921
1922	/* The waiter is now free to continue */
1923	WRITE_ONCE(console_waiter, false);
1924
1925	spin_release(&console_owner_dep_map, _THIS_IP_);
1926
1927	/*
1928	 * Preserve lockdep lock ordering. Release the SRCU read lock before
1929	 * releasing the console_lock.
1930	 */
1931	console_srcu_read_unlock(cookie);
1932
1933	/*
1934	 * Hand off console_lock to waiter. The waiter will perform
1935	 * the up(). After this, the waiter is the console_lock owner.
1936	 */
1937	mutex_release(&console_lock_dep_map, _THIS_IP_);
1938	return 1;
1939}
1940
1941/**
1942 * console_trylock_spinning - try to get console_lock by busy waiting
1943 *
 1944 * This allows busy waiting for the console_lock when the current
1945 * owner is running in specially marked sections. It means that
1946 * the current owner is running and cannot reschedule until it
1947 * is ready to lose the lock.
1948 *
 1949 * Return: 1 if we got the lock, 0 otherwise
1950 */
1951static int console_trylock_spinning(void)
1952{
1953	struct task_struct *owner = NULL;
1954	bool waiter;
1955	bool spin = false;
1956	unsigned long flags;
1957
1958	if (console_trylock())
1959		return 1;
1960
1961	/*
1962	 * It's unsafe to spin once a panic has begun. If we are the
1963	 * panic CPU, we may have already halted the owner of the
1964	 * console_sem. If we are not the panic CPU, then we should
1965	 * avoid taking console_sem, so the panic CPU has a better
1966	 * chance of cleanly acquiring it later.
1967	 */
1968	if (panic_in_progress())
1969		return 0;
1970
1971	printk_safe_enter_irqsave(flags);
1972
1973	raw_spin_lock(&console_owner_lock);
1974	owner = READ_ONCE(console_owner);
1975	waiter = READ_ONCE(console_waiter);
1976	if (!waiter && owner && owner != current) {
1977		WRITE_ONCE(console_waiter, true);
1978		spin = true;
1979	}
1980	raw_spin_unlock(&console_owner_lock);
1981
1982	/*
1983	 * If there is an active printk() writing to the
1984	 * consoles, instead of having it write our data too,
1985	 * see if we can offload that load from the active
1986	 * printer, and do some printing ourselves.
1987	 * Go into a spin only if there isn't already a waiter
1988	 * spinning, and there is an active printer, and
1989	 * that active printer isn't us (recursive printk?).
1990	 */
1991	if (!spin) {
1992		printk_safe_exit_irqrestore(flags);
1993		return 0;
1994	}
1995
1996	/* We spin waiting for the owner to release us */
1997	spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
1998	/* Owner will clear console_waiter on hand off */
1999	while (READ_ONCE(console_waiter))
2000		cpu_relax();
2001	spin_release(&console_owner_dep_map, _THIS_IP_);
2002
2003	printk_safe_exit_irqrestore(flags);
2004	/*
2005	 * The owner passed the console lock to us.
2006	 * Since we did not spin on console lock, annotate
2007	 * this as a trylock. Otherwise lockdep will
2008	 * complain.
2009	 */
2010	mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_);
2011
2012	/*
2013	 * Update @console_may_schedule for trylock because the previous
2014	 * owner may have been schedulable.
2015	 */
2016	console_may_schedule = 0;
2017
2018	return 1;
2019}
2020
2021/*
2022 * Recursion is tracked separately on each CPU. If NMIs are supported, an
2023 * additional NMI context per CPU is also separately tracked. Until per-CPU
 2024 * data is available, a separate "early tracking" is performed.
2025 */
2026static DEFINE_PER_CPU(u8, printk_count);
2027static u8 printk_count_early;
2028#ifdef CONFIG_HAVE_NMI
2029static DEFINE_PER_CPU(u8, printk_count_nmi);
2030static u8 printk_count_nmi_early;
2031#endif
2032
2033/*
2034 * Recursion is limited to keep the output sane. printk() should not require
2035 * more than 1 level of recursion (allowing, for example, printk() to trigger
2036 * a WARN), but a higher value is used in case some printk-internal errors
2037 * exist, such as the ringbuffer validation checks failing.
2038 */
2039#define PRINTK_MAX_RECURSION 3
2040
2041/*
2042 * Return a pointer to the dedicated counter for the CPU+context of the
2043 * caller.
2044 */
2045static u8 *__printk_recursion_counter(void)
2046{
2047#ifdef CONFIG_HAVE_NMI
2048	if (in_nmi()) {
2049		if (printk_percpu_data_ready())
2050			return this_cpu_ptr(&printk_count_nmi);
2051		return &printk_count_nmi_early;
 
 
 
2052	}
2053#endif
2054	if (printk_percpu_data_ready())
2055		return this_cpu_ptr(&printk_count);
2056	return &printk_count_early;
2057}
2058
2059/*
2060 * Enter recursion tracking. Interrupts are disabled to simplify tracking.
2061 * The caller must check the boolean return value to see if the recursion is
2062 * allowed. On failure, interrupts are not disabled.
2063 *
2064 * @recursion_ptr must be a variable of type (u8 *) and is the same variable
2065 * that is passed to printk_exit_irqrestore().
2066 */
2067#define printk_enter_irqsave(recursion_ptr, flags)	\
2068({							\
2069	bool success = true;				\
2070							\
2071	typecheck(u8 *, recursion_ptr);			\
2072	local_irq_save(flags);				\
2073	(recursion_ptr) = __printk_recursion_counter();	\
2074	if (*(recursion_ptr) > PRINTK_MAX_RECURSION) {	\
2075		local_irq_restore(flags);		\
2076		success = false;			\
2077	} else {					\
2078		(*(recursion_ptr))++;			\
2079	}						\
2080	success;					\
2081})
2082
2083/* Exit recursion tracking, restoring interrupts. */
2084#define printk_exit_irqrestore(recursion_ptr, flags)	\
2085	do {						\
2086		typecheck(u8 *, recursion_ptr);		\
2087		(*(recursion_ptr))--;			\
2088		local_irq_restore(flags);		\
2089	} while (0)
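/*
 * Illustrative pairing of the two macros above (vprintk_store() below is
 * the real user):
 *
 *	u8 *recursion_ptr;
 *	unsigned long irqflags;
 *
 *	if (!printk_enter_irqsave(recursion_ptr, irqflags))
 *		return 0;
 *	...store the message...
 *	printk_exit_irqrestore(recursion_ptr, irqflags);
 */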
2090
2091int printk_delay_msec __read_mostly;
2092
2093static inline void printk_delay(int level)
2094{
2095	boot_delay_msec(level);
2096
2097	if (unlikely(printk_delay_msec)) {
2098		int m = printk_delay_msec;
2099
2100		while (m--) {
2101			mdelay(1);
2102			touch_nmi_watchdog();
2103		}
2104	}
2105}
2106
2107static inline u32 printk_caller_id(void)
2108{
2109	return in_task() ? task_pid_nr(current) :
2110		0x80000000 + smp_processor_id();
2111}
2112
2113/**
2114 * printk_parse_prefix - Parse level and control flags.
2115 *
2116 * @text:     The terminated text message.
2117 * @level:    A pointer to the current level value, will be updated.
2118 * @flags:    A pointer to the current printk_info flags, will be updated.
2119 *
2120 * @level may be NULL if the caller is not interested in the parsed value.
2121 * Otherwise the variable pointed to by @level must be set to
2122 * LOGLEVEL_DEFAULT in order to be updated with the parsed value.
2123 *
2124 * @flags may be NULL if the caller is not interested in the parsed value.
2125 * Otherwise the variable pointed to by @flags will be OR'd with the parsed
2126 * value.
2127 *
2128 * Return: The length of the parsed level and control flags.
2129 */
2130u16 printk_parse_prefix(const char *text, int *level,
2131			enum printk_info_flags *flags)
2132{
2133	u16 prefix_len = 0;
2134	int kern_level;
2135
2136	while (*text) {
2137		kern_level = printk_get_level(text);
2138		if (!kern_level)
2139			break;
2140
2141		switch (kern_level) {
2142		case '0' ... '7':
2143			if (level && *level == LOGLEVEL_DEFAULT)
2144				*level = kern_level - '0';
2145			break;
2146		case 'c':	/* KERN_CONT */
2147			if (flags)
2148				*flags |= LOG_CONT;
2149		}
2150
2151		prefix_len += 2;
2152		text += 2;
2153	}
2154
2155	return prefix_len;
2156}
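/*
 * Example (illustrative): for the text "\0013Hello", i.e. KERN_ERR "Hello",
 * printk_parse_prefix() returns 2 and sets *level to 3 (provided the caller
 * initialized it to LOGLEVEL_DEFAULT). For "\001cmore", it returns 2 and ORs
 * LOG_CONT into *flags, matching KERN_CONT continuation messages.
 */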
2157
2158__printf(5, 0)
2159static u16 printk_sprint(char *text, u16 size, int facility,
2160			 enum printk_info_flags *flags, const char *fmt,
2161			 va_list args)
2162{
2163	u16 text_len;
2164
2165	text_len = vscnprintf(text, size, fmt, args);
2166
2167	/* Mark and strip a trailing newline. */
2168	if (text_len && text[text_len - 1] == '\n') {
2169		text_len--;
2170		*flags |= LOG_NEWLINE;
2171	}
2172
2173	/* Strip log level and control flags. */
2174	if (facility == 0) {
2175		u16 prefix_len;
2176
2177		prefix_len = printk_parse_prefix(text, NULL, NULL);
2178		if (prefix_len) {
2179			text_len -= prefix_len;
2180			memmove(text, text + prefix_len, text_len);
2181		}
2182	}
2183
2184	trace_console(text, text_len);
2185
2186	return text_len;
2187}
2188
2189__printf(4, 0)
2190int vprintk_store(int facility, int level,
2191		  const struct dev_printk_info *dev_info,
2192		  const char *fmt, va_list args)
2193{
2194	struct prb_reserved_entry e;
2195	enum printk_info_flags flags = 0;
2196	struct printk_record r;
2197	unsigned long irqflags;
2198	u16 trunc_msg_len = 0;
2199	char prefix_buf[8];
2200	u8 *recursion_ptr;
2201	u16 reserve_size;
2202	va_list args2;
2203	u32 caller_id;
2204	u16 text_len;
2205	int ret = 0;
2206	u64 ts_nsec;
2207
2208	if (!printk_enter_irqsave(recursion_ptr, irqflags))
2209		return 0;
2210
2211	/*
2212	 * Since the duration of printk() can vary depending on the message
2213	 * and state of the ringbuffer, grab the timestamp now so that it is
2214	 * close to the call of printk(). This provides a more deterministic
2215	 * timestamp with respect to the caller.
2216	 */
2217	ts_nsec = local_clock();
2218
2219	caller_id = printk_caller_id();
2220
2221	/*
2222	 * The sprintf needs to come first since the syslog prefix might be
2223	 * passed in as a parameter. An extra byte must be reserved so that
2224	 * later the vscnprintf() into the reserved buffer has room for the
2225	 * terminating '\0', which is not counted by vsnprintf().
2226	 */
2227	va_copy(args2, args);
2228	reserve_size = vsnprintf(&prefix_buf[0], sizeof(prefix_buf), fmt, args2) + 1;
2229	va_end(args2);
2230
2231	if (reserve_size > PRINTKRB_RECORD_MAX)
2232		reserve_size = PRINTKRB_RECORD_MAX;
2233
2234	/* Extract log level or control flags. */
2235	if (facility == 0)
2236		printk_parse_prefix(&prefix_buf[0], &level, &flags);
2237
2238	if (level == LOGLEVEL_DEFAULT)
2239		level = default_message_loglevel;
2240
2241	if (dev_info)
2242		flags |= LOG_NEWLINE;
2243
2244	if (flags & LOG_CONT) {
2245		prb_rec_init_wr(&r, reserve_size);
2246		if (prb_reserve_in_last(&e, prb, &r, caller_id, PRINTKRB_RECORD_MAX)) {
2247			text_len = printk_sprint(&r.text_buf[r.info->text_len], reserve_size,
2248						 facility, &flags, fmt, args);
2249			r.info->text_len += text_len;
2250
2251			if (flags & LOG_NEWLINE) {
2252				r.info->flags |= LOG_NEWLINE;
2253				prb_final_commit(&e);
2254			} else {
2255				prb_commit(&e);
2256			}
2257
2258			ret = text_len;
2259			goto out;
2260		}
2261	}
2262
2263	/*
2264	 * Explicitly initialize the record before every prb_reserve() call.
2265	 * prb_reserve_in_last() and prb_reserve() purposely invalidate the
2266	 * structure when they fail.
2267	 */
2268	prb_rec_init_wr(&r, reserve_size);
2269	if (!prb_reserve(&e, prb, &r)) {
2270		/* truncate the message if it is too long for empty buffer */
2271		truncate_msg(&reserve_size, &trunc_msg_len);
2272
2273		prb_rec_init_wr(&r, reserve_size + trunc_msg_len);
2274		if (!prb_reserve(&e, prb, &r))
2275			goto out;
2276	}
2277
2278	/* fill message */
2279	text_len = printk_sprint(&r.text_buf[0], reserve_size, facility, &flags, fmt, args);
2280	if (trunc_msg_len)
2281		memcpy(&r.text_buf[text_len], trunc_msg, trunc_msg_len);
2282	r.info->text_len = text_len + trunc_msg_len;
2283	r.info->facility = facility;
2284	r.info->level = level & 7;
2285	r.info->flags = flags & 0x1f;
2286	r.info->ts_nsec = ts_nsec;
2287	r.info->caller_id = caller_id;
2288	if (dev_info)
2289		memcpy(&r.info->dev_info, dev_info, sizeof(r.info->dev_info));
2290
2291	/* A message without a trailing newline can be continued. */
2292	if (!(flags & LOG_NEWLINE))
2293		prb_commit(&e);
2294	else
2295		prb_final_commit(&e);
2296
2297	ret = text_len + trunc_msg_len;
2298out:
2299	printk_exit_irqrestore(recursion_ptr, irqflags);
2300	return ret;
2301}
2302
2303asmlinkage int vprintk_emit(int facility, int level,
2304			    const struct dev_printk_info *dev_info,
2305			    const char *fmt, va_list args)
2306{
2307	int printed_len;
2308	bool in_sched = false;
2309
2310	/* Suppress unimportant messages after panic happens */
2311	if (unlikely(suppress_printk))
2312		return 0;
2313
2314	/*
2315	 * The messages on the panic CPU are the most important. If
2316	 * non-panic CPUs are generating any messages, they will be
2317	 * silently dropped.
2318	 */
2319	if (other_cpu_in_panic())
2320		return 0;
2321
2322	if (level == LOGLEVEL_SCHED) {
2323		level = LOGLEVEL_DEFAULT;
2324		in_sched = true;
2325	}
2326
2327	printk_delay(level);
2328
2329	printed_len = vprintk_store(facility, level, dev_info, fmt, args);
2330
2331	/* If called from the scheduler, we cannot call up(). */
2332	if (!in_sched) {
2333		/*
2334		 * The caller may be holding system-critical or
2335		 * timing-sensitive locks. Disable preemption during
2336		 * printing of all remaining records to all consoles so that
2337		 * this context can return as soon as possible. Hopefully
2338		 * another printk() caller will take over the printing.
2339		 */
2340		preempt_disable();
2341		/*
2342		 * Try to acquire and then immediately release the console
2343		 * semaphore. The release will print out buffers. With the
2344		 * spinning variant, this context tries to take over the
2345		 * printing from another printing context.
2346		 */
2347		if (console_trylock_spinning())
2348			console_unlock();
2349		preempt_enable();
2350	}
2351
2352	if (in_sched)
2353		defer_console_output();
2354	else
2355		wake_up_klogd();
2356
2357	return printed_len;
2358}
2359EXPORT_SYMBOL(vprintk_emit);
2360
2361int vprintk_default(const char *fmt, va_list args)
2362{
2363	return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
2364}
2365EXPORT_SYMBOL_GPL(vprintk_default);
2366
2367asmlinkage __visible int _printk(const char *fmt, ...)
2368{
2369	va_list args;
2370	int r;
2371
2372	va_start(args, fmt);
2373	r = vprintk(fmt, args);
2374	va_end(args);
2375
2376	return r;
2377}
2378EXPORT_SYMBOL(_printk);
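
/*
 * Sketch of how the usual helpers end up here (illustrative only): pr_info()
 * and friends expand to printk(KERN_INFO pr_fmt(fmt), ...), and printk() in
 * turn resolves to _printk(). So a driver call such as
 *
 *	pr_info("probing device %d\n", id);
 *
 * reaches _printk() with the level byte embedded in the format string,
 * which vprintk_store() later strips again via printk_parse_prefix().
 */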
2379
2380static bool pr_flush(int timeout_ms, bool reset_on_progress);
2381static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
2382
2383#else /* CONFIG_PRINTK */
2384
2385#define printk_time		false
2386
2387#define prb_read_valid(rb, seq, r)	false
2388#define prb_first_valid_seq(rb)		0
2389#define prb_next_seq(rb)		0
2390
2391static u64 syslog_seq;
2392
2393static bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; }
2394static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
2395
2396#endif /* CONFIG_PRINTK */
2397
2398#ifdef CONFIG_EARLY_PRINTK
2399struct console *early_console;
2400
2401asmlinkage __visible void early_printk(const char *fmt, ...)
2402{
2403	va_list ap;
2404	char buf[512];
2405	int n;
2406
2407	if (!early_console)
2408		return;
2409
2410	va_start(ap, fmt);
2411	n = vscnprintf(buf, sizeof(buf), fmt, ap);
2412	va_end(ap);
2413
2414	early_console->write(early_console, buf, n);
2415}
2416#endif
2417
2418static void set_user_specified(struct console_cmdline *c, bool user_specified)
2419{
2420	if (!user_specified)
2421		return;
2422
2423	/*
2424	 * @c console was defined by the user on the command line.
2425	 * Do not clear when added twice also by SPCR or the device tree.
2426	 */
2427	c->user_specified = true;
2428	/* At least one console defined by the user on the command line. */
2429	console_set_on_cmdline = 1;
2430}
2431
2432static int __add_preferred_console(const char *name, const short idx, char *options,
2433				   char *brl_options, bool user_specified)
2434{
2435	struct console_cmdline *c;
2436	int i;
2437
2438	/*
2439	 * We use a signed short index for struct console for device drivers to
2440	 * indicate a not-yet-assigned index or port. However, a negative index
2441	 * value is not valid for a preferred console.
2442	 */
2443	if (idx < 0)
2444		return -EINVAL;
2445
2446	/*
2447	 *	See if this tty is not yet registered, and
2448	 *	if we have a slot free.
2449	 */
2450	for (i = 0, c = console_cmdline;
2451	     i < MAX_CMDLINECONSOLES && c->name[0];
2452	     i++, c++) {
2453		if (strcmp(c->name, name) == 0 && c->index == idx) {
2454			if (!brl_options)
2455				preferred_console = i;
2456			set_user_specified(c, user_specified);
2457			return 0;
2458		}
2459	}
2460	if (i == MAX_CMDLINECONSOLES)
2461		return -E2BIG;
2462	if (!brl_options)
2463		preferred_console = i;
2464	strscpy(c->name, name, sizeof(c->name));
2465	c->options = options;
2466	set_user_specified(c, user_specified);
2467	braille_set_options(c, brl_options);
2468
2469	c->index = idx;
2470	return 0;
2471}
2472
2473static int __init console_msg_format_setup(char *str)
2474{
2475	if (!strcmp(str, "syslog"))
2476		console_msg_format = MSG_FORMAT_SYSLOG;
2477	if (!strcmp(str, "default"))
2478		console_msg_format = MSG_FORMAT_DEFAULT;
2479	return 1;
2480}
2481__setup("console_msg_format=", console_msg_format_setup);
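
/*
 * For example (illustrative), booting with
 *
 *	console_msg_format=syslog
 *
 * makes record_print_text() emit each record with a syslog-style "<loglevel>"
 * prefix instead of the default plain-text format.
 */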
2482
2483/*
2484 * Set up a console.  Called via do_early_param() in init/main.c
2485 * for each "console=" parameter in the boot command line.
2486 */
2487static int __init console_setup(char *str)
2488{
2489	char buf[sizeof(console_cmdline[0].name) + 4]; /* 4 for "ttyS" */
2490	char *s, *options, *brl_options = NULL;
2491	int idx;
2492
2493	/*
2494	 * console="" or console=null have been suggested as a way to
2495	 * disable console output. Use ttynull, which was created for
2496	 * exactly this purpose.
2497	 */
2498	if (str[0] == 0 || strcmp(str, "null") == 0) {
2499		__add_preferred_console("ttynull", 0, NULL, NULL, true);
2500		return 1;
2501	}
2502
2503	if (_braille_console_setup(&str, &brl_options))
2504		return 1;
2505
2506	/*
2507	 * Decode str into name, index, options.
2508	 */
2509	if (str[0] >= '0' && str[0] <= '9') {
2510		strcpy(buf, "ttyS");
2511		strncpy(buf + 4, str, sizeof(buf) - 5);
2512	} else {
2513		strncpy(buf, str, sizeof(buf) - 1);
2514	}
2515	buf[sizeof(buf) - 1] = 0;
2516	options = strchr(str, ',');
2517	if (options)
2518		*(options++) = 0;
2519#ifdef __sparc__
2520	if (!strcmp(str, "ttya"))
2521		strcpy(buf, "ttyS0");
2522	if (!strcmp(str, "ttyb"))
2523		strcpy(buf, "ttyS1");
2524#endif
2525	for (s = buf; *s; s++)
2526		if (isdigit(*s) || *s == ',')
2527			break;
2528	idx = simple_strtoul(s, NULL, 10);
2529	*s = 0;
2530
2531	__add_preferred_console(buf, idx, options, brl_options, true);
2532	return 1;
2533}
2534__setup("console=", console_setup);
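
/*
 * Example of the decoding above (illustrative): the boot parameter
 *
 *	console=ttyS0,115200n8
 *
 * is split into the name "ttyS", index 0 and options "115200n8". A purely
 * numeric parameter such as console=1 is treated as console=ttyS1 for
 * backward compatibility.
 */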
2535
2536/**
2537 * add_preferred_console - add a device to the list of preferred consoles.
2538 * @name: device name
2539 * @idx: device index
2540 * @options: options for this console
2541 *
2542 * The last preferred console added will be used for kernel messages
2543 * and stdin/out/err for init.  Normally this is used by console_setup
2544 * above to handle user-supplied console arguments; however it can also
2545 * be used by arch-specific code either to override the user or more
2546 * commonly to provide a default console (i.e. from PROM variables) when
2547 * the user has not supplied one.
2548 */
2549int add_preferred_console(const char *name, const short idx, char *options)
2550{
2551	return __add_preferred_console(name, idx, options, NULL, false);
2552}
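
/*
 * Illustrative use from architecture setup code (a sketch, not lifted from
 * any particular arch): when firmware describes the boot UART, something
 * like
 *
 *	add_preferred_console("ttyS", 0, "115200n8");
 *
 * registers ttyS0 as a preferred console; arch code typically only does
 * this when the user has not supplied a console= parameter.
 */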
2553
2554bool console_suspend_enabled = true;
2555EXPORT_SYMBOL(console_suspend_enabled);
2556
2557static int __init console_suspend_disable(char *str)
2558{
2559	console_suspend_enabled = false;
2560	return 1;
2561}
2562__setup("no_console_suspend", console_suspend_disable);
2563module_param_named(console_suspend, console_suspend_enabled,
2564		bool, S_IRUGO | S_IWUSR);
2565MODULE_PARM_DESC(console_suspend, "suspend console during suspend"
2566	" and hibernate operations");
2567
2568static bool printk_console_no_auto_verbose;
2569
2570void console_verbose(void)
2571{
2572	if (console_loglevel && !printk_console_no_auto_verbose)
2573		console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
2574}
2575EXPORT_SYMBOL_GPL(console_verbose);
2576
2577module_param_named(console_no_auto_verbose, printk_console_no_auto_verbose, bool, 0644);
2578MODULE_PARM_DESC(console_no_auto_verbose, "Disable console loglevel raise to highest on oops/panic/etc");
2579
2580/**
2581 * suspend_console - suspend the console subsystem
2582 *
2583 * This disables printk() while we go into suspend states
2584 */
2585void suspend_console(void)
2586{
2587	struct console *con;
2588
2589	if (!console_suspend_enabled)
2590		return;
2591	pr_info("Suspending console(s) (use no_console_suspend to debug)\n");
2592	pr_flush(1000, true);
2593
2594	console_list_lock();
2595	for_each_console(con)
2596		console_srcu_write_flags(con, con->flags | CON_SUSPENDED);
2597	console_list_unlock();
2598
2599	/*
2600	 * Ensure that all SRCU list walks have completed. All printing
2601	 * contexts must be able to see that they are suspended so that it
2602	 * is guaranteed that all printing has stopped when this function
2603	 * completes.
2604	 */
2605	synchronize_srcu(&console_srcu);
2606}
2607
2608void resume_console(void)
2609{
2610	struct console *con;
2611
2612	if (!console_suspend_enabled)
2613		return;
2614
2615	console_list_lock();
2616	for_each_console(con)
2617		console_srcu_write_flags(con, con->flags & ~CON_SUSPENDED);
2618	console_list_unlock();
2619
2620	/*
2621	 * Ensure that all SRCU list walks have completed. All printing
2622	 * contexts must be able to see they are no longer suspended so
2623	 * that they are guaranteed to wake up and resume printing.
2624	 */
2625	synchronize_srcu(&console_srcu);
2626
2627	pr_flush(1000, true);
2628}
2629
2630/**
2631 * console_cpu_notify - print deferred console messages after CPU hotplug
2632 * @cpu: unused
2633 *
2634 * If printk() is called from a CPU that is not online yet, the messages
2635 * will be printed on the console only if there are CON_ANYTIME consoles.
2636 * This function is called when a new CPU comes online (or fails to come
2637 * up) or goes offline.
2638 */
2639static int console_cpu_notify(unsigned int cpu)
2640{
2641	if (!cpuhp_tasks_frozen) {
2642		/* If trylock fails, someone else is doing the printing */
2643		if (console_trylock())
2644			console_unlock();
2645	}
2646	return 0;
2647}
2648
2649/**
2650 * console_lock - block the console subsystem from printing
2651 *
2652 * Acquires a lock which guarantees that no consoles will
2653 * be in or enter their write() callback.
2654 *
2655 * Can sleep, returns nothing.
2656 */
2657void console_lock(void)
2658{
2659	might_sleep();
2660
2661	/* On panic, the console_lock must be left to the panic cpu. */
2662	while (other_cpu_in_panic())
2663		msleep(1000);
2664
2665	down_console_sem();
2666	console_locked = 1;
2667	console_may_schedule = 1;
2668}
2669EXPORT_SYMBOL(console_lock);
2670
2671/**
2672 * console_trylock - try to block the console subsystem from printing
2673 *
2674 * Try to acquire a lock which guarantees that no consoles will
2675 * be in or enter their write() callback.
2676 *
2677 * returns 1 on success, and 0 on failure to acquire the lock.
2678 */
2679int console_trylock(void)
2680{
2681	/* On panic, the console_lock must be left to the panic cpu. */
2682	if (other_cpu_in_panic())
2683		return 0;
2684	if (down_trylock_console_sem())
2685		return 0;
2686	console_locked = 1;
2687	console_may_schedule = 0;
2688	return 1;
2689}
2690EXPORT_SYMBOL(console_trylock);
2691
2692int is_console_locked(void)
2693{
2694	return console_locked;
2695}
2696EXPORT_SYMBOL(is_console_locked);
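
/*
 * Typical usage pattern (an illustrative sketch; my_reconfigure_display() is
 * a hypothetical helper):
 *
 *	console_lock();
 *	my_reconfigure_display();
 *	console_unlock();
 *
 * No console write() callback can run between the two calls, and
 * console_unlock() prints any records that were stored in the meantime.
 */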
2697
2698/*
2699 * Check if the given console is currently capable and allowed to print
2700 * records.
2701 *
2702 * Requires the console_srcu_read_lock.
2703 */
2704static inline bool console_is_usable(struct console *con)
2705{
2706	short flags = console_srcu_read_flags(con);
2707
2708	if (!(flags & CON_ENABLED))
2709		return false;
2710
2711	if ((flags & CON_SUSPENDED))
2712		return false;
2713
2714	if (!con->write)
2715		return false;
2716
2717	/*
2718	 * Console drivers may assume that per-cpu resources have been
2719	 * allocated. So unless they're explicitly marked as being able to
2720	 * cope (CON_ANYTIME) don't call them until this CPU is officially up.
2721	 */
2722	if (!cpu_online(raw_smp_processor_id()) && !(flags & CON_ANYTIME))
2723		return false;
2724
2725	return true;
2726}
2727
2728static void __console_unlock(void)
2729{
2730	console_locked = 0;
2731	up_console_sem();
2732}
2733
2734#ifdef CONFIG_PRINTK
2735
2736/*
2737 * Prepend the message in @pmsg->pbufs->outbuf with a "dropped message". This
2738 * is achieved by shifting the existing message over and inserting the dropped
2739 * message.
2740 *
2741 * @pmsg is the printk message to prepend.
2742 *
2743 * @dropped is the dropped count to report in the dropped message.
2744 *
2745 * If the message text in @pmsg->pbufs->outbuf does not have enough space for
2746 * the dropped message, the message text will be sufficiently truncated.
2747 *
2748 * If @pmsg->pbufs->outbuf is modified, @pmsg->outbuf_len is updated.
2749 */
2750void console_prepend_dropped(struct printk_message *pmsg, unsigned long dropped)
2751{
2752	struct printk_buffers *pbufs = pmsg->pbufs;
2753	const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf);
2754	const size_t outbuf_sz = sizeof(pbufs->outbuf);
2755	char *scratchbuf = &pbufs->scratchbuf[0];
2756	char *outbuf = &pbufs->outbuf[0];
2757	size_t len;
2758
2759	len = scnprintf(scratchbuf, scratchbuf_sz,
2760		       "** %lu printk messages dropped **\n", dropped);
2761
2762	/*
2763	 * Make sure outbuf is sufficiently large before prepending.
2764	 * Keep at least the prefix when the message must be truncated.
2765	 * It is a rather theoretical problem when someone tries to
2766	 * use a minimalist buffer.
2767	 */
2768	if (WARN_ON_ONCE(len + PRINTK_PREFIX_MAX >= outbuf_sz))
2769		return;
2770
2771	if (pmsg->outbuf_len + len >= outbuf_sz) {
2772		/* Truncate the message, but keep it terminated. */
2773		pmsg->outbuf_len = outbuf_sz - (len + 1);
2774		outbuf[pmsg->outbuf_len] = 0;
2775	}
2776
2777	memmove(outbuf + len, outbuf, pmsg->outbuf_len + 1);
2778	memcpy(outbuf, scratchbuf, len);
2779	pmsg->outbuf_len += len;
2780}
2781
2782/*
2783 * Read and format the specified record (or a later record if the specified
2784 * record is not available).
2785 *
2786 * @pmsg will contain the formatted result. @pmsg->pbufs must point to a
2787 * struct printk_buffers.
2788 *
2789 * @seq is the record to read and format. If it is not available, the next
2790 * valid record is read.
2791 *
2792 * @is_extended specifies if the message should be formatted for extended
2793 * console output.
2794 *
2795 * @may_suppress specifies if records may be skipped based on loglevel.
2796 *
2797 * Returns false if no record is available. Otherwise true and all fields
2798 * of @pmsg are valid. (See the documentation of struct printk_message
2799 * for information about the @pmsg fields.)
2800 */
2801bool printk_get_next_message(struct printk_message *pmsg, u64 seq,
2802			     bool is_extended, bool may_suppress)
2803{
2804	struct printk_buffers *pbufs = pmsg->pbufs;
2805	const size_t scratchbuf_sz = sizeof(pbufs->scratchbuf);
2806	const size_t outbuf_sz = sizeof(pbufs->outbuf);
2807	char *scratchbuf = &pbufs->scratchbuf[0];
2808	char *outbuf = &pbufs->outbuf[0];
2809	struct printk_info info;
2810	struct printk_record r;
2811	size_t len = 0;
2812
2813	/*
2814	 * Formatting extended messages requires a separate buffer, so use the
2815	 * scratch buffer to read in the ringbuffer text.
2816	 *
2817	 * Formatting normal messages is done in-place, so read the ringbuffer
2818	 * text directly into the output buffer.
2819	 */
2820	if (is_extended)
2821		prb_rec_init_rd(&r, &info, scratchbuf, scratchbuf_sz);
2822	else
2823		prb_rec_init_rd(&r, &info, outbuf, outbuf_sz);
2824
2825	if (!prb_read_valid(prb, seq, &r))
2826		return false;
2827
2828	pmsg->seq = r.info->seq;
2829	pmsg->dropped = r.info->seq - seq;
2830
2831	/* Skip record that has level above the console loglevel. */
2832	if (may_suppress && suppress_message_printing(r.info->level))
2833		goto out;
2834
2835	if (is_extended) {
2836		len = info_print_ext_header(outbuf, outbuf_sz, r.info);
2837		len += msg_print_ext_body(outbuf + len, outbuf_sz - len,
2838					  &r.text_buf[0], r.info->text_len, &r.info->dev_info);
2839	} else {
2840		len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time);
2841	}
2842out:
2843	pmsg->outbuf_len = len;
2844	return true;
2845}
2846
2847/*
2848 * Used as the printk buffers for non-panic, serialized console printing.
2849 * This is for legacy (!CON_NBCON) as well as all boot (CON_BOOT) consoles.
2850 * Its usage requires the console_lock held.
2851 */
2852struct printk_buffers printk_shared_pbufs;
2853
2854/*
2855 * Print one record for the given console. The record printed is whatever
2856 * record is the next available record for the given console.
2857 *
2858 * @handover will be set to true if a printk waiter has taken over the
2859 * console_lock, in which case the caller is no longer holding both the
2860 * console_lock and the SRCU read lock. Otherwise it is set to false.
2861 *
2862 * @cookie is the cookie from the SRCU read lock.
2863 *
2864 * Returns false if the given console has no next record to print, otherwise
2865 * true.
2866 *
2867 * Requires the console_lock and the SRCU read lock.
2868 */
2869static bool console_emit_next_record(struct console *con, bool *handover, int cookie)
2870{
2871	bool is_extended = console_srcu_read_flags(con) & CON_EXTENDED;
2872	char *outbuf = &printk_shared_pbufs.outbuf[0];
2873	struct printk_message pmsg = {
2874		.pbufs = &printk_shared_pbufs,
2875	};
2876	unsigned long flags;
2877
2878	*handover = false;
2879
2880	if (!printk_get_next_message(&pmsg, con->seq, is_extended, true))
2881		return false;
2882
2883	con->dropped += pmsg.dropped;
2884
2885	/* Skip messages of formatted length 0. */
2886	if (pmsg.outbuf_len == 0) {
2887		con->seq = pmsg.seq + 1;
2888		goto skip;
2889	}
2890
2891	if (con->dropped && !is_extended) {
2892		console_prepend_dropped(&pmsg, con->dropped);
2893		con->dropped = 0;
2894	}
2895
2896	/*
2897	 * While actively printing out messages, if another printk()
2898	 * were to occur on another CPU, it may wait for this one to
2899	 * finish. This task cannot be preempted if there is a
2900	 * waiter waiting to take over.
2901	 *
2902	 * Interrupts are disabled because the hand over to a waiter
2903	 * must not be interrupted until the hand over is completed
2904	 * (@console_waiter is cleared).
2905	 */
2906	printk_safe_enter_irqsave(flags);
2907	console_lock_spinning_enable();
2908
2909	/* Do not trace print latency. */
2910	stop_critical_timings();
2911
2912	/* Write everything out to the hardware. */
2913	con->write(con, outbuf, pmsg.outbuf_len);
2914
2915	start_critical_timings();
2916
2917	con->seq = pmsg.seq + 1;
2918
2919	*handover = console_lock_spinning_disable_and_check(cookie);
2920	printk_safe_exit_irqrestore(flags);
2921skip:
2922	return true;
2923}
2924
2925#else
2926
2927static bool console_emit_next_record(struct console *con, bool *handover, int cookie)
2928{
2929	*handover = false;
2930	return false;
2931}
2932
2933#endif /* CONFIG_PRINTK */
2934
2935/*
2936 * Print out all remaining records to all consoles.
2937 *
2938 * @do_cond_resched is set by the caller. It can be true only in schedulable
2939 * context.
2940 *
2941 * @next_seq is set to the sequence number after the last available record.
2942 * The value is valid only when this function returns true. It means that all
2943 * usable consoles are completely flushed.
2944 *
2945 * @handover will be set to true if a printk waiter has taken over the
2946 * console_lock, in which case the caller is no longer holding the
2947 * console_lock. Otherwise it is set to false.
2948 *
2949 * Returns true when there was at least one usable console and all messages
2950 * were flushed to all usable consoles. A returned false informs the caller
2951 * that everything was not flushed (either there were no usable consoles or
2952 * another context has taken over printing or it is a panic situation and this
2953 * is not the panic CPU). Regardless of the reason, the caller should assume it
2954 * is not useful to immediately try again.
2955 *
2956 * Requires the console_lock.
2957 */
2958static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
2959{
2960	bool any_usable = false;
2961	struct console *con;
2962	bool any_progress;
2963	int cookie;
2964
2965	*next_seq = 0;
2966	*handover = false;
2967
2968	do {
2969		any_progress = false;
2970
2971		cookie = console_srcu_read_lock();
2972		for_each_console_srcu(con) {
2973			bool progress;
2974
2975			if (!console_is_usable(con))
2976				continue;
2977			any_usable = true;
2978
2979			progress = console_emit_next_record(con, handover, cookie);
2980
2981			/*
2982			 * If a handover has occurred, the SRCU read lock
2983			 * is already released.
2984			 */
2985			if (*handover)
2986				return false;
2987
2988			/* Track the next of the highest seq flushed. */
2989			if (con->seq > *next_seq)
2990				*next_seq = con->seq;
2991
2992			if (!progress)
2993				continue;
2994			any_progress = true;
2995
2996			/* Allow panic_cpu to take over the consoles safely. */
2997			if (other_cpu_in_panic())
2998				goto abandon;
2999
3000			if (do_cond_resched)
3001				cond_resched();
3002		}
3003		console_srcu_read_unlock(cookie);
3004	} while (any_progress);
3005
3006	return any_usable;
3007
3008abandon:
3009	console_srcu_read_unlock(cookie);
3010	return false;
3011}
3012
3013/**
3014 * console_unlock - unblock the console subsystem from printing
3015 *
3016 * Releases the console_lock which the caller holds to block printing of
3017 * the console subsystem.
3018 *
3019 * While the console_lock was held, console output may have been buffered
3020 * by printk().  If this is the case, console_unlock() emits
3021 * the output prior to releasing the lock.
3022 *
3023 * console_unlock() may be called from any context.
3024 */
3025void console_unlock(void)
3026{
3027	bool do_cond_resched;
3028	bool handover;
3029	bool flushed;
3030	u64 next_seq;
3031
3032	/*
3033	 * Console drivers are called with interrupts disabled, so
3034	 * @console_may_schedule should be cleared before; however, we may
3035	 * end up dumping a lot of lines, for example, if called from
3036	 * the console registration path, and should invoke cond_resched()
3037	 * between lines if allowable.  Not doing so can cause a very long
3038	 * scheduling stall on a slow console leading to RCU stall and
3039	 * softlockup warnings which exacerbate the issue with more
3040	 * messages practically incapacitating the system. Therefore, create
3041	 * a local to use for the printing loop.
3042	 */
3043	do_cond_resched = console_may_schedule;
3044
3045	do {
3046		console_may_schedule = 0;
3047
3048		flushed = console_flush_all(do_cond_resched, &next_seq, &handover);
3049		if (!handover)
3050			__console_unlock();
3051
3052		/*
3053		 * Abort if there was a failure to flush all messages to all
3054		 * usable consoles. Either it is not possible to flush (in
3055		 * which case it would be an infinite loop of retrying) or
3056		 * another context has taken over printing.
3057		 */
3058		if (!flushed)
3059			break;
3060
3061		/*
3062		 * Some context may have added new records after
3063		 * console_flush_all() but before unlocking the console.
3064		 * Re-check if there is a new record to flush. If the trylock
3065		 * fails, another context is already handling the printing.
3066		 */
3067	} while (prb_read_valid(prb, next_seq, NULL) && console_trylock());
3068}
3069EXPORT_SYMBOL(console_unlock);
3070
3071/**
3072 * console_conditional_schedule - yield the CPU if required
3073 *
3074 * If the console code is currently allowed to sleep, and
3075 * if this CPU should yield the CPU to another task, do
3076 * so here.
3077 *
3078 * Must be called while holding the console_lock.
3079 */
3080void __sched console_conditional_schedule(void)
3081{
3082	if (console_may_schedule)
3083		cond_resched();
3084}
3085EXPORT_SYMBOL(console_conditional_schedule);
3086
3087void console_unblank(void)
3088{
3089	bool found_unblank = false;
3090	struct console *c;
3091	int cookie;
3092
3093	/*
3094	 * First check if there are any consoles implementing the unblank()
3095	 * callback. If not, there is no reason to continue and take the
3096	 * console lock, which in particular can be dangerous if
3097	 * @oops_in_progress is set.
3098	 */
3099	cookie = console_srcu_read_lock();
3100	for_each_console_srcu(c) {
3101		if ((console_srcu_read_flags(c) & CON_ENABLED) && c->unblank) {
3102			found_unblank = true;
3103			break;
3104		}
3105	}
3106	console_srcu_read_unlock(cookie);
3107	if (!found_unblank)
3108		return;
3109
3110	/*
3111	 * Stop console printing because the unblank() callback may
3112	 * assume the console is not within its write() callback.
3113	 *
3114	 * If @oops_in_progress is set, this may be an atomic context.
3115	 * In that case, attempt a trylock as best-effort.
3116	 */
3117	if (oops_in_progress) {
3118		/* Semaphores are not NMI-safe. */
3119		if (in_nmi())
3120			return;
3121
3122		/*
3123		 * Attempting to trylock the console lock can deadlock
3124		 * if another CPU was stopped while modifying the
3125		 * semaphore. "Hope and pray" that this is not the
3126		 * current situation.
3127		 */
3128		if (down_trylock_console_sem() != 0)
3129			return;
3130	} else
3131		console_lock();
3132
3133	console_locked = 1;
3134	console_may_schedule = 0;
3135
3136	cookie = console_srcu_read_lock();
3137	for_each_console_srcu(c) {
3138		if ((console_srcu_read_flags(c) & CON_ENABLED) && c->unblank)
3139			c->unblank();
3140	}
3141	console_srcu_read_unlock(cookie);
3142
3143	console_unlock();
3144
3145	if (!oops_in_progress)
3146		pr_flush(1000, true);
3147}
3148
3149/**
3150 * console_flush_on_panic - flush console content on panic
3151 * @mode: flush all messages in buffer or just the pending ones
3152 *
3153 * Immediately output all pending messages no matter what.
3154 */
3155void console_flush_on_panic(enum con_flush_mode mode)
3156{
3157	bool handover;
3158	u64 next_seq;
3159
3160	/*
3161	 * Ignore the console lock and flush out the messages. Attempting a
3162	 * trylock would not be useful because:
3163	 *
3164	 *   - if it is contended, it must be ignored anyway
3165	 *   - console_lock() and console_trylock() block and fail
3166	 *     respectively in panic for non-panic CPUs
3167	 *   - semaphores are not NMI-safe
3168	 */
3169
3170	/*
3171	 * If another context is holding the console lock,
3172	 * @console_may_schedule might be set. Clear it so that
3173	 * this context does not call cond_resched() while flushing.
3174	 */
3175	console_may_schedule = 0;
3176
3177	if (mode == CONSOLE_REPLAY_ALL) {
3178		struct console *c;
3179		short flags;
3180		int cookie;
3181		u64 seq;
3182
3183		seq = prb_first_valid_seq(prb);
3184
3185		cookie = console_srcu_read_lock();
3186		for_each_console_srcu(c) {
3187			flags = console_srcu_read_flags(c);
3188
3189			if (flags & CON_NBCON) {
3190				nbcon_seq_force(c, seq);
3191			} else {
3192				/*
3193				 * This is an unsynchronized assignment. On
3194				 * panic legacy consoles are only best effort.
3195				 */
3196				c->seq = seq;
3197			}
3198		}
3199		console_srcu_read_unlock(cookie);
3200	}
3201
3202	console_flush_all(false, &next_seq, &handover);
3203}
3204
3205/*
3206 * Return the console tty driver structure and its associated index
3207 */
3208struct tty_driver *console_device(int *index)
3209{
3210	struct console *c;
3211	struct tty_driver *driver = NULL;
3212	int cookie;
3213
3214	/*
3215	 * Take console_lock to serialize device() callback with
3216	 * other console operations. For example, fg_console is
3217	 * modified under console_lock when switching vt.
3218	 */
3219	console_lock();
3220
3221	cookie = console_srcu_read_lock();
3222	for_each_console_srcu(c) {
3223		if (!c->device)
3224			continue;
3225		driver = c->device(c, index);
3226		if (driver)
3227			break;
3228	}
3229	console_srcu_read_unlock(cookie);
3230
3231	console_unlock();
3232	return driver;
3233}
3234
3235/*
3236 * Prevent further output on the passed console device so that (for example)
3237 * serial drivers can disable console output before suspending a port, and can
3238 * re-enable output afterwards.
3239 */
3240void console_stop(struct console *console)
3241{
3242	__pr_flush(console, 1000, true);
3243	console_list_lock();
3244	console_srcu_write_flags(console, console->flags & ~CON_ENABLED);
3245	console_list_unlock();
3246
3247	/*
3248	 * Ensure that all SRCU list walks have completed. All contexts must
3249	 * be able to see that this console is disabled so that (for example)
3250	 * the caller can suspend the port without risk of another context
3251	 * using the port.
3252	 */
3253	synchronize_srcu(&console_srcu);
3254}
3255EXPORT_SYMBOL(console_stop);
3256
3257void console_start(struct console *console)
3258{
3259	console_list_lock();
3260	console_srcu_write_flags(console, console->flags | CON_ENABLED);
3261	console_list_unlock();
3262	__pr_flush(console, 1000, true);
3263}
3264EXPORT_SYMBOL(console_start);
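
/*
 * Illustrative pairing (a sketch of how a serial driver might bracket a
 * port suspend; the names are generic, not taken from this file):
 *
 *	console_stop(uport->cons);
 *	...  power the port down and later back up  ...
 *	console_start(uport->cons);
 *
 * so that no console output is attempted while the hardware is unusable.
 */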
3265
3266static int __read_mostly keep_bootcon;
3267
3268static int __init keep_bootcon_setup(char *str)
3269{
3270	keep_bootcon = 1;
3271	pr_info("debug: skip boot console de-registration.\n");
3272
3273	return 0;
3274}
3275
3276early_param("keep_bootcon", keep_bootcon_setup);
3277
3278static int console_call_setup(struct console *newcon, char *options)
3279{
3280	int err;
3281
3282	if (!newcon->setup)
3283		return 0;
3284
3285	/* Synchronize with possible boot console. */
3286	console_lock();
3287	err = newcon->setup(newcon, options);
3288	console_unlock();
3289
3290	return err;
3291}
3292
3293/*
3294 * This is called by register_console() to try to match
3295 * the newly registered console with any of the ones selected
3296 * by either the command line or add_preferred_console() and
3297 * setup/enable it.
3298 *
3299 * Care needs to be taken with consoles that are statically
3300 * enabled, such as netconsole.
3301 */
3302static int try_enable_preferred_console(struct console *newcon,
3303					bool user_specified)
3304{
3305	struct console_cmdline *c;
3306	int i, err;
3307
3308	for (i = 0, c = console_cmdline;
3309	     i < MAX_CMDLINECONSOLES && c->name[0];
3310	     i++, c++) {
3311		if (c->user_specified != user_specified)
3312			continue;
3313		if (!newcon->match ||
3314		    newcon->match(newcon, c->name, c->index, c->options) != 0) {
3315			/* default matching */
3316			BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
3317			if (strcmp(c->name, newcon->name) != 0)
3318				continue;
3319			if (newcon->index >= 0 &&
3320			    newcon->index != c->index)
3321				continue;
3322			if (newcon->index < 0)
3323				newcon->index = c->index;
3324
3325			if (_braille_register_console(newcon, c))
3326				return 0;
3327
3328			err = console_call_setup(newcon, c->options);
3329			if (err)
3330				return err;
3331		}
3332		newcon->flags |= CON_ENABLED;
3333		if (i == preferred_console)
3334			newcon->flags |= CON_CONSDEV;
3335		return 0;
3336	}
3337
3338	/*
3339	 * Some consoles, such as pstore and netconsole, can be enabled even
3340	 * without matching. Accept the pre-enabled consoles only when match()
3341	 * and setup() had a chance to be called.
3342	 */
3343	if (newcon->flags & CON_ENABLED && c->user_specified == user_specified)
3344		return 0;
3345
3346	return -ENOENT;
3347}
3348
3349/* Try to enable the console unconditionally */
3350static void try_enable_default_console(struct console *newcon)
3351{
3352	if (newcon->index < 0)
3353		newcon->index = 0;
3354
3355	if (console_call_setup(newcon, NULL) != 0)
3356		return;
3357
3358	newcon->flags |= CON_ENABLED;
3359
3360	if (newcon->device)
3361		newcon->flags |= CON_CONSDEV;
3362}
3363
3364static void console_init_seq(struct console *newcon, bool bootcon_registered)
3365{
3366	struct console *con;
3367	bool handover;
3368
3369	if (newcon->flags & (CON_PRINTBUFFER | CON_BOOT)) {
3370		/* Get a consistent copy of @syslog_seq. */
3371		mutex_lock(&syslog_lock);
3372		newcon->seq = syslog_seq;
3373		mutex_unlock(&syslog_lock);
3374	} else {
3375		/* Begin with next message added to ringbuffer. */
3376		newcon->seq = prb_next_seq(prb);
3377
3378		/*
3379		 * If any enabled boot consoles are due to be unregistered
3380		 * shortly, some may not be caught up and may be the same
3381		 * device as @newcon. Since it is not known which boot console
3382		 * is the same device, flush all consoles and, if necessary,
3383		 * start with the message of the enabled boot console that is
3384		 * the furthest behind.
3385		 */
3386		if (bootcon_registered && !keep_bootcon) {
3387			/*
3388			 * Hold the console_lock to stop console printing and
3389			 * guarantee safe access to console->seq.
3390			 */
3391			console_lock();
3392
3393			/*
3394			 * Flush all consoles and set the console to start at
3395			 * the next unprinted sequence number.
3396			 */
3397			if (!console_flush_all(true, &newcon->seq, &handover)) {
3398				/*
3399				 * Flushing failed. Just choose the lowest
3400				 * sequence of the enabled boot consoles.
3401				 */
3402
3403				/*
3404				 * If there was a handover, this context no
3405				 * longer holds the console_lock.
3406				 */
3407				if (handover)
3408					console_lock();
3409
3410				newcon->seq = prb_next_seq(prb);
3411				for_each_console(con) {
3412					if ((con->flags & CON_BOOT) &&
3413					    (con->flags & CON_ENABLED) &&
3414					    con->seq < newcon->seq) {
3415						newcon->seq = con->seq;
3416					}
3417				}
3418			}
3419
3420			console_unlock();
3421		}
3422	}
3423}
3424
3425#define console_first()				\
3426	hlist_entry(console_list.first, struct console, node)
3427
3428static int unregister_console_locked(struct console *console);
3429
3430/*
3431 * The console driver calls this routine during kernel initialization
3432 * to register the console printing procedure with printk() and to
3433 * print any messages that were printed by the kernel before the
3434 * console driver was initialized.
3435 *
3436 * This can happen pretty early during the boot process (because of
3437 * early_printk) - sometimes before setup_arch() completes - be careful
3438 * of what kernel features are used - they may not be initialised yet.
3439 *
3440 * There are two types of consoles - bootconsoles (early_printk) and
3441 * "real" consoles (everything which is not a bootconsole) which are
3442 * handled differently.
3443 *  - Any number of bootconsoles can be registered at any time.
3444 *  - As soon as a "real" console is registered, all bootconsoles
3445 *    will be unregistered automatically.
3446 *  - Once a "real" console is registered, any attempt to register a
3447 *    bootconsole will be rejected.
3448 */
3449void register_console(struct console *newcon)
3450{
3451	struct console *con;
3452	bool bootcon_registered = false;
3453	bool realcon_registered = false;
3454	int err;
3455
3456	console_list_lock();
3457
3458	for_each_console(con) {
3459		if (WARN(con == newcon, "console '%s%d' already registered\n",
3460					 con->name, con->index)) {
3461			goto unlock;
3462		}
3463
3464		if (con->flags & CON_BOOT)
3465			bootcon_registered = true;
3466		else
3467			realcon_registered = true;
3468	}
3469
3470	/* Do not register boot consoles when there already is a real one. */
3471	if ((newcon->flags & CON_BOOT) && realcon_registered) {
3472		pr_info("Too late to register bootconsole %s%d\n",
3473			newcon->name, newcon->index);
3474		goto unlock;
3475	}
3476
3477	if (newcon->flags & CON_NBCON) {
3478		/*
3479		 * Ensure the nbcon console buffers can be allocated
3480		 * before modifying any global data.
3481		 */
3482		if (!nbcon_alloc(newcon))
3483			goto unlock;
3484	}
3485
3486	/*
3487	 * See if we want to enable this console driver by default.
3488	 *
3489	 * Nope when a console is preferred by the command line, device
3490	 * tree, or SPCR.
3491	 *
3492	 * The first real console with tty binding (driver) wins. More
3493	 * consoles might get enabled before the right one is found.
3494	 *
3495	 * Note that a console with tty binding will have CON_CONSDEV
3496	 * flag set and will be first in the list.
3497	 */
3498	if (preferred_console < 0) {
3499		if (hlist_empty(&console_list) || !console_first()->device ||
3500		    console_first()->flags & CON_BOOT) {
3501			try_enable_default_console(newcon);
3502		}
3503	}
3504
3505	/* See if this console matches one we selected on the command line */
3506	err = try_enable_preferred_console(newcon, true);
3507
3508	/* If not, try to match against the platform default(s) */
3509	if (err == -ENOENT)
3510		err = try_enable_preferred_console(newcon, false);
3511
3512	/* printk() messages are not printed to the Braille console. */
3513	if (err || newcon->flags & CON_BRL) {
3514		if (newcon->flags & CON_NBCON)
3515			nbcon_free(newcon);
3516		goto unlock;
3517	}
3518
3519	/*
3520	 * If we have a bootconsole, and are switching to a real console,
3521	 * don't print everything out again, since when the boot console and
3522	 * the real console are the same physical device, it's annoying to
3523	 * see the beginning boot messages twice.
3524	 */
3525	if (bootcon_registered &&
3526	    ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) {
3527		newcon->flags &= ~CON_PRINTBUFFER;
3528	}
3529
3530	newcon->dropped = 0;
3531	console_init_seq(newcon, bootcon_registered);
3532
3533	if (newcon->flags & CON_NBCON)
3534		nbcon_init(newcon);
3535
3536	/*
3537	 * Put this console in the list - keep the
3538	 * preferred driver at the head of the list.
3539	 */
3540	if (hlist_empty(&console_list)) {
3541		/* Ensure CON_CONSDEV is always set for the head. */
3542		newcon->flags |= CON_CONSDEV;
3543		hlist_add_head_rcu(&newcon->node, &console_list);
3544
3545	} else if (newcon->flags & CON_CONSDEV) {
3546		/* Only the new head can have CON_CONSDEV set. */
3547		console_srcu_write_flags(console_first(), console_first()->flags & ~CON_CONSDEV);
3548		hlist_add_head_rcu(&newcon->node, &console_list);
3549
3550	} else {
3551		hlist_add_behind_rcu(&newcon->node, console_list.first);
3552	}
3553
3554	/*
3555	 * No need to synchronize SRCU here! The caller does not rely
3556	 * on all contexts being able to see the new console before
3557	 * register_console() completes.
3558	 */
3559
3560	console_sysfs_notify();
3561
3562	/*
3563	 * By unregistering the bootconsoles after we enable the real console
3564	 * we get the "console xxx enabled" message on all the consoles -
3565	 * boot consoles, real consoles, etc - this is to ensure that end
3566	 * users know there might be something in the kernel's log buffer that
3567	 * went to the bootconsole (that they do not see on the real console)
3568	 */
3569	con_printk(KERN_INFO, newcon, "enabled\n");
3570	if (bootcon_registered &&
3571	    ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) &&
3572	    !keep_bootcon) {
3573		struct hlist_node *tmp;
3574
3575		hlist_for_each_entry_safe(con, tmp, &console_list, node) {
3576			if (con->flags & CON_BOOT)
3577				unregister_console_locked(con);
3578		}
3579	}
3580unlock:
3581	console_list_unlock();
3582}
3583EXPORT_SYMBOL(register_console);
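
/*
 * Minimal registration sketch (illustrative; the driver, its write()
 * implementation and my_hw_emit() are hypothetical):
 *
 *	static void my_console_write(struct console *con, const char *s,
 *				     unsigned int count)
 *	{
 *		my_hw_emit(s, count);
 *	}
 *
 *	static struct console my_console = {
 *		.name	= "mycon",
 *		.write	= my_console_write,
 *		.flags	= CON_PRINTBUFFER,
 *		.index	= -1,
 *	};
 *
 *	register_console(&my_console);
 *
 * CON_PRINTBUFFER requests a replay of the existing log, and index -1 lets
 * the matching code pick the index.
 */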
3584
3585/* Must be called under console_list_lock(). */
3586static int unregister_console_locked(struct console *console)
3587{
3588	int res;
3589
3590	lockdep_assert_console_list_lock_held();
3591
3592	con_printk(KERN_INFO, console, "disabled\n");
3593
3594	res = _braille_unregister_console(console);
3595	if (res < 0)
3596		return res;
3597	if (res > 0)
3598		return 0;
3599
3600	/* Disable it unconditionally */
3601	console_srcu_write_flags(console, console->flags & ~CON_ENABLED);
3602
3603	if (!console_is_registered_locked(console))
3604		return -ENODEV;
3605
3606	hlist_del_init_rcu(&console->node);
3607
3608	/*
3609	 * <HISTORICAL>
3610	 * If this isn't the last console and it has CON_CONSDEV set, we
3611	 * need to set it on the next preferred console.
3612	 * </HISTORICAL>
3613	 *
3614	 * The above makes no sense as there is no guarantee that the next
3615	 * console has any device attached. Oh well....
3616	 */
3617	if (!hlist_empty(&console_list) && console->flags & CON_CONSDEV)
3618		console_srcu_write_flags(console_first(), console_first()->flags | CON_CONSDEV);
3619
3620	/*
3621	 * Ensure that all SRCU list walks have completed. All contexts
3622	 * must not be able to see this console in the list so that any
3623	 * exit/cleanup routines can be performed safely.
3624	 */
3625	synchronize_srcu(&console_srcu);
3626
3627	if (console->flags & CON_NBCON)
3628		nbcon_free(console);
3629
3630	console_sysfs_notify();
3631
3632	if (console->exit)
3633		res = console->exit(console);
3634
3635	return res;
3636}
3637
3638int unregister_console(struct console *console)
3639{
3640	int res;
3641
3642	console_list_lock();
3643	res = unregister_console_locked(console);
3644	console_list_unlock();
3645	return res;
3646}
3647EXPORT_SYMBOL(unregister_console);
3648
3649/**
3650 * console_force_preferred_locked - force a registered console to be preferred
3651 * @con: The registered console to make preferred.
3652 *
3653 * Must be called under console_list_lock().
3654 */
3655void console_force_preferred_locked(struct console *con)
3656{
3657	struct console *cur_pref_con;
3658
3659	if (!console_is_registered_locked(con))
3660		return;
3661
3662	cur_pref_con = console_first();
3663
3664	/* Already preferred? */
3665	if (cur_pref_con == con)
3666		return;
3667
3668	/*
3669	 * Delete, but do not re-initialize the entry. This allows the console
3670	 * to continue to appear registered (via any hlist_unhashed_lockless()
3671	 * checks), even though it was briefly removed from the console list.
3672	 */
3673	hlist_del_rcu(&con->node);
3674
3675	/*
3676	 * Ensure that all SRCU list walks have completed so that the console
3677	 * can be added to the beginning of the console list and its forward
3678	 * list pointer can be re-initialized.
3679	 */
3680	synchronize_srcu(&console_srcu);
3681
3682	con->flags |= CON_CONSDEV;
3683	WARN_ON(!con->device);
3684
3685	/* Only the new head can have CON_CONSDEV set. */
3686	console_srcu_write_flags(cur_pref_con, cur_pref_con->flags & ~CON_CONSDEV);
3687	hlist_add_head_rcu(&con->node, &console_list);
3688}
3689EXPORT_SYMBOL(console_force_preferred_locked);
3690
3691/*
3692 * Initialize the console device. This is called *early*, so
3693 * we can't necessarily depend on lots of kernel help here.
3694 * Just do some early initializations, and do the complex setup
3695 * later.
3696 */
3697void __init console_init(void)
3698{
3699	int ret;
3700	initcall_t call;
3701	initcall_entry_t *ce;
3702
3703	/* Setup the default TTY line discipline. */
3704	n_tty_init();
3705
3706	/*
3707	 * set up the console device so that later boot sequences can
3708	 * inform about problems, etc.
3709	 */
3710	ce = __con_initcall_start;
3711	trace_initcall_level("console");
3712	while (ce < __con_initcall_end) {
3713		call = initcall_from_entry(ce);
3714		trace_initcall_start(call);
3715		ret = call();
3716		trace_initcall_finish(call, ret);
3717		ce++;
3718	}
3719}
3720
3721/*
3722 * Some boot consoles access data that is in the init section and which will
3723 * be discarded after the initcalls have been run. To make sure that no code
3724 * will access this data, unregister the boot consoles in a late initcall.
3725 *
3726 * If for some reason, such as deferred probe or the driver being a loadable
3727 * module, the real console hasn't registered yet at this point, there will
3728 * be a brief interval in which no messages are logged to the console, which
3729 * makes it difficult to diagnose problems that occur during this time.
3730 *
3731 * To mitigate this problem somewhat, only unregister consoles whose memory
3732 * intersects with the init section. Note that all other boot consoles will
3733 * get unregistered when the real preferred console is registered.
3734 */
3735static int __init printk_late_init(void)
3736{
3737	struct hlist_node *tmp;
3738	struct console *con;
3739	int ret;
3740
3741	console_list_lock();
3742	hlist_for_each_entry_safe(con, tmp, &console_list, node) {
3743		if (!(con->flags & CON_BOOT))
3744			continue;
3745
3746		/* Check addresses that might be used for enabled consoles. */
3747		if (init_section_intersects(con, sizeof(*con)) ||
3748		    init_section_contains(con->write, 0) ||
3749		    init_section_contains(con->read, 0) ||
3750		    init_section_contains(con->device, 0) ||
3751		    init_section_contains(con->unblank, 0) ||
3752		    init_section_contains(con->data, 0)) {
3753			/*
3754			 * Please, consider moving the reported consoles out
3755			 * of the init section.
3756			 */
3757			pr_warn("bootconsole [%s%d] uses init memory and must be disabled even before the real one is ready\n",
3758				con->name, con->index);
3759			unregister_console_locked(con);
3760		}
3761	}
3762	console_list_unlock();
3763
3764	ret = cpuhp_setup_state_nocalls(CPUHP_PRINTK_DEAD, "printk:dead", NULL,
3765					console_cpu_notify);
3766	WARN_ON(ret < 0);
3767	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "printk:online",
3768					console_cpu_notify, NULL);
3769	WARN_ON(ret < 0);
3770	printk_sysctl_init();
3771	return 0;
3772}
3773late_initcall(printk_late_init);
3774
3775#if defined CONFIG_PRINTK
3776/* If @con is specified, only wait for that console. Otherwise wait for all. */
3777static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress)
3778{
3779	unsigned long timeout_jiffies = msecs_to_jiffies(timeout_ms);
3780	unsigned long remaining_jiffies = timeout_jiffies;
3781	struct console *c;
3782	u64 last_diff = 0;
3783	u64 printk_seq;
3784	short flags;
3785	int cookie;
3786	u64 diff;
3787	u64 seq;
3788
3789	might_sleep();
3790
3791	seq = prb_next_reserve_seq(prb);
3792
3793	/* Flush the consoles so that records up to @seq are printed. */
3794	console_lock();
3795	console_unlock();
3796
3797	for (;;) {
3798		unsigned long begin_jiffies;
3799		unsigned long slept_jiffies;
3800
3801		diff = 0;
3802
3803		/*
3804		 * Hold the console_lock to guarantee safe access to
3805		 * console->seq. Releasing console_lock flushes more
3806		 * records in case @seq is still not printed on all
3807		 * usable consoles.
3808		 */
3809		console_lock();
3810
3811		cookie = console_srcu_read_lock();
3812		for_each_console_srcu(c) {
3813			if (con && con != c)
3814				continue;
3815
3816			flags = console_srcu_read_flags(c);
3817
3818			/*
3819			 * If consoles are not usable, it cannot be expected
3820			 * that they make forward progress, so only increment
3821			 * @diff for usable consoles.
3822			 */
3823			if (!console_is_usable(c))
3824				continue;
3825
3826			if (flags & CON_NBCON) {
3827				printk_seq = nbcon_seq_read(c);
3828			} else {
3829				printk_seq = c->seq;
3830			}
3831
3832			if (printk_seq < seq)
3833				diff += seq - printk_seq;
3834		}
3835		console_srcu_read_unlock(cookie);
3836
3837		if (diff != last_diff && reset_on_progress)
3838			remaining_jiffies = timeout_jiffies;
3839
3840		console_unlock();
3841
3842		/* Note: @diff is 0 if there are no usable consoles. */
3843		if (diff == 0 || remaining_jiffies == 0)
3844			break;
3845
3846		/* msleep(1) might sleep much longer. Check time by jiffies. */
3847		begin_jiffies = jiffies;
3848		msleep(1);
3849		slept_jiffies = jiffies - begin_jiffies;
3850
3851		remaining_jiffies -= min(slept_jiffies, remaining_jiffies);
3852
3853		last_diff = diff;
3854	}
3855
3856	return (diff == 0);
3857}
3858
3859/**
3860 * pr_flush() - Wait for printing threads to catch up.
3861 *
3862 * @timeout_ms:        The maximum time (in ms) to wait.
3863 * @reset_on_progress: Reset the timeout if forward progress is seen.
3864 *
3865 * A value of 0 for @timeout_ms means no waiting will occur. A value of -1
3866 * represents infinite waiting.
3867 *
3868 * If @reset_on_progress is true, the timeout will be reset whenever any
3869 * printer has been seen to make some forward progress.
3870 *
3871 * Context: Process context. May sleep while acquiring console lock.
3872 * Return: true if all usable printers are caught up.
3873 */
3874static bool pr_flush(int timeout_ms, bool reset_on_progress)
3875{
3876	return __pr_flush(NULL, timeout_ms, reset_on_progress);
3877}
3878
3879/*
3880 * Delayed printk version, for scheduler-internal messages:
3881 */
3882#define PRINTK_PENDING_WAKEUP	0x01
3883#define PRINTK_PENDING_OUTPUT	0x02
3884
3885static DEFINE_PER_CPU(int, printk_pending);
3886
3887static void wake_up_klogd_work_func(struct irq_work *irq_work)
3888{
3889	int pending = this_cpu_xchg(printk_pending, 0);
3890
3891	if (pending & PRINTK_PENDING_OUTPUT) {
3892		/* If trylock fails, someone else is doing the printing */
3893		if (console_trylock())
3894			console_unlock();
3895	}
3896
3897	if (pending & PRINTK_PENDING_WAKEUP)
3898		wake_up_interruptible(&log_wait);
3899}
3900
3901static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) =
3902	IRQ_WORK_INIT_LAZY(wake_up_klogd_work_func);
3903
3904static void __wake_up_klogd(int val)
3905{
3906	if (!printk_percpu_data_ready())
3907		return;
3908
3909	preempt_disable();
3910	/*
3911	 * Guarantee any new records can be seen by tasks preparing to wait
3912	 * before this context checks if the wait queue is empty.
3913	 *
3914	 * The full memory barrier within wq_has_sleeper() pairs with the full
3915	 * memory barrier within set_current_state() of
3916	 * prepare_to_wait_event(), which is called after ___wait_event() adds
3917	 * the waiter but before it has checked the wait condition.
3918	 *
3919	 * This pairs with devkmsg_read:A and syslog_print:A.
3920	 */
3921	if (wq_has_sleeper(&log_wait) || /* LMM(__wake_up_klogd:A) */
3922	    (val & PRINTK_PENDING_OUTPUT)) {
3923		this_cpu_or(printk_pending, val);
3924		irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
3925	}
3926	preempt_enable();
3927}
3928
3929/**
3930 * wake_up_klogd - Wake kernel logging daemon
3931 *
3932 * Use this function when new records have been added to the ringbuffer
3933 * and the console printing of those records has already occurred or is
3934 * known to be handled by some other context. This function will only
3935 * wake the logging daemon.
3936 *
3937 * Context: Any context.
3938 */
3939void wake_up_klogd(void)
3940{
3941	__wake_up_klogd(PRINTK_PENDING_WAKEUP);
3942}
3943
3944/**
3945 * defer_console_output - Wake kernel logging daemon and trigger
3946 *	console printing in a deferred context
3947 *
3948 * Use this function when new records have been added to the ringbuffer,
3949 * this context is responsible for console printing those records, but
3950 * the current context is not allowed to perform the console printing.
3951 * Trigger an irq_work context to perform the console printing. This
3952 * function also wakes the logging daemon.
3953 *
3954 * Context: Any context.
3955 */
3956void defer_console_output(void)
3957{
3958	/*
3959	 * New messages may have been added directly to the ringbuffer
3960	 * using vprintk_store(), so wake any waiters as well.
3961	 */
3962	__wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT);
3963}
3964
3965void printk_trigger_flush(void)
3966{
3967	defer_console_output();
3968}
3969
3970int vprintk_deferred(const char *fmt, va_list args)
3971{
3972	return vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args);
3973}
3974
3975int _printk_deferred(const char *fmt, ...)
3976{
3977	va_list args;
3978	int r;
3979
3980	va_start(args, fmt);
3981	r = vprintk_deferred(fmt, args);
3982	va_end(args);
3983
3984	return r;
3985}
3986
3987/*
3988 * printk rate limiting, lifted from the networking subsystem.
3989 *
3990 * This enforces a rate limit: not more than 10 kernel messages
3991 * every 5s to make a denial-of-service attack impossible.
3992 */
3993DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);
3994
3995int __printk_ratelimit(const char *func)
3996{
3997	return ___ratelimit(&printk_ratelimit_state, func);
3998}
3999EXPORT_SYMBOL(__printk_ratelimit);
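
/*
 * Callers normally use this through the printk_ratelimit() wrapper
 * (illustrative sketch; the message is made up):
 *
 *	if (printk_ratelimit())
 *		pr_warn("dropping malformed frame\n");
 *
 * so that a flood of identical events cannot drown the log.
 */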
4000
4001/**
4002 * printk_timed_ratelimit - caller-controlled printk ratelimiting
4003 * @caller_jiffies: pointer to caller's state
4004 * @interval_msecs: minimum interval between prints
4005 *
4006 * printk_timed_ratelimit() returns true if more than @interval_msecs
4007 * milliseconds have elapsed since the last time printk_timed_ratelimit()
4008 * returned true.
4009 */
4010bool printk_timed_ratelimit(unsigned long *caller_jiffies,
4011			unsigned int interval_msecs)
4012{
4013	unsigned long elapsed = jiffies - *caller_jiffies;
4014
4015	if (*caller_jiffies && elapsed <= msecs_to_jiffies(interval_msecs))
4016		return false;
4017
4018	*caller_jiffies = jiffies;
4019	return true;
4020}
4021EXPORT_SYMBOL(printk_timed_ratelimit);
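
/*
 * Sketch of caller-controlled use (illustrative; @last_warned is the
 * caller's own state):
 *
 *	static unsigned long last_warned;
 *
 *	if (printk_timed_ratelimit(&last_warned, 60 * 1000))
 *		pr_warn("device still not responding\n");
 *
 * which emits the warning at most once per minute.
 */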
4022
4023static DEFINE_SPINLOCK(dump_list_lock);
4024static LIST_HEAD(dump_list);
4025
4026/**
4027 * kmsg_dump_register - register a kernel log dumper.
4028 * @dumper: pointer to the kmsg_dumper structure
4029 *
4030 * Adds a kernel log dumper to the system. The dump callback in the
4031 * structure must be set and will be called when the kernel oopses or
4032 * panics. Returns zero on success and %-EINVAL or %-EBUSY otherwise.
4033 */
4034int kmsg_dump_register(struct kmsg_dumper *dumper)
4035{
4036	unsigned long flags;
4037	int err = -EBUSY;
4038
4039	/* The dump callback needs to be set */
4040	if (!dumper->dump)
4041		return -EINVAL;
4042
4043	spin_lock_irqsave(&dump_list_lock, flags);
4044	/* Don't allow registering multiple times */
4045	if (!dumper->registered) {
4046		dumper->registered = 1;
4047		list_add_tail_rcu(&dumper->list, &dump_list);
4048		err = 0;
4049	}
4050	spin_unlock_irqrestore(&dump_list_lock, flags);
4051
4052	return err;
4053}
4054EXPORT_SYMBOL_GPL(kmsg_dump_register);
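
/*
 * A minimal registration sketch (hypothetical, not part of this file); the
 * names below are made up for illustration. A real dumper would copy the
 * records somewhere persistent (flash, firmware memory, a network card, ...)
 * from its dump() callback, and would call kmsg_dump_unregister() on
 * teardown.
 *
 *	static void hypothetical_dump(struct kmsg_dumper *dumper,
 *				      enum kmsg_dump_reason reason)
 *	{
 *		// retrieve records with kmsg_dump_get_line() or
 *		// kmsg_dump_get_buffer(); see the sketches further down
 *	}
 *
 *	static struct kmsg_dumper hypothetical_dumper = {
 *		.dump		= hypothetical_dump,
 *		.max_reason	= KMSG_DUMP_PANIC,	// optional, see kmsg_dump()
 *	};
 *
 *	static int __init hypothetical_init(void)
 *	{
 *		return kmsg_dump_register(&hypothetical_dumper);
 *	}
 */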
4055
4056/**
4057 * kmsg_dump_unregister - unregister a kmsg dumper.
4058 * @dumper: pointer to the kmsg_dumper structure
4059 *
4060 * Removes a dump device from the system. Returns zero on success and
4061 * %-EINVAL otherwise.
4062 */
4063int kmsg_dump_unregister(struct kmsg_dumper *dumper)
4064{
4065	unsigned long flags;
4066	int err = -EINVAL;
4067
4068	spin_lock_irqsave(&dump_list_lock, flags);
4069	if (dumper->registered) {
4070		dumper->registered = 0;
4071		list_del_rcu(&dumper->list);
4072		err = 0;
4073	}
4074	spin_unlock_irqrestore(&dump_list_lock, flags);
4075	synchronize_rcu();
4076
4077	return err;
4078}
4079EXPORT_SYMBOL_GPL(kmsg_dump_unregister);
4080
4081static bool always_kmsg_dump;
4082module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);
4083
4084const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason)
4085{
4086	switch (reason) {
4087	case KMSG_DUMP_PANIC:
4088		return "Panic";
4089	case KMSG_DUMP_OOPS:
4090		return "Oops";
4091	case KMSG_DUMP_EMERG:
4092		return "Emergency";
4093	case KMSG_DUMP_SHUTDOWN:
4094		return "Shutdown";
4095	default:
4096		return "Unknown";
4097	}
4098}
4099EXPORT_SYMBOL_GPL(kmsg_dump_reason_str);
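
/*
 * A dumper will typically include the human-readable reason in whatever
 * header it writes; a hypothetical one-liner (names made up):
 *
 *	pr_info("hypothetical dumper: saving log (%s)\n",
 *		kmsg_dump_reason_str(reason));
 */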
4100
4101/**
4102 * kmsg_dump - dump kernel log to kernel message dumpers.
4103 * @reason: the reason (oops, panic etc) for dumping
4104 *
4105 * Call the dump() callback of each registered dumper, which can
4106 * retrieve the kmsg records with kmsg_dump_get_line() or
4107 * kmsg_dump_get_buffer().
4108 */
4109void kmsg_dump(enum kmsg_dump_reason reason)
4110{
4111	struct kmsg_dumper *dumper;
4112
4113	rcu_read_lock();
4114	list_for_each_entry_rcu(dumper, &dump_list, list) {
4115		enum kmsg_dump_reason max_reason = dumper->max_reason;
4116
4117		/*
4118		 * If the client has not provided a specific max_reason,
4119		 * default to KMSG_DUMP_OOPS, unless always_kmsg_dump was set.
4120		 */
4121		if (max_reason == KMSG_DUMP_UNDEF) {
4122			max_reason = always_kmsg_dump ? KMSG_DUMP_MAX :
4123							KMSG_DUMP_OOPS;
4124		}
4125		if (reason > max_reason)
4126			continue;
4127
4128		/* invoke dumper which will iterate over records */
4129		dumper->dump(dumper, reason);
4130	}
4131	rcu_read_unlock();
4132}
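
/*
 * kmsg_dump() itself is invoked by core kernel code at the corresponding
 * events, e.g. panic() passes KMSG_DUMP_PANIC and the reboot/halt paths
 * pass KMSG_DUMP_SHUTDOWN. A hypothetical custom shutdown hook would
 * simply call:
 *
 *	kmsg_dump(KMSG_DUMP_SHUTDOWN);
 */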
4133
4134/**
4135 * kmsg_dump_get_line - retrieve one kmsg log line
4136 * @iter: kmsg dump iterator
4137 * @syslog: include the "<4>" prefixes
4138 * @line: buffer to copy the line to
4139 * @size: maximum size of the buffer
4140 * @len: length of line placed into buffer
4141 *
4142 * Start at the beginning of the kmsg buffer, with the oldest kmsg
4143 * record, and copy one record into the provided buffer.
4144 *
4145 * Consecutive calls will return the next available record moving
4146 * towards the end of the buffer with the youngest messages.
4147 *
4148 * A return value of FALSE indicates that there are no more records to
4149 * read.
4150 */
4151bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
4152			char *line, size_t size, size_t *len)
4153{
4154	u64 min_seq = latched_seq_read_nolock(&clear_seq);
4155	struct printk_info info;
4156	unsigned int line_count;
4157	struct printk_record r;
4158	size_t l = 0;
4159	bool ret = false;
4160
4161	if (iter->cur_seq < min_seq)
4162		iter->cur_seq = min_seq;
4163
4164	prb_rec_init_rd(&r, &info, line, size);
4165
4166	/* Read text or count text lines? */
4167	if (line) {
4168		if (!prb_read_valid(prb, iter->cur_seq, &r))
4169			goto out;
4170		l = record_print_text(&r, syslog, printk_time);
4171	} else {
4172		if (!prb_read_valid_info(prb, iter->cur_seq,
4173					 &info, &line_count)) {
4174			goto out;
4175		}
4176		l = get_record_print_text_size(&info, line_count, syslog,
4177					       printk_time);
4178
4179	}
4180
4181	iter->cur_seq = r.info->seq + 1;
4182	ret = true;
4183out:
4184	if (len)
4185		*len = l;
4186	return ret;
4187}
4188EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
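
/*
 * A minimal dump() callback sketch using the line-based API (hypothetical,
 * not part of this file). kmsg_dump_rewind() initializes the iterator; each
 * kmsg_dump_get_line() call then copies one record, oldest first, until it
 * returns false. hypothetical_write_record() stands in for whatever pushes
 * the bytes to the target device.
 *
 *	static void hypothetical_dump(struct kmsg_dumper *dumper,
 *				      enum kmsg_dump_reason reason)
 *	{
 *		struct kmsg_dump_iter iter;
 *		char line[256];
 *		size_t len;
 *
 *		kmsg_dump_rewind(&iter);
 *		while (kmsg_dump_get_line(&iter, true, line, sizeof(line), &len))
 *			hypothetical_write_record(line, len);
 *	}
 */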
4189
4190/**
4191 * kmsg_dump_get_buffer - copy kmsg log lines
4192 * @iter: kmsg dump iterator
4193 * @syslog: include the "<4>" prefixes
4194 * @buf: buffer to copy the lines to
4195 * @size: maximum size of the buffer
4196 * @len_out: length of the text placed into the buffer
4197 *
4198 * Start at the end of the kmsg buffer and fill the provided buffer
4199 * with as many of the *youngest* kmsg records as fit into it.
4200 * If the buffer is large enough, all available kmsg records will be
4201 * copied with a single call.
4202 *
4203 * Consecutive calls will fill the buffer with the next block of
4204 * available older records, not including the earlier retrieved ones.
4205 *
4206 * A return value of FALSE indicates that there are no more records to
4207 * read.
4208 */
4209bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
4210			  char *buf, size_t size, size_t *len_out)
4211{
4212	u64 min_seq = latched_seq_read_nolock(&clear_seq);
4213	struct printk_info info;
4214	struct printk_record r;
4215	u64 seq;
4216	u64 next_seq;
4217	size_t len = 0;
4218	bool ret = false;
4219	bool time = printk_time;
4220
4221	if (!buf || !size)
4222		goto out;
4223
4224	if (iter->cur_seq < min_seq)
4225		iter->cur_seq = min_seq;
4226
4227	if (prb_read_valid_info(prb, iter->cur_seq, &info, NULL)) {
4228		if (info.seq != iter->cur_seq) {
4229			/* messages are gone, move to first available one */
4230			iter->cur_seq = info.seq;
4231		}
4232	}
4233
4234	/* last entry */
4235	if (iter->cur_seq >= iter->next_seq)
4236		goto out;
4237
4238	/*
4239	 * Find first record that fits, including all following records,
4240	 * into the user-provided buffer for this dump. Pass in size-1
4241	 * because this function (by way of record_print_text()) will
4242	 * not write more than size-1 bytes of text into @buf.
4243	 */
4244	seq = find_first_fitting_seq(iter->cur_seq, iter->next_seq,
4245				     size - 1, syslog, time);
4246
4247	/*
4248	 * The next kmsg_dump_get_buffer() invocation will dump the block
4249	 * of older records stored right before this one.
4250	 */
4251	next_seq = seq;
4252
4253	prb_rec_init_rd(&r, &info, buf, size);
4254
4255	prb_for_each_record(seq, prb, seq, &r) {
4256		if (r.info->seq >= iter->next_seq)
4257			break;
4258
4259		len += record_print_text(&r, syslog, time);
4260
4261		/* Adjust record to store to remaining buffer space. */
4262		prb_rec_init_rd(&r, &info, buf + len, size - len);
4263	}
4264
4265	iter->next_seq = next_seq;
4266	ret = true;
4267out:
4268	if (len_out)
4269		*len_out = len;
4270	return ret;
4271}
4272EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
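
/*
 * A minimal dump() callback sketch using the buffer-based API (hypothetical,
 * not part of this file). Each call fills the buffer with the newest block
 * of not-yet-returned records; repeated calls walk backwards through older
 * blocks until false is returned. The buffer size and the write helper are
 * assumptions for illustration.
 *
 *	static char hypothetical_buf[PAGE_SIZE];
 *
 *	static void hypothetical_dump(struct kmsg_dumper *dumper,
 *				      enum kmsg_dump_reason reason)
 *	{
 *		struct kmsg_dump_iter iter;
 *		size_t len;
 *
 *		kmsg_dump_rewind(&iter);
 *		while (kmsg_dump_get_buffer(&iter, true, hypothetical_buf,
 *					    sizeof(hypothetical_buf), &len))
 *			hypothetical_write_block(hypothetical_buf, len);
 *	}
 */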
4273
4274/**
4275 * kmsg_dump_rewind - reset the iterator
4276 * @iter: kmsg dump iterator
4277 *
4278 * Reset the dumper's iterator so that kmsg_dump_get_line() and
4279 * kmsg_dump_get_buffer() can be called again and used multiple
4280 * times within the same dumper.dump() callback.
4281 */
4282void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
4283{
4284	iter->cur_seq = latched_seq_read_nolock(&clear_seq);
4285	iter->next_seq = prb_next_seq(prb);
4286}
4287EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
4288
4289#endif
4290
4291#ifdef CONFIG_SMP
4292static atomic_t printk_cpu_sync_owner = ATOMIC_INIT(-1);
4293static atomic_t printk_cpu_sync_nested = ATOMIC_INIT(0);
4294
4295/**
4296 * __printk_cpu_sync_wait() - Busy wait until the printk cpu-reentrant
4297 *                            spinning lock is not owned by any CPU.
4298 *
4299 * Context: Any context.
4300 */
4301void __printk_cpu_sync_wait(void)
4302{
4303	do {
4304		cpu_relax();
4305	} while (atomic_read(&printk_cpu_sync_owner) != -1);
4306}
4307EXPORT_SYMBOL(__printk_cpu_sync_wait);
4308
4309/**
4310 * __printk_cpu_sync_try_get() - Try to acquire the printk cpu-reentrant
4311 *                               spinning lock.
4312 *
4313 * If no processor has the lock, the calling processor takes the lock and
4314 * becomes the owner. If the calling processor is already the owner of the
4315 * lock, this function succeeds immediately.
4316 *
4317 * Context: Any context. Expects interrupts to be disabled.
4318 * Return: 1 on success, otherwise 0.
4319 */
4320int __printk_cpu_sync_try_get(void)
4321{
4322	int cpu;
4323	int old;
4324
4325	cpu = smp_processor_id();
4326
4327	/*
4328	 * Guarantee loads and stores from this CPU when it is the lock owner
4329	 * are _not_ visible to the previous lock owner. This pairs with
4330	 * __printk_cpu_sync_put:B.
4331	 *
4332	 * Memory barrier involvement:
4333	 *
4334	 * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
4335	 * then __printk_cpu_sync_put:A can never read from
4336	 * __printk_cpu_sync_try_get:B.
4337	 *
4338	 * Relies on:
4339	 *
4340	 * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
4341	 * of the previous CPU
4342	 *    matching
4343	 * ACQUIRE from __printk_cpu_sync_try_get:A to
4344	 * __printk_cpu_sync_try_get:B of this CPU
4345	 */
4346	old = atomic_cmpxchg_acquire(&printk_cpu_sync_owner, -1,
4347				     cpu); /* LMM(__printk_cpu_sync_try_get:A) */
4348	if (old == -1) {
4349		/*
4350		 * This CPU is now the owner and begins loading/storing
4351		 * data: LMM(__printk_cpu_sync_try_get:B)
4352		 */
4353		return 1;
4354
4355	} else if (old == cpu) {
4356		/* This CPU is already the owner. */
4357		atomic_inc(&printk_cpu_sync_nested);
4358		return 1;
4359	}
4360
4361	return 0;
4362}
4363EXPORT_SYMBOL(__printk_cpu_sync_try_get);
4364
4365/**
4366 * __printk_cpu_sync_put() - Release the printk cpu-reentrant spinning lock.
4367 *
4368 * The calling processor must be the owner of the lock.
4369 *
4370 * Context: Any context. Expects interrupts to be disabled.
4371 */
4372void __printk_cpu_sync_put(void)
4373{
4374	if (atomic_read(&printk_cpu_sync_nested)) {
4375		atomic_dec(&printk_cpu_sync_nested);
4376		return;
4377	}
4378
4379	/*
4380	 * This CPU is finished loading/storing data:
4381	 * LMM(__printk_cpu_sync_put:A)
4382	 */
4383
4384	/*
4385	 * Guarantee loads and stores from this CPU when it was the
4386	 * lock owner are visible to the next lock owner. This pairs
4387	 * with __printk_cpu_sync_try_get:A.
4388	 *
4389	 * Memory barrier involvement:
4390	 *
4391	 * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
4392	 * then __printk_cpu_sync_try_get:B reads from __printk_cpu_sync_put:A.
4393	 *
4394	 * Relies on:
4395	 *
4396	 * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
4397	 * of this CPU
4398	 *    matching
4399	 * ACQUIRE from __printk_cpu_sync_try_get:A to
4400	 * __printk_cpu_sync_try_get:B of the next CPU
4401	 */
4402	atomic_set_release(&printk_cpu_sync_owner,
4403			   -1); /* LMM(__printk_cpu_sync_put:B) */
4404}
4405EXPORT_SYMBOL(__printk_cpu_sync_put);
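
/*
 * A minimal usage sketch (hypothetical, not part of this file). Callers are
 * expected to use the printk_cpu_sync_get_irqsave()/put_irqrestore() wrapper
 * macros from <linux/printk.h> rather than the __printk_cpu_sync_*()
 * primitives directly; the macros disable interrupts and spin until the lock
 * is acquired. Because the lock is CPU-reentrant, nested use on the same CPU
 * (for example from an NMI) does not deadlock.
 *
 *	unsigned long flags;
 *
 *	printk_cpu_sync_get_irqsave(flags);
 *	// emit a multi-line diagnostic that must not interleave with
 *	// output from other CPUs
 *	printk_cpu_sync_put_irqrestore(flags);
 */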
4406#endif /* CONFIG_SMP */