v5.4
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * printk_safe.c - Safe printk for printk-deadlock-prone contexts
 */

#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/debug_locks.h>
#include <linux/kdb.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/printk.h>

#include "internal.h"

/*
 * printk() cannot take logbuf_lock in NMI context. Instead,
 * it uses an alternative implementation that temporarily stores
 * the strings into a per-CPU buffer. The content of the buffer
 * is later flushed into the main ring buffer via IRQ work.
 *
 * The alternative implementation is chosen transparently
 * by examining the current printk() context mask stored in the
 * @printk_context per-CPU variable.
 *
 * The implementation also allows the strings to be flushed from another
 * CPU. There are situations when we want to make sure that all buffers
 * were handled or when IRQs are blocked.
 */
static int printk_safe_irq_ready __read_mostly;

#define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) -	\
				sizeof(atomic_t) -			\
				sizeof(atomic_t) -			\
				sizeof(struct irq_work))

struct printk_safe_seq_buf {
	atomic_t		len;	/* length of written data */
	atomic_t		message_lost;
	struct irq_work		work;	/* IRQ work that flushes the buffer */
	unsigned char		buffer[SAFE_LOG_BUF_LEN];
};
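
/*
 * Sizing note: the buffer length is chosen so that the whole struct
 * stays within one 1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT block. A
 * build-time check in the same spirit could read (a sketch, not
 * upstream code):
 *
 *	static_assert(sizeof(struct printk_safe_seq_buf) <=
 *		      (1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT));
 *
 * With the Kconfig default shift of 13 this is an 8 KiB block; the
 * exact split between metadata and buffer depends on the sizes of
 * atomic_t and struct irq_work on the given architecture.
 */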

static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq);
static DEFINE_PER_CPU(int, printk_context);

#ifdef CONFIG_PRINTK_NMI
static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
#endif

/* Get flushed in a safer context. */
static void queue_flush_work(struct printk_safe_seq_buf *s)
{
	if (printk_safe_irq_ready)
		irq_work_queue(&s->work);
}

/*
 * Add a message to the per-CPU context-dependent buffer. NMI and
 * printk-safe have dedicated buffers, because otherwise printk-safe
 * preempted by NMI-printk would have overwritten the NMI messages.
 *
 * The messages are flushed from irq work (or from panic()), possibly
 * from another CPU, concurrently with printk_safe_log_store(). Should
 * this happen, printk_safe_log_store() will notice the buffer->len
 * mismatch and repeat the write.
 */
static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
						const char *fmt, va_list args)
{
	int add;
	size_t len;
	va_list ap;

again:
	len = atomic_read(&s->len);

	/* The trailing '\0' is not counted in len. */
	if (len >= sizeof(s->buffer) - 1) {
		atomic_inc(&s->message_lost);
		queue_flush_work(s);
		return 0;
	}

	/*
	 * Make sure that all old data have been read before the buffer
	 * was reset. This is not needed when we just append data.
	 */
	if (!len)
		smp_rmb();

	va_copy(ap, args);
	add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, ap);
	va_end(ap);
	if (!add)
		return 0;

	/*
	 * Do it once again if the buffer has been flushed in the meantime.
	 * Note that atomic_cmpxchg() is an implicit memory barrier that
	 * makes sure that the data were written before updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, len + add) != len)
		goto again;

	queue_flush_work(s);
	return add;
}
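
/*
 * The append/retry protocol above can be modeled in user space with C11
 * atomics. A minimal sketch, assuming a single writer per buffer and a
 * concurrent flusher that resets @len to zero; all names and the buffer
 * size are invented for the demo, and the fine-grained memory ordering
 * of the kernel version is simplified to sequential consistency:
 */
#include <stdarg.h>
#include <stdatomic.h>
#include <stdio.h>

struct demo_seq_buf {
	atomic_int len;			/* bytes of valid data */
	char buffer[256];
};

static int demo_log_store(struct demo_seq_buf *s, const char *fmt, ...)
{
	va_list args;
	int len, add, space;

again:
	len = atomic_load(&s->len);
	if (len >= (int)sizeof(s->buffer) - 1)
		return 0;		/* full; the kernel counts this as lost */

	space = (int)sizeof(s->buffer) - len;
	va_start(args, fmt);
	add = vsnprintf(s->buffer + len, space, fmt, args);
	va_end(args);
	if (add <= 0)
		return 0;
	if (add > space - 1)
		add = space - 1;	/* vscnprintf-like: count written bytes */

	/* Redo the write if a concurrent flush emptied the buffer meanwhile. */
	if (!atomic_compare_exchange_strong(&s->len, &len, len + add))
		goto again;

	return add;
}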

static inline void printk_safe_flush_line(const char *text, int len)
{
	/*
	 * Avoid any console driver calls from here, because we may be
	 * in NMI or printk_safe context (when in panic). The messages
	 * must go only into the ring buffer at this stage.  Consoles will
	 * get explicitly called later when a crashdump is not generated.
	 */
	printk_deferred("%.*s", len, text);
}

/* printk the part of the temporary buffer, line by line */
static int printk_safe_flush_buffer(const char *start, size_t len)
{
	const char *c, *end;
	bool header;

	c = start;
	end = start + len;
	header = true;

	/* Print line by line. */
	while (c < end) {
		if (*c == '\n') {
			printk_safe_flush_line(start, c - start + 1);
			start = ++c;
			header = true;
			continue;
		}

		/* Handle continuation lines or a missing newline. */
		if ((c + 1 < end) && printk_get_level(c)) {
			if (header) {
				c = printk_skip_level(c);
				continue;
			}

			printk_safe_flush_line(start, c - start);
			start = c++;
			header = true;
			continue;
		}

		header = false;
		c++;
	}

	/* Check if there was a partial line. Ignore pure header. */
	if (start < end && !header) {
		static const char newline[] = KERN_CONT "\n";

		printk_safe_flush_line(start, end - start);
		printk_safe_flush_line(newline, strlen(newline));
	}

	return len;
}
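
/*
 * For reference, a printk header is a KERN_SOH byte ('\001') followed by
 * a level character, so a buffered blob such as
 *
 *	"\0014warning line\n" "\0016info line without trailing newline"
 *
 * is emitted by the loop above as one complete line plus one partial
 * line that gets a KERN_CONT "\n" appended. A user-space rendition of
 * the two helpers, paraphrasing include/linux/printk.h from memory (a
 * sketch, not the kernel's exact code):
 */
static int demo_get_level(const char *buffer)
{
	if (buffer[0] == '\001' && buffer[1]) {
		switch (buffer[1]) {
		case '0' ... '7':	/* KERN_EMERG .. KERN_DEBUG */
		case 'c':		/* KERN_CONT */
			return buffer[1];
		}
	}
	return 0;
}

static const char *demo_skip_level(const char *buffer)
{
	/* Skip the two-byte "\001<level>" prefix when one is present. */
	return demo_get_level(buffer) ? buffer + 2 : buffer;
}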

static void report_message_lost(struct printk_safe_seq_buf *s)
{
	int lost = atomic_xchg(&s->message_lost, 0);

	if (lost)
		printk_deferred("Lost %d message(s)!\n", lost);
}

/*
 * Flush data from the associated per-CPU buffer. The function
 * can be called either via IRQ work or independently.
 */
static void __printk_safe_flush(struct irq_work *work)
{
	static raw_spinlock_t read_lock =
		__RAW_SPIN_LOCK_INITIALIZER(read_lock);
	struct printk_safe_seq_buf *s =
		container_of(work, struct printk_safe_seq_buf, work);
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * The lock has two functions. First, one reader has to flush all
	 * available messages to make the lockless synchronization with
	 * writers easier. Second, we do not want to mix messages from
	 * different CPUs. This is especially important when printing
	 * a backtrace.
	 */
	raw_spin_lock_irqsave(&read_lock, flags);

	i = 0;
more:
	len = atomic_read(&s->len);

	/*
	 * This is just a paranoid check that nobody has manipulated
	 * the buffer in an unexpected way. If we printed something then
	 * @len must only increase. Also it should never overflow the
	 * buffer size.
	 */
	if ((i && i >= len) || len > sizeof(s->buffer)) {
		const char *msg = "printk_safe_flush: internal error\n";

		printk_safe_flush_line(msg, strlen(msg));
		len = 0;
	}

	if (!len)
		goto out; /* Someone else has already flushed the buffer. */

	/* Make sure that data has been written up to @len. */
	smp_rmb();
	i += printk_safe_flush_buffer(s->buffer + i, len - i);

	/*
	 * Check that nothing has been added in the meantime and truncate
	 * the buffer. Note that atomic_cmpxchg() is an implicit memory
	 * barrier that makes sure that the data were copied before
	 * updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, 0) != len)
		goto more;

out:
	report_message_lost(s);
	raw_spin_unlock_irqrestore(&read_lock, flags);
}
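
/*
 * Worked example of the resume logic above: if the reader samples
 * len = 100, flushes bytes 0..99, and a writer appends 20 bytes in the
 * meantime, then cmpxchg(&s->len, 100, 0) fails, the loop re-reads
 * len = 120 and flushes only bytes 100..119 before truncating to zero.
 * The matching user-space reader for the demo_log_store() sketch shown
 * earlier (same caveats; demo_flush_line() stands in for the deferred
 * printk):
 */
static void demo_flush_line(const char *text, int len)
{
	fprintf(stderr, "%.*s", len, text);
}

static void demo_flush(struct demo_seq_buf *s)
{
	int i = 0, len;

more:
	len = atomic_load(&s->len);
	if (!len)
		return;			/* somebody else flushed it */

	demo_flush_line(s->buffer + i, len - i);
	i = len;

	/* Truncate; if a writer appended meanwhile, flush the tail too. */
	if (!atomic_compare_exchange_strong(&s->len, &len, 0))
		goto more;
}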

/**
 * printk_safe_flush - flush all per-cpu nmi and safe buffers.
 *
 * The buffers are flushed automatically via IRQ work. This function
 * is useful only when someone wants to be sure that all buffers have
 * been flushed at some point.
 */
void printk_safe_flush(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
#ifdef CONFIG_PRINTK_NMI
		__printk_safe_flush(&per_cpu(nmi_print_seq, cpu).work);
#endif
		__printk_safe_flush(&per_cpu(safe_print_seq, cpu).work);
	}
}

/**
 * printk_safe_flush_on_panic - flush all per-cpu nmi and safe buffers when
 *	the system goes down.
 *
 * Similar to printk_safe_flush() but it can be called even in NMI context when
 * the system goes down. It makes a best effort to get NMI messages into
 * the main ring buffer.
 *
 * Note that it could try harder when there is only one CPU online.
 */
void printk_safe_flush_on_panic(void)
{
	/*
	 * Make sure that we can access the main ring buffer.
	 * Do not risk a double release when more CPUs are up.
	 */
	if (raw_spin_is_locked(&logbuf_lock)) {
		if (num_online_cpus() > 1)
			return;

		debug_locks_off();
		raw_spin_lock_init(&logbuf_lock);
	}

	printk_safe_flush();
}

#ifdef CONFIG_PRINTK_NMI
/*
 * Safe printk() for NMI context. It uses a per-CPU buffer to
 * store the message. NMIs are not nested, so there is always only
 * one writer running. But the buffer might get flushed from another
 * CPU, so we need to be careful.
 */
static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
	struct printk_safe_seq_buf *s = this_cpu_ptr(&nmi_print_seq);

	return printk_safe_log_store(s, fmt, args);
}

void notrace printk_nmi_enter(void)
{
	this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
}

void notrace printk_nmi_exit(void)
{
	this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
}

/*
 * Marks code that might produce many messages in NMI context,
 * where the risk of losing them is more critical than possible
 * reordering.
 *
 * It takes effect only when called in NMI context. Then printk()
 * will try to store the messages into the main logbuf directly
 * and use the per-CPU buffers only as a fallback when the lock
 * is not available.
 */
void printk_nmi_direct_enter(void)
{
	if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
		this_cpu_or(printk_context, PRINTK_NMI_DIRECT_CONTEXT_MASK);
}

void printk_nmi_direct_exit(void)
{
	this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK);
}
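
/*
 * For orientation, @printk_context packs all three states into one int.
 * The mask values live in kernel/printk/internal.h; the figures below
 * are quoted from memory for this series and should be treated as
 * illustrative rather than authoritative:
 *
 *	#define PRINTK_SAFE_CONTEXT_MASK	 0x3fffffff  // nesting counter
 *	#define PRINTK_NMI_DIRECT_CONTEXT_MASK	 0x40000000  // one flag bit
 *	#define PRINTK_NMI_CONTEXT_MASK		 0x80000000  // one flag bit
 *
 * The low bits form a counter driven by __printk_safe_enter()/_exit(),
 * while the NMI paths OR in and AND out the two high flag bits, so the
 * states can be updated independently without locking.
 */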

#else

static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
	return 0;
}

#endif /* CONFIG_PRINTK_NMI */

/*
 * Lock-less printk(), to avoid deadlocks should the printk() recurse
 * into itself. It uses a per-CPU buffer to store the message, just like
 * NMI.
 */
static __printf(1, 0) int vprintk_safe(const char *fmt, va_list args)
{
	struct printk_safe_seq_buf *s = this_cpu_ptr(&safe_print_seq);

	return printk_safe_log_store(s, fmt, args);
}

/* Can be preempted by NMI. */
void __printk_safe_enter(void)
{
	this_cpu_inc(printk_context);
}

/* Can be preempted by NMI. */
void __printk_safe_exit(void)
{
	this_cpu_dec(printk_context);
}
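
/*
 * These counters are not used directly by callers; internal.h pairs them
 * with IRQ disabling in helper macros along these lines (a sketch from
 * memory, not quoted from this tree):
 *
 *	#define printk_safe_enter_irqsave(flags)	\
 *		do {					\
 *			local_irq_save(flags);		\
 *			__printk_safe_enter();		\
 *		} while (0)
 *
 * Code that takes logbuf_lock runs inside such a section, so a printk()
 * recursing from there is redirected into vprintk_safe() instead of
 * deadlocking on the lock it already holds.
 */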

__printf(1, 0) int vprintk_func(const char *fmt, va_list args)
{
#ifdef CONFIG_KGDB_KDB
	/* Allow passing printk() to kdb but avoid recursion. */
	if (unlikely(kdb_trap_printk && kdb_printf_cpu < 0))
		return vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args);
#endif

	/*
	 * Try to use the main logbuf even in NMI. But avoid calling console
	 * drivers that might have their own locks.
	 */
	if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK) &&
	    raw_spin_trylock(&logbuf_lock)) {
		int len;

		len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
		raw_spin_unlock(&logbuf_lock);
		defer_console_output();
		return len;
	}

	/* Use extra buffer in NMI when logbuf_lock is taken or in safe mode. */
	if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
		return vprintk_nmi(fmt, args);

	/* Use extra buffer to prevent a recursion deadlock in safe mode. */
	if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
		return vprintk_safe(fmt, args);

	/* No obstacles. */
	return vprintk_default(fmt, args);
}
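
/*
 * Worked example of the dispatch order above: a plain printk() from an
 * NMI handler sees PRINTK_NMI_CONTEXT_MASK set and is stored by
 * vprintk_nmi() into this CPU's nmi_print_seq buffer; the queued
 * irq_work flushes it into the main log once the NMI has returned. Only
 * when printk_nmi_direct_enter() was called first (e.g. around a long
 * backtrace) does the trylock path attempt the main logbuf directly,
 * falling back to the per-CPU buffer while logbuf_lock is contended.
 */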
 
void __init printk_safe_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct printk_safe_seq_buf *s;

		s = &per_cpu(safe_print_seq, cpu);
		init_irq_work(&s->work, __printk_safe_flush);

#ifdef CONFIG_PRINTK_NMI
		s = &per_cpu(nmi_print_seq, cpu);
		init_irq_work(&s->work, __printk_safe_flush);
#endif
	}

	/*
	 * In the highly unlikely event that an NMI were to trigger at
	 * this moment, make sure IRQ work is set up before this
	 * variable is set.
	 */
	barrier();
	printk_safe_irq_ready = 1;

	/* Flush pending messages that did not have scheduled IRQ works. */
	printk_safe_flush();
}
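
/*
 * The barrier() + flag store above is the usual "initialize, then
 * publish" pattern. A portable C11 analogue would publish with a release
 * store (a sketch; the kernel gets away with a pure compiler barrier
 * here, presumably because this runs during early boot, before other
 * CPUs can observe the flag):
 *
 *	atomic_store_explicit(&ready, 1, memory_order_release);
 *
 * Readers would then pair it with an acquire load before touching the
 * irq_work structures.
 */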
v5.14.15
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * printk_safe.c - Safe printk for printk-deadlock-prone contexts
 */

#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/debug_locks.h>
#include <linux/kdb.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/printk.h>
#include <linux/kprobes.h>

#include "internal.h"

/*
 * In NMI and safe mode, printk() avoids taking locks. Instead,
 * it uses an alternative implementation that temporarily stores
 * the strings into a per-CPU buffer. The content of the buffer
 * is later flushed into the main ring buffer via IRQ work.
 *
 * The alternative implementation is chosen transparently
 * by examining the current printk() context mask stored in the
 * @printk_context per-CPU variable.
 *
 * The implementation also allows the strings to be flushed from another
 * CPU. There are situations when we want to make sure that all buffers
 * were handled or when IRQs are blocked.
 */

#define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) -	\
				sizeof(atomic_t) -			\
				sizeof(atomic_t) -			\
				sizeof(struct irq_work))

struct printk_safe_seq_buf {
	atomic_t		len;	/* length of written data */
	atomic_t		message_lost;
	struct irq_work		work;	/* IRQ work that flushes the buffer */
	unsigned char		buffer[SAFE_LOG_BUF_LEN];
};

static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq);
static DEFINE_PER_CPU(int, printk_context);

static DEFINE_RAW_SPINLOCK(safe_read_lock);

#ifdef CONFIG_PRINTK_NMI
static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
#endif

/* Get flushed in a safer context. */
static void queue_flush_work(struct printk_safe_seq_buf *s)
{
	if (printk_percpu_data_ready())
		irq_work_queue(&s->work);
}

/*
 * Add a message to the per-CPU context-dependent buffer. NMI and
 * printk-safe have dedicated buffers, because otherwise printk-safe
 * preempted by NMI-printk would have overwritten the NMI messages.
 *
 * The messages are flushed from irq work (or from panic()), possibly
 * from another CPU, concurrently with printk_safe_log_store(). Should
 * this happen, printk_safe_log_store() will notice the buffer->len
 * mismatch and repeat the write.
 */
static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
						const char *fmt, va_list args)
{
	int add;
	size_t len;
	va_list ap;

again:
	len = atomic_read(&s->len);

	/* The trailing '\0' is not counted in len. */
	if (len >= sizeof(s->buffer) - 1) {
		atomic_inc(&s->message_lost);
		queue_flush_work(s);
		return 0;
	}

	/*
	 * Make sure that all old data have been read before the buffer
	 * was reset. This is not needed when we just append data.
	 */
	if (!len)
		smp_rmb();

	va_copy(ap, args);
	add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, ap);
	va_end(ap);
	if (!add)
		return 0;

	/*
	 * Do it once again if the buffer has been flushed in the meantime.
	 * Note that atomic_cmpxchg() is an implicit memory barrier that
	 * makes sure that the data were written before updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, len + add) != len)
		goto again;

	queue_flush_work(s);
	return add;
}

static inline void printk_safe_flush_line(const char *text, int len)
{
	/*
	 * Avoid any console driver calls from here, because we may be
	 * in NMI or printk_safe context (when in panic). The messages
	 * must go only into the ring buffer at this stage.  Consoles will
	 * get explicitly called later when a crashdump is not generated.
	 */
	printk_deferred("%.*s", len, text);
}

/* printk the part of the temporary buffer, line by line */
static int printk_safe_flush_buffer(const char *start, size_t len)
{
	const char *c, *end;
	bool header;

	c = start;
	end = start + len;
	header = true;

	/* Print line by line. */
	while (c < end) {
		if (*c == '\n') {
			printk_safe_flush_line(start, c - start + 1);
			start = ++c;
			header = true;
			continue;
		}

		/* Handle continuation lines or a missing newline. */
		if ((c + 1 < end) && printk_get_level(c)) {
			if (header) {
				c = printk_skip_level(c);
				continue;
			}

			printk_safe_flush_line(start, c - start);
			start = c++;
			header = true;
			continue;
		}

		header = false;
		c++;
	}

	/* Check if there was a partial line. Ignore pure header. */
	if (start < end && !header) {
		static const char newline[] = KERN_CONT "\n";

		printk_safe_flush_line(start, end - start);
		printk_safe_flush_line(newline, strlen(newline));
	}

	return len;
}

static void report_message_lost(struct printk_safe_seq_buf *s)
{
	int lost = atomic_xchg(&s->message_lost, 0);

	if (lost)
		printk_deferred("Lost %d message(s)!\n", lost);
}

/*
 * Flush data from the associated per-CPU buffer. The function
 * can be called either via IRQ work or independently.
 */
static void __printk_safe_flush(struct irq_work *work)
{
	struct printk_safe_seq_buf *s =
		container_of(work, struct printk_safe_seq_buf, work);
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * The lock has two functions. First, one reader has to flush all
	 * available messages to make the lockless synchronization with
	 * writers easier. Second, we do not want to mix messages from
	 * different CPUs. This is especially important when printing
	 * a backtrace.
	 */
	raw_spin_lock_irqsave(&safe_read_lock, flags);

	i = 0;
more:
	len = atomic_read(&s->len);

	/*
	 * This is just a paranoid check that nobody has manipulated
	 * the buffer in an unexpected way. If we printed something then
	 * @len must only increase. Also it should never overflow the
	 * buffer size.
	 */
	if ((i && i >= len) || len > sizeof(s->buffer)) {
		const char *msg = "printk_safe_flush: internal error\n";

		printk_safe_flush_line(msg, strlen(msg));
		len = 0;
	}

	if (!len)
		goto out; /* Someone else has already flushed the buffer. */

	/* Make sure that data has been written up to @len. */
	smp_rmb();
	i += printk_safe_flush_buffer(s->buffer + i, len - i);

	/*
	 * Check that nothing has been added in the meantime and truncate
	 * the buffer. Note that atomic_cmpxchg() is an implicit memory
	 * barrier that makes sure that the data were copied before
	 * updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, 0) != len)
		goto more;

out:
	report_message_lost(s);
	raw_spin_unlock_irqrestore(&safe_read_lock, flags);
}

/**
 * printk_safe_flush - flush all per-cpu nmi and safe buffers.
 *
 * The buffers are flushed automatically via IRQ work. This function
 * is useful only when someone wants to be sure that all buffers have
 * been flushed at some point.
 */
void printk_safe_flush(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
#ifdef CONFIG_PRINTK_NMI
		__printk_safe_flush(&per_cpu(nmi_print_seq, cpu).work);
#endif
		__printk_safe_flush(&per_cpu(safe_print_seq, cpu).work);
	}
}

/**
 * printk_safe_flush_on_panic - flush all per-cpu nmi and safe buffers when
 *	the system goes down.
 *
 * Similar to printk_safe_flush() but it can be called even in NMI context when
 * the system goes down. It makes a best effort to get NMI messages into
 * the main ring buffer.
 *
 * Note that it could try harder when there is only one CPU online.
 */
void printk_safe_flush_on_panic(void)
{
	/*
	 * Make sure that we can access the safe buffers.
	 * Do not risk a double release when more CPUs are up.
	 */
	if (raw_spin_is_locked(&safe_read_lock)) {
		if (num_online_cpus() > 1)
			return;

		debug_locks_off();
		raw_spin_lock_init(&safe_read_lock);
	}

	printk_safe_flush();
}

#ifdef CONFIG_PRINTK_NMI
/*
 * Safe printk() for NMI context. It uses a per-CPU buffer to
 * store the message. NMIs are not nested, so there is always only
 * one writer running. But the buffer might get flushed from another
 * CPU, so we need to be careful.
 */
static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
	struct printk_safe_seq_buf *s = this_cpu_ptr(&nmi_print_seq);

	return printk_safe_log_store(s, fmt, args);
}

void noinstr printk_nmi_enter(void)
{
	this_cpu_add(printk_context, PRINTK_NMI_CONTEXT_OFFSET);
}

void noinstr printk_nmi_exit(void)
{
	this_cpu_sub(printk_context, PRINTK_NMI_CONTEXT_OFFSET);
}
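
/*
 * Unlike the v5.4 code above, which ORs in a single flag bit, this
 * series adds and subtracts PRINTK_NMI_CONTEXT_OFFSET, turning the NMI
 * state into a counter so that nested NMI-like exceptions balance out:
 *
 *	printk_nmi_enter();	// count 1: NMI context
 *	printk_nmi_enter();	// count 2: nested exception
 *	printk_nmi_exit();	// count 1: still NMI context
 *	printk_nmi_exit();	// count 0: back to normal
 *
 * PRINTK_NMI_CONTEXT_MASK then covers the whole counter field, so any
 * non-zero count selects the NMI path.
 */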

/*
 * Marks code that might produce many messages in NMI context,
 * where the risk of losing them is more critical than possible
 * reordering.
 *
 * It takes effect only when called in NMI context. Then printk()
 * will store the messages into the main logbuf directly.
 */
void printk_nmi_direct_enter(void)
{
	if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
		this_cpu_or(printk_context, PRINTK_NMI_DIRECT_CONTEXT_MASK);
}

void printk_nmi_direct_exit(void)
{
	this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK);
}

#else

static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
	return 0;
}

#endif /* CONFIG_PRINTK_NMI */

/*
 * Lock-less printk(), to avoid deadlocks should the printk() recurse
 * into itself. It uses a per-CPU buffer to store the message, just like
 * NMI.
 */
static __printf(1, 0) int vprintk_safe(const char *fmt, va_list args)
{
	struct printk_safe_seq_buf *s = this_cpu_ptr(&safe_print_seq);

	return printk_safe_log_store(s, fmt, args);
}

/* Can be preempted by NMI. */
void __printk_safe_enter(void)
{
	this_cpu_inc(printk_context);
}

/* Can be preempted by NMI. */
void __printk_safe_exit(void)
{
	this_cpu_dec(printk_context);
}

asmlinkage int vprintk(const char *fmt, va_list args)
{
#ifdef CONFIG_KGDB_KDB
	/* Allow passing printk() to kdb but avoid recursion. */
	if (unlikely(kdb_trap_printk && kdb_printf_cpu < 0))
		return vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args);
#endif

	/*
	 * Use the main logbuf even in NMI. But avoid calling console
	 * drivers that might have their own locks.
	 */
	if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK)) {
		unsigned long flags;
		int len;

		printk_safe_enter_irqsave(flags);
		len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
		printk_safe_exit_irqrestore(flags);
		defer_console_output();
		return len;
	}

	/* Use extra buffer in NMI. */
	if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
		return vprintk_nmi(fmt, args);

	/* Use extra buffer to prevent a recursion deadlock in safe mode. */
	if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
		return vprintk_safe(fmt, args);

	/* No obstacles. */
	return vprintk_default(fmt, args);
}
EXPORT_SYMBOL(vprintk);
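
/*
 * With vprintk() exported, modules can layer their own varargs wrappers
 * on top of it. A minimal sketch (demo_printf() is a made-up name, not a
 * kernel API):
 */
static __printf(1, 2) int demo_printf(const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = vprintk(fmt, args);	/* dispatched by printk_context */
	va_end(args);
	return ret;
}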

void __init printk_safe_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct printk_safe_seq_buf *s;

		s = &per_cpu(safe_print_seq, cpu);
		init_irq_work(&s->work, __printk_safe_flush);

#ifdef CONFIG_PRINTK_NMI
		s = &per_cpu(nmi_print_seq, cpu);
		init_irq_work(&s->work, __printk_safe_flush);
#endif
	}

	/* Flush pending messages that did not have scheduled IRQ works. */
	printk_safe_flush();
}