// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * printk_safe.c - Safe printk for printk-deadlock-prone contexts
 */

#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/debug_locks.h>
#include <linux/kdb.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/printk.h>
#include <linux/kprobes.h>

#include "internal.h"

/*
 * printk() cannot take logbuf_lock in NMI context. Instead,
 * it uses an alternative implementation that temporarily stores
 * the strings into a per-CPU buffer. The content of the buffer
 * is later flushed into the main ring buffer via IRQ work.
 *
 * The alternative implementation is chosen transparently
 * by examining the current printk() context mask stored in the
 * @printk_context per-CPU variable.
 *
 * The implementation also allows flushing the strings from another
 * CPU. This is useful when we want to make sure that all buffers
 * have been handled, or when IRQs are disabled.
 */

#define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) -	\
				sizeof(atomic_t) -			\
				sizeof(atomic_t) -			\
				sizeof(struct irq_work))

struct printk_safe_seq_buf {
	atomic_t		len;	/* length of written data */
	atomic_t		message_lost;
	struct irq_work		work;	/* IRQ work that flushes the buffer */
	unsigned char		buffer[SAFE_LOG_BUF_LEN];
};

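/*
 * Sizing sketch (the numbers below are only an illustration; the real
 * sizes are arch- and config-dependent): with
 * CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT of 13, sizeof(atomic_t) of 4 and
 * sizeof(struct irq_work) of 24, SAFE_LOG_BUF_LEN is
 * 8192 - 4 - 4 - 24 = 8160 bytes, so the whole struct fits into one
 * 8 KiB chunk per CPU.
 */
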
static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq);
static DEFINE_PER_CPU(int, printk_context);

#ifdef CONFIG_PRINTK_NMI
static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
#endif

/* Get flushed in a safer context. */
static void queue_flush_work(struct printk_safe_seq_buf *s)
{
	if (printk_percpu_data_ready())
		irq_work_queue(&s->work);
}

/*
 * Add a message to the per-CPU context-dependent buffer. NMI and printk-safe
 * contexts have dedicated buffers, because otherwise printk-safe preempted by
 * NMI-printk would have overwritten the NMI messages.
 *
 * The messages are flushed from irq work (or from panic()), possibly from
 * another CPU, concurrently with printk_safe_log_store(). Should this happen,
 * printk_safe_log_store() will notice the buffer->len mismatch and repeat
 * the write. See the interleaving sketch after the function.
 */
static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
						const char *fmt, va_list args)
{
	int add;
	size_t len;
	va_list ap;

again:
	len = atomic_read(&s->len);

	/* The trailing '\0' is not counted in len. */
	if (len >= sizeof(s->buffer) - 1) {
		atomic_inc(&s->message_lost);
		queue_flush_work(s);
		return 0;
	}

	/*
	 * Make sure that all old data have been read before the buffer
	 * was reset. This is not needed when we just append data.
	 */
	if (!len)
		smp_rmb();

	va_copy(ap, args);
	add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, ap);
	va_end(ap);
	if (!add)
		return 0;

	/*
	 * Do it once again if the buffer has been flushed in the meantime.
	 * Note that atomic_cmpxchg() is an implicit memory barrier that
	 * makes sure that the data were written before updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, len + add) != len)
		goto again;

	queue_flush_work(s);
	return add;
}

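/*
 * A hypothetical writer vs. concurrent-flusher interleaving that the
 * "goto again" retry above handles (a sketch, not a real trace):
 *
 *	writer (this CPU)			flusher (other CPU)
 *	len = atomic_read(&s->len);  // 10
 *						flushes, sets s->len = 0
 *	vscnprintf(s->buffer + 10, ...);
 *	atomic_cmpxchg(&s->len, 10, 10 + add)
 *		!= 10  ->  goto again;  // rewrites at offset 0
 */
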
static inline void printk_safe_flush_line(const char *text, int len)
{
	/*
	 * Avoid any console driver calls from here, because we may be
	 * in NMI or printk_safe context (when in panic). The messages
	 * must go only into the ring buffer at this stage. Consoles will
	 * get explicitly called later when a crashdump is not generated.
	 */
	printk_deferred("%.*s", len, text);
}

/* Print the part of the temporary buffer line by line. */
static int printk_safe_flush_buffer(const char *start, size_t len)
{
	const char *c, *end;
	bool header;

	c = start;
	end = start + len;
	header = true;

	/* Print line by line. */
	while (c < end) {
		if (*c == '\n') {
			printk_safe_flush_line(start, c - start + 1);
			start = ++c;
			header = true;
			continue;
		}

		/* Handle continuation lines or a missing newline. */
		if ((c + 1 < end) && printk_get_level(c)) {
			if (header) {
				c = printk_skip_level(c);
				continue;
			}

			printk_safe_flush_line(start, c - start);
			start = c++;
			header = true;
			continue;
		}

		header = false;
		c++;
	}

	/* Check if there was a partial line. Ignore pure header. */
	if (start < end && !header) {
		static const char newline[] = KERN_CONT "\n";

		printk_safe_flush_line(start, end - start);
		printk_safe_flush_line(newline, strlen(newline));
	}

	return len;
}

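/*
 * Worked example (hypothetical buffer content): KERN_* headers are the
 * byte "\001" followed by a level character, so a buffer holding
 * "\0013one\n\0014two" (KERN_ERR "one\n" KERN_WARNING "two", with no
 * trailing newline) is flushed as three printk_deferred() calls:
 * "\0013one\n", then "\0014two", then KERN_CONT "\n" to terminate the
 * partial line.
 */
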
static void report_message_lost(struct printk_safe_seq_buf *s)
{
	int lost = atomic_xchg(&s->message_lost, 0);

	if (lost)
		printk_deferred("Lost %d message(s)!\n", lost);
}

/*
 * Flush data from the associated per-CPU buffer. The function
 * can be called either via IRQ work or independently.
 */
static void __printk_safe_flush(struct irq_work *work)
{
	static raw_spinlock_t read_lock =
		__RAW_SPIN_LOCK_INITIALIZER(read_lock);
	struct printk_safe_seq_buf *s =
		container_of(work, struct printk_safe_seq_buf, work);
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * The lock has two functions. First, one reader has to flush all
	 * available messages to make the lockless synchronization with
	 * writers easier. Second, we do not want to mix messages from
	 * different CPUs. This is especially important when printing
	 * a backtrace.
	 */
	raw_spin_lock_irqsave(&read_lock, flags);

	i = 0;
more:
	len = atomic_read(&s->len);

	/*
	 * This is just a paranoid check that nobody has manipulated
	 * the buffer in an unexpected way. If we printed something then
	 * @len must only increase. Also it should never overflow the
	 * buffer size.
	 */
	if ((i && i >= len) || len > sizeof(s->buffer)) {
		const char *msg = "printk_safe_flush: internal error\n";

		printk_safe_flush_line(msg, strlen(msg));
		len = 0;
	}

	if (!len)
		goto out; /* Someone else has already flushed the buffer. */

	/* Make sure that data has been written up to @len. */
	smp_rmb();
	i += printk_safe_flush_buffer(s->buffer + i, len - i);

	/*
	 * Check that nothing has got added in the meantime and truncate
	 * the buffer. Note that atomic_cmpxchg() is an implicit memory
	 * barrier that makes sure that the data were copied before
	 * updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, 0) != len)
		goto more;

out:
	report_message_lost(s);
	raw_spin_unlock_irqrestore(&read_lock, flags);
}

/**
 * printk_safe_flush - flush all per-cpu buffers (both printk-safe and NMI).
 *
 * The buffers are flushed automatically via IRQ work. This function
 * is useful only when someone wants to be sure that all buffers have
 * been flushed at some point.
 */
void printk_safe_flush(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
#ifdef CONFIG_PRINTK_NMI
		__printk_safe_flush(&per_cpu(nmi_print_seq, cpu).work);
#endif
		__printk_safe_flush(&per_cpu(safe_print_seq, cpu).work);
	}
}

/**
 * printk_safe_flush_on_panic - flush all per-cpu buffers when the system
 *	goes down.
 *
 * Similar to printk_safe_flush() but it can be called even in NMI context when
 * the system goes down. It makes a best effort to get the NMI messages into
 * the main ring buffer.
 *
 * Note that it could try harder when there is only one CPU online.
 */
void printk_safe_flush_on_panic(void)
{
	/*
	 * Make sure that we can access the main ring buffer.
	 * Do not risk a double release when more CPUs are up.
	 */
	if (raw_spin_is_locked(&logbuf_lock)) {
		if (num_online_cpus() > 1)
			return;

		debug_locks_off();
		raw_spin_lock_init(&logbuf_lock);
	}

	printk_safe_flush();
}

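/*
 * For context (a note, not code from this file): panic() calls
 * printk_safe_flush_on_panic() after the other CPUs have been stopped,
 * which is why re-initializing logbuf_lock above is attempted only
 * when at most one CPU is online.
 */
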
#ifdef CONFIG_PRINTK_NMI
/*
 * Safe printk() for NMI context. It uses a per-CPU buffer to
 * store the message. NMIs are not nested, so there is always only
 * one writer running. But the buffer might get flushed from another
 * CPU, so we need to be careful.
 */
static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
	struct printk_safe_seq_buf *s = this_cpu_ptr(&nmi_print_seq);

	return printk_safe_log_store(s, fmt, args);
}

/*
 * NMI context is tracked with a counter rather than a single flag
 * because nmi_enter() can nest; PRINTK_NMI_CONTEXT_OFFSET reserves
 * a byte of @printk_context for the count (see internal.h).
 */
void noinstr printk_nmi_enter(void)
{
	this_cpu_add(printk_context, PRINTK_NMI_CONTEXT_OFFSET);
}

void noinstr printk_nmi_exit(void)
{
	this_cpu_sub(printk_context, PRINTK_NMI_CONTEXT_OFFSET);
}

/*
 * Marks code that might produce many messages in NMI context
 * and where the risk of losing them is more critical than
 * possible reordering.
 *
 * It has effect only when called in NMI context. Then printk()
 * will try to store the messages into the main logbuf directly
 * and use the per-CPU buffers only as a fallback when the lock
 * is not available.
 */
void printk_nmi_direct_enter(void)
{
	if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
		this_cpu_or(printk_context, PRINTK_NMI_DIRECT_CONTEXT_MASK);
}

void printk_nmi_direct_exit(void)
{
	this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK);
}

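/*
 * A usage sketch (the caller lives outside this file; shown only as an
 * illustration): the NMI backtrace code wraps the stack dump in these
 * calls so that the many backtrace lines can go straight into the main
 * logbuf:
 *
 *	printk_nmi_direct_enter();
 *	dump_stack();
 *	printk_nmi_direct_exit();
 */
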
#else

static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
	return 0;
}

#endif /* CONFIG_PRINTK_NMI */

/*
 * Lock-less printk(), to avoid deadlocks should the printk() recurse
 * into itself. It uses a per-CPU buffer to store the message, just like
 * NMI.
 */
static __printf(1, 0) int vprintk_safe(const char *fmt, va_list args)
{
	struct printk_safe_seq_buf *s = this_cpu_ptr(&safe_print_seq);

	return printk_safe_log_store(s, fmt, args);
}

/* Can be preempted by NMI. */
void __printk_safe_enter(void)
{
	this_cpu_inc(printk_context);
}

/* Can be preempted by NMI. */
void __printk_safe_exit(void)
{
	this_cpu_dec(printk_context);
}

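/*
 * A minimal usage sketch: callers normally go through the irqsave
 * wrappers declared in internal.h rather than calling these directly:
 *
 *	unsigned long flags;
 *
 *	printk_safe_enter_irqsave(flags);
 *	printk("message that might recurse into printk()\n");
 *	printk_safe_exit_irqrestore(flags);
 */
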
__printf(1, 0) int vprintk_func(const char *fmt, va_list args)
{
#ifdef CONFIG_KGDB_KDB
	/* Allow passing printk() to kdb but avoid recursion. */
	if (unlikely(kdb_trap_printk && kdb_printf_cpu < 0))
		return vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args);
#endif

	/*
	 * Try to use the main logbuf even in NMI. But avoid calling console
	 * drivers that might have their own locks.
	 */
	if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK) &&
	    raw_spin_trylock(&logbuf_lock)) {
		int len;

		len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
		raw_spin_unlock(&logbuf_lock);
		defer_console_output();
		return len;
	}

	/* Use extra buffer in NMI when logbuf_lock is taken or in safe mode. */
	if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
		return vprintk_nmi(fmt, args);

	/* Use extra buffer to prevent a recursion deadlock in safe mode. */
	if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
		return vprintk_safe(fmt, args);

	/* No obstacles. */
	return vprintk_default(fmt, args);
}

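/*
 * Routing summary for vprintk_func() above, in order of precedence:
 *
 *	condition					handler
 *	kdb trap active					vkdb_printf()
 *	NMI direct mode and logbuf_lock available	vprintk_store()
 *	NMI context					vprintk_nmi()
 *	printk-safe context				vprintk_safe()
 *	otherwise					vprintk_default()
 */
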
void __init printk_safe_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct printk_safe_seq_buf *s;

		s = &per_cpu(safe_print_seq, cpu);
		init_irq_work(&s->work, __printk_safe_flush);

#ifdef CONFIG_PRINTK_NMI
		s = &per_cpu(nmi_print_seq, cpu);
		init_irq_work(&s->work, __printk_safe_flush);
#endif
	}

	/* Flush pending messages that could not schedule IRQ work yet. */
	printk_safe_flush();
}