// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * printk_safe.c - Safe printk for printk-deadlock-prone contexts
 */

#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/debug_locks.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/printk.h>

#include "internal.h"
/*
 * printk() cannot take logbuf_lock in NMI context. Instead,
 * it uses an alternative implementation that temporarily stores
 * the strings into a per-CPU buffer. The content of the buffer
 * is later flushed into the main ring buffer via IRQ work.
 *
 * The alternative implementation is chosen transparently
 * by examining the current printk() context mask stored in the
 * @printk_context per-CPU variable.
 *
 * The implementation also allows flushing the strings from another CPU.
 * This is useful when we want to make sure that all buffers have been
 * handled, or when IRQs are blocked.
 */
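
/*
 * The flow, in short: a printk() issued in a deadlock-prone context is
 * formatted into the per-CPU buffer by printk_safe_log_store(), which
 * queues an irq_work; the irq_work later copies the buffer into the
 * main ring buffer via __printk_safe_flush() from a safe IRQ context.
 */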
static int printk_safe_irq_ready __read_mostly;

#define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) -	\
				sizeof(atomic_t) -			\
				sizeof(atomic_t) -			\
				sizeof(struct irq_work))
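
/*
 * A worked example, assuming the default CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT
 * of 13: the whole structure below fits into 2^13 = 8192 bytes; after
 * subtracting the two atomic_t counters and the struct irq_work, roughly
 * 8 KB minus a few dozen bytes remain for the text buffer itself.
 */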

struct printk_safe_seq_buf {
	atomic_t		len;	/* length of written data */
	atomic_t		message_lost;
	struct irq_work		work;	/* IRQ work that flushes the buffer */
	unsigned char		buffer[SAFE_LOG_BUF_LEN];
};

static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq);
static DEFINE_PER_CPU(int, printk_context);

#ifdef CONFIG_PRINTK_NMI
static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
#endif

/* Get flushed in a safer context. */
static void queue_flush_work(struct printk_safe_seq_buf *s)
{
	if (printk_safe_irq_ready)
		irq_work_queue(&s->work);
}
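
/*
 * Note: irq_work_queue() is designed to be callable even from NMI
 * context, which is what makes it a safe way to request a flush here.
 */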

/*
 * Add a message to the per-CPU, context-dependent buffer. NMI and
 * printk-safe have dedicated buffers, because otherwise printk-safe,
 * when preempted by NMI-printk, would overwrite the NMI messages.
 *
 * The messages are flushed from irq work (or from panic()), possibly
 * from another CPU, concurrently with printk_safe_log_store(). Should
 * this happen, printk_safe_log_store() will notice the buffer->len
 * mismatch and repeat the write.
 */
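/*
 * A possible interleaving, as a sketch: the writer reads len == L and
 * formats its message at s->buffer[L]; meanwhile a flush on another CPU
 * prints the buffer and resets s->len to 0. The writer's
 * atomic_cmpxchg(&s->len, L, L + add) then fails (len is now 0), so it
 * jumps back to "again" and re-formats the message at the new position.
 */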
static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
						const char *fmt, va_list args)
{
	int add;
	size_t len;
	va_list ap;

again:
	len = atomic_read(&s->len);

	/* The trailing '\0' is not counted into len. */
	if (len >= sizeof(s->buffer) - 1) {
		atomic_inc(&s->message_lost);
		queue_flush_work(s);
		return 0;
	}

	/*
	 * Make sure that all old data have been read before the buffer
	 * was reset. This is not needed when we just append data.
	 */
	if (!len)
		smp_rmb();

	va_copy(ap, args);
	add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, ap);
	va_end(ap);
	if (!add)
		return 0;

	/*
	 * Do it once again if the buffer has been flushed in the meantime.
	 * Note that atomic_cmpxchg() is an implicit memory barrier that
	 * makes sure that the data were written before updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, len + add) != len)
		goto again;

	queue_flush_work(s);
	return add;
}

static inline void printk_safe_flush_line(const char *text, int len)
{
	/*
	 * Avoid any console driver calls from here, because we may be
	 * in NMI or printk_safe context (when in panic). The messages
	 * must go only into the ring buffer at this stage. Consoles will
	 * get explicitly called later when a crashdump is not generated.
	 */
	printk_deferred("%.*s", len, text);
}

/* printk part of the temporary buffer line by line */
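/*
 * For illustration: a buffer containing "\0014foo\n\0016bar" (two messages
 * with KERN_WARNING and KERN_INFO headers) is flushed as two separate
 * printk_deferred() calls; the trailing message without a '\n' gets a
 * KERN_CONT "\n" appended below, so it does not merge with later output.
 */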
static int printk_safe_flush_buffer(const char *start, size_t len)
{
	const char *c, *end;
	bool header;

	c = start;
	end = start + len;
	header = true;

	/* Print line by line. */
	while (c < end) {
		if (*c == '\n') {
			printk_safe_flush_line(start, c - start + 1);
			start = ++c;
			header = true;
			continue;
		}

		/* Handle continuation lines or a missing newline. */
		if ((c + 1 < end) && printk_get_level(c)) {
			if (header) {
				c = printk_skip_level(c);
				continue;
			}

			printk_safe_flush_line(start, c - start);
			start = c++;
			header = true;
			continue;
		}

		header = false;
		c++;
	}

	/* Check if there was a partial line. Ignore a pure header. */
	if (start < end && !header) {
		static const char newline[] = KERN_CONT "\n";

		printk_safe_flush_line(start, end - start);
		printk_safe_flush_line(newline, strlen(newline));
	}

	return len;
}

static void report_message_lost(struct printk_safe_seq_buf *s)
{
	int lost = atomic_xchg(&s->message_lost, 0);

	if (lost)
		printk_deferred("Lost %d message(s)!\n", lost);
}

/*
 * Flush data from the associated per-CPU buffer. The function
 * can be called either via IRQ work or independently.
 */
static void __printk_safe_flush(struct irq_work *work)
{
	static raw_spinlock_t read_lock =
		__RAW_SPIN_LOCK_INITIALIZER(read_lock);
	struct printk_safe_seq_buf *s =
		container_of(work, struct printk_safe_seq_buf, work);
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * The lock has two functions. First, one reader has to flush all
	 * available messages to make the lockless synchronization with
	 * writers easier. Second, we do not want to mix messages from
	 * different CPUs. This is especially important when printing
	 * a backtrace.
	 */
	raw_spin_lock_irqsave(&read_lock, flags);

	i = 0;
more:
	len = atomic_read(&s->len);

	/*
	 * This is just a paranoid check that nobody has manipulated
	 * the buffer in an unexpected way. If we printed something then
	 * @len must only increase. Also it should never overflow the
	 * buffer size.
	 */
	if ((i && i >= len) || len > sizeof(s->buffer)) {
		const char *msg = "printk_safe_flush: internal error\n";

		printk_safe_flush_line(msg, strlen(msg));
		len = 0;
	}

	if (!len)
		goto out; /* Someone else has already flushed the buffer. */

	/* Make sure that data has been written up to @len. */
	smp_rmb();
	i += printk_safe_flush_buffer(s->buffer + i, len - i);

	/*
	 * Check that nothing has got added in the meantime and truncate
	 * the buffer. Note that atomic_cmpxchg() is an implicit memory
	 * barrier that makes sure that the data were copied before
	 * updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, 0) != len)
		goto more;

out:
	report_message_lost(s);
	raw_spin_unlock_irqrestore(&read_lock, flags);
}

/**
 * printk_safe_flush - flush all per-CPU NMI buffers.
 *
 * The buffers are flushed automatically via IRQ work. This function
 * is useful only when someone wants to be sure that all buffers have
 * been flushed at some point.
 */
void printk_safe_flush(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
#ifdef CONFIG_PRINTK_NMI
		__printk_safe_flush(&per_cpu(nmi_print_seq, cpu).work);
#endif
		__printk_safe_flush(&per_cpu(safe_print_seq, cpu).work);
	}
}

/**
 * printk_safe_flush_on_panic - flush all per-CPU NMI buffers when the system
 *	goes down.
 *
 * Similar to printk_safe_flush() but it can be called even in NMI context when
 * the system goes down. It makes a best effort to get NMI messages into
 * the main ring buffer.
 *
 * Note that it could try harder when there is only one CPU online.
 */
void printk_safe_flush_on_panic(void)
{
	/*
	 * Make sure that we can access the main ring buffer.
	 * Do not risk a double release when more CPUs are up.
	 */
	if (raw_spin_is_locked(&logbuf_lock)) {
		if (num_online_cpus() > 1)
			return;

		debug_locks_off();
		raw_spin_lock_init(&logbuf_lock);
	}

	printk_safe_flush();
}
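
/*
 * Re-initializing a possibly held lock above is a deliberate last-resort
 * hack: with only one CPU online, whoever took logbuf_lock can no longer
 * be running (it was this CPU's interrupted context, or a CPU that has
 * already been stopped), so the lock would otherwise never be released.
 */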

#ifdef CONFIG_PRINTK_NMI
/*
 * Safe printk() for NMI context. It uses a per-CPU buffer to
 * store the message. NMIs are not nested, so there is always only
 * one writer running. But the buffer might get flushed from another
 * CPU, so we need to be careful.
 */
static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
	struct printk_safe_seq_buf *s = this_cpu_ptr(&nmi_print_seq);

	return printk_safe_log_store(s, fmt, args);
}

void notrace printk_nmi_enter(void)
{
	this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
}

void notrace printk_nmi_exit(void)
{
	this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
}

/*
 * Marks a code section that may produce many messages in NMI context
 * and where the risk of losing them is more critical than possible
 * reordering.
 *
 * It has an effect only when called in NMI context. Then printk()
 * will try to store the messages into the main logbuf directly
 * and use the per-CPU buffers only as a fallback when the lock
 * is not available.
 */
void printk_nmi_direct_enter(void)
{
	if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
		this_cpu_or(printk_context, PRINTK_NMI_DIRECT_CONTEXT_MASK);
}

void printk_nmi_direct_exit(void)
{
	this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK);
}

#else

static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
	return 0;
}

#endif /* CONFIG_PRINTK_NMI */

/*
 * Lock-less printk(), to avoid deadlocks should the printk() recurse
 * into itself. It uses a per-CPU buffer to store the message, just like
 * NMI.
 */
static __printf(1, 0) int vprintk_safe(const char *fmt, va_list args)
{
	struct printk_safe_seq_buf *s = this_cpu_ptr(&safe_print_seq);

	return printk_safe_log_store(s, fmt, args);
}

/* Can be preempted by NMI. */
void __printk_safe_enter(void)
{
	this_cpu_inc(printk_context);
}

/* Can be preempted by NMI. */
void __printk_safe_exit(void)
{
	this_cpu_dec(printk_context);
}

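/*
 * A minimal usage sketch, assuming the printk_safe_enter_irqsave() and
 * printk_safe_exit_irqrestore() wrappers from internal.h:
 *
 *	unsigned long flags;
 *
 *	printk_safe_enter_irqsave(flags);
 *	raw_spin_lock(&logbuf_lock);
 *	...
 *	raw_spin_unlock(&logbuf_lock);
 *	printk_safe_exit_irqrestore(flags);
 *
 * Any printk() issued between enter and exit is then redirected by
 * vprintk_func() below into the per-CPU buffer instead of recursing
 * into the locked ring buffer.
 */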
__printf(1, 0) int vprintk_func(const char *fmt, va_list args)
{
	/*
	 * Try to use the main logbuf even in NMI. But avoid calling console
	 * drivers that might have their own locks.
	 */
	if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK) &&
	    raw_spin_trylock(&logbuf_lock)) {
		int len;

		len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
		raw_spin_unlock(&logbuf_lock);
		defer_console_output();
		return len;
	}

	/* Use extra buffer in NMI when logbuf_lock is taken or in safe mode. */
	if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
		return vprintk_nmi(fmt, args);

	/* Use extra buffer to prevent a recursion deadlock in safe mode. */
	if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
		return vprintk_safe(fmt, args);

	/* No obstacles. */
	return vprintk_default(fmt, args);
}

void __init printk_safe_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct printk_safe_seq_buf *s;

		s = &per_cpu(safe_print_seq, cpu);
		init_irq_work(&s->work, __printk_safe_flush);

#ifdef CONFIG_PRINTK_NMI
		s = &per_cpu(nmi_print_seq, cpu);
		init_irq_work(&s->work, __printk_safe_flush);
#endif
	}

	/*
	 * In the highly unlikely event that an NMI were to trigger at
	 * this moment, make sure the IRQ work is set up before this
	 * variable is set.
	 */
	barrier();
	printk_safe_irq_ready = 1;

	/* Flush pending messages that did not have scheduled IRQ works. */
	printk_safe_flush();
}