/*
 * nmi.c - Safe printk in NMI context
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/debug_locks.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/printk.h>

#include "internal.h"

/*
 * printk() cannot take logbuf_lock in NMI context. Instead,
 * it uses an alternative implementation that temporarily stores
 * the strings into a per-CPU buffer. The content of the buffer
 * is later flushed into the main ring buffer via IRQ work.
 *
 * The alternative implementation is chosen transparently
 * via the @printk_func per-CPU variable.
 *
 * The implementation also allows flushing the strings from another CPU.
 * There are situations when we want to make sure that all buffers
 * have been handled or when IRQs are blocked.
 */
DEFINE_PER_CPU(printk_func_t, printk_func) = vprintk_default;
static int printk_nmi_irq_ready;
atomic_t nmi_message_lost;
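
/*
 * Illustrative sketch (not part of this file): printk() selects the
 * active implementation through the @printk_func per-CPU pointer,
 * roughly like this (simplified from kernel/printk/printk.c):
 *
 *	asmlinkage int printk(const char *fmt, ...)
 *	{
 *		printk_func_t vprintk_func = this_cpu_read(printk_func);
 *		va_list args;
 *		int r;
 *
 *		va_start(args, fmt);
 *		r = vprintk_func(fmt, args);
 *		va_end(args);
 *		return r;
 *	}
 *
 * printk_nmi_enter() below switches the pointer to vprintk_nmi(), so
 * every printk() issued in NMI context lands in the per-CPU buffer.
 */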

#define NMI_LOG_BUF_LEN ((1 << CONFIG_NMI_LOG_BUF_SHIFT) -		\
			 sizeof(atomic_t) - sizeof(struct irq_work))

struct nmi_seq_buf {
	atomic_t		len;	/* length of written data */
	struct irq_work		work;	/* IRQ work that flushes the buffer */
	unsigned char		buffer[NMI_LOG_BUF_LEN];
};
static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq);
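
/*
 * Size sketch (informal, ignoring any padding between the members):
 * the layout above is chosen so that the whole structure fills exactly
 * 2^CONFIG_NMI_LOG_BUF_SHIFT bytes:
 *
 *	sizeof(atomic_t) + sizeof(struct irq_work) + NMI_LOG_BUF_LEN
 *		== 1 << CONFIG_NMI_LOG_BUF_SHIFT
 *
 * For example, with CONFIG_NMI_LOG_BUF_SHIFT == 13 each per-CPU
 * buffer, including its metadata, occupies 8 KiB.
 */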

/*
 * Safe printk() for NMI context. It uses a per-CPU buffer to
 * store the message. NMIs are not nested, so there is always only
 * one writer running. But the buffer might get flushed from another
 * CPU, so we need to be careful.
 */
static int vprintk_nmi(const char *fmt, va_list args)
{
	struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
	int add = 0;
	size_t len;

again:
	len = atomic_read(&s->len);

	/* The trailing '\0' is not counted in len. */
	if (len >= sizeof(s->buffer) - 1) {
		atomic_inc(&nmi_message_lost);
		return 0;
	}

	/*
	 * Make sure that all old data have been read before the buffer was
	 * reset. This is not needed when we just append data.
	 */
	if (!len)
		smp_rmb();

	add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, args);

	/*
	 * Do it once again if the buffer has been flushed in the meantime.
	 * Note that atomic_cmpxchg() is an implicit memory barrier that
	 * makes sure that the data were written before updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, len + add) != len)
		goto again;

	/* Get flushed in a safer context. */
	if (add && printk_nmi_irq_ready) {
		/* Make sure that IRQ work is really initialized. */
		smp_rmb();
		irq_work_queue(&s->work);
	}

	return add;
}
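
/*
 * Hypothetical interleaving that the atomic_cmpxchg() above guards
 * against (CPU0 writes in NMI context, CPU1 flushes the buffer):
 *
 *	CPU0 (vprintk_nmi)		CPU1 (__printk_nmi_flush)
 *	len = atomic_read(&s->len);
 *					flushes the data, sets s->len to 0
 *	vscnprintf(s->buffer + len, ...);
 *	atomic_cmpxchg() fails because
 *	s->len != len; the message is
 *	rewritten at the new offset via
 *	the "again" retry.
 */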

static void printk_nmi_flush_line(const char *text, int len)
{
	/*
	 * The buffers are flushed in NMI only on panic. The messages must
	 * go only into the ring buffer at this stage. Consoles will get
	 * explicitly called later, when a crashdump is not generated.
	 */
	if (in_nmi())
		printk_deferred("%.*s", len, text);
	else
		printk("%.*s", len, text);
}

/* printk the given part of the temporary buffer, line by line */
static int printk_nmi_flush_buffer(const char *start, size_t len)
{
	const char *c, *end;
	bool header;

	c = start;
	end = start + len;
	header = true;

	/* Print line by line. */
	while (c < end) {
		if (*c == '\n') {
			printk_nmi_flush_line(start, c - start + 1);
			start = ++c;
			header = true;
			continue;
		}

		/* Handle continuation lines or a missing newline. */
		if ((c + 1 < end) && printk_get_level(c)) {
			if (header) {
				c = printk_skip_level(c);
				continue;
			}

			printk_nmi_flush_line(start, c - start);
			start = c++;
			header = true;
			continue;
		}

		header = false;
		c++;
	}

	/* Check if there was a partial line. Ignore pure header. */
	if (start < end && !header) {
		static const char newline[] = KERN_CONT "\n";

		printk_nmi_flush_line(start, end - start);
		printk_nmi_flush_line(newline, strlen(newline));
	}

	return len;
}
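
/*
 * Example (hypothetical buffer content): two messages stored as
 *
 *	"\0014first\0016second\n"
 *
 * where "\001<level>" is the header recognized by printk_get_level().
 * The missing newline after "first" makes the second header appear
 * mid-line, so the loop above flushes "\0014first" as its own line
 * and starts a new line at "\0016second\n".
 */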

/*
 * Flush data from the associated per-CPU buffer. The function
 * can be called either via IRQ work or independently.
 */
static void __printk_nmi_flush(struct irq_work *work)
{
	static raw_spinlock_t read_lock =
		__RAW_SPIN_LOCK_INITIALIZER(read_lock);
	struct nmi_seq_buf *s = container_of(work, struct nmi_seq_buf, work);
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * The lock has two functions. First, one reader has to flush all
	 * available messages to make the lockless synchronization with
	 * writers easier. Second, we do not want to mix messages from
	 * different CPUs. This is especially important when printing
	 * a backtrace.
	 */
	raw_spin_lock_irqsave(&read_lock, flags);

	i = 0;
more:
	len = atomic_read(&s->len);

	/*
	 * This is just a paranoid check that nobody has manipulated
	 * the buffer in an unexpected way. If we printed something then
	 * @len must only increase. Also it should never overflow the
	 * buffer size.
	 */
	if ((i && i >= len) || len > sizeof(s->buffer)) {
		const char *msg = "printk_nmi_flush: internal error\n";

		printk_nmi_flush_line(msg, strlen(msg));
		len = 0;
	}

	if (!len)
		goto out; /* Someone else has already flushed the buffer. */

	/* Make sure that data has been written up to @len. */
	smp_rmb();
	i += printk_nmi_flush_buffer(s->buffer + i, len - i);

	/*
	 * Check that nothing has been added in the meantime and truncate
	 * the buffer. Note that atomic_cmpxchg() is an implicit memory
	 * barrier that makes sure that the data were copied before
	 * updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, 0) != len)
		goto more;

out:
	raw_spin_unlock_irqrestore(&read_lock, flags);
}
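
/*
 * Note on the retry above (informal): if a writer appended new data
 * between the flush and the atomic_cmpxchg(), the truncation fails,
 * and the "more" loop flushes only the freshly added part, starting
 * at s->buffer + i, before trying to truncate again.
 */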

/**
 * printk_nmi_flush - flush all per-cpu nmi buffers.
 *
 * The buffers are flushed automatically via IRQ work. This function
 * is useful only when someone wants to be sure that all buffers have
 * been flushed at some point.
 */
void printk_nmi_flush(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		__printk_nmi_flush(&per_cpu(nmi_print_seq, cpu).work);
}

/**
 * printk_nmi_flush_on_panic - flush all per-cpu nmi buffers when the system
 *	goes down.
 *
 * Similar to printk_nmi_flush() but it can be called even in NMI context when
 * the system goes down. It makes a best effort to get NMI messages into
 * the main ring buffer.
 *
 * Note that it could try harder when there is only one CPU online.
 */
void printk_nmi_flush_on_panic(void)
{
	/*
	 * Make sure that we can access the main ring buffer.
	 * Do not risk a double release when more CPUs are up.
	 */
	if (in_nmi() && raw_spin_is_locked(&logbuf_lock)) {
		if (num_online_cpus() > 1)
			return;

		debug_locks_off();
		raw_spin_lock_init(&logbuf_lock);
	}

	printk_nmi_flush();
}

void __init printk_nmi_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct nmi_seq_buf *s = &per_cpu(nmi_print_seq, cpu);

		init_irq_work(&s->work, __printk_nmi_flush);
	}

	/* Make sure that IRQ works are initialized before enabling. */
	smp_wmb();
	printk_nmi_irq_ready = 1;

	/* Flush pending messages that did not have an IRQ work scheduled. */
	printk_nmi_flush();
}

void printk_nmi_enter(void)
{
	this_cpu_write(printk_func, vprintk_nmi);
}

void printk_nmi_exit(void)
{
	this_cpu_write(printk_func, vprintk_default);
}
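
/*
 * Usage sketch (informal; the real wiring lives in the NMI entry
 * code): the handler is bracketed by nmi_enter()/nmi_exit(), which
 * call the two hooks above, so a plain printk() inside an NMI handler
 * is transparently redirected to the per-CPU buffer:
 *
 *	nmi_enter();		(calls printk_nmi_enter())
 *	pr_warn("NMI!\n");	(routed to vprintk_nmi())
 *	nmi_exit();		(calls printk_nmi_exit())
 */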