arch/x86/kernel/dumpstack_64.c (v3.5.6)
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/kexec.h>
#include <linux/sysfs.h>
#include <linux/bug.h>
#include <linux/nmi.h>

#include <asm/stacktrace.h>


#define N_EXCEPTION_STACKS_END \
		(N_EXCEPTION_STACKS + DEBUG_STKSZ/EXCEPTION_STKSZ - 2)

static char x86_stack_ids[][8] = {
		[ DEBUG_STACK-1			]	= "#DB",
		[ NMI_STACK-1			]	= "NMI",
		[ DOUBLEFAULT_STACK-1		]	= "#DF",
		[ STACKFAULT_STACK-1		]	= "#SS",
		[ MCE_STACK-1			]	= "#MC",
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		[ N_EXCEPTION_STACKS ...
		  N_EXCEPTION_STACKS_END	]	= "#DB[?]"
#endif
};

static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
					 unsigned *usedp, char **idp)
{
	unsigned k;

	/*
	 * Iterate over all exception stacks, and figure out whether
	 * 'stack' is in one of them:
	 */
	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
		unsigned long end = per_cpu(orig_ist, cpu).ist[k];
		/*
		 * Is 'stack' above this exception frame's end?
		 * If yes then skip to the next frame.
		 */
		if (stack >= end)
			continue;
		/*
		 * Is 'stack' above this exception frame's start address?
		 * If yes then we found the right frame.
		 */
		if (stack >= end - EXCEPTION_STKSZ) {
			/*
			 * Make sure we only iterate through an exception
			 * stack once. If it comes up for the second time
			 * then there's something wrong going on - just
			 * break out and return NULL:
			 */
			if (*usedp & (1U << k))
				break;
			*usedp |= 1U << k;
			*idp = x86_stack_ids[k];
			return (unsigned long *)end;
		}
		/*
		 * If this is a debug stack, and if it has a larger size than
		 * the usual exception stacks, then 'stack' might still
		 * be within the lower portion of the debug stack:
		 */
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
			unsigned j = N_EXCEPTION_STACKS - 1;

			/*
			 * Black magic. A large debug stack is composed of
			 * multiple exception stack entries, which we
			 * iterate through now. Dont look:
			 */
			do {
				++j;
				end -= EXCEPTION_STKSZ;
				x86_stack_ids[j][4] = '1' +
						(j - N_EXCEPTION_STACKS);
			} while (stack < end - EXCEPTION_STKSZ);
			if (*usedp & (1U << j))
				break;
			*usedp |= 1U << j;
			*idp = x86_stack_ids[j];
			return (unsigned long *)end;
		}
#endif
	}
	return NULL;
}

static inline int
in_irq_stack(unsigned long *stack, unsigned long *irq_stack,
	     unsigned long *irq_stack_end)
{
	return (stack >= irq_stack && stack < irq_stack_end);
}

/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */

void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
{
	const unsigned cpu = get_cpu();
	unsigned long *irq_stack_end =
		(unsigned long *)per_cpu(irq_stack_ptr, cpu);
	unsigned used = 0;
	struct thread_info *tinfo;
	int graph = 0;
	unsigned long dummy;

	if (!task)
		task = current;

	if (!stack) {
		if (regs)
			stack = (unsigned long *)regs->sp;
		else if (task != current)
			stack = (unsigned long *)task->thread.sp;
		else
			stack = &dummy;
	}

	if (!bp)
		bp = stack_frame(task, regs);
	/*
	 * Print function call entries in all stacks, starting at the
	 * current stack address. If the stacks consist of nested
	 * exceptions
	 */
	tinfo = task_thread_info(task);
	for (;;) {
		char *id;
		unsigned long *estack_end;
		estack_end = in_exception_stack(cpu, (unsigned long)stack,
						&used, &id);

		if (estack_end) {
			if (ops->stack(data, id) < 0)
				break;

			bp = ops->walk_stack(tinfo, stack, bp, ops,
					     data, estack_end, &graph);
			ops->stack(data, "<EOE>");
			/*
			 * We link to the next stack via the
			 * second-to-last pointer (index -2 to end) in the
			 * exception stack:
			 */
			stack = (unsigned long *) estack_end[-2];
			continue;
		}
		if (irq_stack_end) {
			unsigned long *irq_stack;
			irq_stack = irq_stack_end -
				(IRQ_STACK_SIZE - 64) / sizeof(*irq_stack);

			if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
				if (ops->stack(data, "IRQ") < 0)
					break;
				bp = ops->walk_stack(tinfo, stack, bp,
					ops, data, irq_stack_end, &graph);
				/*
				 * We link to the next stack (which would be
				 * the process stack normally) the last
				 * pointer (index -1 to end) in the IRQ stack:
				 */
				stack = (unsigned long *) (irq_stack_end[-1]);
				irq_stack_end = NULL;
				ops->stack(data, "EOI");
				continue;
			}
		}
		break;
	}

	/*
	 * This handles the process stack:
	 */
	bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
	put_cpu();
}
EXPORT_SYMBOL(dump_trace);

void
show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
		   unsigned long *sp, unsigned long bp, char *log_lvl)
{
	unsigned long *irq_stack_end;
	unsigned long *irq_stack;
	unsigned long *stack;
	int cpu;
	int i;

	preempt_disable();
	cpu = smp_processor_id();

	irq_stack_end	= (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
	irq_stack	= (unsigned long *)(per_cpu(irq_stack_ptr, cpu) - IRQ_STACK_SIZE);

	/*
	 * Debugging aid: "show_stack(NULL, NULL);" prints the
	 * back trace for this cpu:
	 */
	if (sp == NULL) {
		if (task)
			sp = (unsigned long *)task->thread.sp;
		else
			sp = (unsigned long *)&sp;
	}

	stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (stack >= irq_stack && stack <= irq_stack_end) {
			if (stack == irq_stack_end) {
				stack = (unsigned long *) (irq_stack_end[-1]);
				printk(KERN_CONT " <EOI> ");
			}
		} else {
		if (((long) stack & (THREAD_SIZE-1)) == 0)
			break;
		}
		if (i && ((i % STACKSLOTS_PER_LINE) == 0))
			printk(KERN_CONT "\n");
		printk(KERN_CONT " %016lx", *stack++);
		touch_nmi_watchdog();
	}
	preempt_enable();

	printk(KERN_CONT "\n");
	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}

void show_regs(struct pt_regs *regs)
{
	int i;
	unsigned long sp;
	const int cpu = smp_processor_id();
	struct task_struct *cur = current;

	sp = regs->sp;
	printk("CPU %d ", cpu);
	print_modules();
	__show_regs(regs, 1);
	printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
		cur->comm, cur->pid, task_thread_info(cur), cur);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (!user_mode(regs)) {
		unsigned int code_prologue = code_bytes * 43 / 64;
		unsigned int code_len = code_bytes;
		unsigned char c;
		u8 *ip;

		printk(KERN_DEFAULT "Stack:\n");
		show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
				   0, KERN_DEFAULT);

		printk(KERN_DEFAULT "Code: ");

		ip = (u8 *)regs->ip - code_prologue;
		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
			/* try starting at IP */
			ip = (u8 *)regs->ip;
			code_len = code_len - code_prologue + 1;
		}
		for (i = 0; i < code_len; i++, ip++) {
			if (ip < (u8 *)PAGE_OFFSET ||
					probe_kernel_address(ip, c)) {
				printk(KERN_CONT " Bad RIP value.");
				break;
			}
			if (ip == (u8 *)regs->ip)
				printk(KERN_CONT "<%02x> ", c);
			else
				printk(KERN_CONT "%02x ", c);
		}
	}
	printk(KERN_CONT "\n");
}

int is_valid_bugaddr(unsigned long ip)
{
	unsigned short ud2;

	if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
		return 0;

	return ud2 == 0x0b0f;
}
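The heart of in_exception_stack() above is a simple range test against the recorded top ("end") of each per-CPU IST stack: an address belongs to IST entry k when end - EXCEPTION_STKSZ <= stack < end. The stand-alone user-space sketch below models just that test; the addresses, the 4 KiB stack size and the classify() helper are invented for illustration and are not kernel APIs (the real code reads per_cpu(orig_ist, cpu).ist[k] and additionally handles the oversized #DB stack).

/*
 * Stand-alone model of the range check performed by in_exception_stack().
 * All constants and addresses are made up for illustration only.
 */
#include <stdio.h>

#define EXCEPTION_STKSZ		4096UL	/* hypothetical per-IST stack size */
#define N_EXCEPTION_STACKS	5

/* Hypothetical top-of-stack addresses, one per IST entry. */
static const unsigned long ist_end[N_EXCEPTION_STACKS] = {
	0xffff880000014000UL,	/* #DB */
	0xffff880000018000UL,	/* NMI */
	0xffff88000001c000UL,	/* #DF */
	0xffff880000020000UL,	/* #SS */
	0xffff880000024000UL,	/* #MC */
};

static const char *stack_ids[N_EXCEPTION_STACKS] = {
	"#DB", "NMI", "#DF", "#SS", "#MC"
};

/* Return the id of the exception stack containing 'addr', or NULL. */
static const char *classify(unsigned long addr)
{
	unsigned k;

	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
		unsigned long end = ist_end[k];

		/* Same test as the kernel: end - EXCEPTION_STKSZ <= addr < end */
		if (addr >= end)
			continue;
		if (addr >= end - EXCEPTION_STKSZ)
			return stack_ids[k];
	}
	return NULL;
}

int main(void)
{
	unsigned long probe = 0xffff880000017f80UL;	/* inside the NMI range above */
	const char *id = classify(probe);

	printf("%#lx -> %s\n", probe, id ? id : "not an exception stack");
	return 0;
}

Compiled with any plain C compiler, the program reports which of the made-up IST ranges the probe address falls into, mirroring the lookup that dump_trace() relies on to decide whether to switch stacks.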
arch/x86/kernel/dumpstack_64.c (v3.15)
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/kexec.h>
#include <linux/sysfs.h>
#include <linux/bug.h>
#include <linux/nmi.h>

#include <asm/stacktrace.h>


#define N_EXCEPTION_STACKS_END \
		(N_EXCEPTION_STACKS + DEBUG_STKSZ/EXCEPTION_STKSZ - 2)

static char x86_stack_ids[][8] = {
		[ DEBUG_STACK-1			]	= "#DB",
		[ NMI_STACK-1			]	= "NMI",
		[ DOUBLEFAULT_STACK-1		]	= "#DF",
		[ STACKFAULT_STACK-1		]	= "#SS",
		[ MCE_STACK-1			]	= "#MC",
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		[ N_EXCEPTION_STACKS ...
		  N_EXCEPTION_STACKS_END	]	= "#DB[?]"
#endif
};

static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
					 unsigned *usedp, char **idp)
{
	unsigned k;

	/*
	 * Iterate over all exception stacks, and figure out whether
	 * 'stack' is in one of them:
	 */
	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
		unsigned long end = per_cpu(orig_ist, cpu).ist[k];
		/*
		 * Is 'stack' above this exception frame's end?
		 * If yes then skip to the next frame.
		 */
		if (stack >= end)
			continue;
		/*
		 * Is 'stack' above this exception frame's start address?
		 * If yes then we found the right frame.
		 */
		if (stack >= end - EXCEPTION_STKSZ) {
			/*
			 * Make sure we only iterate through an exception
			 * stack once. If it comes up for the second time
			 * then there's something wrong going on - just
			 * break out and return NULL:
			 */
			if (*usedp & (1U << k))
				break;
			*usedp |= 1U << k;
			*idp = x86_stack_ids[k];
			return (unsigned long *)end;
		}
		/*
		 * If this is a debug stack, and if it has a larger size than
		 * the usual exception stacks, then 'stack' might still
		 * be within the lower portion of the debug stack:
		 */
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
			unsigned j = N_EXCEPTION_STACKS - 1;

			/*
			 * Black magic. A large debug stack is composed of
			 * multiple exception stack entries, which we
			 * iterate through now. Dont look:
			 */
			do {
				++j;
				end -= EXCEPTION_STKSZ;
				x86_stack_ids[j][4] = '1' +
						(j - N_EXCEPTION_STACKS);
			} while (stack < end - EXCEPTION_STKSZ);
			if (*usedp & (1U << j))
				break;
			*usedp |= 1U << j;
			*idp = x86_stack_ids[j];
			return (unsigned long *)end;
		}
#endif
	}
	return NULL;
}

static inline int
in_irq_stack(unsigned long *stack, unsigned long *irq_stack,
	     unsigned long *irq_stack_end)
{
	return (stack >= irq_stack && stack < irq_stack_end);
}

static const unsigned long irq_stack_size =
	(IRQ_STACK_SIZE - 64) / sizeof(unsigned long);

enum stack_type {
	STACK_IS_UNKNOWN,
	STACK_IS_NORMAL,
	STACK_IS_EXCEPTION,
	STACK_IS_IRQ,
};

static enum stack_type
analyze_stack(int cpu, struct task_struct *task, unsigned long *stack,
	      unsigned long **stack_end, unsigned long *irq_stack,
	      unsigned *used, char **id)
{
	unsigned long addr;

	addr = ((unsigned long)stack & (~(THREAD_SIZE - 1)));
	if ((unsigned long)task_stack_page(task) == addr)
		return STACK_IS_NORMAL;

	*stack_end = in_exception_stack(cpu, (unsigned long)stack,
					used, id);
	if (*stack_end)
		return STACK_IS_EXCEPTION;

	if (!irq_stack)
		return STACK_IS_NORMAL;

	*stack_end = irq_stack;
	irq_stack = irq_stack - irq_stack_size;

	if (in_irq_stack(stack, irq_stack, *stack_end))
		return STACK_IS_IRQ;

	return STACK_IS_UNKNOWN;
}

/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */

void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
{
	const unsigned cpu = get_cpu();
	struct thread_info *tinfo;
	unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
	unsigned long dummy;
	unsigned used = 0;
	int graph = 0;
	int done = 0;

	if (!task)
		task = current;

	if (!stack) {
		if (regs)
			stack = (unsigned long *)regs->sp;
		else if (task != current)
			stack = (unsigned long *)task->thread.sp;
		else
			stack = &dummy;
	}

	if (!bp)
		bp = stack_frame(task, regs);
	/*
	 * Print function call entries in all stacks, starting at the
	 * current stack address. If the stacks consist of nested
	 * exceptions
	 */
	tinfo = task_thread_info(task);
	while (!done) {
		unsigned long *stack_end;
		enum stack_type stype;
		char *id;

		stype = analyze_stack(cpu, task, stack, &stack_end,
				      irq_stack, &used, &id);

		/* Default finish unless specified to continue */
		done = 1;

		switch (stype) {

		/* Break out early if we are on the thread stack */
		case STACK_IS_NORMAL:
			break;

		case STACK_IS_EXCEPTION:

			if (ops->stack(data, id) < 0)
				break;

			bp = ops->walk_stack(tinfo, stack, bp, ops,
					     data, stack_end, &graph);
			ops->stack(data, "<EOE>");
			/*
			 * We link to the next stack via the
			 * second-to-last pointer (index -2 to end) in the
			 * exception stack:
			 */
			stack = (unsigned long *) stack_end[-2];
			done = 0;
			break;

		case STACK_IS_IRQ:

			if (ops->stack(data, "IRQ") < 0)
				break;
			bp = ops->walk_stack(tinfo, stack, bp,
				     ops, data, stack_end, &graph);
			/*
			 * We link to the next stack (which would be
			 * the process stack normally) the last
			 * pointer (index -1 to end) in the IRQ stack:
			 */
			stack = (unsigned long *) (stack_end[-1]);
			irq_stack = NULL;
			ops->stack(data, "EOI");
			done = 0;
			break;

		case STACK_IS_UNKNOWN:
			ops->stack(data, "UNK");
			break;
		}
	}

	/*
	 * This handles the process stack:
	 */
	bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
	put_cpu();
}
EXPORT_SYMBOL(dump_trace);

void
show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
		   unsigned long *sp, unsigned long bp, char *log_lvl)
{
	unsigned long *irq_stack_end;
	unsigned long *irq_stack;
	unsigned long *stack;
	int cpu;
	int i;

	preempt_disable();
	cpu = smp_processor_id();

	irq_stack_end	= (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
	irq_stack	= (unsigned long *)(per_cpu(irq_stack_ptr, cpu) - IRQ_STACK_SIZE);

	/*
	 * Debugging aid: "show_stack(NULL, NULL);" prints the
	 * back trace for this cpu:
	 */
	if (sp == NULL) {
		if (task)
			sp = (unsigned long *)task->thread.sp;
		else
			sp = (unsigned long *)&sp;
	}

	stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (stack >= irq_stack && stack <= irq_stack_end) {
			if (stack == irq_stack_end) {
				stack = (unsigned long *) (irq_stack_end[-1]);
				pr_cont(" <EOI> ");
			}
		} else {
		if (((long) stack & (THREAD_SIZE-1)) == 0)
			break;
		}
		if (i && ((i % STACKSLOTS_PER_LINE) == 0))
			pr_cont("\n");
		pr_cont(" %016lx", *stack++);
		touch_nmi_watchdog();
	}
	preempt_enable();

	pr_cont("\n");
	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}

void show_regs(struct pt_regs *regs)
{
	int i;
	unsigned long sp;

	sp = regs->sp;
	show_regs_print_info(KERN_DEFAULT);
	__show_regs(regs, 1);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (!user_mode(regs)) {
		unsigned int code_prologue = code_bytes * 43 / 64;
		unsigned int code_len = code_bytes;
		unsigned char c;
		u8 *ip;

		printk(KERN_DEFAULT "Stack:\n");
		show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
				   0, KERN_DEFAULT);

		printk(KERN_DEFAULT "Code: ");

		ip = (u8 *)regs->ip - code_prologue;
		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
			/* try starting at IP */
			ip = (u8 *)regs->ip;
			code_len = code_len - code_prologue + 1;
		}
		for (i = 0; i < code_len; i++, ip++) {
			if (ip < (u8 *)PAGE_OFFSET ||
					probe_kernel_address(ip, c)) {
				pr_cont(" Bad RIP value.");
				break;
			}
			if (ip == (u8 *)regs->ip)
				pr_cont("<%02x> ", c);
			else
				pr_cont("%02x ", c);
		}
	}
	pr_cont("\n");
}

int is_valid_bugaddr(unsigned long ip)
{
	unsigned short ud2;

	if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
		return 0;

	return ud2 == 0x0b0f;
}
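The v3.15 rewrite replaces the open-coded checks in dump_trace() with analyze_stack(), which classifies an address as task stack, IST exception stack or per-CPU IRQ stack. Below is a minimal stand-alone model of that classification; all addresses and sizes are invented for illustration, and the classify()/name() helpers are not kernel functions. The real kernel compares against task_stack_page(task), the per-CPU IST table and per_cpu(irq_stack_ptr, cpu), and it leaves the bottom 64 bytes of the IRQ stack out of the range, which this sketch ignores.

/*
 * Stand-alone model of the classification done by analyze_stack() above.
 * All addresses and sizes are hypothetical.
 */
#include <stdio.h>

#define THREAD_SIZE	(4UL * 4096)	/* hypothetical task stack size */
#define IRQ_STACK_SIZE	(4UL * 4096)	/* hypothetical IRQ stack size */
#define EXCEPTION_STKSZ	4096UL		/* hypothetical IST stack size */

enum stack_type {
	STACK_IS_UNKNOWN,
	STACK_IS_NORMAL,
	STACK_IS_EXCEPTION,
	STACK_IS_IRQ,
};

/* Hypothetical layout for one CPU / one task. */
static const unsigned long task_stack_page = 0xffff880000100000UL;
static const unsigned long exc_stack_end   = 0xffff880000014000UL;
static const unsigned long irq_stack_end   = 0xffff880000200000UL;

static enum stack_type classify(unsigned long addr)
{
	/* Task stack: same THREAD_SIZE-aligned region as the task's stack page. */
	if ((addr & ~(THREAD_SIZE - 1)) == task_stack_page)
		return STACK_IS_NORMAL;

	/* Exception stack: within EXCEPTION_STKSZ below its recorded end. */
	if (addr >= exc_stack_end - EXCEPTION_STKSZ && addr < exc_stack_end)
		return STACK_IS_EXCEPTION;

	/* IRQ stack: within the per-CPU IRQ stack range. */
	if (addr >= irq_stack_end - IRQ_STACK_SIZE && addr < irq_stack_end)
		return STACK_IS_IRQ;

	return STACK_IS_UNKNOWN;
}

static const char *name(enum stack_type t)
{
	switch (t) {
	case STACK_IS_NORMAL:    return "task stack";
	case STACK_IS_EXCEPTION: return "exception stack";
	case STACK_IS_IRQ:       return "IRQ stack";
	default:                 return "unknown";
	}
}

int main(void)
{
	unsigned long probes[] = {
		0xffff880000100f00UL,	/* on the task stack */
		0xffff880000013800UL,	/* on the exception stack */
		0xffff8800001ff000UL,	/* on the IRQ stack */
		0xffffffff81000000UL,	/* none of the above */
	};
	unsigned i;

	for (i = 0; i < sizeof(probes) / sizeof(probes[0]); i++)
		printf("%#lx -> %s\n", probes[i], name(classify(probes[i])));
	return 0;
}

As in the kernel code, the "normal" case ends the walk, while the exception and IRQ cases tell dump_trace() which linked stack to follow next via stack_end[-2] or stack_end[-1].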