v6.13.7 (arch/x86/kernel/dumpstack_64.c)
// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/sched/debug.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/kexec.h>
#include <linux/sysfs.h>
#include <linux/bug.h>
#include <linux/nmi.h>

#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>

static const char * const exception_stack_names[] = {
		[ ESTACK_DF	]	= "#DF",
		[ ESTACK_NMI	]	= "NMI",
		[ ESTACK_DB	]	= "#DB",
		[ ESTACK_MCE	]	= "#MC",
		[ ESTACK_VC	]	= "#VC",
		[ ESTACK_VC2	]	= "#VC2",
};

const char *stack_type_name(enum stack_type type)
{
	BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);

	if (type == STACK_TYPE_TASK)
		return "TASK";

	if (type == STACK_TYPE_IRQ)
		return "IRQ";

	if (type == STACK_TYPE_SOFTIRQ)
		return "SOFTIRQ";

	if (type == STACK_TYPE_ENTRY) {
		/*
		 * On 64-bit, we have a generic entry stack that we
		 * use for all the kernel entry points, including
		 * SYSENTER.
		 */
		return "ENTRY_TRAMPOLINE";
	}

	if (type >= STACK_TYPE_EXCEPTION && type <= STACK_TYPE_EXCEPTION_LAST)
		return exception_stack_names[type - STACK_TYPE_EXCEPTION];

	return NULL;
}
/**
 * struct estack_pages - Page descriptor for exception stacks
 * @offs:	Offset from the start of the exception stack area
 * @size:	Size of the exception stack
 * @type:	Type to store in the stack_info struct
 */
struct estack_pages {
	u32	offs;
	u16	size;
	u16	type;
};

#define EPAGERANGE(st)							\
	[PFN_DOWN(CEA_ESTACK_OFFS(st)) ...				\
	 PFN_DOWN(CEA_ESTACK_OFFS(st) + CEA_ESTACK_SIZE(st) - 1)] = {	\
		.offs	= CEA_ESTACK_OFFS(st),				\
		.size	= CEA_ESTACK_SIZE(st),				\
		.type	= STACK_TYPE_EXCEPTION + ESTACK_ ##st, }

/*
 * Array of exception stack page descriptors. If the stack is larger than
 * PAGE_SIZE, all pages covering a particular stack will have the same
 * info. The guard pages including the not mapped DB2 stack are zeroed
 * out.
 */
static const
struct estack_pages estack_pages[CEA_ESTACK_PAGES] ____cacheline_aligned = {
	EPAGERANGE(DF),
	EPAGERANGE(NMI),
	EPAGERANGE(DB),
	EPAGERANGE(MCE),
	EPAGERANGE(VC),
	EPAGERANGE(VC2),
};
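The EPAGERANGE trick above relies on GCC's designated range initializers ([first ... last] = value): every page spanned by one stack gets an identical descriptor at build time, and slots that no range touches stay zero, which is exactly how guard pages are encoded. Here is a minimal standalone sketch of the same technique, with invented sizes rather than the kernel's real cpu_entry_area layout (build with gcc or clang; range designators are a GNU extension):

/* Sketch only: illustrates range designators with invented sizes,
 * not the kernel's real exception-stack layout.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define AREA_SIZE	0x8000		/* pretend 32 KiB stack area */
#define STK_OFFS	0x2000		/* one 8 KiB stack at offset 8 KiB */
#define STK_SIZE	0x2000

struct desc { unsigned int offs; unsigned short size; };

static const struct desc pages[PFN_DOWN(AREA_SIZE)] = {
	[PFN_DOWN(STK_OFFS) ... PFN_DOWN(STK_OFFS + STK_SIZE - 1)] = {
		.offs = STK_OFFS, .size = STK_SIZE,
	},
	/* all other slots remain zero: .size == 0 marks a guard page */
};

int main(void)
{
	for (unsigned int i = 0; i < PFN_DOWN(AREA_SIZE); i++)
		printf("page %u: offs=%#x size=%#x\n",
		       i, pages[i].offs, pages[i].size);
	return 0;
}

A zeroed .size plays the same role here as the !ep->size guard-page check in in_exception_stack() below.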
static __always_inline bool in_exception_stack(unsigned long *stack, struct stack_info *info)
{
	unsigned long begin, end, stk = (unsigned long)stack;
	const struct estack_pages *ep;
	struct pt_regs *regs;
	unsigned int k;

	BUILD_BUG_ON(N_EXCEPTION_STACKS != 6);

	begin = (unsigned long)__this_cpu_read(cea_exception_stacks);
	/*
	 * Handle the case where a stack trace is collected _before_
	 * cea_exception_stacks has been initialized.
	 */
	if (!begin)
		return false;

	end = begin + sizeof(struct cea_exception_stacks);
	/* Bail if @stack is outside the exception stack area. */
	if (stk < begin || stk >= end)
		return false;

	/* Calc page offset from start of exception stacks */
	k = (stk - begin) >> PAGE_SHIFT;
	/* Lookup the page descriptor */
	ep = &estack_pages[k];
	/* Guard page? */
	if (!ep->size)
		return false;

	begin += (unsigned long)ep->offs;
	end = begin + (unsigned long)ep->size;
	regs = (struct pt_regs *)end - 1;

	info->type	= ep->type;
	info->begin	= (unsigned long *)begin;
	info->end	= (unsigned long *)end;
	info->next_sp	= (unsigned long *)regs->sp;
	return true;
}
static __always_inline bool in_irq_stack(unsigned long *stack, struct stack_info *info)
{
	unsigned long *end = (unsigned long *)this_cpu_read(pcpu_hot.hardirq_stack_ptr);
	unsigned long *begin;

	/*
	 * @end points directly to the topmost stack entry to avoid a -8
	 * adjustment in the stack switch hotpath. Adjust it back before
	 * calculating @begin.
	 */
	end++;
	begin = end - (IRQ_STACK_SIZE / sizeof(long));

	/*
	 * Due to the switching logic RSP can never be == @end because the
	 * final operation is 'popq %rsp' which means after that RSP points
	 * to the original stack and not to @end.
	 */
	if (stack < begin || stack >= end)
		return false;

	info->type	= STACK_TYPE_IRQ;
	info->begin	= begin;
	info->end	= end;

	/*
	 * The next stack pointer is stored at the top of the irq stack
	 * before switching to the irq stack. Actual stack entries are all
	 * below that.
	 */
	info->next_sp = (unsigned long *)*(end - 1);

	return true;
}
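The *(end - 1) read in in_irq_stack() pairs with the interrupt entry code, which stores the interrupted stack pointer in the IRQ stack's top slot before switching to it. The following is a toy userspace model of that hand-off, with an invented stack size and plain C standing in for the real assembly:

/* Toy model only: the real hand-off happens in asm during interrupt
 * entry; the size and values here are invented.
 */
#include <stdio.h>

#define IRQ_STACK_WORDS	16	/* made-up stack size in longs */

int main(void)
{
	unsigned long irq_stack[IRQ_STACK_WORDS];
	unsigned long *end = &irq_stack[IRQ_STACK_WORDS];
	unsigned long old_sp = 0x1234;	/* stand-in for the task stack RSP */

	/* entry code: save the old stack pointer in the top slot */
	end[-1] = old_sp;

	/* unwinder: recover it, exactly like info->next_sp above */
	printf("next_sp = %#lx\n", *(end - 1));
	return 0;
}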
bool noinstr get_stack_info_noinstr(unsigned long *stack, struct task_struct *task,
				    struct stack_info *info)
{
	if (in_task_stack(stack, task, info))
		return true;

	if (task != current)
		return false;

	if (in_exception_stack(stack, info))
		return true;

	if (in_irq_stack(stack, info))
		return true;

	if (in_entry_stack(stack, info))
		return true;

	return false;
}
int get_stack_info(unsigned long *stack, struct task_struct *task,
		   struct stack_info *info, unsigned long *visit_mask)
{
	task = task ? : current;

	if (!stack)
		goto unknown;

	if (!get_stack_info_noinstr(stack, task, info))
		goto unknown;

	/*
	 * Make sure we don't iterate through any given stack more than once.
	 * If it comes up a second time then there's something wrong going on:
	 * just break out and report an unknown stack type.
	 */
	if (visit_mask) {
		if (*visit_mask & (1UL << info->type)) {
			if (task == current)
				printk_deferred_once(KERN_WARNING "WARNING: stack recursion on stack type %d\n", info->type);
			goto unknown;
		}
		*visit_mask |= 1UL << info->type;
	}

	return 0;

unknown:
	info->type = STACK_TYPE_UNKNOWN;
	return -EINVAL;
}
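
The visit_mask bookkeeping is a general pattern: one bit per stack type, and a repeat visit means the walk is looping over corrupted stacks. A compact sketch of just that pattern, as a hypothetical standalone helper:

/* Sketch of the visit-once bitmask pattern used by get_stack_info().
 * A caller keeps one mask per unwind:
 *	unsigned long mask = 0;
 *	while (walking) { if (!visit_once(&mask, type)) break; ... }
 */
#include <stdbool.h>

static bool visit_once(unsigned long *visit_mask, int type)
{
	if (*visit_mask & (1UL << type))
		return false;	/* seen before: the stack walk is looping */
	*visit_mask |= 1UL << type;
	return true;
}
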
v4.6 (arch/x86/kernel/dumpstack_64.c)
 
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/kexec.h>
#include <linux/sysfs.h>
#include <linux/bug.h>
#include <linux/nmi.h>

#include <asm/stacktrace.h>


#define N_EXCEPTION_STACKS_END \
		(N_EXCEPTION_STACKS + DEBUG_STKSZ/EXCEPTION_STKSZ - 2)

static char x86_stack_ids[][8] = {
		[ DEBUG_STACK-1			]	= "#DB",
		[ NMI_STACK-1			]	= "NMI",
		[ DOUBLEFAULT_STACK-1		]	= "#DF",
		[ MCE_STACK-1			]	= "#MC",
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		[ N_EXCEPTION_STACKS ...
		  N_EXCEPTION_STACKS_END	]	= "#DB[?]"
#endif
};
static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
					 unsigned *usedp, char **idp)
{
	unsigned k;

	/*
	 * Iterate over all exception stacks, and figure out whether
	 * 'stack' is in one of them:
	 */
	for (k = 0; k < N_EXCEPTION_STACKS; k++) {
		unsigned long end = per_cpu(orig_ist, cpu).ist[k];
		/*
		 * Is 'stack' above this exception frame's end?
		 * If yes then skip to the next frame.
		 */
		if (stack >= end)
			continue;
		/*
		 * Is 'stack' above this exception frame's start address?
		 * If yes then we found the right frame.
		 */
		if (stack >= end - EXCEPTION_STKSZ) {
			/*
			 * Make sure we only iterate through an exception
			 * stack once. If it comes up for the second time
			 * then there's something wrong going on - just
			 * break out and return NULL:
			 */
			if (*usedp & (1U << k))
				break;
			*usedp |= 1U << k;
			*idp = x86_stack_ids[k];
			return (unsigned long *)end;
		}
		/*
		 * If this is a debug stack, and if it has a larger size than
		 * the usual exception stacks, then 'stack' might still
		 * be within the lower portion of the debug stack:
		 */
#if DEBUG_STKSZ > EXCEPTION_STKSZ
		if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
			unsigned j = N_EXCEPTION_STACKS - 1;

			/*
			 * Black magic. A large debug stack is composed of
			 * multiple exception stack entries, which we
			 * iterate through now. Don't look:
			 */
			do {
				++j;
				end -= EXCEPTION_STKSZ;
				x86_stack_ids[j][4] = '1' +
						(j - N_EXCEPTION_STACKS);
			} while (stack < end - EXCEPTION_STKSZ);
			if (*usedp & (1U << j))
				break;
			*usedp |= 1U << j;
			*idp = x86_stack_ids[j];
			return (unsigned long *)end;
		}
#endif
	}
	return NULL;
}
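
The "black magic" loop is easier to follow with concrete numbers. Below is a standalone model, assuming purely for illustration that DEBUG_STKSZ is twice EXCEPTION_STKSZ and that the debug stack tops out at 0x10000; it reproduces the chunk numbering that rewrites "#DB[?]" in place:

/* Model only: sizes and addresses are invented for the example. */
#include <stdio.h>

#define EXCEPTION_STKSZ		4096
#define DEBUG_STKSZ		(2 * EXCEPTION_STKSZ)
#define N_EXCEPTION_STACKS	4

int main(void)
{
	char id[8] = "#DB[?]";			/* mirrors x86_stack_ids */
	unsigned long end = 0x10000;		/* top of the debug stack */
	unsigned long stack = end - DEBUG_STKSZ + 8; /* in the lowest chunk */
	unsigned j = N_EXCEPTION_STACKS - 1;

	do {
		++j;
		end -= EXCEPTION_STKSZ;
		id[4] = '1' + (j - N_EXCEPTION_STACKS);
	} while (stack < end - EXCEPTION_STKSZ);

	/* prints: #DB[1] covers [0xe000, 0xf000) */
	printf("%s covers [%#lx, %#lx)\n", id, end - EXCEPTION_STKSZ, end);
	return 0;
}
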

static inline int
in_irq_stack(unsigned long *stack, unsigned long *irq_stack,
	     unsigned long *irq_stack_end)
{
	return (stack >= irq_stack && stack < irq_stack_end);
}

static const unsigned long irq_stack_size =
	(IRQ_STACK_SIZE - 64) / sizeof(unsigned long);

enum stack_type {
	STACK_IS_UNKNOWN,
	STACK_IS_NORMAL,
	STACK_IS_EXCEPTION,
	STACK_IS_IRQ,
};

static enum stack_type
analyze_stack(int cpu, struct task_struct *task, unsigned long *stack,
	      unsigned long **stack_end, unsigned long *irq_stack,
	      unsigned *used, char **id)
{
	unsigned long addr;

	addr = ((unsigned long)stack & (~(THREAD_SIZE - 1)));
	if ((unsigned long)task_stack_page(task) == addr)
		return STACK_IS_NORMAL;

	*stack_end = in_exception_stack(cpu, (unsigned long)stack,
					used, id);
	if (*stack_end)
		return STACK_IS_EXCEPTION;

	if (!irq_stack)
		return STACK_IS_NORMAL;

	*stack_end = irq_stack;
	irq_stack = irq_stack - irq_stack_size;

	if (in_irq_stack(stack, irq_stack, *stack_end))
		return STACK_IS_IRQ;

	return STACK_IS_UNKNOWN;
}
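analyze_stack() decides that a pointer lies on the task stack by masking with ~(THREAD_SIZE - 1): thread stacks are THREAD_SIZE-aligned, so clearing the low bits of any address inside one yields its base. A quick self-contained check of that property (the THREAD_SIZE value is invented for the example):

/* Sketch: why masking recovers the stack base. Requires THREAD_SIZE
 * to be a power of two, as it is in the kernel.
 */
#include <stdio.h>

#define THREAD_SIZE	16384UL		/* invented for the example */

int main(void)
{
	unsigned long base = 0xffff880012340000UL; /* aligned stack base */
	unsigned long sp   = base + 0x1a8;	   /* somewhere inside */

	printf("base recovered: %d\n",
	       (sp & ~(THREAD_SIZE - 1)) == base);
	return 0;
}
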

/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */

void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
{
	const unsigned cpu = get_cpu();
	struct thread_info *tinfo;
	unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
	unsigned long dummy;
	unsigned used = 0;
	int graph = 0;
	int done = 0;

	if (!task)
		task = current;

	if (!stack) {
		if (regs)
			stack = (unsigned long *)regs->sp;
		else if (task != current)
			stack = (unsigned long *)task->thread.sp;
		else
			stack = &dummy;
	}

	if (!bp)
		bp = stack_frame(task, regs);
	/*
	 * Print function call entries in all stacks, starting at the
	 * current stack address. If the stacks consist of nested
	 * exceptions, walk each of them in turn:
	 */
	tinfo = task_thread_info(task);
	while (!done) {
		unsigned long *stack_end;
		enum stack_type stype;
		char *id;

		stype = analyze_stack(cpu, task, stack, &stack_end,
				      irq_stack, &used, &id);

		/* Default finish unless specified to continue */
		done = 1;

		switch (stype) {

		/* Break out early if we are on the thread stack */
		case STACK_IS_NORMAL:
			break;

		case STACK_IS_EXCEPTION:

			if (ops->stack(data, id) < 0)
				break;

			bp = ops->walk_stack(tinfo, stack, bp, ops,
					     data, stack_end, &graph);
			ops->stack(data, "<EOE>");
			/*
			 * We link to the next stack via the
			 * second-to-last pointer (index -2 to end) in the
			 * exception stack:
			 */
			stack = (unsigned long *) stack_end[-2];
			done = 0;
			break;

		case STACK_IS_IRQ:

			if (ops->stack(data, "IRQ") < 0)
				break;
			bp = ops->walk_stack(tinfo, stack, bp,
				     ops, data, stack_end, &graph);
			/*
			 * We link to the next stack (which would be
			 * the process stack normally) via the last
			 * pointer (index -1 to end) in the IRQ stack:
			 */
			stack = (unsigned long *) (stack_end[-1]);
			irq_stack = NULL;
			ops->stack(data, "EOI");
			done = 0;
			break;

		case STACK_IS_UNKNOWN:
			ops->stack(data, "UNK");
			break;
		}
	}

	/*
	 * This handles the process stack:
	 */
	bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
	put_cpu();
}
EXPORT_SYMBOL(dump_trace);

void
show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
		   unsigned long *sp, unsigned long bp, char *log_lvl)
{
	unsigned long *irq_stack_end;
	unsigned long *irq_stack;
	unsigned long *stack;
	int cpu;
	int i;

	preempt_disable();
	cpu = smp_processor_id();

	irq_stack_end	= (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
	irq_stack	= (unsigned long *)(per_cpu(irq_stack_ptr, cpu) - IRQ_STACK_SIZE);

	/*
	 * Debugging aid: "show_stack(NULL, NULL);" prints the
	 * back trace for this cpu:
	 */
	if (sp == NULL) {
		if (task)
			sp = (unsigned long *)task->thread.sp;
		else
			sp = (unsigned long *)&sp;
	}

	stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (stack >= irq_stack && stack <= irq_stack_end) {
			if (stack == irq_stack_end) {
				stack = (unsigned long *) (irq_stack_end[-1]);
				pr_cont(" <EOI> ");
			}
		} else {
			if (kstack_end(stack))
				break;
		}
		if ((i % STACKSLOTS_PER_LINE) == 0) {
			if (i != 0)
				pr_cont("\n");
			printk("%s %016lx", log_lvl, *stack++);
		} else
			pr_cont(" %016lx", *stack++);
		touch_nmi_watchdog();
	}
	preempt_enable();

	pr_cont("\n");
	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}

void show_regs(struct pt_regs *regs)
{
	int i;
	unsigned long sp;

	sp = regs->sp;
	show_regs_print_info(KERN_DEFAULT);
	__show_regs(regs, 1);

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault.
	 */
	if (!user_mode(regs)) {
		unsigned int code_prologue = code_bytes * 43 / 64;
		unsigned int code_len = code_bytes;
		unsigned char c;
		u8 *ip;

		printk(KERN_DEFAULT "Stack:\n");
		show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
				   0, KERN_DEFAULT);

		printk(KERN_DEFAULT "Code: ");

		ip = (u8 *)regs->ip - code_prologue;
		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
			/* try starting at IP */
			ip = (u8 *)regs->ip;
			code_len = code_len - code_prologue + 1;
		}
		for (i = 0; i < code_len; i++, ip++) {
			if (ip < (u8 *)PAGE_OFFSET ||
					probe_kernel_address(ip, c)) {
				pr_cont(" Bad RIP value.");
				break;
			}
			if (ip == (u8 *)regs->ip)
				pr_cont("<%02x> ", c);
			else
				pr_cont("%02x ", c);
		}
	}
	pr_cont("\n");
}

int is_valid_bugaddr(unsigned long ip)
{
	unsigned short ud2;

	if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
		return 0;

	return ud2 == 0x0b0f;
}
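
The 0x0b0f comparison works because UD2 encodes as the byte sequence 0x0f 0x0b, which a little-endian 16-bit load reads back as 0x0b0f. A freestanding sketch of the same check (hypothetical helper, userspace):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* True if the two bytes at ip encode UD2 (0x0f 0x0b). */
static bool is_ud2(const uint8_t *ip)
{
	uint16_t insn;

	memcpy(&insn, ip, sizeof(insn));  /* safe unaligned read */
	return insn == 0x0b0f;		  /* little-endian view of 0f 0b */
}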