v6.8: arch/riscv/kernel/traps.c
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2012 Regents of the University of California
  4 */
  5
  6#include <linux/cpu.h>
  7#include <linux/kernel.h>
  8#include <linux/init.h>
  9#include <linux/sched.h>
 10#include <linux/sched/debug.h>
 11#include <linux/sched/signal.h>
 12#include <linux/signal.h>
 13#include <linux/kdebug.h>
 14#include <linux/uaccess.h>
 15#include <linux/kprobes.h>
 16#include <linux/uprobes.h>
 17#include <asm/uprobes.h>
 18#include <linux/mm.h>
 19#include <linux/module.h>
 20#include <linux/irq.h>
 21#include <linux/kexec.h>
 22#include <linux/entry-common.h>
 23
 24#include <asm/asm-prototypes.h>
 25#include <asm/bug.h>
 26#include <asm/cfi.h>
 27#include <asm/csr.h>
 28#include <asm/processor.h>
 29#include <asm/ptrace.h>
 30#include <asm/syscall.h>
 31#include <asm/thread_info.h>
 32#include <asm/vector.h>
 33#include <asm/irq_stack.h>
 34
 35int show_unhandled_signals = 1;
 36
 37static DEFINE_SPINLOCK(die_lock);
 38
 39static int copy_code(struct pt_regs *regs, u16 *val, const u16 *insns)
 40{
 41	const void __user *uaddr = (__force const void __user *)insns;
 42
 43	if (!user_mode(regs))
 44		return get_kernel_nofault(*val, insns);
 45
 46	/* The user space code from other tasks cannot be accessed. */
 47	if (regs != task_pt_regs(current))
 48		return -EPERM;
 49
 50	return copy_from_user_nofault(val, uaddr, sizeof(*val));
 51}
 52
 53static void dump_instr(const char *loglvl, struct pt_regs *regs)
 54{
 55	char str[sizeof("0000 ") * 12 + 2 + 1], *p = str;
 56	const u16 *insns = (u16 *)instruction_pointer(regs);
 57	long bad;
 58	u16 val;
 59	int i;
 60
 61	for (i = -10; i < 2; i++) {
 62		bad = copy_code(regs, &val, &insns[i]);
 63		if (!bad) {
 64			p += sprintf(p, i == 0 ? "(%04hx) " : "%04hx ", val);
 65		} else {
 66			printk("%sCode: Unable to access instruction at 0x%px.\n",
 67			       loglvl, &insns[i]);
 68			return;
 69		}
 70	}
 71	printk("%sCode: %s\n", loglvl, str);
 72}
 73
 74void die(struct pt_regs *regs, const char *str)
 75{
 76	static int die_counter;
 77	int ret;
 78	long cause;
 79	unsigned long flags;
 80
 81	oops_enter();
 82
 83	spin_lock_irqsave(&die_lock, flags);
 84	console_verbose();
 85	bust_spinlocks(1);
 86
 87	pr_emerg("%s [#%d]\n", str, ++die_counter);
 88	print_modules();
 89	if (regs) {
 90		show_regs(regs);
 91		dump_instr(KERN_EMERG, regs);
 92	}
 93
 94	cause = regs ? regs->cause : -1;
 95	ret = notify_die(DIE_OOPS, str, regs, 0, cause, SIGSEGV);
 96
 97	if (kexec_should_crash(current))
 98		crash_kexec(regs);
 99
100	bust_spinlocks(0);
101	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
102	spin_unlock_irqrestore(&die_lock, flags);
103	oops_exit();
104
105	if (in_interrupt())
106		panic("Fatal exception in interrupt");
107	if (panic_on_oops)
108		panic("Fatal exception");
109	if (ret != NOTIFY_STOP)
110		make_task_dead(SIGSEGV);
111}
112
113void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
114{
115	struct task_struct *tsk = current;
116
117	if (show_unhandled_signals && unhandled_signal(tsk, signo)
118	    && printk_ratelimit()) {
119		pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x" REG_FMT,
120			tsk->comm, task_pid_nr(tsk), signo, code, addr);
121		print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
122		pr_cont("\n");
123		__show_regs(regs);
124		dump_instr(KERN_EMERG, regs);
125	}
126
127	force_sig_fault(signo, code, (void __user *)addr);
128}
129
130static void do_trap_error(struct pt_regs *regs, int signo, int code,
131	unsigned long addr, const char *str)
132{
133	current->thread.bad_cause = regs->cause;
134
135	if (user_mode(regs)) {
136		do_trap(regs, signo, code, addr);
137	} else {
138		if (!fixup_exception(regs))
139			die(regs, str);
140	}
141}
142
143#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_RISCV_ALTERNATIVE)
144#define __trap_section __noinstr_section(".xip.traps")
145#else
146#define __trap_section noinstr
147#endif
148#define DO_ERROR_INFO(name, signo, code, str)					\
149asmlinkage __visible __trap_section void name(struct pt_regs *regs)		\
150{										\
151	if (user_mode(regs)) {							\
152		irqentry_enter_from_user_mode(regs);				\
153		do_trap_error(regs, signo, code, regs->epc, "Oops - " str);	\
154		irqentry_exit_to_user_mode(regs);				\
155	} else {								\
156		irqentry_state_t state = irqentry_nmi_enter(regs);		\
157		do_trap_error(regs, signo, code, regs->epc, "Oops - " str);	\
158		irqentry_nmi_exit(regs, state);					\
159	}									\
160}
161
162DO_ERROR_INFO(do_trap_unknown,
163	SIGILL, ILL_ILLTRP, "unknown exception");
164DO_ERROR_INFO(do_trap_insn_misaligned,
165	SIGBUS, BUS_ADRALN, "instruction address misaligned");
166DO_ERROR_INFO(do_trap_insn_fault,
167	SIGSEGV, SEGV_ACCERR, "instruction access fault");
168
169asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *regs)
170{
171	bool handled;
172
173	if (user_mode(regs)) {
174		irqentry_enter_from_user_mode(regs);
175
176		local_irq_enable();
177
178		handled = riscv_v_first_use_handler(regs);
179
180		local_irq_disable();
181
182		if (!handled)
183			do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc,
184				      "Oops - illegal instruction");
185
186		irqentry_exit_to_user_mode(regs);
187	} else {
188		irqentry_state_t state = irqentry_nmi_enter(regs);
189
190		do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc,
191			      "Oops - illegal instruction");
192
193		irqentry_nmi_exit(regs, state);
194	}
195}
196
197DO_ERROR_INFO(do_trap_load_fault,
198	SIGSEGV, SEGV_ACCERR, "load access fault");
199
200asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
201{
202	if (user_mode(regs)) {
203		irqentry_enter_from_user_mode(regs);
204
205		if (handle_misaligned_load(regs))
206			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
207			      "Oops - load address misaligned");
208
209		irqentry_exit_to_user_mode(regs);
210	} else {
211		irqentry_state_t state = irqentry_nmi_enter(regs);
212
213		if (handle_misaligned_load(regs))
214			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
215			      "Oops - load address misaligned");
216
217		irqentry_nmi_exit(regs, state);
218	}
219}
220
221asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs *regs)
222{
223	if (user_mode(regs)) {
224		irqentry_enter_from_user_mode(regs);
225
226		if (handle_misaligned_store(regs))
227			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
228				"Oops - store (or AMO) address misaligned");
229
230		irqentry_exit_to_user_mode(regs);
231	} else {
232		irqentry_state_t state = irqentry_nmi_enter(regs);
233
234		if (handle_misaligned_store(regs))
235			do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
236				"Oops - store (or AMO) address misaligned");
237
238		irqentry_nmi_exit(regs, state);
239	}
240}
241DO_ERROR_INFO(do_trap_store_fault,
242	SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
243DO_ERROR_INFO(do_trap_ecall_s,
244	SIGILL, ILL_ILLTRP, "environment call from S-mode");
245DO_ERROR_INFO(do_trap_ecall_m,
246	SIGILL, ILL_ILLTRP, "environment call from M-mode");
247
248static inline unsigned long get_break_insn_length(unsigned long pc)
249{
250	bug_insn_t insn;
251
252	if (get_kernel_nofault(insn, (bug_insn_t *)pc))
253		return 0;
254
255	return GET_INSN_LENGTH(insn);
256}
257
258static bool probe_single_step_handler(struct pt_regs *regs)
259{
260	bool user = user_mode(regs);
261
262	return user ? uprobe_single_step_handler(regs) : kprobe_single_step_handler(regs);
263}
264
265static bool probe_breakpoint_handler(struct pt_regs *regs)
266{
267	bool user = user_mode(regs);
268
269	return user ? uprobe_breakpoint_handler(regs) : kprobe_breakpoint_handler(regs);
270}
271
272void handle_break(struct pt_regs *regs)
273{
274	if (probe_single_step_handler(regs))
275		return;
276
277	if (probe_breakpoint_handler(regs))
278		return;
279
280	current->thread.bad_cause = regs->cause;
281
282	if (user_mode(regs))
283		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc);
284#ifdef CONFIG_KGDB
285	else if (notify_die(DIE_TRAP, "EBREAK", regs, 0, regs->cause, SIGTRAP)
286								== NOTIFY_STOP)
287		return;
288#endif
289	else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN ||
290		 handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN)
291		regs->epc += get_break_insn_length(regs->epc);
292	else
293		die(regs, "Kernel BUG");
294}
295
296asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
297{
298	if (user_mode(regs)) {
299		irqentry_enter_from_user_mode(regs);
300
301		handle_break(regs);
302
303		irqentry_exit_to_user_mode(regs);
304	} else {
305		irqentry_state_t state = irqentry_nmi_enter(regs);
306
307		handle_break(regs);
308
309		irqentry_nmi_exit(regs, state);
310	}
311}
312
313asmlinkage __visible __trap_section void do_trap_ecall_u(struct pt_regs *regs)
314{
315	if (user_mode(regs)) {
316		long syscall = regs->a7;
317
318		regs->epc += 4;
319		regs->orig_a0 = regs->a0;
320
321		riscv_v_vstate_discard(regs);
322
323		syscall = syscall_enter_from_user_mode(regs, syscall);
324
325		if (syscall >= 0 && syscall < NR_syscalls)
326			syscall_handler(regs, syscall);
327		else if (syscall != -1)
328			regs->a0 = -ENOSYS;
329
330		syscall_exit_to_user_mode(regs);
331	} else {
332		irqentry_state_t state = irqentry_nmi_enter(regs);
333
334		do_trap_error(regs, SIGILL, ILL_ILLTRP, regs->epc,
335			"Oops - environment call from U-mode");
336
337		irqentry_nmi_exit(regs, state);
338	}
339
340}
341
342#ifdef CONFIG_MMU
343asmlinkage __visible noinstr void do_page_fault(struct pt_regs *regs)
344{
345	irqentry_state_t state = irqentry_enter(regs);
346
347	handle_page_fault(regs);
348
349	local_irq_disable();
350
351	irqentry_exit(regs, state);
352}
353#endif
354
355static void noinstr handle_riscv_irq(struct pt_regs *regs)
356{
357	struct pt_regs *old_regs;
358
359	irq_enter_rcu();
360	old_regs = set_irq_regs(regs);
361	handle_arch_irq(regs);
362	set_irq_regs(old_regs);
363	irq_exit_rcu();
364}
365
366asmlinkage void noinstr do_irq(struct pt_regs *regs)
367{
368	irqentry_state_t state = irqentry_enter(regs);
369
370	if (IS_ENABLED(CONFIG_IRQ_STACKS) && on_thread_stack())
371		call_on_irq_stack(regs, handle_riscv_irq);
372	else
373		handle_riscv_irq(regs);
374
375	irqentry_exit(regs, state);
376}
377
378#ifdef CONFIG_GENERIC_BUG
379int is_valid_bugaddr(unsigned long pc)
380{
381	bug_insn_t insn;
382
383	if (pc < VMALLOC_START)
384		return 0;
385	if (get_kernel_nofault(insn, (bug_insn_t *)pc))
386		return 0;
387	if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
388		return (insn == __BUG_INSN_32);
389	else
390		return ((insn & __COMPRESSED_INSN_MASK) == __BUG_INSN_16);
391}
392#endif /* CONFIG_GENERIC_BUG */
393
394#ifdef CONFIG_VMAP_STACK
395DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
396		overflow_stack)__aligned(16);
397
398asmlinkage void handle_bad_stack(struct pt_regs *regs)
399{
400	unsigned long tsk_stk = (unsigned long)current->stack;
401	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
402
403	console_verbose();
404
405	pr_emerg("Insufficient stack space to handle exception!\n");
406	pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
407			tsk_stk, tsk_stk + THREAD_SIZE);
408	pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
409			ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);
410
411	__show_regs(regs);
412	panic("Kernel stack overflow");
413
414	for (;;)
415		wait_for_interrupt();
416}
417#endif
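
As a quick illustration of the user-visible contract implemented by do_trap_insn_illegal() and do_trap() in the v6.8 listing above (SIGILL with si_code ILL_ILLOPC and the faulting pc in si_addr), here is a minimal user-space sketch. It is not part of the kernel source and assumes a RISC-V Linux target built with GCC or Clang:

/*
 * Hypothetical demo, not kernel code: provoke an illegal-instruction trap
 * and inspect the signal that do_trap() delivers for it.
 */
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf env;

static void sigill_handler(int sig, siginfo_t *info, void *ctx)
{
	/* si_addr carries the trapping pc (regs->epc in the kernel handler). */
	printf("SIGILL: si_code=%d si_addr=%p\n", info->si_code, info->si_addr);
	siglongjmp(env, 1);		/* do not re-execute the illegal instruction */
}

int main(void)
{
	struct sigaction sa = {
		.sa_sigaction	= sigill_handler,
		.sa_flags	= SA_SIGINFO,
	};

	sigaction(SIGILL, &sa, NULL);

	if (!sigsetjmp(env, 1))
		asm volatile(".word 0");	/* the all-zero encoding is a defined illegal instruction */

	puts("recovered after the illegal instruction");
	return 0;
}

Running the same program without the handler installed instead exercises the unhandled-signal path in do_trap(), which rate-limits an "unhandled signal" line to the kernel log before the process is killed.
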
v6.2: arch/riscv/kernel/traps.c
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2012 Regents of the University of California
  4 */
  5
  6#include <linux/cpu.h>
  7#include <linux/kernel.h>
  8#include <linux/init.h>
  9#include <linux/sched.h>
 10#include <linux/sched/debug.h>
 11#include <linux/sched/signal.h>
 12#include <linux/signal.h>
 13#include <linux/kdebug.h>
 14#include <linux/uaccess.h>
 15#include <linux/kprobes.h>
 16#include <linux/mm.h>
 17#include <linux/module.h>
 18#include <linux/irq.h>
 19#include <linux/kexec.h>
 20
 21#include <asm/asm-prototypes.h>
 22#include <asm/bug.h>
 23#include <asm/csr.h>
 24#include <asm/processor.h>
 25#include <asm/ptrace.h>
 26#include <asm/thread_info.h>
 27
 28int show_unhandled_signals = 1;
 29
 30static DEFINE_SPINLOCK(die_lock);
 31
 32void die(struct pt_regs *regs, const char *str)
 33{
 34	static int die_counter;
 35	int ret;
 36	long cause;
 37
 38	oops_enter();
 39
 40	spin_lock_irq(&die_lock);
 41	console_verbose();
 42	bust_spinlocks(1);
 43
 44	pr_emerg("%s [#%d]\n", str, ++die_counter);
 45	print_modules();
 46	if (regs)
 47		show_regs(regs);
 48
 49	cause = regs ? regs->cause : -1;
 50	ret = notify_die(DIE_OOPS, str, regs, 0, cause, SIGSEGV);
 51
 52	if (kexec_should_crash(current))
 53		crash_kexec(regs);
 54
 55	bust_spinlocks(0);
 56	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
 57	spin_unlock_irq(&die_lock);
 58	oops_exit();
 59
 60	if (in_interrupt())
 61		panic("Fatal exception in interrupt");
 62	if (panic_on_oops)
 63		panic("Fatal exception");
 64	if (ret != NOTIFY_STOP)
 65		make_task_dead(SIGSEGV);
 66}
 67
 68void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
 69{
 70	struct task_struct *tsk = current;
 71
 72	if (show_unhandled_signals && unhandled_signal(tsk, signo)
 73	    && printk_ratelimit()) {
 74		pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x" REG_FMT,
 75			tsk->comm, task_pid_nr(tsk), signo, code, addr);
 76		print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
 77		pr_cont("\n");
 78		__show_regs(regs);
 79	}
 80
 81	force_sig_fault(signo, code, (void __user *)addr);
 82}
 83
 84static void do_trap_error(struct pt_regs *regs, int signo, int code,
 85	unsigned long addr, const char *str)
 86{
 87	current->thread.bad_cause = regs->cause;
 88
 89	if (user_mode(regs)) {
 90		do_trap(regs, signo, code, addr);
 91	} else {
 92		if (!fixup_exception(regs))
 93			die(regs, str);
 94	}
 95}
 96
 97#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_RISCV_ALTERNATIVE)
 98#define __trap_section		__section(".xip.traps")
 99#else
100#define __trap_section
101#endif
102#define DO_ERROR_INFO(name, signo, code, str)				\
103asmlinkage __visible __trap_section void name(struct pt_regs *regs)	\
104{									\
105	do_trap_error(regs, signo, code, regs->epc, "Oops - " str);	\
106}
107
108DO_ERROR_INFO(do_trap_unknown,
109	SIGILL, ILL_ILLTRP, "unknown exception");
110DO_ERROR_INFO(do_trap_insn_misaligned,
111	SIGBUS, BUS_ADRALN, "instruction address misaligned");
112DO_ERROR_INFO(do_trap_insn_fault,
113	SIGSEGV, SEGV_ACCERR, "instruction access fault");
114DO_ERROR_INFO(do_trap_insn_illegal,
115	SIGILL, ILL_ILLOPC, "illegal instruction");
116DO_ERROR_INFO(do_trap_load_fault,
117	SIGSEGV, SEGV_ACCERR, "load access fault");
118#ifndef CONFIG_RISCV_M_MODE
119DO_ERROR_INFO(do_trap_load_misaligned,
120	SIGBUS, BUS_ADRALN, "Oops - load address misaligned");
121DO_ERROR_INFO(do_trap_store_misaligned,
122	SIGBUS, BUS_ADRALN, "Oops - store (or AMO) address misaligned");
123#else
124int handle_misaligned_load(struct pt_regs *regs);
125int handle_misaligned_store(struct pt_regs *regs);
126
127asmlinkage void __trap_section do_trap_load_misaligned(struct pt_regs *regs)
128{
129	if (!handle_misaligned_load(regs))
130		return;
131	do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
132		      "Oops - load address misaligned");
133}
134
135asmlinkage void __trap_section do_trap_store_misaligned(struct pt_regs *regs)
136{
137	if (!handle_misaligned_store(regs))
138		return;
139	do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
140		      "Oops - store (or AMO) address misaligned");
141}
142#endif
143DO_ERROR_INFO(do_trap_store_fault,
144	SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
145DO_ERROR_INFO(do_trap_ecall_u,
146	SIGILL, ILL_ILLTRP, "environment call from U-mode");
147DO_ERROR_INFO(do_trap_ecall_s,
148	SIGILL, ILL_ILLTRP, "environment call from S-mode");
149DO_ERROR_INFO(do_trap_ecall_m,
150	SIGILL, ILL_ILLTRP, "environment call from M-mode");
151
152static inline unsigned long get_break_insn_length(unsigned long pc)
153{
154	bug_insn_t insn;
155
156	if (get_kernel_nofault(insn, (bug_insn_t *)pc))
157		return 0;
158
159	return GET_INSN_LENGTH(insn);
160}
161
162asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
163{
164#ifdef CONFIG_KPROBES
165	if (kprobe_single_step_handler(regs))
166		return;
167
168	if (kprobe_breakpoint_handler(regs))
169		return;
170#endif
171#ifdef CONFIG_UPROBES
172	if (uprobe_single_step_handler(regs))
173		return;
174
175	if (uprobe_breakpoint_handler(regs))
176		return;
177#endif
178	current->thread.bad_cause = regs->cause;
179
180	if (user_mode(regs))
181		force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc);
182#ifdef CONFIG_KGDB
183	else if (notify_die(DIE_TRAP, "EBREAK", regs, 0, regs->cause, SIGTRAP)
184								== NOTIFY_STOP)
185		return;
186#endif
187	else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN)
188		regs->epc += get_break_insn_length(regs->epc);
189	else
190		die(regs, "Kernel BUG");
191}
192NOKPROBE_SYMBOL(do_trap_break);
193
194#ifdef CONFIG_GENERIC_BUG
195int is_valid_bugaddr(unsigned long pc)
196{
197	bug_insn_t insn;
198
199	if (pc < VMALLOC_START)
200		return 0;
201	if (get_kernel_nofault(insn, (bug_insn_t *)pc))
202		return 0;
203	if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
204		return (insn == __BUG_INSN_32);
205	else
206		return ((insn & __COMPRESSED_INSN_MASK) == __BUG_INSN_16);
207}
208#endif /* CONFIG_GENERIC_BUG */
209
210#ifdef CONFIG_VMAP_STACK
211/*
212 * Extra stack space that allows us to provide panic messages when the kernel
213 * has overflowed its stack.
214 */
215static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
216		overflow_stack)__aligned(16);
217/*
218 * A temporary stack for use by handle_kernel_stack_overflow.  This is used so
219 * we can call into C code to get the per-hart overflow stack.  Usage of this
220 * stack must be protected by spin_shadow_stack.
221 */
222long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)] __aligned(16);
223
224/*
225 * A pseudo spinlock to protect the shadow stack from being used by multiple
226 * harts concurrently.  This isn't a real spinlock because the lock side must
227 * be taken without a valid stack and only a single register, it's only taken
228 * while in the process of panicing anyway so the performance and error
229 * checking a proper spinlock gives us doesn't matter.
230 */
231unsigned long spin_shadow_stack;
232
233asmlinkage unsigned long get_overflow_stack(void)
234{
235	return (unsigned long)this_cpu_ptr(overflow_stack) +
236		OVERFLOW_STACK_SIZE;
237}
238
239asmlinkage void handle_bad_stack(struct pt_regs *regs)
240{
241	unsigned long tsk_stk = (unsigned long)current->stack;
242	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
243
244	/*
245	 * We're done with the shadow stack by this point, as we're on the
246	 * overflow stack.  Tell any other concurrent overflowing harts that
247	 * they can proceed with panicing by releasing the pseudo-spinlock.
248	 *
249	 * This pairs with an amoswap.aq in handle_kernel_stack_overflow.
250	 */
251	smp_store_release(&spin_shadow_stack, 0);
252
253	console_verbose();
254
255	pr_emerg("Insufficient stack space to handle exception!\n");
256	pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
257			tsk_stk, tsk_stk + THREAD_SIZE);
258	pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
259			ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);
260
261	__show_regs(regs);
262	panic("Kernel stack overflow");
263
264	for (;;)
265		wait_for_interrupt();
266}
267#endif
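
The spin_shadow_stack comments in the v6.2 listing describe a bare acquire/release pairing: the assembly entry path (handle_kernel_stack_overflow) takes the pseudo-lock with an amoswap.aq before it has a usable stack, and handle_bad_stack() drops it with smp_store_release() once it is running on the per-CPU overflow stack. The sketch below is only a conceptual user-space analogue of that pairing, written with GCC/Clang atomic builtins; the names are illustrative and none of this is kernel code:

/*
 * Conceptual sketch of the acquire/release discipline described above.
 * In the kernel the acquire side is hand-written assembly because it must
 * run without a valid stack and with only a single scratch register.
 */
static unsigned long pseudo_lock;	/* 0 = free, 1 = held */

static void pseudo_lock_acquire(void)
{
	/* spin until the exchange observes 0; ACQUIRE orders everything after it */
	while (__atomic_exchange_n(&pseudo_lock, 1, __ATOMIC_ACQUIRE))
		;
}

static void pseudo_lock_release(void)
{
	/* pairs with the acquire above, like smp_store_release() in handle_bad_stack() */
	__atomic_store_n(&pseudo_lock, 0, __ATOMIC_RELEASE);
}

int main(void)
{
	pseudo_lock_acquire();
	/* critical section: the shared resource is private to this thread here */
	pseudo_lock_release();
	return 0;
}

As the comment in the source notes, the real variant skips the performance and error checking of a proper spinlock because it is only ever taken while a hart is already in the process of panicking.
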