// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>
#include <linux/mm.h>

#include <asm/apic.h>
#include <asm/nospec-branch.h>

#ifdef CONFIG_DEBUG_STACKOVERFLOW

int sysctl_panic_on_stackoverflow __read_mostly;

/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
	long sp;

	__asm__ __volatile__("andl %%esp,%0" :
			     "=r" (sp) : "0" (THREAD_SIZE - 1));

	return sp < (sizeof(struct thread_info) + STACK_WARN);
}
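
/*
 * Worked example for the check above, assuming THREAD_SIZE == 8192:
 * with %esp == 0xc2345f00, the andl leaves sp == 0x1f00. Kernel stacks
 * are THREAD_SIZE-aligned and grow down, so sp is the number of bytes
 * still unused at the bottom of the stack; ~7.75KB here, comfortably
 * above sizeof(struct thread_info) + STACK_WARN, so no warning fires.
 */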

static void print_stack_overflow(void)
{
	printk(KERN_WARNING "low stack detected by irq handler\n");
	dump_stack();
	if (sysctl_panic_on_stackoverflow)
		panic("low stack detected by irq handler - check messages\n");
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

DEFINE_PER_CPU(struct irq_stack *, hardirq_stack);
DEFINE_PER_CPU(struct irq_stack *, softirq_stack);

static void call_on_stack(void *func, void *stack)
{
	asm volatile("xchgl %%ebx,%%esp \n"
		     CALL_NOSPEC
		     "movl %%ebx,%%esp \n"
		     : "=b" (stack)
		     : "0" (stack),
		       [thunk_target] "D"(func)
		     : "memory", "cc", "edx", "ecx", "eax");
}
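
/*
 * call_on_stack() switches %esp to the given stack, makes a
 * retpoline-safe indirect call (CALL_NOSPEC dispatches through %edi,
 * the thunk_target operand), and restores the original %esp afterwards.
 * Tying both the input and the output to %ebx forces the compiler to
 * keep the saved stack pointer live across the call. Roughly
 * equivalent pseudo-C, for illustration only (assumes a
 * void (*)(void) callee):
 *
 *	old_esp = esp;
 *	esp = stack;
 *	func();
 *	esp = old_esp;
 */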

static inline void *current_stack(void)
{
	return (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
}
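
/*
 * Every kernel stack is THREAD_SIZE bytes and THREAD_SIZE-aligned, so
 * masking off the low bits of %esp yields the base (lowest address) of
 * whichever stack we are currently running on, be it the task stack or
 * one of the per-cpu irq stacks.
 */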

static inline int execute_on_irq_stack(int overflow, struct irq_desc *desc)
{
	struct irq_stack *curstk, *irqstk;
	u32 *isp, *prev_esp, arg1;

	curstk = (struct irq_stack *) current_stack();
	irqstk = __this_cpu_read(hardirq_stack);

	/*
	 * this is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the irq stack already after all)
	 */
	if (unlikely(curstk == irqstk))
		return 0;

	isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));

	/* Save the next esp at the bottom of the stack */
	prev_esp = (u32 *)irqstk;
	*prev_esp = current_stack_pointer;

	if (unlikely(overflow))
		call_on_stack(print_stack_overflow, isp);

	asm volatile("xchgl %%ebx,%%esp \n"
		     CALL_NOSPEC
		     "movl %%ebx,%%esp \n"
		     : "=a" (arg1), "=b" (isp)
		     : "0" (desc), "1" (isp),
		       [thunk_target] "D" (desc->handle_irq)
		     : "memory", "cc", "ecx");
	return 1;
}
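
/*
 * The inline asm above mirrors call_on_stack() but hand-loads the
 * argument: under the 32-bit kernel's -mregparm=3 calling convention
 * the first argument travels in %eax, so "0" (desc) arranges for
 * desc->handle_irq(desc) to run on the hardirq stack. The previous
 * %esp stored at the base of the irq stack is what lets the stack
 * unwinder walk from the irq stack back to the interrupted task stack.
 */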

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void irq_ctx_init(int cpu)
{
	struct irq_stack *irqstk;

	if (per_cpu(hardirq_stack, cpu))
		return;

	irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
					       THREADINFO_GFP,
					       THREAD_SIZE_ORDER));
	per_cpu(hardirq_stack, cpu) = irqstk;

	irqstk = page_address(alloc_pages_node(cpu_to_node(cpu),
					       THREADINFO_GFP,
					       THREAD_SIZE_ORDER));
	per_cpu(softirq_stack, cpu) = irqstk;

	printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
	       cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
}
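
/*
 * irq_ctx_init() is called once per CPU from the arch bringup path
 * (e.g. native_init_IRQ() for the boot CPU); repeat calls for an
 * already-initialized CPU bail out early via the hardirq_stack check.
 * The allocations are assumed to succeed this early in boot and are
 * never freed, even if the CPU is later taken offline.
 */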

void do_softirq_own_stack(void)
{
	struct irq_stack *irqstk;
	u32 *isp, *prev_esp;

	irqstk = __this_cpu_read(softirq_stack);

	/* build the stack frame on the softirq stack */
	isp = (u32 *) ((char *)irqstk + sizeof(*irqstk));

	/* Push the previous esp onto the stack */
	prev_esp = (u32 *)irqstk;
	*prev_esp = current_stack_pointer;

	call_on_stack(__do_softirq, isp);
}
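
/*
 * do_softirq_own_stack() is the arch hook invoked by the generic
 * softirq code (kernel/softirq.c) when pending softirqs must run from
 * task context: __do_softirq() then executes on the dedicated softirq
 * stack instead of the possibly deep task stack. Unlike the hardirq
 * path there is no nesting check, since do_softirq() never re-enters
 * this path, so the softirq stack always starts out empty.
 */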

bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
{
	int overflow = check_stack_overflow();

	if (IS_ERR_OR_NULL(desc))
		return false;

	if (user_mode(regs) || !execute_on_irq_stack(overflow, desc)) {
		if (unlikely(overflow))
			print_stack_overflow();
		generic_handle_irq_desc(desc);
	}

	return true;
}
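
/*
 * handle_irq() is reached from the do_IRQ() entry path. An interrupt
 * that arrives while the CPU is in user mode can safely run on the
 * current task stack (it is nearly empty at that point), so only
 * kernel-mode interrupts are moved to the hardirq stack; if that
 * switch fails we were already on it (a nested interrupt) and the irq
 * is handled in place.
 */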