v6.2 (arch/x86/include/asm/irq_stack.h)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_IRQ_STACK_H
#define _ASM_X86_IRQ_STACK_H

#include <linux/ptrace.h>
#include <linux/objtool.h>

#include <asm/processor.h>

#ifdef CONFIG_X86_64

/*
 * Macro to inline switching to an interrupt stack and invoking function
 * calls from there. The following rules apply:
 *
 * - Ordering:
 *
 *   1. Write the stack pointer into the topmost place of the irq
 *	stack. This ensures that the various unwinders can link back to the
 *	original stack.
 *
 *   2. Switch the stack pointer to the top of the irq stack.
 *
 *   3. Invoke whatever needs to be done (@asm_call argument).
 *
 *   4. Pop the original stack pointer from the top of the irq stack
 *	which brings it back to the original stack where it left off.
 *
 * - Function invocation:
 *
 *   To allow flexible usage of the macro, the actual function code including
 *   the store of the arguments in the call ABI registers is handed in via
 *   the @asm_call argument.
 *
 * - Local variables:
 *
 *   @tos:
 *	The @tos variable holds a pointer to the top of the irq stack and
 *	_must_ be allocated in a non-callee saved register as this is a
 *	restriction coming from objtool.
 *
 *	Note that (tos) is in both the input and output constraints to ensure
 *	that the compiler does not assume that R11 is left untouched in
 *	case this macro is used in some place where the per-CPU interrupt
 *	stack pointer is used again afterwards.
 *
 * - Function arguments:
 *	The function argument(s), if any, have to be defined in register
 *	variables at the place where this is invoked. Storing the
 *	argument(s) in the proper register(s) is part of the @asm_call.
 *
 * - Constraints:
 *
 *   The constraints have to be done very carefully because the compiler
 *   does not know about the assembly call.
 *
 *   output:
 *     As documented above, the @tos variable is required to be in
 *     the output constraints to make the compiler aware that R11 cannot be
 *     reused after the asm() statement.
 *
 *     For builds with CONFIG_UNWINDER_FRAME_POINTER, ASM_CALL_CONSTRAINT is
 *     required as well, as this prevents certain creative GCC variants from
 *     misplacing the ASM code.
 *
 *  input:
 *    - func:
 *	  Immediate, which tells the compiler that the function is referenced.
 *
 *    - tos:
 *	  Register. The actual register is defined by the variable declaration.
 *
 *    - function arguments:
 *	  The constraints are handed in via the 'argconstr' argument list. They
 *	  describe the register arguments which are used in @asm_call.
 *
 *  clobbers:
 *     Function calls can clobber anything except the callee-saved
 *     registers. Tell the compiler.
 */
#define call_on_stack(stack, func, asm_call, argconstr...)		\
{									\
	register void *tos asm("r11");					\
									\
	tos = ((void *)(stack));					\
									\
	asm_inline volatile(						\
	"movq	%%rsp, (%[tos])				\n"		\
	"movq	%[tos], %%rsp				\n"		\
									\
	asm_call							\
									\
	"popq	%%rsp					\n"		\
									\
	: "+r" (tos), ASM_CALL_CONSTRAINT				\
	: [__func] "i" (func), [tos] "r" (tos) argconstr		\
	: "cc", "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10",	\
	  "memory"							\
	);								\
}
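
/*
 * Example usage (illustrative sketch, not part of the original file):
 * run a hypothetical handler on the per-CPU hardirq stack, passing one
 * argument via the ASM_CALL_ARG1 sequence defined below. As with
 * SYSVEC_CONSTRAINTS further down, the constraint list is passed as a
 * separate macro argument and carries its own leading comma:
 *
 *	static void my_handler(struct pt_regs *regs);	// hypothetical
 *	#define MY_CONSTRAINTS	, [arg1] "r" (regs)	// hypothetical
 *
 *	call_on_stack(__this_cpu_read(pcpu_hot.hardirq_stack_ptr),
 *		      my_handler, ASM_CALL_ARG1, MY_CONSTRAINTS);
 */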

#define ASM_CALL_ARG0							\
	"call %P[__func]				\n"		\
	ASM_REACHABLE

#define ASM_CALL_ARG1							\
	"movq	%[arg1], %%rdi				\n"		\
	ASM_CALL_ARG0

#define ASM_CALL_ARG2							\
	"movq	%[arg2], %%rsi				\n"		\
	ASM_CALL_ARG1

#define ASM_CALL_ARG3							\
	"movq	%[arg3], %%rdx				\n"		\
	ASM_CALL_ARG2
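
/*
 * For reference, ASM_CALL_ARG2 expands to the following sequence. The
 * nesting stores the arguments in reverse order, which is harmless as
 * they all land in their ABI registers before the call:
 *
 *	"movq	%[arg2], %%rsi				\n"
 *	"movq	%[arg1], %%rdi				\n"
 *	"call %P[__func]				\n"
 *	ASM_REACHABLE
 *
 * The %P operand modifier emits the bare symbol name, so the call uses
 * a direct relocation rather than an immediate operand.
 */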

#define call_on_irqstack(func, asm_call, argconstr...)			\
	call_on_stack(__this_cpu_read(pcpu_hot.hardirq_stack_ptr),	\
		      func, asm_call, argconstr)

/* Macros to assert type correctness for run_*_on_irqstack macros */
#define assert_function_type(func, proto)				\
	static_assert(__builtin_types_compatible_p(typeof(&func), proto))

#define assert_arg_type(arg, proto)					\
	static_assert(__builtin_types_compatible_p(typeof(arg), proto))
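
/*
 * Example (illustrative, not part of the original file): a handler
 * with the wrong prototype is rejected at compile time.
 *
 *	void good(struct pt_regs *regs);	// hypothetical
 *	void bad(unsigned long x);		// hypothetical
 *
 *	assert_function_type(good, void (*)(struct pt_regs *));	// OK
 *	assert_function_type(bad, void (*)(struct pt_regs *));	// build error
 */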

/*
 * Macro to invoke system vector and device interrupt C handlers.
 */
#define call_on_irqstack_cond(func, regs, asm_call, constr, c_args...)	\
{									\
	/*								\
	 * User mode entry and interrupt on the irq stack do not	\
	 * switch stacks. If from user mode the task stack is empty.	\
	 */								\
	if (user_mode(regs) || __this_cpu_read(pcpu_hot.hardirq_stack_inuse)) { \
		irq_enter_rcu();					\
		func(c_args);						\
		irq_exit_rcu();						\
	} else {							\
		/*							\
		 * Mark the irq stack inuse _before_ and unmark _after_	\
		 * switching stacks. Interrupts are disabled in both	\
		 * places. Invoke the stack switch macro with the call	\
		 * sequence which matches the above direct invocation.	\
		 */							\
		__this_cpu_write(pcpu_hot.hardirq_stack_inuse, true);	\
		call_on_irqstack(func, asm_call, constr);		\
		__this_cpu_write(pcpu_hot.hardirq_stack_inuse, false);	\
	}								\
}

/*
 * Function call sequence for call_on_irqstack() for system vectors.
 *
 * Note that irq_enter_rcu() and irq_exit_rcu() do not use the input
 * mechanism because these functions are global and cannot be optimized out
 * when compiling a particular source file which uses one of these macros.
 *
 * The argument (regs) does not need to be pushed or stashed in a callee
 * saved register to be safe vs. the irq_enter_rcu() call because the
 * clobbers already prevent the compiler from storing it in a callee
 * clobbered register. As the compiler has to preserve @regs for the final
 * call to idtentry_exit() anyway, it's likely that it does not cause extra
 * effort for this asm magic.
 */
#define ASM_CALL_SYSVEC							\
	"call irq_enter_rcu				\n"		\
	ASM_CALL_ARG1							\
	"call irq_exit_rcu				\n"

#define SYSVEC_CONSTRAINTS	, [arg1] "r" (regs)

#define run_sysvec_on_irqstack_cond(func, regs)				\
{									\
	assert_function_type(func, void (*)(struct pt_regs *));		\
	assert_arg_type(regs, struct pt_regs *);			\
									\
	call_on_irqstack_cond(func, regs, ASM_CALL_SYSVEC,		\
			      SYSVEC_CONSTRAINTS, regs);		\
}
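
/*
 * Typical consumer (simplified sketch of the DEFINE_IDTENTRY_SYSVEC
 * glue in asm/idtentry.h; entry/exit details elided, handler names
 * hypothetical):
 *
 *	__visible noinstr void sysvec_foo(struct pt_regs *regs)
 *	{
 *		irqentry_state_t state = irqentry_enter(regs);
 *
 *		instrumentation_begin();
 *		run_sysvec_on_irqstack_cond(__sysvec_foo, regs);
 *		instrumentation_end();
 *		irqentry_exit(regs, state);
 *	}
 */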

/*
 * As in ASM_CALL_SYSVEC above the clobbers force the compiler to store
 * @regs and @vector in callee saved registers.
 */
#define ASM_CALL_IRQ							\
	"call irq_enter_rcu				\n"		\
	ASM_CALL_ARG2							\
	"call irq_exit_rcu				\n"

#define IRQ_CONSTRAINTS	, [arg1] "r" (regs), [arg2] "r" ((unsigned long)vector)

#define run_irq_on_irqstack_cond(func, regs, vector)			\
{									\
	assert_function_type(func, void (*)(struct pt_regs *, u32));	\
	assert_arg_type(regs, struct pt_regs *);			\
	assert_arg_type(vector, u32);					\
									\
	call_on_irqstack_cond(func, regs, ASM_CALL_IRQ,			\
			      IRQ_CONSTRAINTS, regs, vector);		\
}
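
/*
 * Typical consumer (simplified sketch of the DEFINE_IDTENTRY_IRQ glue
 * in asm/idtentry.h): the vector number arrives in the error_code slot
 * and is narrowed to u32 before the call.
 *
 *	u32 vector = (u32)(u8)error_code;
 *
 *	run_irq_on_irqstack_cond(__common_interrupt, regs, vector);
 */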

#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
/*
 * Macro to invoke __do_softirq on the irq stack. This is only called from
 * task context when bottom halves are about to be reenabled and soft
 * interrupts are pending to be processed. The interrupt stack cannot be in
 * use here.
 */
#define do_softirq_own_stack()						\
{									\
	__this_cpu_write(pcpu_hot.hardirq_stack_inuse, true);		\
	call_on_irqstack(__do_softirq, ASM_CALL_ARG0);			\
	__this_cpu_write(pcpu_hot.hardirq_stack_inuse, false);		\
}
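
/*
 * For context (sketch, not from this file): the generic softirq code
 * in kernel/softirq.c calls do_softirq_own_stack() when pending
 * softirqs are processed from task context, roughly:
 *
 *	if (local_softirq_pending())
 *		do_softirq_own_stack();
 */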

#endif

#else /* CONFIG_X86_64 */
/* System vector handlers always run on the stack they interrupted. */
#define run_sysvec_on_irqstack_cond(func, regs)				\
{									\
	irq_enter_rcu();						\
	func(regs);							\
	irq_exit_rcu();							\
}

/* Switches to the irq stack within func() */
#define run_irq_on_irqstack_cond(func, regs, vector)			\
{									\
	irq_enter_rcu();						\
	func(regs, vector);						\
	irq_exit_rcu();							\
}

#endif /* !CONFIG_X86_64 */

#endif
v5.14.15 (arch/x86/include/asm/irq_stack.h)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_IRQ_STACK_H
#define _ASM_X86_IRQ_STACK_H

#include <linux/ptrace.h>

#include <asm/processor.h>

#ifdef CONFIG_X86_64

/*
 * Macro to inline switching to an interrupt stack and invoking function
 * calls from there. The following rules apply:
 *
 * - Ordering:
 *
 *   1. Write the stack pointer into the topmost place of the irq
 *	stack. This ensures that the various unwinders can link back to the
 *	original stack.
 *
 *   2. Switch the stack pointer to the top of the irq stack.
 *
 *   3. Invoke whatever needs to be done (@asm_call argument).
 *
 *   4. Pop the original stack pointer from the top of the irq stack
 *	which brings it back to the original stack where it left off.
 *
 * - Function invocation:
 *
 *   To allow flexible usage of the macro, the actual function code including
 *   the store of the arguments in the call ABI registers is handed in via
 *   the @asm_call argument.
 *
 * - Local variables:
 *
 *   @tos:
 *	The @tos variable holds a pointer to the top of the irq stack and
 *	_must_ be allocated in a non-callee saved register as this is a
 *	restriction coming from objtool.
 *
 *	Note that (tos) is in both the input and output constraints to ensure
 *	that the compiler does not assume that R11 is left untouched in
 *	case this macro is used in some place where the per-CPU interrupt
 *	stack pointer is used again afterwards.
 *
 * - Function arguments:
 *	The function argument(s), if any, have to be defined in register
 *	variables at the place where this is invoked. Storing the
 *	argument(s) in the proper register(s) is part of the @asm_call.
 *
 * - Constraints:
 *
 *   The constraints have to be done very carefully because the compiler
 *   does not know about the assembly call.
 *
 *   output:
 *     As documented above, the @tos variable is required to be in
 *     the output constraints to make the compiler aware that R11 cannot be
 *     reused after the asm() statement.
 *
 *     For builds with CONFIG_UNWINDER_FRAME_POINTER, ASM_CALL_CONSTRAINT is
 *     required as well, as this prevents certain creative GCC variants from
 *     misplacing the ASM code.
 *
 *  input:
 *    - func:
 *	  Immediate, which tells the compiler that the function is referenced.
 *
 *    - tos:
 *	  Register. The actual register is defined by the variable declaration.
 *
 *    - function arguments:
 *	  The constraints are handed in via the 'argconstr' argument list. They
 *	  describe the register arguments which are used in @asm_call.
 *
 *  clobbers:
 *     Function calls can clobber anything except the callee-saved
 *     registers. Tell the compiler.
 */
#define call_on_irqstack(func, asm_call, argconstr...)			\
{									\
	register void *tos asm("r11");					\
									\
	tos = ((void *)__this_cpu_read(hardirq_stack_ptr));		\
									\
	asm_inline volatile(						\
	"movq	%%rsp, (%[tos])				\n"		\
	"movq	%[tos], %%rsp				\n"		\
									\
	asm_call							\
									\
	"popq	%%rsp					\n"		\
									\
	: "+r" (tos), ASM_CALL_CONSTRAINT				\
	: [__func] "i" (func), [tos] "r" (tos) argconstr		\
	: "cc", "rax", "rcx", "rdx", "rsi", "rdi", "r8", "r9", "r10",	\
	  "memory"							\
	);								\
}
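
/*
 * Compared with v6.2 above: this version reads the per-CPU
 * hardirq_stack_ptr directly inside call_on_irqstack(), whereas the
 * v6.2 code factors the stack switch out into a generic
 * call_on_stack(stack, ...) and makes call_on_irqstack() a thin
 * wrapper around pcpu_hot.hardirq_stack_ptr.
 */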
/* Macros to assert type correctness for run_*_on_irqstack macros */
#define assert_function_type(func, proto)				\
	static_assert(__builtin_types_compatible_p(typeof(&func), proto))

#define assert_arg_type(arg, proto)					\
	static_assert(__builtin_types_compatible_p(typeof(arg), proto))

/*
 * Macro to invoke system vector and device interrupt C handlers.
 */
#define call_on_irqstack_cond(func, regs, asm_call, constr, c_args...)	\
{									\
	/*								\
	 * User mode entry and interrupt on the irq stack do not	\
	 * switch stacks. If from user mode the task stack is empty.	\
	 */								\
	if (user_mode(regs) || __this_cpu_read(hardirq_stack_inuse)) {	\
		irq_enter_rcu();					\
		func(c_args);						\
		irq_exit_rcu();						\
	} else {							\
		/*							\
		 * Mark the irq stack inuse _before_ and unmark _after_	\
		 * switching stacks. Interrupts are disabled in both	\
		 * places. Invoke the stack switch macro with the call	\
		 * sequence which matches the above direct invocation.	\
		 */							\
		__this_cpu_write(hardirq_stack_inuse, true);		\
		call_on_irqstack(func, asm_call, constr);		\
		__this_cpu_write(hardirq_stack_inuse, false);		\
	}								\
}

/*
 * Function call sequence for call_on_irqstack() for system vectors.
 *
 * Note that irq_enter_rcu() and irq_exit_rcu() do not use the input
 * mechanism because these functions are global and cannot be optimized out
 * when compiling a particular source file which uses one of these macros.
 *
 * The argument (regs) does not need to be pushed or stashed in a callee
 * saved register to be safe vs. the irq_enter_rcu() call because the
 * clobbers already prevent the compiler from storing it in a callee
 * clobbered register. As the compiler has to preserve @regs for the final
 * call to idtentry_exit() anyway, it's likely that it does not cause extra
 * effort for this asm magic.
 */
#define ASM_CALL_SYSVEC							\
	"call irq_enter_rcu				\n"		\
	"movq	%[arg1], %%rdi				\n"		\
	"call %P[__func]				\n"		\
	"call irq_exit_rcu				\n"

#define SYSVEC_CONSTRAINTS	, [arg1] "r" (regs)

#define run_sysvec_on_irqstack_cond(func, regs)				\
{									\
	assert_function_type(func, void (*)(struct pt_regs *));		\
	assert_arg_type(regs, struct pt_regs *);			\
									\
	call_on_irqstack_cond(func, regs, ASM_CALL_SYSVEC,		\
			      SYSVEC_CONSTRAINTS, regs);		\
}

/*
 * As in ASM_CALL_SYSVEC above the clobbers force the compiler to store
 * @regs and @vector in callee saved registers.
 */
#define ASM_CALL_IRQ							\
	"call irq_enter_rcu				\n"		\
	"movq	%[arg1], %%rdi				\n"		\
	"movl	%[arg2], %%esi				\n"		\
	"call %P[__func]				\n"		\
	"call irq_exit_rcu				\n"

#define IRQ_CONSTRAINTS	, [arg1] "r" (regs), [arg2] "r" (vector)

#define run_irq_on_irqstack_cond(func, regs, vector)			\
{									\
	assert_function_type(func, void (*)(struct pt_regs *, u32));	\
	assert_arg_type(regs, struct pt_regs *);			\
	assert_arg_type(vector, u32);					\
									\
	call_on_irqstack_cond(func, regs, ASM_CALL_IRQ,			\
			      IRQ_CONSTRAINTS, regs, vector);		\
}
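
/*
 * Compared with v6.2 above: here @vector is passed as a bare u32 and
 * stored with a 32-bit movl into %esi, whereas the v6.2 code casts it
 * to unsigned long and reuses the generic movq-based ASM_CALL_ARG2.
 */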

#define ASM_CALL_SOFTIRQ						\
	"call %P[__func]				\n"

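/*
 * Note: in v6.2 above this helper is gone; the zero-argument call
 * sequence is provided by the generic ASM_CALL_ARG0 instead.
 */
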
/*
 * Macro to invoke __do_softirq on the irq stack. This is only called from
 * task context when bottom halves are about to be reenabled and soft
 * interrupts are pending to be processed. The interrupt stack cannot be in
 * use here.
 */
#define do_softirq_own_stack()						\
{									\
	__this_cpu_write(hardirq_stack_inuse, true);			\
	call_on_irqstack(__do_softirq, ASM_CALL_SOFTIRQ);		\
	__this_cpu_write(hardirq_stack_inuse, false);			\
}

#else /* CONFIG_X86_64 */
/* System vector handlers always run on the stack they interrupted. */
#define run_sysvec_on_irqstack_cond(func, regs)				\
{									\
	irq_enter_rcu();						\
	func(regs);							\
	irq_exit_rcu();							\
}

/* Switches to the irq stack within func() */
#define run_irq_on_irqstack_cond(func, regs, vector)			\
{									\
	irq_enter_rcu();						\
	func(regs, vector);						\
	irq_exit_rcu();							\
}

#endif /* !CONFIG_X86_64 */

#endif