/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__

#include <asm/nospec-branch.h>

/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
#define __cpuidle __section(".cpuidle.text")

/*
 * Interrupt control:
 */

/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
extern inline unsigned long native_save_fl(void);
extern __always_inline unsigned long native_save_fl(void)
{
        unsigned long flags;

        /*
         * "=rm" is safe here, because "pop" adjusts the stack before
         * it evaluates its effective address -- this is part of the
         * documented behavior of the "pop" instruction.
         */
        asm volatile("# __raw_save_flags\n\t"
                     "pushf ; pop %0"
                     : "=rm" (flags)
                     : /* no input */
                     : "memory");

        return flags;
}
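
/*
 * Illustrative note (editorial, not part of the kernel source): with
 * "=rm" the compiler may pick either a register or a stack slot for
 * the output, and both expansions are correct:
 *
 *        pushf ; pop %rax        # register operand
 *        pushf ; pop 8(%rsp)     # memory operand -- still safe, since
 *                                # pop increments %rsp *before* it
 *                                # computes the destination address,
 *                                # so %rsp-relative slots refer to the
 *                                # same frame as before the pushf
 */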

static __always_inline void native_irq_disable(void)
{
        asm volatile("cli": : :"memory");
}

static __always_inline void native_irq_enable(void)
{
        asm volatile("sti": : :"memory");
}

static inline __cpuidle void native_safe_halt(void)
{
        mds_idle_clear_cpu_buffers();
        asm volatile("sti; hlt": : :"memory");
}
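
/*
 * Editorial note: "sti; hlt" must stay in a single asm statement.
 * STI keeps interrupts masked for one more instruction (the "STI
 * shadow"), so the pair cannot lose a wakeup:
 *
 *        sti        -- IRQs recognized only after the next instruction
 *        hlt        -- a pending IRQ wakes the CPU here
 *
 * Split across two statements, an interrupt landing between them
 * could be handled before HLT, which would then sleep indefinitely.
 */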

static inline __cpuidle void native_halt(void)
{
        mds_idle_clear_cpu_buffers();
        asm volatile("hlt": : :"memory");
}

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
#include <linux/types.h>

static __always_inline unsigned long arch_local_save_flags(void)
{
        return native_save_fl();
}

static __always_inline void arch_local_irq_disable(void)
{
        native_irq_disable();
}

static __always_inline void arch_local_irq_enable(void)
{
        native_irq_enable();
}

/*
 * Used in the idle loop; "sti" enables interrupts only after the
 * following instruction, so "sti; hlt" cannot miss a wakeup:
 */
static inline __cpuidle void arch_safe_halt(void)
{
        native_safe_halt();
}

/*
 * Used when interrupts are already enabled or to
 * shut down the processor:
 */
static inline __cpuidle void halt(void)
{
        native_halt();
}

/*
 * For spinlocks, etc:
 */
static __always_inline unsigned long arch_local_irq_save(void)
{
        unsigned long flags = arch_local_save_flags();
        arch_local_irq_disable();
        return flags;
}
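
/*
 * Example (editorial sketch, not kernel code): the canonical caller
 * pattern pairs the save with arch_local_irq_restore(), defined
 * further down:
 *
 *        unsigned long flags;
 *
 *        flags = arch_local_irq_save();
 *        ... critical section, IRQs off ...
 *        arch_local_irq_restore(flags);
 */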
#else

#ifdef CONFIG_X86_64
#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS pushfq; popq %rax
#endif

#endif

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT_XXL */

#ifndef __ASSEMBLY__
static __always_inline int arch_irqs_disabled_flags(unsigned long flags)
{
        return !(flags & X86_EFLAGS_IF);
}
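
/*
 * Worked example (editorial): X86_EFLAGS_IF is bit 9 (0x200).  For
 * flags == 0x246, 0x246 & 0x200 is nonzero, so interrupts are enabled
 * and arch_irqs_disabled_flags() returns 0; for flags == 0x046 it
 * returns 1.
 */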

static __always_inline int arch_irqs_disabled(void)
{
        unsigned long flags = arch_local_save_flags();

        return arch_irqs_disabled_flags(flags);
}

static __always_inline void arch_local_irq_restore(unsigned long flags)
{
        if (!arch_irqs_disabled_flags(flags))
                arch_local_irq_enable();
}
#endif /* !__ASSEMBLY__ */

#endif /* _X86_IRQFLAGS_H_ */

#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__
/*
 * Interrupt control:
 */

static inline unsigned long native_save_fl(void)
{
        unsigned long flags;

        /*
         * "=rm" is safe here, because "pop" adjusts the stack before
         * it evaluates its effective address -- this is part of the
         * documented behavior of the "pop" instruction.
         */
        asm volatile("# __raw_save_flags\n\t"
                     "pushf ; pop %0"
                     : "=rm" (flags)
                     : /* no input */
                     : "memory");

        return flags;
}

static inline void native_restore_fl(unsigned long flags)
{
        asm volatile("push %0 ; popf"
                     : /* no output */
                     : "g" (flags)
                     : "memory", "cc");
}
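
/*
 * Editorial note: "popf" rewrites the whole EFLAGS register, not just
 * IF, hence the "cc" clobber above.  Illustrative pairing:
 *
 *        unsigned long flags = native_save_fl();
 *        native_irq_disable();
 *        ...
 *        native_restore_fl(flags);   -- restores IF and the other flags
 */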

static inline void native_irq_disable(void)
{
        asm volatile("cli": : :"memory");
}

static inline void native_irq_enable(void)
{
        asm volatile("sti": : :"memory");
}

static inline void native_safe_halt(void)
{
        asm volatile("sti; hlt": : :"memory");
}

static inline void native_halt(void)
{
        asm volatile("hlt": : :"memory");
}

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
#include <linux/types.h>

static inline notrace unsigned long arch_local_save_flags(void)
{
        return native_save_fl();
}

static inline notrace void arch_local_irq_restore(unsigned long flags)
{
        native_restore_fl(flags);
}

static inline notrace void arch_local_irq_disable(void)
{
        native_irq_disable();
}

static inline notrace void arch_local_irq_enable(void)
{
        native_irq_enable();
}

/*
 * Used in the idle loop; "sti" enables interrupts only after the
 * following instruction, so "sti; hlt" cannot miss a wakeup:
 */
static inline void arch_safe_halt(void)
{
        native_safe_halt();
}

/*
 * Used when interrupts are already enabled or to
 * shut down the processor:
 */
static inline void halt(void)
{
        native_halt();
}

/*
 * For spinlocks, etc:
 */
static inline notrace unsigned long arch_local_irq_save(void)
{
        unsigned long flags = arch_local_save_flags();
        arch_local_irq_disable();
        return flags;
}
#else

#define ENABLE_INTERRUPTS(x) sti
#define DISABLE_INTERRUPTS(x) cli

#ifdef CONFIG_X86_64
#define SWAPGS swapgs
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack).  So we either find a way around this or just fault
 * and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack.  x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK swapgs

#define PARAVIRT_ADJUST_EXCEPTION_FRAME /*  */

#define INTERRUPT_RETURN jmp native_iret
#define USERGS_SYSRET64 \
        swapgs; \
        sysretq;
#define USERGS_SYSRET32 \
        swapgs; \
        sysretl

#else
#define INTERRUPT_RETURN iret
#define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
#define GET_CR0_INTO_EAX movl %cr0, %eax
#endif

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
        return !(flags & X86_EFLAGS_IF);
}

static inline int arch_irqs_disabled(void)
{
        unsigned long flags = arch_local_save_flags();

        return arch_irqs_disabled_flags(flags);
}
#endif /* !__ASSEMBLY__ */
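
/*
 * Example (editorial sketch): arch_irqs_disabled() is typically used
 * for sanity checks, e.g. a hypothetical assertion in code that must
 * run with interrupts off:
 *
 *        WARN_ON_ONCE(!arch_irqs_disabled());
 */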

#ifdef __ASSEMBLY__
#ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON call trace_hardirqs_on_thunk;
# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk;
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_X86_64
#  define LOCKDEP_SYS_EXIT call lockdep_sys_exit_thunk
#  define LOCKDEP_SYS_EXIT_IRQ \
        TRACE_IRQS_ON; \
        sti; \
        call lockdep_sys_exit_thunk; \
        cli; \
        TRACE_IRQS_OFF;
# else
#  define LOCKDEP_SYS_EXIT \
        pushl %eax; \
        pushl %ecx; \
        pushl %edx; \
        call lockdep_sys_exit; \
        popl %edx; \
        popl %ecx; \
        popl %eax;
#  define LOCKDEP_SYS_EXIT_IRQ
# endif
#else
# define LOCKDEP_SYS_EXIT
# define LOCKDEP_SYS_EXIT_IRQ
#endif
#endif /* __ASSEMBLY__ */

#endif /* _X86_IRQFLAGS_H_ */