/*
 * Web capture of the x86 <asm/irqflags.h> kernel header (site chrome
 * removed).  Two snapshots follow; the per-snapshot line numbers fused
 * into the source lines are an extraction artifact.
 *
 * First snapshot: Linux v4.10.11
 */
  1#ifndef _X86_IRQFLAGS_H_
  2#define _X86_IRQFLAGS_H_
  3
  4#include <asm/processor-flags.h>
  5
  6#ifndef __ASSEMBLY__
  7
 
 
  8/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
  9#define __cpuidle __attribute__((__section__(".cpuidle.text")))
 10
 11/*
 12 * Interrupt control:
 13 */
 14
/*
 * native_save_fl - read the CPU's EFLAGS register via pushf/pop.
 * Callers test X86_EFLAGS_IF in the result to learn the interrupt state.
 */
static inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/*
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
	 */
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");

	return flags;
}
 32
/*
 * native_restore_fl - write @flags back to EFLAGS via push/popf.
 * "memory" and "cc" clobbers stop the compiler from caching memory or
 * condition-code state across the interrupt-state change.
 */
static inline void native_restore_fl(unsigned long flags)
{
	asm volatile("push %0 ; popf"
		     : /* no output */
		     :"g" (flags)
		     :"memory", "cc");
}
 40
/* Disable maskable interrupts: "cli" clears EFLAGS.IF. */
static inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}
 45
/* Enable maskable interrupts: "sti" sets EFLAGS.IF. */
static inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}
 50
/*
 * Enable interrupts and halt in one step: "sti; hlt" leaves no window
 * in which a wakeup interrupt could be taken before the halt.
 */
static inline __cpuidle void native_safe_halt(void)
{
	asm volatile("sti; hlt": : :"memory");
}
 55
/* Halt the CPU without touching the interrupt flag. */
static inline __cpuidle void native_halt(void)
{
	asm volatile("hlt": : :"memory");
}
 60
 61#endif
 62
 63#ifdef CONFIG_PARAVIRT
 64#include <asm/paravirt.h>
 65#else
 66#ifndef __ASSEMBLY__
 67#include <linux/types.h>
 68
/* Non-paravirt build: read the flags straight from the hardware. */
static inline notrace unsigned long arch_local_save_flags(void)
{
	return native_save_fl();
}
 73
/* Restore a flags word previously obtained from arch_local_irq_save(). */
static inline notrace void arch_local_irq_restore(unsigned long flags)
{
	native_restore_fl(flags);
}
 78
/* Disable interrupts on the local CPU. */
static inline notrace void arch_local_irq_disable(void)
{
	native_irq_disable();
}
 83
/* Enable interrupts on the local CPU. */
static inline notrace void arch_local_irq_enable(void)
{
	native_irq_enable();
}
 88
/*
 * Used in the idle loop; sti takes one instruction cycle
 * to complete:
 *
 * Re-enables interrupts and halts with no wakeup-loss window.
 */
static inline __cpuidle void arch_safe_halt(void)
{
	native_safe_halt();
}
 97
/*
 * Used when interrupts are already enabled or to
 * shutdown the processor:
 *
 * Does not alter the interrupt flag before halting.
 */
static inline __cpuidle void halt(void)
{
	native_halt();
}
106
107/*
108 * For spinlocks, etc:
109 */
110static inline notrace unsigned long arch_local_irq_save(void)
111{
112	unsigned long flags = arch_local_save_flags();
113	arch_local_irq_disable();
114	return flags;
115}
116#else
117
/*
 * Assembly-source (__ASSEMBLY__) helpers for the non-paravirt case:
 * each macro expands to the bare instruction.  The (x) argument is
 * unused here; presumably it is consumed by the paravirt variants in
 * <asm/paravirt.h> -- verify there.
 */
#define ENABLE_INTERRUPTS(x)	sti
#define DISABLE_INTERRUPTS(x)	cli

#ifdef CONFIG_X86_64
#define SWAPGS	swapgs
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack).  So we either find a way around these or just fault
 * and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack. x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK	swapgs

/* Native kernels have no paravirt exception frame to adjust. */
#define PARAVIRT_ADJUST_EXCEPTION_FRAME	/*  */

#define INTERRUPT_RETURN	jmp native_iret
/* Return to user space, restoring the user GS base first. */
#define USERGS_SYSRET64				\
	swapgs;					\
	sysretq;
#define USERGS_SYSRET32				\
	swapgs;					\
	sysretl

#else
/* 32-bit native equivalents. */
#define INTERRUPT_RETURN		iret
#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
#define GET_CR0_INTO_EAX		movl %cr0, %eax
#endif
149
 
150
151#endif /* __ASSEMBLY__ */
152#endif /* CONFIG_PARAVIRT */
153
154#ifndef __ASSEMBLY__
155static inline int arch_irqs_disabled_flags(unsigned long flags)
156{
157	return !(flags & X86_EFLAGS_IF);
158}
159
/* Sample the live flags and report whether interrupts are off. */
static inline int arch_irqs_disabled(void)
{
	unsigned long flags = arch_local_save_flags();

	return arch_irqs_disabled_flags(flags);
}
166#endif /* !__ASSEMBLY__ */
167
#ifdef __ASSEMBLY__
/* Irq-tracing / lockdep hooks for assembly code; empty when disabled. */
#ifdef CONFIG_TRACE_IRQFLAGS
#  define TRACE_IRQS_ON		call trace_hardirqs_on_thunk;
#  define TRACE_IRQS_OFF	call trace_hardirqs_off_thunk;
#else
#  define TRACE_IRQS_ON
#  define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#  ifdef CONFIG_X86_64
#    define LOCKDEP_SYS_EXIT		call lockdep_sys_exit_thunk
/* Variant for irqs-off paths: re-enable around the thunk call. */
#    define LOCKDEP_SYS_EXIT_IRQ \
	TRACE_IRQS_ON; \
	sti; \
	call lockdep_sys_exit_thunk; \
	cli; \
	TRACE_IRQS_OFF;
#  else
/* 32-bit: preserve the caller-clobbered registers around the C call. */
#    define LOCKDEP_SYS_EXIT \
	pushl %eax;				\
	pushl %ecx;				\
	pushl %edx;				\
	call lockdep_sys_exit;			\
	popl %edx;				\
	popl %ecx;				\
	popl %eax;
#    define LOCKDEP_SYS_EXIT_IRQ
#  endif
#else
#  define LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ
#endif
#endif /* __ASSEMBLY__ */
201
202#endif
/* Second snapshot: Linux v6.2 */
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef _X86_IRQFLAGS_H_
  3#define _X86_IRQFLAGS_H_
  4
  5#include <asm/processor-flags.h>
  6
  7#ifndef __ASSEMBLY__
  8
  9#include <asm/nospec-branch.h>
 10
 11/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
 12#define __cpuidle __section(".cpuidle.text")
 13
 14/*
 15 * Interrupt control:
 16 */
 17
/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
extern inline unsigned long native_save_fl(void);
/*
 * native_save_fl - read the CPU's EFLAGS register via pushf/pop.
 * Callers test X86_EFLAGS_IF in the result to learn the interrupt state.
 */
extern __always_inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/*
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
	 */
	asm volatile("# __raw_save_flags\n\t"
		     "pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");

	return flags;
}
 37
/* Disable maskable interrupts: "cli" clears EFLAGS.IF. */
static __always_inline void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}
 42
/* Enable maskable interrupts: "sti" sets EFLAGS.IF. */
static __always_inline void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}
 47
/*
 * Enable interrupts and halt in one step ("sti; hlt" leaves no window
 * in which a wakeup interrupt could be taken before the halt).  CPU
 * buffers are cleared first via the MDS-mitigation helper from
 * <asm/nospec-branch.h>.
 */
static inline __cpuidle void native_safe_halt(void)
{
	mds_idle_clear_cpu_buffers();
	asm volatile("sti; hlt": : :"memory");
}
 53
/*
 * Halt the CPU without touching the interrupt flag; clears CPU buffers
 * first via the MDS-mitigation helper from <asm/nospec-branch.h>.
 */
static inline __cpuidle void native_halt(void)
{
	mds_idle_clear_cpu_buffers();
	asm volatile("hlt": : :"memory");
}
 59
 60#endif
 61
 62#ifdef CONFIG_PARAVIRT_XXL
 63#include <asm/paravirt.h>
 64#else
 65#ifndef __ASSEMBLY__
 66#include <linux/types.h>
 67
/* Non-paravirt (!CONFIG_PARAVIRT_XXL): read flags from the hardware. */
static __always_inline unsigned long arch_local_save_flags(void)
{
	return native_save_fl();
}
 72
/* Disable interrupts on the local CPU. */
static __always_inline void arch_local_irq_disable(void)
{
	native_irq_disable();
}
 77
/* Enable interrupts on the local CPU. */
static __always_inline void arch_local_irq_enable(void)
{
	native_irq_enable();
}
 82
/*
 * Used in the idle loop; sti takes one instruction cycle
 * to complete:
 *
 * Re-enables interrupts and halts with no wakeup-loss window.
 */
static inline __cpuidle void arch_safe_halt(void)
{
	native_safe_halt();
}
 91
/*
 * Used when interrupts are already enabled or to
 * shutdown the processor:
 *
 * Does not alter the interrupt flag before halting.
 */
static inline __cpuidle void halt(void)
{
	native_halt();
}
100
101/*
102 * For spinlocks, etc:
103 */
104static __always_inline unsigned long arch_local_irq_save(void)
105{
106	unsigned long flags = arch_local_save_flags();
107	arch_local_irq_disable();
108	return flags;
109}
110#else
111
 
 
 
#ifdef CONFIG_X86_64
#ifdef CONFIG_DEBUG_ENTRY
/* Assembly helper: copy EFLAGS into %rax (clobbers %rax). */
#define SAVE_FLAGS		pushfq; popq %rax
#endif

#endif
118
119#endif /* __ASSEMBLY__ */
120#endif /* CONFIG_PARAVIRT_XXL */
121
122#ifndef __ASSEMBLY__
/* Return non-zero when @flags says interrupts were disabled at sample time. */
static __always_inline int arch_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & X86_EFLAGS_IF);
}
127
/* Sample the live flags and report whether interrupts are off. */
static __always_inline int arch_irqs_disabled(void)
{
	unsigned long flags = arch_local_save_flags();

	return arch_irqs_disabled_flags(flags);
}
 
134
135static __always_inline void arch_local_irq_restore(unsigned long flags)
136{
137	if (!arch_irqs_disabled_flags(flags))
138		arch_local_irq_enable();
139}
140#endif /* !__ASSEMBLY__ */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
141
142#endif