1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Copyright (C) 2012 ARM Ltd.
4 */
5#ifndef __ASM_IRQFLAGS_H
6#define __ASM_IRQFLAGS_H
7
8#include <asm/alternative.h>
9#include <asm/ptrace.h>
10#include <asm/sysreg.h>
11
12/*
13 * Aarch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and
14 * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'dai'
15 * order:
 * Masking debug exceptions causes all other exceptions to be masked too.
17 * Masking SError masks irq, but not debug exceptions. Masking irqs has no
18 * side effects for other flags. Keeping to this order makes it easier for
19 * entry.S to know which exceptions should be unmasked.
20 *
21 * FIQ is never expected, but we mask it when we disable debug exceptions, and
22 * unmask it at all other times.
23 */
24
/*
 * CPU interrupt mask handling.
 */
static inline void arch_local_irq_enable(void)
{
	if (system_has_prio_mask_debugging()) {
		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);

		/*
		 * With PMR debugging enabled, the mask register must only
		 * ever hold one of the two canonical priorities here.
		 */
		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
	}

	/*
	 * Default: clear DAIF.I. Patched alternative (when the CPU uses GIC
	 * priority masking): raise ICC_PMR_EL1 to GIC_PRIO_IRQON, followed
	 * by "dsb sy" — NOTE(review): presumably so the new mask value is
	 * observed before subsequent accesses; confirm against the GIC
	 * architecture requirements.
	 */
	asm volatile(ALTERNATIVE(
		"msr daifclr, #2 // arch_local_irq_enable\n"
		"nop",
		__msr_s(SYS_ICC_PMR_EL1, "%0")
		"dsb sy",
		ARM64_HAS_IRQ_PRIO_MASKING)
		:
		: "r" ((unsigned long) GIC_PRIO_IRQON)
		: "memory");
}
46
static inline void arch_local_irq_disable(void)
{
	if (system_has_prio_mask_debugging()) {
		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);

		/* The mask register must hold one of the two canonical values. */
		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
	}

	/*
	 * Default: set DAIF.I. Patched alternative: lower ICC_PMR_EL1 to
	 * GIC_PRIO_IRQOFF. Unlike the enable path, no "dsb sy" follows the
	 * PMR write here.
	 */
	asm volatile(ALTERNATIVE(
		"msr daifset, #2 // arch_local_irq_disable",
		__msr_s(SYS_ICC_PMR_EL1, "%0"),
		ARM64_HAS_IRQ_PRIO_MASKING)
		:
		: "r" ((unsigned long) GIC_PRIO_IRQOFF)
		: "memory");
}
63
/*
 * Save the current interrupt enable state.
 *
 * Returns the raw DAIF bits, or — on CPUs patched for IRQ priority
 * masking — the current ICC_PMR_EL1 value. The result is opaque and only
 * meaningful to the other arch_* helpers in this file.
 */
static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;

	asm volatile(ALTERNATIVE(
		"mrs %0, daif",
		__mrs_s("%0", SYS_ICC_PMR_EL1),
		ARM64_HAS_IRQ_PRIO_MASKING)
		: "=&r" (flags)
		:
		: "memory");

	return flags;
}
81
/*
 * Decode a value from arch_local_save_flags(): non-zero iff IRQs were
 * masked when the flags were captured.
 */
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
	int res;

	/*
	 * Default (DAIF) flavour: isolate PSR_I_BIT from the saved flags.
	 * PMR flavour: XOR against GIC_PRIO_IRQON, so any value other than
	 * the canonical "IRQs on" priority yields non-zero.
	 */
	asm volatile(ALTERNATIVE(
		"and %w0, %w1, #" __stringify(PSR_I_BIT),
		"eor %w0, %w1, #" __stringify(GIC_PRIO_IRQON),
		ARM64_HAS_IRQ_PRIO_MASKING)
		: "=&r" (res)
		: "r" ((int) flags)
		: "memory");

	return res;
}
96
/*
 * Save the current IRQ state, then mask IRQs. Pair with
 * arch_local_irq_restore().
 */
static inline unsigned long arch_local_irq_save(void)
{
	unsigned long saved = arch_local_save_flags();

	/*
	 * There are too many states with IRQs disabled; if they are already
	 * disabled/masked, keep the current state untouched so it
	 * round-trips through arch_local_irq_restore().
	 */
	if (arch_irqs_disabled_flags(saved))
		return saved;

	arch_local_irq_disable();

	return saved;
}
112
/*
 * restore saved IRQ state
 *
 * @flags must come from arch_local_save_flags()/arch_local_irq_save().
 * Default: write the whole DAIF register back. Patched alternative:
 * write ICC_PMR_EL1 followed by "dsb sy".
 */
static inline void arch_local_irq_restore(unsigned long flags)
{
	asm volatile(ALTERNATIVE(
		"msr daif, %0\n"
		"nop",
		__msr_s(SYS_ICC_PMR_EL1, "%0")
		"dsb sy",
		ARM64_HAS_IRQ_PRIO_MASKING)
		:
		: "r" (flags)
		: "memory");
}
128
129#endif /* __ASM_IRQFLAGS_H */
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * Copyright (C) 2012 ARM Ltd.
4 */
5#ifndef __ASM_IRQFLAGS_H
6#define __ASM_IRQFLAGS_H
7
8#include <asm/alternative.h>
9#include <asm/barrier.h>
10#include <asm/ptrace.h>
11#include <asm/sysreg.h>
12
13/*
14 * Aarch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and
15 * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'daif'
16 * order:
 * Masking debug exceptions causes all other exceptions to be masked too.
18 * Masking SError masks IRQ/FIQ, but not debug exceptions. IRQ and FIQ are
19 * always masked and unmasked together, and have no side effects for other
20 * flags. Keeping to this order makes it easier for entry.S to know which
21 * exceptions should be unmasked.
22 */
23
/*
 * Unmask IRQ and FIQ via DAIF ("daifclr, #3" clears both the I and F
 * bits). The compiler barriers stop the compiler moving memory accesses
 * across the unmask point.
 */
static __always_inline void __daif_local_irq_enable(void)
{
	barrier();
	asm volatile("msr daifclr, #3");
	barrier();
}
30
/*
 * Unmask IRQs by raising the GIC priority mask (ICC_PMR_EL1) back to
 * GIC_PRIO_IRQON.
 */
static __always_inline void __pmr_local_irq_enable(void)
{
	if (IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING)) {
		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
		/* PMR must only ever hold one of the two canonical values. */
		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
	}

	barrier();
	write_sysreg_s(GIC_PRIO_IRQON, SYS_ICC_PMR_EL1);
	/* Synchronize the PMR write before continuing. */
	pmr_sync();
	barrier();
}
43
/*
 * Enable IRQs, dispatching to the PMR-based implementation on systems
 * using GIC priority masking and the DAIF-based one otherwise.
 */
static inline void arch_local_irq_enable(void)
{
	if (!system_uses_irq_prio_masking())
		__daif_local_irq_enable();
	else
		__pmr_local_irq_enable();
}
52
/*
 * Mask IRQ and FIQ via DAIF ("daifset, #3" sets both the I and F bits),
 * with compiler barriers pinning memory accesses on the correct side.
 */
static __always_inline void __daif_local_irq_disable(void)
{
	barrier();
	asm volatile("msr daifset, #3");
	barrier();
}
59
/*
 * Mask IRQs by dropping the GIC priority mask to GIC_PRIO_IRQOFF.
 * NOTE(review): unlike __pmr_local_irq_enable() there is no pmr_sync()
 * here — presumably a straggling IRQ taken just after masking is benign;
 * confirm against the enable path's requirements.
 */
static __always_inline void __pmr_local_irq_disable(void)
{
	if (IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING)) {
		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
		/* PMR must only ever hold one of the two canonical values. */
		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
	}

	barrier();
	write_sysreg_s(GIC_PRIO_IRQOFF, SYS_ICC_PMR_EL1);
	barrier();
}
71
/* Disable IRQs using whichever masking scheme this system is running. */
static inline void arch_local_irq_disable(void)
{
	if (!system_uses_irq_prio_masking())
		__daif_local_irq_disable();
	else
		__pmr_local_irq_disable();
}
80
81static __always_inline unsigned long __daif_local_save_flags(void)
82{
83 return read_sysreg(daif);
84}
85
86static __always_inline unsigned long __pmr_local_save_flags(void)
87{
88 return read_sysreg_s(SYS_ICC_PMR_EL1);
89}
90
/*
 * Save the current interrupt enable state: the DAIF bits, or ICC_PMR_EL1
 * when the system uses GIC priority masking. The value is opaque and is
 * only consumed by the other arch_* helpers in this file.
 */
static inline unsigned long arch_local_save_flags(void)
{
	return system_uses_irq_prio_masking() ? __pmr_local_save_flags()
					      : __daif_local_save_flags();
}
102
103static __always_inline bool __daif_irqs_disabled_flags(unsigned long flags)
104{
105 return flags & PSR_I_BIT;
106}
107
108static __always_inline bool __pmr_irqs_disabled_flags(unsigned long flags)
109{
110 return flags != GIC_PRIO_IRQON;
111}
112
113static inline bool arch_irqs_disabled_flags(unsigned long flags)
114{
115 if (system_uses_irq_prio_masking()) {
116 return __pmr_irqs_disabled_flags(flags);
117 } else {
118 return __daif_irqs_disabled_flags(flags);
119 }
120}
121
122static __always_inline bool __daif_irqs_disabled(void)
123{
124 return __daif_irqs_disabled_flags(__daif_local_save_flags());
125}
126
127static __always_inline bool __pmr_irqs_disabled(void)
128{
129 return __pmr_irqs_disabled_flags(__pmr_local_save_flags());
130}
131
132static inline bool arch_irqs_disabled(void)
133{
134 if (system_uses_irq_prio_masking()) {
135 return __pmr_irqs_disabled();
136 } else {
137 return __daif_irqs_disabled();
138 }
139}
140
141static __always_inline unsigned long __daif_local_irq_save(void)
142{
143 unsigned long flags = __daif_local_save_flags();
144
145 __daif_local_irq_disable();
146
147 return flags;
148}
149
150static __always_inline unsigned long __pmr_local_irq_save(void)
151{
152 unsigned long flags = __pmr_local_save_flags();
153
154 /*
155 * There are too many states with IRQs disabled, just keep the current
156 * state if interrupts are already disabled/masked.
157 */
158 if (!__pmr_irqs_disabled_flags(flags))
159 __pmr_local_irq_disable();
160
161 return flags;
162}
163
/*
 * Save the current IRQ state and mask IRQs. Pair with
 * arch_local_irq_restore().
 */
static inline unsigned long arch_local_irq_save(void)
{
	return system_uses_irq_prio_masking() ? __pmr_local_irq_save()
					      : __daif_local_irq_save();
}
172
/*
 * DAIF flavour of restore: write the saved DAIF bits back wholesale,
 * with compiler barriers keeping memory accesses from drifting across
 * the state change.
 */
static __always_inline void __daif_local_irq_restore(unsigned long flags)
{
	barrier();
	write_sysreg(flags, daif);
	barrier();
}
179
/*
 * PMR flavour of restore: write the saved priority mask back to
 * ICC_PMR_EL1, then pmr_sync() to synchronize the write (matching the
 * enable path).
 */
static __always_inline void __pmr_local_irq_restore(unsigned long flags)
{
	barrier();
	write_sysreg_s(flags, SYS_ICC_PMR_EL1);
	pmr_sync();
	barrier();
}
187
/*
 * restore saved IRQ state
 *
 * @flags must have been produced by arch_local_save_flags() or
 * arch_local_irq_save() on this system.
 */
static inline void arch_local_irq_restore(unsigned long flags)
{
	if (!system_uses_irq_prio_masking())
		__daif_local_irq_restore(flags);
	else
		__pmr_local_irq_restore(flags);
}
199
200#endif /* __ASM_IRQFLAGS_H */