/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_HW_IRQ_H
#define _ASM_POWERPC_HW_IRQ_H

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

#ifdef CONFIG_PPC64

/*
 * PACA flags in paca->irq_happened.
 *
 * These bits are set when interrupts occur while soft-disabled
 * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
 * is set whenever we manually hard disable.
 */
#define PACA_IRQ_HARD_DIS	0x01
#define PACA_IRQ_DBELL		0x02
#define PACA_IRQ_EE		0x04
#define PACA_IRQ_DEC		0x08 /* Or FIT */
#define PACA_IRQ_EE_EDGE	0x10 /* BookE only */
#define PACA_IRQ_HMI		0x20
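
/*
 * Illustrative flow (simplified sketch, not part of this header): with
 * interrupts soft-disabled, an incoming exception is only noted here and
 * then replayed once interrupts are soft-enabled again:
 *
 *	local_irq_disable();		// paca->soft_enabled = 0
 *	...				// external interrupt arrives: the
 *					// masked-interrupt path records
 *					// PACA_IRQ_EE in irq_happened,
 *					// hard-disables, and returns
 *	local_irq_enable();		// arch_local_irq_restore(1) sees
 *					// irq_happened and replays it via
 *					// __replay_interrupt()
 */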

#endif /* CONFIG_PPC64 */

#ifndef __ASSEMBLY__

extern void __replay_interrupt(unsigned int vector);

extern void timer_interrupt(struct pt_regs *);
extern void performance_monitor_exception(struct pt_regs *regs);
extern void WatchdogException(struct pt_regs *regs);
extern void unknown_exception(struct pt_regs *regs);

#ifdef CONFIG_PPC64
#include <asm/paca.h>

static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;

	asm volatile(
		"lbz %0,%1(13)"
		: "=r" (flags)
		: "i" (offsetof(struct paca_struct, soft_enabled)));

	return flags;
}

static inline unsigned long arch_local_irq_disable(void)
{
	unsigned long flags, zero;

	asm volatile(
		"li %1,0; lbz %0,%2(13); stb %1,%2(13)"
		: "=r" (flags), "=&r" (zero)
		: "i" (offsetof(struct paca_struct, soft_enabled))
		: "memory");

	return flags;
}

extern void arch_local_irq_restore(unsigned long);

static inline void arch_local_irq_enable(void)
{
	arch_local_irq_restore(1);
}

static inline unsigned long arch_local_irq_save(void)
{
	return arch_local_irq_disable();
}
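
/*
 * Typical usage (illustrative sketch): callers normally reach these through
 * the generic local_irq_save()/local_irq_restore() wrappers rather than
 * calling them directly:
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();	// soft_enabled: 1 -> 0
 *	... critical section ...
 *	arch_local_irq_restore(flags);	// replays anything that arrived
 */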

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return flags == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#ifdef CONFIG_PPC_BOOK3E
#define __hard_irq_enable()	asm volatile("wrteei 1" : : : "memory")
#define __hard_irq_disable()	asm volatile("wrteei 0" : : : "memory")
#else
#define __hard_irq_enable()	__mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
#define __hard_irq_disable()	__mtmsrd(local_paca->kernel_msr, 1)
#endif

#define hard_irq_disable()	do {			\
	u8 _was_enabled;				\
	__hard_irq_disable();				\
	_was_enabled = local_paca->soft_enabled;	\
	local_paca->soft_enabled = 0;			\
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;	\
	if (_was_enabled)				\
		trace_hardirqs_off();			\
} while(0)

static inline bool lazy_irq_pending(void)
{
	return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
}

/*
 * This is called by asynchronous interrupts to conditionally
 * re-enable hard interrupts, when soft-disabled, after having
 * cleared the source of the interrupt.
 */
static inline void may_hard_irq_enable(void)
{
	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
	if (!(get_paca()->irq_happened & PACA_IRQ_EE))
		__hard_irq_enable();
}

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !regs->softe;
}

extern bool prep_irq_for_idle(void);

#else /* CONFIG_PPC64 */

#define SET_MSR_EE(x)	mtmsr(x)

static inline unsigned long arch_local_save_flags(void)
{
	return mfmsr();
}

static inline void arch_local_irq_restore(unsigned long flags)
{
#if defined(CONFIG_BOOKE)
	asm volatile("wrtee %0" : : "r" (flags) : "memory");
#else
	mtmsr(flags);
#endif
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 0" : : : "memory");
#else
	SET_MSR_EE(flags & ~MSR_EE);
#endif
	return flags;
}

static inline void arch_local_irq_disable(void)
{
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 0" : : : "memory");
#else
	arch_local_irq_save();
#endif
}

static inline void arch_local_irq_enable(void)
{
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 1" : : : "memory");
#else
	unsigned long msr = mfmsr();
	SET_MSR_EE(msr | MSR_EE);
#endif
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return (flags & MSR_EE) == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#define hard_irq_disable()	arch_local_irq_disable()

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !(regs->msr & MSR_EE);
}

static inline void may_hard_irq_enable(void) { }

#endif /* CONFIG_PPC64 */

#define ARCH_IRQ_INIT_FLAGS	IRQ_NOREQUEST

/*
 * interrupt-retrigger: should we handle this via lost interrupts and IPIs
 * or should we not care like we do now? --BenH.
 */
struct irq_chip;

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HW_IRQ_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_HW_IRQ_H
#define _ASM_POWERPC_HW_IRQ_H

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

#ifdef CONFIG_PPC64

/*
 * PACA flags in paca->irq_happened.
 *
 * These bits are set when interrupts occur while soft-disabled
 * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
 * is set whenever we manually hard disable.
 */
#define PACA_IRQ_HARD_DIS	0x01
#define PACA_IRQ_DBELL		0x02
#define PACA_IRQ_EE		0x04
#define PACA_IRQ_DEC		0x08 /* Or FIT */
#define PACA_IRQ_EE_EDGE	0x10 /* BookE only */
#define PACA_IRQ_HMI		0x20
#define PACA_IRQ_PMI		0x40

/*
 * Some soft-masked interrupts must be hard masked until they are replayed
 * (e.g., because the soft-masked handler does not clear the exception).
 */
#ifdef CONFIG_PPC_BOOK3S
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE|PACA_IRQ_PMI)
#else
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE)
#endif

/*
 * flags for paca->irq_soft_mask
 */
#define IRQS_ENABLED		0
#define IRQS_DISABLED		1 /* local_irq_disable() interrupts */
#define IRQS_PMI_DISABLED	2
#define IRQS_ALL_DISABLED	(IRQS_DISABLED | IRQS_PMI_DISABLED)
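
/*
 * Example (illustrative sketch): local_irq_disable() leaves
 * paca->irq_soft_mask == IRQS_DISABLED, while powerpc_local_irq_pmu_save()
 * (Book3S) leaves it at IRQS_ALL_DISABLED.  Both count as "irqs disabled"
 * because arch_irqs_disabled_flags() below tests only the IRQS_DISABLED bit:
 *
 *	arch_irqs_disabled_flags(IRQS_DISABLED)	    -> true
 *	arch_irqs_disabled_flags(IRQS_ALL_DISABLED) -> true
 *	arch_irqs_disabled_flags(IRQS_ENABLED)	    -> false
 */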

#endif /* CONFIG_PPC64 */

#ifndef __ASSEMBLY__

extern void replay_system_reset(void);
extern void replay_soft_interrupts(void);

extern void timer_interrupt(struct pt_regs *);
extern void timer_broadcast_interrupt(void);
extern void performance_monitor_exception(struct pt_regs *regs);
extern void WatchdogException(struct pt_regs *regs);
extern void unknown_exception(struct pt_regs *regs);

#ifdef CONFIG_PPC64
#include <asm/paca.h>

static inline notrace unsigned long irq_soft_mask_return(void)
{
	unsigned long flags;

	asm volatile(
		"lbz %0,%1(13)"
		: "=r" (flags)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)));

	return flags;
}

/*
 * The "memory" clobber acts as both a compiler barrier
 * for the critical section and as a clobber because
 * we changed paca->irq_soft_mask
 */
static inline notrace void irq_soft_mask_set(unsigned long mask)
{
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	/*
	 * The irq mask must always include the STD bit if any are set,
	 * and interrupts don't get replayed until the standard
	 * interrupt (local_irq_disable()) is unmasked.
	 *
	 * Other masks must only provide additional masking beyond
	 * the standard, and they are also not replayed until the
	 * standard interrupt becomes unmasked.
	 *
	 * This could be changed, but it will require partial
	 * unmasks to be replayed, among other things. For now, take
	 * the simple approach.
	 */
	WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif

	asm volatile(
		"stb %0,%1(13)"
		:
		: "r" (mask),
		  "i" (offsetof(struct paca_struct, irq_soft_mask))
		: "memory");
}

static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
{
	unsigned long flags;

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	WARN_ON(mask && !(mask & IRQS_DISABLED));
#endif

	asm volatile(
		"lbz %0,%1(13); stb %2,%1(13)"
		: "=&r" (flags)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)),
		  "r" (mask)
		: "memory");

	return flags;
}

static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
{
	unsigned long flags, tmp;

	asm volatile(
		"lbz %0,%2(13); or %1,%0,%3; stb %1,%2(13)"
		: "=&r" (flags), "=r" (tmp)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)),
		  "r" (mask)
		: "memory");

#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	WARN_ON((mask | flags) && !((mask | flags) & IRQS_DISABLED));
#endif

	return flags;
}

static inline unsigned long arch_local_save_flags(void)
{
	return irq_soft_mask_return();
}

static inline void arch_local_irq_disable(void)
{
	irq_soft_mask_set(IRQS_DISABLED);
}

extern void arch_local_irq_restore(unsigned long);

static inline void arch_local_irq_enable(void)
{
	arch_local_irq_restore(IRQS_ENABLED);
}

static inline unsigned long arch_local_irq_save(void)
{
	return irq_soft_mask_set_return(IRQS_DISABLED);
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return flags & IRQS_DISABLED;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#ifdef CONFIG_PPC_BOOK3S
/*
 * To support disabling and enabling IRQs together with PMIs, a pair of
 * new macros, powerpc_local_irq_pmu_save() and
 * powerpc_local_irq_pmu_restore(), is added. They are modelled on the
 * generic Linux local_irq_* code in include/linux/irqflags.h.
 */
#define raw_local_irq_pmu_save(flags)				\
	do {							\
		typecheck(unsigned long, flags);		\
		flags = irq_soft_mask_or_return(IRQS_DISABLED |	\
				IRQS_PMI_DISABLED);		\
	} while(0)

#define raw_local_irq_pmu_restore(flags)			\
	do {							\
		typecheck(unsigned long, flags);		\
		arch_local_irq_restore(flags);			\
	} while(0)

#ifdef CONFIG_TRACE_IRQFLAGS
#define powerpc_local_irq_pmu_save(flags)			\
	do {							\
		raw_local_irq_pmu_save(flags);			\
		if (!raw_irqs_disabled_flags(flags))		\
			trace_hardirqs_off();			\
	} while(0)
#define powerpc_local_irq_pmu_restore(flags)			\
	do {							\
		if (!raw_irqs_disabled_flags(flags))		\
			trace_hardirqs_on();			\
		raw_local_irq_pmu_restore(flags);		\
	} while(0)
#else
#define powerpc_local_irq_pmu_save(flags)			\
	do {							\
		raw_local_irq_pmu_save(flags);			\
	} while(0)
#define powerpc_local_irq_pmu_restore(flags)			\
	do {							\
		raw_local_irq_pmu_restore(flags);		\
	} while (0)
#endif /* CONFIG_TRACE_IRQFLAGS */
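
/*
 * Usage sketch (illustrative, hypothetical caller): a PMU critical
 * section that must also keep performance monitor interrupts out:
 *
 *	unsigned long flags;
 *
 *	powerpc_local_irq_pmu_save(flags);
 *	... update PMU / perf event state ...
 *	powerpc_local_irq_pmu_restore(flags);
 */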

#endif /* CONFIG_PPC_BOOK3S */

#ifdef CONFIG_PPC_BOOK3E
#define __hard_irq_enable()	wrtee(MSR_EE)
#define __hard_irq_disable()	wrtee(0)
#define __hard_EE_RI_disable()	wrtee(0)
#define __hard_RI_enable()	do { } while (0)
#else
#define __hard_irq_enable()	__mtmsrd(MSR_EE|MSR_RI, 1)
#define __hard_irq_disable()	__mtmsrd(MSR_RI, 1)
#define __hard_EE_RI_disable()	__mtmsrd(0, 1)
#define __hard_RI_enable()	__mtmsrd(MSR_RI, 1)
#endif

#define hard_irq_disable()	do {					\
	unsigned long flags;						\
	__hard_irq_disable();						\
	flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED);		\
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;			\
	if (!arch_irqs_disabled_flags(flags)) {				\
		asm ("stdx %%r1, 0, %1 ;"				\
			: "=m" (local_paca->saved_r1)			\
			: "b" (&local_paca->saved_r1));			\
		trace_hardirqs_off();					\
	}								\
} while(0)

static inline bool __lazy_irq_pending(u8 irq_happened)
{
	return !!(irq_happened & ~PACA_IRQ_HARD_DIS);
}

/*
 * Check if a lazy IRQ is pending. Should be called with IRQs hard disabled.
 */
static inline bool lazy_irq_pending(void)
{
	return __lazy_irq_pending(get_paca()->irq_happened);
}
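
/*
 * Example caller (illustrative sketch): idle entry code hard-disables
 * interrupts and then bails out if something is already pending, so the
 * CPU never goes to sleep on top of a soft-pending interrupt:
 *
 *	hard_irq_disable();
 *	if (lazy_irq_pending())
 *		return;			// handle the pending work instead
 *	... enter low-power state ...
 */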

/*
 * Check if a lazy IRQ is pending, with no debugging checks.
 * Should be called with IRQs hard disabled.
 * For use in RI disabled code or other constrained situations.
 */
static inline bool lazy_irq_pending_nocheck(void)
{
	return __lazy_irq_pending(local_paca->irq_happened);
}

/*
 * This is called by asynchronous interrupts to conditionally
 * re-enable hard interrupts after having cleared the source
 * of the interrupt. They are kept disabled if there is a different
 * soft-masked interrupt pending that requires hard masking.
 */
static inline void may_hard_irq_enable(void)
{
	if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)) {
		get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
		__hard_irq_enable();
	}
}
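
/*
 * Example caller (illustrative sketch, roughly what the timer interrupt
 * does): clear the interrupt source first, then let MSR[EE] be turned
 * back on if nothing else needs to stay hard-masked:
 *
 *	set_dec(decrementer_max);	// silence the decrementer
 *	may_hard_irq_enable();
 */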

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return (regs->softe & IRQS_DISABLED);
}

extern bool prep_irq_for_idle(void);
extern bool prep_irq_for_idle_irqsoff(void);
extern void irq_set_pending_from_srr1(unsigned long srr1);

#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();

extern void force_external_irq_replay(void);
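
/*
 * Usage sketch (illustrative): cpuidle-style callers typically do
 *
 *	if (!prep_irq_for_idle())
 *		return;			// an interrupt is already pending
 *	... enter the platform low-power state ...
 *
 * so a pending soft-masked interrupt is handled instead of being slept on.
 */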

#else /* CONFIG_PPC64 */

static inline unsigned long arch_local_save_flags(void)
{
	return mfmsr();
}

static inline void arch_local_irq_restore(unsigned long flags)
{
	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(flags);
	else
		mtmsr(flags);
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();

	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else
		mtmsr(flags & ~MSR_EE);

	return flags;
}

static inline void arch_local_irq_disable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else
		mtmsr(mfmsr() & ~MSR_EE);
}

static inline void arch_local_irq_enable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(MSR_EE);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EIE);
	else
		mtmsr(mfmsr() | MSR_EE);
}
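
/*
 * Note (illustrative): BookE and 8xx use dedicated mechanisms (the wrtee
 * helper and the SPRN_EID/SPRN_EIE special registers) that change only the
 * EE state, avoiding the mfmsr()/mtmsr() read-modify-write used on other
 * 32-bit cores:
 *
 *	arch_local_irq_disable();	// wrtee(0) on BookE, SPRN_EID write on 8xx
 *	...
 *	arch_local_irq_enable();	// wrtee(MSR_EE) on BookE, SPRN_EIE write on 8xx
 */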

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return (flags & MSR_EE) == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#define hard_irq_disable()		arch_local_irq_disable()

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !(regs->msr & MSR_EE);
}

static inline void may_hard_irq_enable(void) { }

#endif /* CONFIG_PPC64 */

#define ARCH_IRQ_INIT_FLAGS	IRQ_NOREQUEST

/*
 * interrupt-retrigger: should we handle this via lost interrupts and IPIs
 * or should we not care like we do now? --BenH.
 */
struct irq_chip;

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HW_IRQ_H */