arch/powerpc/include/asm/hw_irq.h (v4.6)
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_HW_IRQ_H
#define _ASM_POWERPC_HW_IRQ_H

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

#ifdef CONFIG_PPC64

/*
 * PACA flags in paca->irq_happened.
 *
 * These bits are set when interrupts occur while soft-disabled
 * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
 * is set whenever we manually hard disable.
 */
#define PACA_IRQ_HARD_DIS	0x01
#define PACA_IRQ_DBELL		0x02
#define PACA_IRQ_EE		0x04
#define PACA_IRQ_DEC		0x08 /* Or FIT */
#define PACA_IRQ_EE_EDGE	0x10 /* BookE only */
#define PACA_IRQ_HMI		0x20

#endif /* CONFIG_PPC64 */

#ifndef __ASSEMBLY__

extern void __replay_interrupt(unsigned int vector);

extern void timer_interrupt(struct pt_regs *);
extern void performance_monitor_exception(struct pt_regs *regs);
extern void WatchdogException(struct pt_regs *regs);
extern void unknown_exception(struct pt_regs *regs);

#ifdef CONFIG_PPC64
#include <asm/paca.h>

static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;

	asm volatile(
		"lbz %0,%1(13)"
		: "=r" (flags)
		: "i" (offsetof(struct paca_struct, soft_enabled)));

	return flags;
}

static inline unsigned long arch_local_irq_disable(void)
{
	unsigned long flags, zero;

	asm volatile(
		"li %1,0; lbz %0,%2(13); stb %1,%2(13)"
		: "=r" (flags), "=&r" (zero)
		: "i" (offsetof(struct paca_struct, soft_enabled))
		: "memory");

	return flags;
}

extern void arch_local_irq_restore(unsigned long);

static inline void arch_local_irq_enable(void)
{
	arch_local_irq_restore(1);
}

static inline unsigned long arch_local_irq_save(void)
{
	return arch_local_irq_disable();
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return flags == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#ifdef CONFIG_PPC_BOOK3E
#define __hard_irq_enable()	asm volatile("wrteei 1" : : : "memory")
#define __hard_irq_disable()	asm volatile("wrteei 0" : : : "memory")
#else
#define __hard_irq_enable()	__mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
#define __hard_irq_disable()	__mtmsrd(local_paca->kernel_msr, 1)
#endif

#define hard_irq_disable()	do {			\
	u8 _was_enabled;				\
	__hard_irq_disable();				\
	_was_enabled = local_paca->soft_enabled;	\
	local_paca->soft_enabled = 0;			\
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;	\
	if (_was_enabled)				\
		trace_hardirqs_off();			\
} while(0)

static inline bool lazy_irq_pending(void)
{
	return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
}

/*
 * This is called by asynchronous interrupts to conditionally
 * re-enable hard interrupts when soft-disabled after having
 * cleared the source of the interrupt
 */
static inline void may_hard_irq_enable(void)
{
	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
	if (!(get_paca()->irq_happened & PACA_IRQ_EE))
		__hard_irq_enable();
}

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !regs->softe;
}

extern bool prep_irq_for_idle(void);

#else /* CONFIG_PPC64 */

#define SET_MSR_EE(x)	mtmsr(x)

static inline unsigned long arch_local_save_flags(void)
{
	return mfmsr();
}

static inline void arch_local_irq_restore(unsigned long flags)
{
#if defined(CONFIG_BOOKE)
	asm volatile("wrtee %0" : : "r" (flags) : "memory");
#else
	mtmsr(flags);
#endif
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 0" : : : "memory");
#else
	SET_MSR_EE(flags & ~MSR_EE);
#endif
	return flags;
}

static inline void arch_local_irq_disable(void)
{
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 0" : : : "memory");
#else
	arch_local_irq_save();
#endif
}

static inline void arch_local_irq_enable(void)
{
#ifdef CONFIG_BOOKE
	asm volatile("wrteei 1" : : : "memory");
#else
	unsigned long msr = mfmsr();
	SET_MSR_EE(msr | MSR_EE);
#endif
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return (flags & MSR_EE) == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#define hard_irq_disable()		arch_local_irq_disable()

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !(regs->msr & MSR_EE);
}

static inline void may_hard_irq_enable(void) { }

#endif /* CONFIG_PPC64 */

#define ARCH_IRQ_INIT_FLAGS	IRQ_NOREQUEST

/*
 * interrupt-retrigger: should we handle this via lost interrupts and IPIs
 * or should we not care like we do now ? --BenH.
 */
struct irq_chip;

#endif  /* __ASSEMBLY__ */
#endif	/* __KERNEL__ */
#endif	/* _ASM_POWERPC_HW_IRQ_H */
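The v4.6 header implements "lazy" (soft) interrupt disabling on 64-bit: arch_local_irq_disable() only clears the soft_enabled byte in the PACA, and an interrupt that arrives in that window is recorded in paca->irq_happened, hard-disabled, and replayed later by arch_local_irq_restore(). The sketch below is illustrative only and is not part of the header: it shows the generic local_irq_save()/local_irq_restore() wrappers (which resolve to the arch_local_* primitives above) around a hypothetical critical section; the function and counter names are invented for the example.

/* Illustrative sketch only; not part of hw_irq.h. */
#include <linux/irqflags.h>

static unsigned long example_count;	/* hypothetical state shared with an IRQ handler */

static void example_critical_section(void)
{
	unsigned long flags;

	/* On PPC64 this only saves and clears paca->soft_enabled. */
	local_irq_save(flags);

	example_count++;	/* must not race with interrupt handlers */

	/*
	 * If an external interrupt fired meanwhile, the entry code recorded
	 * it in paca->irq_happened (e.g. PACA_IRQ_EE) and hard-disabled;
	 * arch_local_irq_restore() replays it here.
	 */
	local_irq_restore(flags);
}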
arch/powerpc/include/asm/hw_irq.h (v6.2)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_HW_IRQ_H
#define _ASM_POWERPC_HW_IRQ_H

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

#ifdef CONFIG_PPC64

/*
 * PACA flags in paca->irq_happened.
 *
 * These bits are set when interrupts occur while soft-disabled
 * and allow a proper replay.
 *
 * The PACA_IRQ_HARD_DIS is set whenever we hard disable. It is almost
 * always in synch with the MSR[EE] state, except:
 * - A window in interrupt entry, where hardware disables MSR[EE] and that
 *   must be "reconciled" with the soft mask state.
 * - NMI interrupts that hit in awkward places, until they fix the state.
 * - When local irqs are being enabled and state is being fixed up.
 * - When returning from an interrupt there are some windows where this
 *   can become out of synch, but gets fixed before the RFI or before
 *   executing the next user instruction (see arch/powerpc/kernel/interrupt.c).
 */
#define PACA_IRQ_HARD_DIS	0x01
#define PACA_IRQ_DBELL		0x02
#define PACA_IRQ_EE		0x04
#define PACA_IRQ_DEC		0x08 /* Or FIT */
#define PACA_IRQ_HMI		0x10
#define PACA_IRQ_PMI		0x20

/*
 * Some soft-masked interrupts must be hard masked until they are replayed
 * (e.g., because the soft-masked handler does not clear the exception).
 */
#ifdef CONFIG_PPC_BOOK3S
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE|PACA_IRQ_PMI)
#else
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE)
#endif

#endif /* CONFIG_PPC64 */

/*
 * flags for paca->irq_soft_mask
 */
#define IRQS_ENABLED		0
#define IRQS_DISABLED		1 /* local_irq_disable() interrupts */
#define IRQS_PMI_DISABLED	2
#define IRQS_ALL_DISABLED	(IRQS_DISABLED | IRQS_PMI_DISABLED)

#ifndef __ASSEMBLY__

static inline void __hard_irq_enable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		wrtee(MSR_EE);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EIE);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_EE | MSR_RI, 1);
	else
		mtmsr(mfmsr() | MSR_EE);
}

static inline void __hard_irq_disable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_RI, 1);
	else
		mtmsr(mfmsr() & ~MSR_EE);
}

static inline void __hard_EE_RI_disable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_NRI);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(0, 1);
	else
		mtmsr(mfmsr() & ~(MSR_EE | MSR_RI));
}

static inline void __hard_RI_enable(void)
{
	if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
		return;

	if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		__mtmsrd(MSR_RI, 1);
	else
		mtmsr(mfmsr() | MSR_RI);
}

#ifdef CONFIG_PPC64
#include <asm/paca.h>

static inline notrace unsigned long irq_soft_mask_return(void)
{
	unsigned long flags;

	asm volatile(
		"lbz %0,%1(13)"
		: "=r" (flags)
		: "i" (offsetof(struct paca_struct, irq_soft_mask)));

	return flags;
}

/*
 * The "memory" clobber acts as both a compiler barrier
 * for the critical section and as a clobber because
 * we changed paca->irq_soft_mask.
 */
static inline notrace void irq_soft_mask_set(unsigned long mask)
{
	/*
	 * The irq mask must always include the STD bit if any are set,
	 * and interrupts don't get replayed until the standard
	 * interrupt (local_irq_disable()) is unmasked.
	 *
	 * Other masks must only provide additional masking beyond
	 * the standard, and they are also not replayed until the
	 * standard interrupt becomes unmasked.
	 *
	 * This could be changed, but it will require partial
	 * unmasks to be replayed, among other things. For now, take
	 * the simple approach.
	 */
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON(mask && !(mask & IRQS_DISABLED));

	asm volatile(
		"stb %0,%1(13)"
		:
		: "r" (mask),
		  "i" (offsetof(struct paca_struct, irq_soft_mask))
		: "memory");
}

static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
{
	unsigned long flags = irq_soft_mask_return();

	irq_soft_mask_set(mask);

	return flags;
}

static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
{
	unsigned long flags = irq_soft_mask_return();

	irq_soft_mask_set(flags | mask);

	return flags;
}

static inline notrace unsigned long irq_soft_mask_andc_return(unsigned long mask)
{
	unsigned long flags = irq_soft_mask_return();

	irq_soft_mask_set(flags & ~mask);

	return flags;
}

static inline unsigned long arch_local_save_flags(void)
{
	return irq_soft_mask_return();
}

static inline void arch_local_irq_disable(void)
{
	irq_soft_mask_set(IRQS_DISABLED);
}

extern void arch_local_irq_restore(unsigned long);

static inline void arch_local_irq_enable(void)
{
	arch_local_irq_restore(IRQS_ENABLED);
}

static inline unsigned long arch_local_irq_save(void)
{
	return irq_soft_mask_or_return(IRQS_DISABLED);
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return flags & IRQS_DISABLED;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

static inline void set_pmi_irq_pending(void)
{
	/*
	 * Invoked from PMU callback functions to set PMI bit in the paca.
	 * This has to be called with irq's disabled (via hard_irq_disable()).
	 */
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(mfmsr() & MSR_EE);

	get_paca()->irq_happened |= PACA_IRQ_PMI;
}

static inline void clear_pmi_irq_pending(void)
{
	/*
	 * Invoked from PMU callback functions to clear the pending PMI bit
	 * in the paca.
	 */
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(mfmsr() & MSR_EE);

	get_paca()->irq_happened &= ~PACA_IRQ_PMI;
}

static inline bool pmi_irq_pending(void)
{
	/*
	 * Invoked from PMU callback functions to check if there is a pending
	 * PMI bit in the paca.
	 */
	if (get_paca()->irq_happened & PACA_IRQ_PMI)
		return true;

	return false;
}

#ifdef CONFIG_PPC_BOOK3S
/*
 * To support disabling and enabling irqs together with PMIs, a set of
 * new powerpc_local_irq_pmu_save() and powerpc_local_irq_pmu_restore()
 * macros is added. They are built on the generic local_irq_* code
 * from include/linux/irqflags.h.
 */
#define raw_local_irq_pmu_save(flags)					\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = irq_soft_mask_or_return(IRQS_DISABLED |	\
				IRQS_PMI_DISABLED);			\
	} while(0)

#define raw_local_irq_pmu_restore(flags)				\
	do {								\
		typecheck(unsigned long, flags);			\
		arch_local_irq_restore(flags);				\
	} while(0)

#ifdef CONFIG_TRACE_IRQFLAGS
#define powerpc_local_irq_pmu_save(flags)			\
	 do {							\
		raw_local_irq_pmu_save(flags);			\
		if (!raw_irqs_disabled_flags(flags))		\
			trace_hardirqs_off();			\
	} while(0)
#define powerpc_local_irq_pmu_restore(flags)			\
	do {							\
		if (!raw_irqs_disabled_flags(flags))		\
			trace_hardirqs_on();			\
		raw_local_irq_pmu_restore(flags);		\
	} while(0)
#else
#define powerpc_local_irq_pmu_save(flags)			\
	do {							\
		raw_local_irq_pmu_save(flags);			\
	} while(0)
#define powerpc_local_irq_pmu_restore(flags)			\
	do {							\
		raw_local_irq_pmu_restore(flags);		\
	} while (0)
#endif  /* CONFIG_TRACE_IRQFLAGS */

#endif /* CONFIG_PPC_BOOK3S */

#define hard_irq_disable()	do {					\
	unsigned long flags;						\
	__hard_irq_disable();						\
	flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED);		\
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;			\
	if (!arch_irqs_disabled_flags(flags)) {				\
		asm volatile("std%X0 %1,%0" : "=m" (local_paca->saved_r1) \
					    : "r" (current_stack_pointer)); \
		trace_hardirqs_off();					\
	}								\
} while(0)

static inline bool __lazy_irq_pending(u8 irq_happened)
{
	return !!(irq_happened & ~PACA_IRQ_HARD_DIS);
}

/*
 * Check if a lazy IRQ is pending. Should be called with IRQs hard disabled.
 */
static inline bool lazy_irq_pending(void)
{
	return __lazy_irq_pending(get_paca()->irq_happened);
}

/*
 * Check if a lazy IRQ is pending, with no debugging checks.
 * Should be called with IRQs hard disabled.
 * For use in RI disabled code or other constrained situations.
 */
static inline bool lazy_irq_pending_nocheck(void)
{
	return __lazy_irq_pending(local_paca->irq_happened);
}

bool power_pmu_wants_prompt_pmi(void);

/*
 * This is called by asynchronous interrupts to check whether to
 * conditionally re-enable hard interrupts after having cleared
 * the source of the interrupt. They are kept disabled if there
 * is a different soft-masked interrupt pending that requires hard
 * masking.
 */
static inline bool should_hard_irq_enable(struct pt_regs *regs)
{
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
		WARN_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
		WARN_ON(!(get_paca()->irq_happened & PACA_IRQ_HARD_DIS));
		WARN_ON(mfmsr() & MSR_EE);
	}

	if (!IS_ENABLED(CONFIG_PERF_EVENTS))
		return false;
	/*
	 * If the PMU is not running, there is not much reason to enable
	 * MSR[EE] in irq handlers because any interrupts would just be
	 * soft-masked.
	 *
	 * TODO: Add test for 64e
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
		if (!power_pmu_wants_prompt_pmi())
			return false;
		/*
		 * If PMIs are disabled then IRQs should be disabled as well,
		 * so we shouldn't see this condition, check for it just in
		 * case because we are about to enable PMIs.
		 */
		if (WARN_ON_ONCE(regs->softe & IRQS_PMI_DISABLED))
			return false;
	}

	if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
		return false;

	return true;
}

/*
 * Do the hard enabling, only call this if should_hard_irq_enable is true.
 * This allows PMI interrupts to profile irq handlers.
 */
static inline void do_hard_irq_enable(void)
{
	/*
	 * Asynch interrupts come in with IRQS_ALL_DISABLED,
	 * PACA_IRQ_HARD_DIS, and MSR[EE]=0.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		irq_soft_mask_andc_return(IRQS_PMI_DISABLED);
	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
	__hard_irq_enable();
}

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return (regs->softe & IRQS_DISABLED);
}

extern bool prep_irq_for_idle(void);
extern bool prep_irq_for_idle_irqsoff(void);
extern void irq_set_pending_from_srr1(unsigned long srr1);

#define fini_irq_for_idle_irqsoff() trace_hardirqs_off();

extern void force_external_irq_replay(void);

static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
	regs->softe = val;
}
#else /* CONFIG_PPC64 */

static inline notrace unsigned long irq_soft_mask_return(void)
{
	return 0;
}

static inline unsigned long arch_local_save_flags(void)
{
	return mfmsr();
}

static inline void arch_local_irq_restore(unsigned long flags)
{
	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(flags);
	else
		mtmsr(flags);
}

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags = arch_local_save_flags();

	if (IS_ENABLED(CONFIG_BOOKE))
		wrtee(0);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		wrtspr(SPRN_EID);
	else
		mtmsr(flags & ~MSR_EE);

	return flags;
}

static inline void arch_local_irq_disable(void)
{
	__hard_irq_disable();
}

static inline void arch_local_irq_enable(void)
{
	__hard_irq_enable();
}

static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
	return (flags & MSR_EE) == 0;
}

static inline bool arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#define hard_irq_disable()		arch_local_irq_disable()

static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
	return !(regs->msr & MSR_EE);
}

static __always_inline bool should_hard_irq_enable(struct pt_regs *regs)
{
	return false;
}

static inline void do_hard_irq_enable(void)
{
	BUILD_BUG();
}

static inline void clear_pmi_irq_pending(void) { }
static inline void set_pmi_irq_pending(void) { }
static inline bool pmi_irq_pending(void) { return false; }

static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
}
#endif /* CONFIG_PPC64 */

static inline unsigned long mtmsr_isync_irqsafe(unsigned long msr)
{
#ifdef CONFIG_PPC64
	if (arch_irqs_disabled()) {
		/*
		 * With soft-masking, MSR[EE] can change from 1 to 0
		 * asynchronously when irqs are disabled, and we don't want to
		 * set MSR[EE] back to 1 here if that has happened. A race-free
		 * way to do this is ensure EE is already 0. Another way it
		 * could be done is with a RESTART_TABLE handler, but that's
		 * probably overkill here.
		 */
		msr &= ~MSR_EE;
		mtmsr_isync(msr);
		irq_soft_mask_set(IRQS_ALL_DISABLED);
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
	} else
#endif
		mtmsr_isync(msr);

	return msr;
}

#define ARCH_IRQ_INIT_FLAGS	IRQ_NOREQUEST

#endif  /* __ASSEMBLY__ */
#endif	/* __KERNEL__ */
#endif	/* _ASM_POWERPC_HW_IRQ_H */
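By v6.2 the single soft_enabled byte has become a multi-level soft mask (paca->irq_soft_mask with IRQS_DISABLED and IRQS_PMI_DISABLED), which lets Book3S code mask performance monitor interrupts (PMIs) together with ordinary interrupts. Below is a hedged sketch of how the powerpc_local_irq_pmu_save()/powerpc_local_irq_pmu_restore() pair might be used; the function and the state it protects are invented for the example, and only the macros and flags come from the header above.

/* Illustrative sketch only; assumes CONFIG_PPC_BOOK3S. */
static void example_pmu_section(void)
{
	unsigned long flags;

	/* Sets IRQS_DISABLED | IRQS_PMI_DISABLED in paca->irq_soft_mask. */
	powerpc_local_irq_pmu_save(flags);

	/* ... update state that a PMI handler would also touch ... */

	/*
	 * Restores the previous mask via arch_local_irq_restore(), which
	 * replays anything recorded in paca->irq_happened in the meantime.
	 */
	powerpc_local_irq_pmu_restore(flags);
}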