/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/stringify.h>
#include <asm/compiler.h>
#include <asm/hazards.h>

#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)

static inline void arch_local_irq_disable(void)
{
	__asm__ __volatile__(
	"	.set	push						\n"
	"	.set	noat						\n"
	"	di							\n"
	"	" __stringify(__irq_disable_hazard) "			\n"
	"	.set	pop						\n"
	: /* no outputs */
	: /* no inputs */
	: "memory");
}
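/*
 * Descriptive note: 'di' atomically clears the IE bit of CP0 Status,
 * and the __irq_disable_hazard barrier is needed because a CP0 Status
 * write is not guaranteed to take effect for the immediately following
 * instructions (a CP0 execution hazard). A minimal usage sketch,
 * illustrative only:
 *
 *	arch_local_irq_disable();
 *	... code that must not be interrupted on this CPU ...
 *	arch_local_irq_enable();
 */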

static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags;

	__asm__ __volatile__(
	"	.set	push						\n"
	"	.set	reorder						\n"
	"	.set	noat						\n"
#if defined(CONFIG_CPU_LOONGSON3) || defined(CONFIG_CPU_LOONGSON1)
	"	mfc0	%[flags], $12					\n"
	"	di							\n"
#else
	"	di	%[flags]					\n"
#endif
	"	andi	%[flags], 1					\n"
	"	" __stringify(__irq_disable_hazard) "			\n"
	"	.set	pop						\n"
	: [flags] "=r" (flags)
	: /* no inputs */
	: "memory");

	return flags;
}
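/*
 * A minimal usage sketch for the save/restore pair above (illustrative
 * only); unlike plain disable/enable, it nests safely:
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();
 *	... critical section, correct even if IRQs were already off ...
 *	arch_local_irq_restore(flags);
 *
 * Only bit 0 (Status.IE) is kept in 'flags'. The Loongson path reads
 * Status with mfc0 before disabling because, on those cores, the
 * 'di %[flags]' form apparently cannot be relied on to return the old
 * Status value (an assumption inferred from the #ifdef above).
 */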

static inline void arch_local_irq_restore(unsigned long flags)
{
	unsigned long __tmp1;

	__asm__ __volatile__(
	"	.set	push						\n"
	"	.set	noreorder					\n"
	"	.set	noat						\n"
#if defined(CONFIG_IRQ_MIPS_CPU)
	/*
	 * Slow, but doesn't suffer from a relatively unlikely race
	 * condition we've had since day 1.
	 */
	"	beqz	%[flags], 1f					\n"
	"	di							\n"
	"	ei							\n"
	"1:								\n"
#else
	/*
	 * Fast, dangerous. Life is fun, life is good.
	 */
	"	mfc0	$1, $12						\n"
	"	ins	$1, %[flags], 0, 1				\n"
	"	mtc0	$1, $12						\n"
#endif
	"	" __stringify(__irq_disable_hazard) "			\n"
	"	.set	pop						\n"
	: [flags] "=r" (__tmp1)
	: "0" (flags)
	: "memory");
}
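/*
 * Descriptive note on the two paths above: the slow path always
 * executes 'di' (it sits in the branch delay slot under .set
 * noreorder) and only re-enables with 'ei' when bit 0 of 'flags' says
 * interrupts were on. The fast path uses the MIPSR2 'ins' (insert bit
 * field) instruction to merge bit 0 of 'flags' into the IE bit of CP0
 * Status in one read-modify-write, roughly:
 *
 *	status = (status & ~1UL) | (flags & 1);
 */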

#else
/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
void arch_local_irq_disable(void);
unsigned long arch_local_irq_save(void);
void arch_local_irq_restore(unsigned long flags);
#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */

static inline void arch_local_irq_enable(void)
{
	__asm__ __volatile__(
	"	.set	push						\n"
	"	.set	reorder						\n"
	"	.set	noat						\n"
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	"	ei							\n"
#else
	"	mfc0	$1, $12						\n"
	"	ori	$1, 0x1f					\n"
	"	xori	$1, 0x1e					\n"
	"	mtc0	$1, $12						\n"
#endif
	"	" __stringify(__irq_enable_hazard) "			\n"
	"	.set	pop						\n"
	: /* no outputs */
	: /* no inputs */
	: "memory");
}
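/*
 * Descriptive note on the pre-R2 fallback above: in CP0 Status, IE is
 * bit 0, EXL bit 1, ERL bit 2 and KSU bits 3-4. 'ori 0x1f' sets bits
 * 0-4, then 'xori 0x1e' flips bits 1-4 back to zero, so the pair sets
 * IE while clearing EXL/ERL/KSU. Worked C equivalent:
 *
 *	status = (status | 0x1f) ^ 0x1e;
 */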

static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;

	__asm__ __volatile__(
	"	.set	push						\n"
	"	.set	reorder						\n"
	"	mfc0	%[flags], $12					\n"
	"	.set	pop						\n"
	: [flags] "=r" (flags));

	return flags;
}

static inline int arch_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & 1);
}
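/*
 * Usage sketch (illustrative): 'flags' is a CP0 Status image and bit 0
 * is the global IE bit, so interrupts are disabled exactly when that
 * bit is clear:
 *
 *	if (arch_irqs_disabled_flags(arch_local_save_flags()))
 *		... interrupts are currently masked on this CPU ...
 */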

#endif /* #ifndef __ASSEMBLY__ */

/*
 * Do the CPU's IRQ-state tracing from assembly code.
 */
#ifdef CONFIG_TRACE_IRQFLAGS
/* Reload some registers clobbered by trace_hardirqs_on */
#ifdef CONFIG_64BIT
# define TRACE_IRQS_RELOAD_REGS					\
	LONG_L	$11, PT_R11(sp);				\
	LONG_L	$10, PT_R10(sp);				\
	LONG_L	$9, PT_R9(sp);					\
	LONG_L	$8, PT_R8(sp);					\
	LONG_L	$7, PT_R7(sp);					\
	LONG_L	$6, PT_R6(sp);					\
	LONG_L	$5, PT_R5(sp);					\
	LONG_L	$4, PT_R4(sp);					\
	LONG_L	$2, PT_R2(sp)
#else
# define TRACE_IRQS_RELOAD_REGS					\
	LONG_L	$7, PT_R7(sp);					\
	LONG_L	$6, PT_R6(sp);					\
	LONG_L	$5, PT_R5(sp);					\
	LONG_L	$4, PT_R4(sp);					\
	LONG_L	$2, PT_R2(sp)
#endif
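/*
 * Descriptive note: the loads above restore the C-ABI argument and
 * return registers that a 'jal trace_hardirqs_on' may clobber: $2 (v0)
 * and $4-$7 (a0-a3) under o32, plus $8-$11 (a4-a7) under the 64-bit
 * ABIs; they are reloaded from the pt_regs frame on the kernel stack.
 */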
# define TRACE_IRQS_ON						\
	CLI;	/* make sure trace_hardirqs_on() is called in kernel level */ \
	jal	trace_hardirqs_on
# define TRACE_IRQS_ON_RELOAD					\
	TRACE_IRQS_ON;						\
	TRACE_IRQS_RELOAD_REGS
# define TRACE_IRQS_OFF					\
	jal	trace_hardirqs_off
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_ON_RELOAD
# define TRACE_IRQS_OFF
#endif

#endif /* _ASM_IRQFLAGS_H */