Loading...
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Inline assembly cache operations.
7 *
8 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
9 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
10 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
11 */
12#ifndef _ASM_R4KCACHE_H
13#define _ASM_R4KCACHE_H
14
15#include <linux/stringify.h>
16
17#include <asm/asm.h>
18#include <asm/asm-eva.h>
19#include <asm/cacheops.h>
20#include <asm/compiler.h>
21#include <asm/cpu-features.h>
22#include <asm/cpu-type.h>
23#include <asm/mipsmtregs.h>
24#include <asm/mmzone.h>
25#include <asm/unroll.h>
26
27extern void (*r4k_blast_dcache)(void);
28extern void (*r4k_blast_icache)(void);
29
30/*
31 * This macro return a properly sign-extended address suitable as base address
32 * for indexed cache operations. Two issues here:
33 *
34 * - The MIPS32 and MIPS64 specs permit an implementation to directly derive
35 * the index bits from the virtual address. This breaks with tradition
36 * set by the R4000. To keep unpleasant surprises from happening we pick
37 * an address in KSEG0 / CKSEG0.
38 * - We need a properly sign extended address for 64-bit code. To get away
39 * without ifdefs we let the compiler do it by a type cast.
40 */
41#define INDEX_BASE CKSEG0
42
43#define _cache_op(insn, op, addr) \
44 __asm__ __volatile__( \
45 " .set push \n" \
46 " .set noreorder \n" \
47 " .set "MIPS_ISA_ARCH_LEVEL" \n" \
48 " " insn("%0", "%1") " \n" \
49 " .set pop \n" \
50 : \
51 : "i" (op), "R" (*(unsigned char *)(addr)))
52
53#define cache_op(op, addr) \
54 _cache_op(kernel_cache, op, addr)
55
56static inline void flush_icache_line_indexed(unsigned long addr)
57{
58 cache_op(Index_Invalidate_I, addr);
59}
60
61static inline void flush_dcache_line_indexed(unsigned long addr)
62{
63 cache_op(Index_Writeback_Inv_D, addr);
64}
65
66static inline void flush_scache_line_indexed(unsigned long addr)
67{
68 cache_op(Index_Writeback_Inv_SD, addr);
69}
70
71static inline void flush_icache_line(unsigned long addr)
72{
73 switch (boot_cpu_type()) {
74 case CPU_LOONGSON2EF:
75 cache_op(Hit_Invalidate_I_Loongson2, addr);
76 break;
77
78 default:
79 cache_op(Hit_Invalidate_I, addr);
80 break;
81 }
82}
83
84static inline void flush_dcache_line(unsigned long addr)
85{
86 cache_op(Hit_Writeback_Inv_D, addr);
87}
88
89static inline void invalidate_dcache_line(unsigned long addr)
90{
91 cache_op(Hit_Invalidate_D, addr);
92}
93
94static inline void invalidate_scache_line(unsigned long addr)
95{
96 cache_op(Hit_Invalidate_SD, addr);
97}
98
99static inline void flush_scache_line(unsigned long addr)
100{
101 cache_op(Hit_Writeback_Inv_SD, addr);
102}
103
104#ifdef CONFIG_EVA
105
106#define protected_cache_op(op, addr) \
107({ \
108 int __err = 0; \
109 __asm__ __volatile__( \
110 " .set push \n" \
111 " .set noreorder \n" \
112 " .set mips0 \n" \
113 " .set eva \n" \
114 "1: cachee %1, (%2) \n" \
115 "2: .insn \n" \
116 " .set pop \n" \
117 " .section .fixup,\"ax\" \n" \
118 "3: li %0, %3 \n" \
119 " j 2b \n" \
120 " .previous \n" \
121 " .section __ex_table,\"a\" \n" \
122 " "STR(PTR_WD)" 1b, 3b \n" \
123 " .previous" \
124 : "+r" (__err) \
125 : "i" (op), "r" (addr), "i" (-EFAULT)); \
126 __err; \
127})
128#else
129
130#define protected_cache_op(op, addr) \
131({ \
132 int __err = 0; \
133 __asm__ __volatile__( \
134 " .set push \n" \
135 " .set noreorder \n" \
136 " .set "MIPS_ISA_ARCH_LEVEL" \n" \
137 "1: cache %1, (%2) \n" \
138 "2: .insn \n" \
139 " .set pop \n" \
140 " .section .fixup,\"ax\" \n" \
141 "3: li %0, %3 \n" \
142 " j 2b \n" \
143 " .previous \n" \
144 " .section __ex_table,\"a\" \n" \
145 " "STR(PTR_WD)" 1b, 3b \n" \
146 " .previous" \
147 : "+r" (__err) \
148 : "i" (op), "r" (addr), "i" (-EFAULT)); \
149 __err; \
150})
151#endif
152
153/*
154 * The next two are for badland addresses like signal trampolines.
155 */
156static inline int protected_flush_icache_line(unsigned long addr)
157{
158 switch (boot_cpu_type()) {
159 case CPU_LOONGSON2EF:
160 return protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
161
162 default:
163 return protected_cache_op(Hit_Invalidate_I, addr);
164 }
165}
166
167/*
168 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
169 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
170 * caches. We're talking about one cacheline unnecessarily getting invalidated
171 * here so the penalty isn't overly hard.
172 */
173static inline int protected_writeback_dcache_line(unsigned long addr)
174{
175 return protected_cache_op(Hit_Writeback_Inv_D, addr);
176}
177
178static inline int protected_writeback_scache_line(unsigned long addr)
179{
180 return protected_cache_op(Hit_Writeback_Inv_SD, addr);
181}
182
183/*
184 * This one is RM7000-specific
185 */
186static inline void invalidate_tcache_page(unsigned long addr)
187{
188 cache_op(Page_Invalidate_T, addr);
189}
190
191#define cache_unroll(times, insn, op, addr, lsize) do { \
192 int i = 0; \
193 unroll(times, _cache_op, insn, op, (addr) + (i++ * (lsize))); \
194} while (0)
195
196/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
197#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra) \
198static inline void extra##blast_##pfx##cache##lsize(void) \
199{ \
200 unsigned long start = INDEX_BASE; \
201 unsigned long end = start + current_cpu_data.desc.waysize; \
202 unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
203 unsigned long ws_end = current_cpu_data.desc.ways << \
204 current_cpu_data.desc.waybit; \
205 unsigned long ws, addr; \
206 \
207 for (ws = 0; ws < ws_end; ws += ws_inc) \
208 for (addr = start; addr < end; addr += lsize * 32) \
209 cache_unroll(32, kernel_cache, indexop, \
210 addr | ws, lsize); \
211} \
212 \
213static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
214{ \
215 unsigned long start = page; \
216 unsigned long end = page + PAGE_SIZE; \
217 \
218 do { \
219 cache_unroll(32, kernel_cache, hitop, start, lsize); \
220 start += lsize * 32; \
221 } while (start < end); \
222} \
223 \
224static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
225{ \
226 unsigned long indexmask = current_cpu_data.desc.waysize - 1; \
227 unsigned long start = INDEX_BASE + (page & indexmask); \
228 unsigned long end = start + PAGE_SIZE; \
229 unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
230 unsigned long ws_end = current_cpu_data.desc.ways << \
231 current_cpu_data.desc.waybit; \
232 unsigned long ws, addr; \
233 \
234 for (ws = 0; ws < ws_end; ws += ws_inc) \
235 for (addr = start; addr < end; addr += lsize * 32) \
236 cache_unroll(32, kernel_cache, indexop, \
237 addr | ws, lsize); \
238}
239
240__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
241__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
242__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
243__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
244__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
245__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
246__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
247__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
248__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
249__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
250__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
251__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
252__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )
253
254__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
255__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
256__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
257__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
258__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
259__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
260
261#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
262static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
263{ \
264 unsigned long start = page; \
265 unsigned long end = page + PAGE_SIZE; \
266 \
267 do { \
268 cache_unroll(32, user_cache, hitop, start, lsize); \
269 start += lsize * 32; \
270 } while (start < end); \
271}
272
273__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
274 16)
275__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
276__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
277 32)
278__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
279__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
280 64)
281__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
282
283/* build blast_xxx_range, protected_blast_xxx_range */
284#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \
285static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
286 unsigned long end) \
287{ \
288 unsigned long lsize = cpu_##desc##_line_size(); \
289 unsigned long addr = start & ~(lsize - 1); \
290 unsigned long aend = (end - 1) & ~(lsize - 1); \
291 \
292 while (1) { \
293 prot##cache_op(hitop, addr); \
294 if (addr == aend) \
295 break; \
296 addr += lsize; \
297 } \
298}
299
300__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
301__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
302__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
303__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
304 protected_, loongson2_)
305__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
306__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
307__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
308/* blast_inv_dcache_range */
309__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
310__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
311
312/* Currently, this is very specific to Loongson-3 */
313#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize) \
314static inline void blast_##pfx##cache##lsize##_node(long node) \
315{ \
316 unsigned long start = CAC_BASE | nid_to_addrbase(node); \
317 unsigned long end = start + current_cpu_data.desc.waysize; \
318 unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
319 unsigned long ws_end = current_cpu_data.desc.ways << \
320 current_cpu_data.desc.waybit; \
321 unsigned long ws, addr; \
322 \
323 for (ws = 0; ws < ws_end; ws += ws_inc) \
324 for (addr = start; addr < end; addr += lsize * 32) \
325 cache_unroll(32, kernel_cache, indexop, \
326 addr | ws, lsize); \
327}
328
329__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
330__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
331__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
332__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
333
334#endif /* _ASM_R4KCACHE_H */
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Inline assembly cache operations.
7 *
8 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
9 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
10 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
11 */
12#ifndef _ASM_R4KCACHE_H
13#define _ASM_R4KCACHE_H
14
15#include <linux/stringify.h>
16
17#include <asm/asm.h>
18#include <asm/cacheops.h>
19#include <asm/compiler.h>
20#include <asm/cpu-features.h>
21#include <asm/cpu-type.h>
22#include <asm/mipsmtregs.h>
23#include <asm/uaccess.h> /* for segment_eq() */
24
25extern void (*r4k_blast_dcache)(void);
26extern void (*r4k_blast_icache)(void);
27
28/*
29 * This macro return a properly sign-extended address suitable as base address
30 * for indexed cache operations. Two issues here:
31 *
32 * - The MIPS32 and MIPS64 specs permit an implementation to directly derive
33 * the index bits from the virtual address. This breaks with tradition
34 * set by the R4000. To keep unpleasant surprises from happening we pick
35 * an address in KSEG0 / CKSEG0.
36 * - We need a properly sign extended address for 64-bit code. To get away
37 * without ifdefs we let the compiler do it by a type cast.
38 */
39#define INDEX_BASE CKSEG0
40
41#define cache_op(op,addr) \
42 __asm__ __volatile__( \
43 " .set push \n" \
44 " .set noreorder \n" \
45 " .set "MIPS_ISA_ARCH_LEVEL" \n" \
46 " cache %0, %1 \n" \
47 " .set pop \n" \
48 : \
49 : "i" (op), "R" (*(unsigned char *)(addr)))
50
51#ifdef CONFIG_MIPS_MT
52
53#define __iflush_prologue \
54 unsigned long redundance; \
55 extern int mt_n_iflushes; \
56 for (redundance = 0; redundance < mt_n_iflushes; redundance++) {
57
58#define __iflush_epilogue \
59 }
60
61#define __dflush_prologue \
62 unsigned long redundance; \
63 extern int mt_n_dflushes; \
64 for (redundance = 0; redundance < mt_n_dflushes; redundance++) {
65
66#define __dflush_epilogue \
67 }
68
69#define __inv_dflush_prologue __dflush_prologue
70#define __inv_dflush_epilogue __dflush_epilogue
71#define __sflush_prologue {
72#define __sflush_epilogue }
73#define __inv_sflush_prologue __sflush_prologue
74#define __inv_sflush_epilogue __sflush_epilogue
75
76#else /* CONFIG_MIPS_MT */
77
78#define __iflush_prologue {
79#define __iflush_epilogue }
80#define __dflush_prologue {
81#define __dflush_epilogue }
82#define __inv_dflush_prologue {
83#define __inv_dflush_epilogue }
84#define __sflush_prologue {
85#define __sflush_epilogue }
86#define __inv_sflush_prologue {
87#define __inv_sflush_epilogue }
88
89#endif /* CONFIG_MIPS_MT */
90
91static inline void flush_icache_line_indexed(unsigned long addr)
92{
93 __iflush_prologue
94 cache_op(Index_Invalidate_I, addr);
95 __iflush_epilogue
96}
97
98static inline void flush_dcache_line_indexed(unsigned long addr)
99{
100 __dflush_prologue
101 cache_op(Index_Writeback_Inv_D, addr);
102 __dflush_epilogue
103}
104
105static inline void flush_scache_line_indexed(unsigned long addr)
106{
107 cache_op(Index_Writeback_Inv_SD, addr);
108}
109
110static inline void flush_icache_line(unsigned long addr)
111{
112 __iflush_prologue
113 switch (boot_cpu_type()) {
114 case CPU_LOONGSON2:
115 cache_op(Hit_Invalidate_I_Loongson2, addr);
116 break;
117
118 default:
119 cache_op(Hit_Invalidate_I, addr);
120 break;
121 }
122 __iflush_epilogue
123}
124
125static inline void flush_dcache_line(unsigned long addr)
126{
127 __dflush_prologue
128 cache_op(Hit_Writeback_Inv_D, addr);
129 __dflush_epilogue
130}
131
132static inline void invalidate_dcache_line(unsigned long addr)
133{
134 __dflush_prologue
135 cache_op(Hit_Invalidate_D, addr);
136 __dflush_epilogue
137}
138
139static inline void invalidate_scache_line(unsigned long addr)
140{
141 cache_op(Hit_Invalidate_SD, addr);
142}
143
144static inline void flush_scache_line(unsigned long addr)
145{
146 cache_op(Hit_Writeback_Inv_SD, addr);
147}
148
149#define protected_cache_op(op,addr) \
150 __asm__ __volatile__( \
151 " .set push \n" \
152 " .set noreorder \n" \
153 " .set "MIPS_ISA_ARCH_LEVEL" \n" \
154 "1: cache %0, (%1) \n" \
155 "2: .set pop \n" \
156 " .section __ex_table,\"a\" \n" \
157 " "STR(PTR)" 1b, 2b \n" \
158 " .previous" \
159 : \
160 : "i" (op), "r" (addr))
161
162#define protected_cachee_op(op,addr) \
163 __asm__ __volatile__( \
164 " .set push \n" \
165 " .set noreorder \n" \
166 " .set mips0 \n" \
167 " .set eva \n" \
168 "1: cachee %0, (%1) \n" \
169 "2: .set pop \n" \
170 " .section __ex_table,\"a\" \n" \
171 " "STR(PTR)" 1b, 2b \n" \
172 " .previous" \
173 : \
174 : "i" (op), "r" (addr))
175
176/*
177 * The next two are for badland addresses like signal trampolines.
178 */
179static inline void protected_flush_icache_line(unsigned long addr)
180{
181 switch (boot_cpu_type()) {
182 case CPU_LOONGSON2:
183 protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
184 break;
185
186 default:
187#ifdef CONFIG_EVA
188 protected_cachee_op(Hit_Invalidate_I, addr);
189#else
190 protected_cache_op(Hit_Invalidate_I, addr);
191#endif
192 break;
193 }
194}
195
196/*
197 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
198 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
199 * caches. We're talking about one cacheline unnecessarily getting invalidated
200 * here so the penalty isn't overly hard.
201 */
202static inline void protected_writeback_dcache_line(unsigned long addr)
203{
204#ifdef CONFIG_EVA
205 protected_cachee_op(Hit_Writeback_Inv_D, addr);
206#else
207 protected_cache_op(Hit_Writeback_Inv_D, addr);
208#endif
209}
210
211static inline void protected_writeback_scache_line(unsigned long addr)
212{
213 protected_cache_op(Hit_Writeback_Inv_SD, addr);
214}
215
216/*
217 * This one is RM7000-specific
218 */
219static inline void invalidate_tcache_page(unsigned long addr)
220{
221 cache_op(Page_Invalidate_T, addr);
222}
223
224#ifndef CONFIG_CPU_MIPSR6
225#define cache16_unroll32(base,op) \
226 __asm__ __volatile__( \
227 " .set push \n" \
228 " .set noreorder \n" \
229 " .set mips3 \n" \
230 " cache %1, 0x000(%0); cache %1, 0x010(%0) \n" \
231 " cache %1, 0x020(%0); cache %1, 0x030(%0) \n" \
232 " cache %1, 0x040(%0); cache %1, 0x050(%0) \n" \
233 " cache %1, 0x060(%0); cache %1, 0x070(%0) \n" \
234 " cache %1, 0x080(%0); cache %1, 0x090(%0) \n" \
235 " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" \
236 " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" \
237 " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" \
238 " cache %1, 0x100(%0); cache %1, 0x110(%0) \n" \
239 " cache %1, 0x120(%0); cache %1, 0x130(%0) \n" \
240 " cache %1, 0x140(%0); cache %1, 0x150(%0) \n" \
241 " cache %1, 0x160(%0); cache %1, 0x170(%0) \n" \
242 " cache %1, 0x180(%0); cache %1, 0x190(%0) \n" \
243 " cache %1, 0x1a0(%0); cache %1, 0x1b0(%0) \n" \
244 " cache %1, 0x1c0(%0); cache %1, 0x1d0(%0) \n" \
245 " cache %1, 0x1e0(%0); cache %1, 0x1f0(%0) \n" \
246 " .set pop \n" \
247 : \
248 : "r" (base), \
249 "i" (op));
250
251#define cache32_unroll32(base,op) \
252 __asm__ __volatile__( \
253 " .set push \n" \
254 " .set noreorder \n" \
255 " .set mips3 \n" \
256 " cache %1, 0x000(%0); cache %1, 0x020(%0) \n" \
257 " cache %1, 0x040(%0); cache %1, 0x060(%0) \n" \
258 " cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" \
259 " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0) \n" \
260 " cache %1, 0x100(%0); cache %1, 0x120(%0) \n" \
261 " cache %1, 0x140(%0); cache %1, 0x160(%0) \n" \
262 " cache %1, 0x180(%0); cache %1, 0x1a0(%0) \n" \
263 " cache %1, 0x1c0(%0); cache %1, 0x1e0(%0) \n" \
264 " cache %1, 0x200(%0); cache %1, 0x220(%0) \n" \
265 " cache %1, 0x240(%0); cache %1, 0x260(%0) \n" \
266 " cache %1, 0x280(%0); cache %1, 0x2a0(%0) \n" \
267 " cache %1, 0x2c0(%0); cache %1, 0x2e0(%0) \n" \
268 " cache %1, 0x300(%0); cache %1, 0x320(%0) \n" \
269 " cache %1, 0x340(%0); cache %1, 0x360(%0) \n" \
270 " cache %1, 0x380(%0); cache %1, 0x3a0(%0) \n" \
271 " cache %1, 0x3c0(%0); cache %1, 0x3e0(%0) \n" \
272 " .set pop \n" \
273 : \
274 : "r" (base), \
275 "i" (op));
276
277#define cache64_unroll32(base,op) \
278 __asm__ __volatile__( \
279 " .set push \n" \
280 " .set noreorder \n" \
281 " .set mips3 \n" \
282 " cache %1, 0x000(%0); cache %1, 0x040(%0) \n" \
283 " cache %1, 0x080(%0); cache %1, 0x0c0(%0) \n" \
284 " cache %1, 0x100(%0); cache %1, 0x140(%0) \n" \
285 " cache %1, 0x180(%0); cache %1, 0x1c0(%0) \n" \
286 " cache %1, 0x200(%0); cache %1, 0x240(%0) \n" \
287 " cache %1, 0x280(%0); cache %1, 0x2c0(%0) \n" \
288 " cache %1, 0x300(%0); cache %1, 0x340(%0) \n" \
289 " cache %1, 0x380(%0); cache %1, 0x3c0(%0) \n" \
290 " cache %1, 0x400(%0); cache %1, 0x440(%0) \n" \
291 " cache %1, 0x480(%0); cache %1, 0x4c0(%0) \n" \
292 " cache %1, 0x500(%0); cache %1, 0x540(%0) \n" \
293 " cache %1, 0x580(%0); cache %1, 0x5c0(%0) \n" \
294 " cache %1, 0x600(%0); cache %1, 0x640(%0) \n" \
295 " cache %1, 0x680(%0); cache %1, 0x6c0(%0) \n" \
296 " cache %1, 0x700(%0); cache %1, 0x740(%0) \n" \
297 " cache %1, 0x780(%0); cache %1, 0x7c0(%0) \n" \
298 " .set pop \n" \
299 : \
300 : "r" (base), \
301 "i" (op));
302
303#define cache128_unroll32(base,op) \
304 __asm__ __volatile__( \
305 " .set push \n" \
306 " .set noreorder \n" \
307 " .set mips3 \n" \
308 " cache %1, 0x000(%0); cache %1, 0x080(%0) \n" \
309 " cache %1, 0x100(%0); cache %1, 0x180(%0) \n" \
310 " cache %1, 0x200(%0); cache %1, 0x280(%0) \n" \
311 " cache %1, 0x300(%0); cache %1, 0x380(%0) \n" \
312 " cache %1, 0x400(%0); cache %1, 0x480(%0) \n" \
313 " cache %1, 0x500(%0); cache %1, 0x580(%0) \n" \
314 " cache %1, 0x600(%0); cache %1, 0x680(%0) \n" \
315 " cache %1, 0x700(%0); cache %1, 0x780(%0) \n" \
316 " cache %1, 0x800(%0); cache %1, 0x880(%0) \n" \
317 " cache %1, 0x900(%0); cache %1, 0x980(%0) \n" \
318 " cache %1, 0xa00(%0); cache %1, 0xa80(%0) \n" \
319 " cache %1, 0xb00(%0); cache %1, 0xb80(%0) \n" \
320 " cache %1, 0xc00(%0); cache %1, 0xc80(%0) \n" \
321 " cache %1, 0xd00(%0); cache %1, 0xd80(%0) \n" \
322 " cache %1, 0xe00(%0); cache %1, 0xe80(%0) \n" \
323 " cache %1, 0xf00(%0); cache %1, 0xf80(%0) \n" \
324 " .set pop \n" \
325 : \
326 : "r" (base), \
327 "i" (op));
328
329#else
330/*
331 * MIPS R6 changed the cache opcode and moved to a 8-bit offset field.
332 * This means we now need to increment the base register before we flush
333 * more cache lines
334 */
335#define cache16_unroll32(base,op) \
336 __asm__ __volatile__( \
337 " .set push\n" \
338 " .set noreorder\n" \
339 " .set mips64r6\n" \
340 " .set noat\n" \
341 " cache %1, 0x000(%0); cache %1, 0x010(%0)\n" \
342 " cache %1, 0x020(%0); cache %1, 0x030(%0)\n" \
343 " cache %1, 0x040(%0); cache %1, 0x050(%0)\n" \
344 " cache %1, 0x060(%0); cache %1, 0x070(%0)\n" \
345 " cache %1, 0x080(%0); cache %1, 0x090(%0)\n" \
346 " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n" \
347 " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n" \
348 " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n" \
349 " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
350 " cache %1, 0x000($1); cache %1, 0x010($1)\n" \
351 " cache %1, 0x020($1); cache %1, 0x030($1)\n" \
352 " cache %1, 0x040($1); cache %1, 0x050($1)\n" \
353 " cache %1, 0x060($1); cache %1, 0x070($1)\n" \
354 " cache %1, 0x080($1); cache %1, 0x090($1)\n" \
355 " cache %1, 0x0a0($1); cache %1, 0x0b0($1)\n" \
356 " cache %1, 0x0c0($1); cache %1, 0x0d0($1)\n" \
357 " cache %1, 0x0e0($1); cache %1, 0x0f0($1)\n" \
358 " .set pop\n" \
359 : \
360 : "r" (base), \
361 "i" (op));
362
363#define cache32_unroll32(base,op) \
364 __asm__ __volatile__( \
365 " .set push\n" \
366 " .set noreorder\n" \
367 " .set mips64r6\n" \
368 " .set noat\n" \
369 " cache %1, 0x000(%0); cache %1, 0x020(%0)\n" \
370 " cache %1, 0x040(%0); cache %1, 0x060(%0)\n" \
371 " cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n" \
372 " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n" \
373 " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
374 " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
375 " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
376 " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
377 " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
378 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
379 " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
380 " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
381 " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
382 " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
383 " "__stringify(LONG_ADDIU)" $1, $1, 0x100\n" \
384 " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
385 " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
386 " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
387 " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
388 " .set pop\n" \
389 : \
390 : "r" (base), \
391 "i" (op));
392
393#define cache64_unroll32(base,op) \
394 __asm__ __volatile__( \
395 " .set push\n" \
396 " .set noreorder\n" \
397 " .set mips64r6\n" \
398 " .set noat\n" \
399 " cache %1, 0x000(%0); cache %1, 0x040(%0)\n" \
400 " cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n" \
401 " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
402 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
403 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
404 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
405 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
406 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
407 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
408 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
409 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
410 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
411 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
412 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
413 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
414 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
415 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
416 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
417 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
418 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
419 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
420 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
421 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
422 " .set pop\n" \
423 : \
424 : "r" (base), \
425 "i" (op));
426
427#define cache128_unroll32(base,op) \
428 __asm__ __volatile__( \
429 " .set push\n" \
430 " .set noreorder\n" \
431 " .set mips64r6\n" \
432 " .set noat\n" \
433 " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
434 " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
435 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
436 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
437 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
438 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
439 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
440 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
441 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
442 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
443 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
444 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
445 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
446 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
447 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
448 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
449 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
450 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
451 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
452 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
453 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
454 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
455 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
456 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
457 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
458 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
459 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
460 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
461 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
462 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
463 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
464 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
465 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
466 " .set pop\n" \
467 : \
468 : "r" (base), \
469 "i" (op));
470#endif /* CONFIG_CPU_MIPSR6 */
471
472/*
473 * Perform the cache operation specified by op using a user mode virtual
474 * address while in kernel mode.
475 */
476#define cache16_unroll32_user(base,op) \
477 __asm__ __volatile__( \
478 " .set push \n" \
479 " .set noreorder \n" \
480 " .set mips0 \n" \
481 " .set eva \n" \
482 " cachee %1, 0x000(%0); cachee %1, 0x010(%0) \n" \
483 " cachee %1, 0x020(%0); cachee %1, 0x030(%0) \n" \
484 " cachee %1, 0x040(%0); cachee %1, 0x050(%0) \n" \
485 " cachee %1, 0x060(%0); cachee %1, 0x070(%0) \n" \
486 " cachee %1, 0x080(%0); cachee %1, 0x090(%0) \n" \
487 " cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0) \n" \
488 " cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0) \n" \
489 " cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0) \n" \
490 " cachee %1, 0x100(%0); cachee %1, 0x110(%0) \n" \
491 " cachee %1, 0x120(%0); cachee %1, 0x130(%0) \n" \
492 " cachee %1, 0x140(%0); cachee %1, 0x150(%0) \n" \
493 " cachee %1, 0x160(%0); cachee %1, 0x170(%0) \n" \
494 " cachee %1, 0x180(%0); cachee %1, 0x190(%0) \n" \
495 " cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0) \n" \
496 " cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0) \n" \
497 " cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0) \n" \
498 " .set pop \n" \
499 : \
500 : "r" (base), \
501 "i" (op));
502
503#define cache32_unroll32_user(base, op) \
504 __asm__ __volatile__( \
505 " .set push \n" \
506 " .set noreorder \n" \
507 " .set mips0 \n" \
508 " .set eva \n" \
509 " cachee %1, 0x000(%0); cachee %1, 0x020(%0) \n" \
510 " cachee %1, 0x040(%0); cachee %1, 0x060(%0) \n" \
511 " cachee %1, 0x080(%0); cachee %1, 0x0a0(%0) \n" \
512 " cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0) \n" \
513 " cachee %1, 0x100(%0); cachee %1, 0x120(%0) \n" \
514 " cachee %1, 0x140(%0); cachee %1, 0x160(%0) \n" \
515 " cachee %1, 0x180(%0); cachee %1, 0x1a0(%0) \n" \
516 " cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0) \n" \
517 " cachee %1, 0x200(%0); cachee %1, 0x220(%0) \n" \
518 " cachee %1, 0x240(%0); cachee %1, 0x260(%0) \n" \
519 " cachee %1, 0x280(%0); cachee %1, 0x2a0(%0) \n" \
520 " cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0) \n" \
521 " cachee %1, 0x300(%0); cachee %1, 0x320(%0) \n" \
522 " cachee %1, 0x340(%0); cachee %1, 0x360(%0) \n" \
523 " cachee %1, 0x380(%0); cachee %1, 0x3a0(%0) \n" \
524 " cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0) \n" \
525 " .set pop \n" \
526 : \
527 : "r" (base), \
528 "i" (op));
529
530#define cache64_unroll32_user(base, op) \
531 __asm__ __volatile__( \
532 " .set push \n" \
533 " .set noreorder \n" \
534 " .set mips0 \n" \
535 " .set eva \n" \
536 " cachee %1, 0x000(%0); cachee %1, 0x040(%0) \n" \
537 " cachee %1, 0x080(%0); cachee %1, 0x0c0(%0) \n" \
538 " cachee %1, 0x100(%0); cachee %1, 0x140(%0) \n" \
539 " cachee %1, 0x180(%0); cachee %1, 0x1c0(%0) \n" \
540 " cachee %1, 0x200(%0); cachee %1, 0x240(%0) \n" \
541 " cachee %1, 0x280(%0); cachee %1, 0x2c0(%0) \n" \
542 " cachee %1, 0x300(%0); cachee %1, 0x340(%0) \n" \
543 " cachee %1, 0x380(%0); cachee %1, 0x3c0(%0) \n" \
544 " cachee %1, 0x400(%0); cachee %1, 0x440(%0) \n" \
545 " cachee %1, 0x480(%0); cachee %1, 0x4c0(%0) \n" \
546 " cachee %1, 0x500(%0); cachee %1, 0x540(%0) \n" \
547 " cachee %1, 0x580(%0); cachee %1, 0x5c0(%0) \n" \
548 " cachee %1, 0x600(%0); cachee %1, 0x640(%0) \n" \
549 " cachee %1, 0x680(%0); cachee %1, 0x6c0(%0) \n" \
550 " cachee %1, 0x700(%0); cachee %1, 0x740(%0) \n" \
551 " cachee %1, 0x780(%0); cachee %1, 0x7c0(%0) \n" \
552 " .set pop \n" \
553 : \
554 : "r" (base), \
555 "i" (op));
556
557/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
558#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra) \
559static inline void extra##blast_##pfx##cache##lsize(void) \
560{ \
561 unsigned long start = INDEX_BASE; \
562 unsigned long end = start + current_cpu_data.desc.waysize; \
563 unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
564 unsigned long ws_end = current_cpu_data.desc.ways << \
565 current_cpu_data.desc.waybit; \
566 unsigned long ws, addr; \
567 \
568 __##pfx##flush_prologue \
569 \
570 for (ws = 0; ws < ws_end; ws += ws_inc) \
571 for (addr = start; addr < end; addr += lsize * 32) \
572 cache##lsize##_unroll32(addr|ws, indexop); \
573 \
574 __##pfx##flush_epilogue \
575} \
576 \
577static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
578{ \
579 unsigned long start = page; \
580 unsigned long end = page + PAGE_SIZE; \
581 \
582 __##pfx##flush_prologue \
583 \
584 do { \
585 cache##lsize##_unroll32(start, hitop); \
586 start += lsize * 32; \
587 } while (start < end); \
588 \
589 __##pfx##flush_epilogue \
590} \
591 \
592static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
593{ \
594 unsigned long indexmask = current_cpu_data.desc.waysize - 1; \
595 unsigned long start = INDEX_BASE + (page & indexmask); \
596 unsigned long end = start + PAGE_SIZE; \
597 unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
598 unsigned long ws_end = current_cpu_data.desc.ways << \
599 current_cpu_data.desc.waybit; \
600 unsigned long ws, addr; \
601 \
602 __##pfx##flush_prologue \
603 \
604 for (ws = 0; ws < ws_end; ws += ws_inc) \
605 for (addr = start; addr < end; addr += lsize * 32) \
606 cache##lsize##_unroll32(addr|ws, indexop); \
607 \
608 __##pfx##flush_epilogue \
609}
610
611__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
612__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
613__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
614__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
615__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
616__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
617__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
618__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
619__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
620__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
621__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
622__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
623__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )
624
625__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
626__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
627__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
628__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
629__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
630__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
631
632#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
633static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
634{ \
635 unsigned long start = page; \
636 unsigned long end = page + PAGE_SIZE; \
637 \
638 __##pfx##flush_prologue \
639 \
640 do { \
641 cache##lsize##_unroll32_user(start, hitop); \
642 start += lsize * 32; \
643 } while (start < end); \
644 \
645 __##pfx##flush_epilogue \
646}
647
648__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
649 16)
650__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
651__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
652 32)
653__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
654__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
655 64)
656__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
657
658/* build blast_xxx_range, protected_blast_xxx_range */
659#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \
660static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
661 unsigned long end) \
662{ \
663 unsigned long lsize = cpu_##desc##_line_size(); \
664 unsigned long addr = start & ~(lsize - 1); \
665 unsigned long aend = (end - 1) & ~(lsize - 1); \
666 \
667 __##pfx##flush_prologue \
668 \
669 while (1) { \
670 prot##cache_op(hitop, addr); \
671 if (addr == aend) \
672 break; \
673 addr += lsize; \
674 } \
675 \
676 __##pfx##flush_epilogue \
677}
678
679#ifndef CONFIG_EVA
680
681__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
682__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
683
684#else
685
686#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop) \
687static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
688 unsigned long end) \
689{ \
690 unsigned long lsize = cpu_##desc##_line_size(); \
691 unsigned long addr = start & ~(lsize - 1); \
692 unsigned long aend = (end - 1) & ~(lsize - 1); \
693 \
694 __##pfx##flush_prologue \
695 \
696 if (segment_eq(get_fs(), USER_DS)) { \
697 while (1) { \
698 protected_cachee_op(hitop, addr); \
699 if (addr == aend) \
700 break; \
701 addr += lsize; \
702 } \
703 } else { \
704 while (1) { \
705 protected_cache_op(hitop, addr); \
706 if (addr == aend) \
707 break; \
708 addr += lsize; \
709 } \
710 \
711 } \
712 __##pfx##flush_epilogue \
713}
714
715__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
716__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)
717
718#endif
719__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
720__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
721 protected_, loongson2_)
722__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
723__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
724__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
725/* blast_inv_dcache_range */
726__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
727__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
728
729#endif /* _ASM_R4KCACHE_H */