1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Inline assembly cache operations.
7 *
8 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
9 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
10 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
11 */
12#ifndef _ASM_R4KCACHE_H
13#define _ASM_R4KCACHE_H
14
15#include <linux/stringify.h>
16
17#include <asm/asm.h>
18#include <asm/cacheops.h>
19#include <asm/compiler.h>
20#include <asm/cpu-features.h>
21#include <asm/cpu-type.h>
22#include <asm/mipsmtregs.h>
23#include <asm/mmzone.h>
24#include <linux/uaccess.h> /* for uaccess_kernel() */
25
26extern void (*r4k_blast_dcache)(void);
27extern void (*r4k_blast_icache)(void);
28
29/*
30 * This macro returns a properly sign-extended address suitable as a base address
31 * for indexed cache operations. Two issues here:
32 *
33 * - The MIPS32 and MIPS64 specs permit an implementation to directly derive
34 * the index bits from the virtual address. This breaks with tradition
35 * set by the R4000. To keep unpleasant surprises from happening we pick
36 * an address in KSEG0 / CKSEG0.
37 * - We need a properly sign extended address for 64-bit code. To get away
38 * without ifdefs we let the compiler do it by a type cast.
39 */
40#define INDEX_BASE CKSEG0
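/*
 * Note: on 64-bit kernels CKSEG0 is the sign-extended constant
 * 0xffffffff80000000, so addresses formed as INDEX_BASE + offset are
 * already canonical and need no further sign-extension fixup in the
 * indexed operations below.
 */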
41
42#define cache_op(op,addr) \
43 __asm__ __volatile__( \
44 " .set push \n" \
45 " .set noreorder \n" \
46 " .set "MIPS_ISA_ARCH_LEVEL" \n" \
47 " cache %0, %1 \n" \
48 " .set pop \n" \
49 : \
50 : "i" (op), "R" (*(unsigned char *)(addr)))
51
52static inline void flush_icache_line_indexed(unsigned long addr)
53{
54 cache_op(Index_Invalidate_I, addr);
55}
56
57static inline void flush_dcache_line_indexed(unsigned long addr)
58{
59 cache_op(Index_Writeback_Inv_D, addr);
60}
61
62static inline void flush_scache_line_indexed(unsigned long addr)
63{
64 cache_op(Index_Writeback_Inv_SD, addr);
65}
66
67static inline void flush_icache_line(unsigned long addr)
68{
69 switch (boot_cpu_type()) {
70 case CPU_LOONGSON2:
71 cache_op(Hit_Invalidate_I_Loongson2, addr);
72 break;
73
74 default:
75 cache_op(Hit_Invalidate_I, addr);
76 break;
77 }
78}
79
80static inline void flush_dcache_line(unsigned long addr)
81{
82 cache_op(Hit_Writeback_Inv_D, addr);
83}
84
85static inline void invalidate_dcache_line(unsigned long addr)
86{
87 cache_op(Hit_Invalidate_D, addr);
88}
89
90static inline void invalidate_scache_line(unsigned long addr)
91{
92 cache_op(Hit_Invalidate_SD, addr);
93}
94
95static inline void flush_scache_line(unsigned long addr)
96{
97 cache_op(Hit_Writeback_Inv_SD, addr);
98}
99
100#define protected_cache_op(op,addr) \
101({ \
102 int __err = 0; \
103 __asm__ __volatile__( \
104 " .set push \n" \
105 " .set noreorder \n" \
106 " .set "MIPS_ISA_ARCH_LEVEL" \n" \
107 "1: cache %1, (%2) \n" \
108 "2: .insn \n" \
109 " .set pop \n" \
110 " .section .fixup,\"ax\" \n" \
111 "3: li %0, %3 \n" \
112 " j 2b \n" \
113 " .previous \n" \
114 " .section __ex_table,\"a\" \n" \
115 " "STR(PTR)" 1b, 3b \n" \
116 " .previous" \
117 : "+r" (__err) \
118 : "i" (op), "r" (addr), "i" (-EFAULT)); \
119 __err; \
120})
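/*
 * The __ex_table entry pairs the cache instruction at label 1 with the
 * fixup at label 3: if the access faults, the exception handler loads
 * -EFAULT into __err and resumes at label 2, so callers get an error
 * code back instead of an oops when the target address is unmapped.
 */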
121
122
123#define protected_cachee_op(op,addr) \
124({ \
125 int __err = 0; \
126 __asm__ __volatile__( \
127 " .set push \n" \
128 " .set noreorder \n" \
129 " .set mips0 \n" \
130 " .set eva \n" \
131 "1: cachee %1, (%2) \n" \
132 "2: .insn \n" \
133 " .set pop \n" \
134 " .section .fixup,\"ax\" \n" \
135 "3: li %0, %3 \n" \
136 " j 2b \n" \
137 " .previous \n" \
138 " .section __ex_table,\"a\" \n" \
139 " "STR(PTR)" 1b, 3b \n" \
140 " .previous" \
141 : "+r" (__err) \
142 : "i" (op), "r" (addr), "i" (-EFAULT)); \
143 __err; \
144})
145
146/*
147 * The next two are for badland addresses like signal trampolines.
148 */
149static inline int protected_flush_icache_line(unsigned long addr)
150{
151 switch (boot_cpu_type()) {
152 case CPU_LOONGSON2:
153 return protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
154
155 default:
156#ifdef CONFIG_EVA
157 return protected_cachee_op(Hit_Invalidate_I, addr);
158#else
159 return protected_cache_op(Hit_Invalidate_I, addr);
160#endif
161 }
162}
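/*
 * Minimal usage sketch (hypothetical caller, names illustrative only):
 * flushing the icache line of a user signal trampoline and propagating
 * the fault rather than crashing:
 *
 *	if (protected_flush_icache_line(tramp_addr))
 *		return -EFAULT;
 */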
163
164/*
165 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
166 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
167 * caches. We're talking about one cacheline unnecessarily getting invalidated
168 * here, so the penalty isn't overly harsh.
169 */
170static inline int protected_writeback_dcache_line(unsigned long addr)
171{
172#ifdef CONFIG_EVA
173 return protected_cachee_op(Hit_Writeback_Inv_D, addr);
174#else
175 return protected_cache_op(Hit_Writeback_Inv_D, addr);
176#endif
177}
178
179static inline int protected_writeback_scache_line(unsigned long addr)
180{
181#ifdef CONFIG_EVA
182 return protected_cachee_op(Hit_Writeback_Inv_SD, addr);
183#else
184 return protected_cache_op(Hit_Writeback_Inv_SD, addr);
185#endif
186}
187
188/*
189 * This one is RM7000-specific
190 */
191static inline void invalidate_tcache_page(unsigned long addr)
192{
193 cache_op(Page_Invalidate_T, addr);
194}
195
196#ifndef CONFIG_CPU_MIPSR6
197#define cache16_unroll32(base,op) \
198 __asm__ __volatile__( \
199 " .set push \n" \
200 " .set noreorder \n" \
201 " .set mips3 \n" \
202 " cache %1, 0x000(%0); cache %1, 0x010(%0) \n" \
203 " cache %1, 0x020(%0); cache %1, 0x030(%0) \n" \
204 " cache %1, 0x040(%0); cache %1, 0x050(%0) \n" \
205 " cache %1, 0x060(%0); cache %1, 0x070(%0) \n" \
206 " cache %1, 0x080(%0); cache %1, 0x090(%0) \n" \
207 " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" \
208 " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" \
209 " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" \
210 " cache %1, 0x100(%0); cache %1, 0x110(%0) \n" \
211 " cache %1, 0x120(%0); cache %1, 0x130(%0) \n" \
212 " cache %1, 0x140(%0); cache %1, 0x150(%0) \n" \
213 " cache %1, 0x160(%0); cache %1, 0x170(%0) \n" \
214 " cache %1, 0x180(%0); cache %1, 0x190(%0) \n" \
215 " cache %1, 0x1a0(%0); cache %1, 0x1b0(%0) \n" \
216 " cache %1, 0x1c0(%0); cache %1, 0x1d0(%0) \n" \
217 " cache %1, 0x1e0(%0); cache %1, 0x1f0(%0) \n" \
218 " .set pop \n" \
219 : \
220 : "r" (base), \
221 "i" (op));
222
223#define cache32_unroll32(base,op) \
224 __asm__ __volatile__( \
225 " .set push \n" \
226 " .set noreorder \n" \
227 " .set mips3 \n" \
228 " cache %1, 0x000(%0); cache %1, 0x020(%0) \n" \
229 " cache %1, 0x040(%0); cache %1, 0x060(%0) \n" \
230 " cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" \
231 " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0) \n" \
232 " cache %1, 0x100(%0); cache %1, 0x120(%0) \n" \
233 " cache %1, 0x140(%0); cache %1, 0x160(%0) \n" \
234 " cache %1, 0x180(%0); cache %1, 0x1a0(%0) \n" \
235 " cache %1, 0x1c0(%0); cache %1, 0x1e0(%0) \n" \
236 " cache %1, 0x200(%0); cache %1, 0x220(%0) \n" \
237 " cache %1, 0x240(%0); cache %1, 0x260(%0) \n" \
238 " cache %1, 0x280(%0); cache %1, 0x2a0(%0) \n" \
239 " cache %1, 0x2c0(%0); cache %1, 0x2e0(%0) \n" \
240 " cache %1, 0x300(%0); cache %1, 0x320(%0) \n" \
241 " cache %1, 0x340(%0); cache %1, 0x360(%0) \n" \
242 " cache %1, 0x380(%0); cache %1, 0x3a0(%0) \n" \
243 " cache %1, 0x3c0(%0); cache %1, 0x3e0(%0) \n" \
244 " .set pop \n" \
245 : \
246 : "r" (base), \
247 "i" (op));
248
249#define cache64_unroll32(base,op) \
250 __asm__ __volatile__( \
251 " .set push \n" \
252 " .set noreorder \n" \
253 " .set mips3 \n" \
254 " cache %1, 0x000(%0); cache %1, 0x040(%0) \n" \
255 " cache %1, 0x080(%0); cache %1, 0x0c0(%0) \n" \
256 " cache %1, 0x100(%0); cache %1, 0x140(%0) \n" \
257 " cache %1, 0x180(%0); cache %1, 0x1c0(%0) \n" \
258 " cache %1, 0x200(%0); cache %1, 0x240(%0) \n" \
259 " cache %1, 0x280(%0); cache %1, 0x2c0(%0) \n" \
260 " cache %1, 0x300(%0); cache %1, 0x340(%0) \n" \
261 " cache %1, 0x380(%0); cache %1, 0x3c0(%0) \n" \
262 " cache %1, 0x400(%0); cache %1, 0x440(%0) \n" \
263 " cache %1, 0x480(%0); cache %1, 0x4c0(%0) \n" \
264 " cache %1, 0x500(%0); cache %1, 0x540(%0) \n" \
265 " cache %1, 0x580(%0); cache %1, 0x5c0(%0) \n" \
266 " cache %1, 0x600(%0); cache %1, 0x640(%0) \n" \
267 " cache %1, 0x680(%0); cache %1, 0x6c0(%0) \n" \
268 " cache %1, 0x700(%0); cache %1, 0x740(%0) \n" \
269 " cache %1, 0x780(%0); cache %1, 0x7c0(%0) \n" \
270 " .set pop \n" \
271 : \
272 : "r" (base), \
273 "i" (op));
274
275#define cache128_unroll32(base,op) \
276 __asm__ __volatile__( \
277 " .set push \n" \
278 " .set noreorder \n" \
279 " .set mips3 \n" \
280 " cache %1, 0x000(%0); cache %1, 0x080(%0) \n" \
281 " cache %1, 0x100(%0); cache %1, 0x180(%0) \n" \
282 " cache %1, 0x200(%0); cache %1, 0x280(%0) \n" \
283 " cache %1, 0x300(%0); cache %1, 0x380(%0) \n" \
284 " cache %1, 0x400(%0); cache %1, 0x480(%0) \n" \
285 " cache %1, 0x500(%0); cache %1, 0x580(%0) \n" \
286 " cache %1, 0x600(%0); cache %1, 0x680(%0) \n" \
287 " cache %1, 0x700(%0); cache %1, 0x780(%0) \n" \
288 " cache %1, 0x800(%0); cache %1, 0x880(%0) \n" \
289 " cache %1, 0x900(%0); cache %1, 0x980(%0) \n" \
290 " cache %1, 0xa00(%0); cache %1, 0xa80(%0) \n" \
291 " cache %1, 0xb00(%0); cache %1, 0xb80(%0) \n" \
292 " cache %1, 0xc00(%0); cache %1, 0xc80(%0) \n" \
293 " cache %1, 0xd00(%0); cache %1, 0xd80(%0) \n" \
294 " cache %1, 0xe00(%0); cache %1, 0xe80(%0) \n" \
295 " cache %1, 0xf00(%0); cache %1, 0xf80(%0) \n" \
296 " .set pop \n" \
297 : \
298 : "r" (base), \
299 "i" (op));
300
301#else
302/*
303 * MIPS R6 changed the cache opcode and moved to an 8-bit offset field.
304 * This means we now need to increment the base register before we flush
305 * more cache lines.
306 */
307#define cache16_unroll32(base,op) \
308 __asm__ __volatile__( \
309 " .set push\n" \
310 " .set noreorder\n" \
311 " .set mips64r6\n" \
312 " .set noat\n" \
313 " cache %1, 0x000(%0); cache %1, 0x010(%0)\n" \
314 " cache %1, 0x020(%0); cache %1, 0x030(%0)\n" \
315 " cache %1, 0x040(%0); cache %1, 0x050(%0)\n" \
316 " cache %1, 0x060(%0); cache %1, 0x070(%0)\n" \
317 " cache %1, 0x080(%0); cache %1, 0x090(%0)\n" \
318 " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n" \
319 " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n" \
320 " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n" \
321 " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
322 " cache %1, 0x000($1); cache %1, 0x010($1)\n" \
323 " cache %1, 0x020($1); cache %1, 0x030($1)\n" \
324 " cache %1, 0x040($1); cache %1, 0x050($1)\n" \
325 " cache %1, 0x060($1); cache %1, 0x070($1)\n" \
326 " cache %1, 0x080($1); cache %1, 0x090($1)\n" \
327 " cache %1, 0x0a0($1); cache %1, 0x0b0($1)\n" \
328 " cache %1, 0x0c0($1); cache %1, 0x0d0($1)\n" \
329 " cache %1, 0x0e0($1); cache %1, 0x0f0($1)\n" \
330 " .set pop\n" \
331 : \
332 : "r" (base), \
333 "i" (op));
334
335#define cache32_unroll32(base,op) \
336 __asm__ __volatile__( \
337 " .set push\n" \
338 " .set noreorder\n" \
339 " .set mips64r6\n" \
340 " .set noat\n" \
341 " cache %1, 0x000(%0); cache %1, 0x020(%0)\n" \
342 " cache %1, 0x040(%0); cache %1, 0x060(%0)\n" \
343 " cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n" \
344 " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n" \
345 " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
346 " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
347 " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
348 " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
349 " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
350 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
351 " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
352 " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
353 " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
354 " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
355 " "__stringify(LONG_ADDIU)" $1, $1, 0x100\n" \
356 " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
357 " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
358 " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
359 " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
360 " .set pop\n" \
361 : \
362 : "r" (base), \
363 "i" (op));
364
365#define cache64_unroll32(base,op) \
366 __asm__ __volatile__( \
367 " .set push\n" \
368 " .set noreorder\n" \
369 " .set mips64r6\n" \
370 " .set noat\n" \
371 " cache %1, 0x000(%0); cache %1, 0x040(%0)\n" \
372 " cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n" \
373 " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
374 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
375 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
376 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
377 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
378 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
379 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
380 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
381 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
382 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
383 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
384 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
385 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
386 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
387 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
388 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
389 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
390 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
391 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
392 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
393 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
394 " .set pop\n" \
395 : \
396 : "r" (base), \
397 "i" (op));
398
399#define cache128_unroll32(base,op) \
400 __asm__ __volatile__( \
401 " .set push\n" \
402 " .set noreorder\n" \
403 " .set mips64r6\n" \
404 " .set noat\n" \
405 " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
406 " "__stringify(LONG_ADDIU)" $1, %0, 0x100 \n" \
407 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
408 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
409 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
410 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
411 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
412 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
413 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
414 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
415 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
416 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
417 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
418 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
419 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
420 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
421 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
422 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
423 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
424 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
425 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
426 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
427 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
428 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
429 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
430 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
431 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
432 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
433 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
434 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
435 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
436 " "__stringify(LONG_ADDIU)" $1, $1, 0x100 \n" \
437 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
438 " .set pop\n" \
439 : \
440 : "r" (base), \
441 "i" (op));
442#endif /* CONFIG_CPU_MIPSR6 */
443
444/*
445 * Perform the cache operation specified by op using a user mode virtual
446 * address while in kernel mode.
447 */
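/*
 * cachee is the EVA counterpart of cache: it takes the same opcode and
 * offset(base) operands, but the address is translated through the user
 * address space even though we execute in kernel mode.
 */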
448#define cache16_unroll32_user(base,op) \
449 __asm__ __volatile__( \
450 " .set push \n" \
451 " .set noreorder \n" \
452 " .set mips0 \n" \
453 " .set eva \n" \
454 " cachee %1, 0x000(%0); cachee %1, 0x010(%0) \n" \
455 " cachee %1, 0x020(%0); cachee %1, 0x030(%0) \n" \
456 " cachee %1, 0x040(%0); cachee %1, 0x050(%0) \n" \
457 " cachee %1, 0x060(%0); cachee %1, 0x070(%0) \n" \
458 " cachee %1, 0x080(%0); cachee %1, 0x090(%0) \n" \
459 " cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0) \n" \
460 " cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0) \n" \
461 " cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0) \n" \
462 " cachee %1, 0x100(%0); cachee %1, 0x110(%0) \n" \
463 " cachee %1, 0x120(%0); cachee %1, 0x130(%0) \n" \
464 " cachee %1, 0x140(%0); cachee %1, 0x150(%0) \n" \
465 " cachee %1, 0x160(%0); cachee %1, 0x170(%0) \n" \
466 " cachee %1, 0x180(%0); cachee %1, 0x190(%0) \n" \
467 " cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0) \n" \
468 " cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0) \n" \
469 " cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0) \n" \
470 " .set pop \n" \
471 : \
472 : "r" (base), \
473 "i" (op));
474
475#define cache32_unroll32_user(base, op) \
476 __asm__ __volatile__( \
477 " .set push \n" \
478 " .set noreorder \n" \
479 " .set mips0 \n" \
480 " .set eva \n" \
481 " cachee %1, 0x000(%0); cachee %1, 0x020(%0) \n" \
482 " cachee %1, 0x040(%0); cachee %1, 0x060(%0) \n" \
483 " cachee %1, 0x080(%0); cachee %1, 0x0a0(%0) \n" \
484 " cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0) \n" \
485 " cachee %1, 0x100(%0); cachee %1, 0x120(%0) \n" \
486 " cachee %1, 0x140(%0); cachee %1, 0x160(%0) \n" \
487 " cachee %1, 0x180(%0); cachee %1, 0x1a0(%0) \n" \
488 " cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0) \n" \
489 " cachee %1, 0x200(%0); cachee %1, 0x220(%0) \n" \
490 " cachee %1, 0x240(%0); cachee %1, 0x260(%0) \n" \
491 " cachee %1, 0x280(%0); cachee %1, 0x2a0(%0) \n" \
492 " cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0) \n" \
493 " cachee %1, 0x300(%0); cachee %1, 0x320(%0) \n" \
494 " cachee %1, 0x340(%0); cachee %1, 0x360(%0) \n" \
495 " cachee %1, 0x380(%0); cachee %1, 0x3a0(%0) \n" \
496 " cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0) \n" \
497 " .set pop \n" \
498 : \
499 : "r" (base), \
500 "i" (op));
501
502#define cache64_unroll32_user(base, op) \
503 __asm__ __volatile__( \
504 " .set push \n" \
505 " .set noreorder \n" \
506 " .set mips0 \n" \
507 " .set eva \n" \
508 " cachee %1, 0x000(%0); cachee %1, 0x040(%0) \n" \
509 " cachee %1, 0x080(%0); cachee %1, 0x0c0(%0) \n" \
510 " cachee %1, 0x100(%0); cachee %1, 0x140(%0) \n" \
511 " cachee %1, 0x180(%0); cachee %1, 0x1c0(%0) \n" \
512 " cachee %1, 0x200(%0); cachee %1, 0x240(%0) \n" \
513 " cachee %1, 0x280(%0); cachee %1, 0x2c0(%0) \n" \
514 " cachee %1, 0x300(%0); cachee %1, 0x340(%0) \n" \
515 " cachee %1, 0x380(%0); cachee %1, 0x3c0(%0) \n" \
516 " cachee %1, 0x400(%0); cachee %1, 0x440(%0) \n" \
517 " cachee %1, 0x480(%0); cachee %1, 0x4c0(%0) \n" \
518 " cachee %1, 0x500(%0); cachee %1, 0x540(%0) \n" \
519 " cachee %1, 0x580(%0); cachee %1, 0x5c0(%0) \n" \
520 " cachee %1, 0x600(%0); cachee %1, 0x640(%0) \n" \
521 " cachee %1, 0x680(%0); cachee %1, 0x6c0(%0) \n" \
522 " cachee %1, 0x700(%0); cachee %1, 0x740(%0) \n" \
523 " cachee %1, 0x780(%0); cachee %1, 0x7c0(%0) \n" \
524 " .set pop \n" \
525 : \
526 : "r" (base), \
527 "i" (op));
528
529/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
530#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra) \
531static inline void extra##blast_##pfx##cache##lsize(void) \
532{ \
533 unsigned long start = INDEX_BASE; \
534 unsigned long end = start + current_cpu_data.desc.waysize; \
535 unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
536 unsigned long ws_end = current_cpu_data.desc.ways << \
537 current_cpu_data.desc.waybit; \
538 unsigned long ws, addr; \
539 \
540 for (ws = 0; ws < ws_end; ws += ws_inc) \
541 for (addr = start; addr < end; addr += lsize * 32) \
542 cache##lsize##_unroll32(addr|ws, indexop); \
543} \
544 \
545static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
546{ \
547 unsigned long start = page; \
548 unsigned long end = page + PAGE_SIZE; \
549 \
550 do { \
551 cache##lsize##_unroll32(start, hitop); \
552 start += lsize * 32; \
553 } while (start < end); \
554} \
555 \
556static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
557{ \
558 unsigned long indexmask = current_cpu_data.desc.waysize - 1; \
559 unsigned long start = INDEX_BASE + (page & indexmask); \
560 unsigned long end = start + PAGE_SIZE; \
561 unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
562 unsigned long ws_end = current_cpu_data.desc.ways << \
563 current_cpu_data.desc.waybit; \
564 unsigned long ws, addr; \
565 \
566 for (ws = 0; ws < ws_end; ws += ws_inc) \
567 for (addr = start; addr < end; addr += lsize * 32) \
568 cache##lsize##_unroll32(addr|ws, indexop); \
569}
570
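/*
 * For example, __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D,
 * Hit_Writeback_Inv_D, 32, ) below expands to blast_dcache32(),
 * blast_dcache32_page() and blast_dcache32_page_indexed(), all operating
 * on 32-byte lines.
 */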
571__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
572__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
573__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
574__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
575__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
576__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
577__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
578__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
579__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
580__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
581__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
582__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
583__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )
584
585__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
586__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
587__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
588__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
589__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
590__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
591
592#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
593static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
594{ \
595 unsigned long start = page; \
596 unsigned long end = page + PAGE_SIZE; \
597 \
598 do { \
599 cache##lsize##_unroll32_user(start, hitop); \
600 start += lsize * 32; \
601 } while (start < end); \
602}
603
604__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
605 16)
606__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
607__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
608 32)
609__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
610__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
611 64)
612__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
613
614/* build blast_xxx_range, protected_blast_xxx_range */
615#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \
616static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
617 unsigned long end) \
618{ \
619 unsigned long lsize = cpu_##desc##_line_size(); \
620 unsigned long addr = start & ~(lsize - 1); \
621 unsigned long aend = (end - 1) & ~(lsize - 1); \
622 \
623 while (1) { \
624 prot##cache_op(hitop, addr); \
625 if (addr == aend) \
626 break; \
627 addr += lsize; \
628 } \
629}
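/*
 * The generated *_range() helpers round start down to a line boundary
 * and issue one hit-type cache op per line up to and including the line
 * containing end - 1; the protected_ variants use the fault-tolerant
 * ops above so an unmapped user address is survivable.
 */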
630
631#ifndef CONFIG_EVA
632
633__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
634__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
635
636#else
637
638#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop) \
639static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
640 unsigned long end) \
641{ \
642 unsigned long lsize = cpu_##desc##_line_size(); \
643 unsigned long addr = start & ~(lsize - 1); \
644 unsigned long aend = (end - 1) & ~(lsize - 1); \
645 \
646 if (!uaccess_kernel()) { \
647 while (1) { \
648 protected_cachee_op(hitop, addr); \
649 if (addr == aend) \
650 break; \
651 addr += lsize; \
652 } \
653 } else { \
654 while (1) { \
655 protected_cache_op(hitop, addr); \
656 if (addr == aend) \
657 break; \
658 addr += lsize; \
659 } \
660 \
661 } \
662}
663
664__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
665__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)
666
667#endif
668__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
669__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
670 protected_, loongson2_)
671__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
672__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
673__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
674/* blast_inv_dcache_range */
675__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
676__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
677
678/* Currently, this is very specific to Loongson-3 */
679#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize) \
680static inline void blast_##pfx##cache##lsize##_node(long node) \
681{ \
682 unsigned long start = CAC_BASE | nid_to_addrbase(node); \
683 unsigned long end = start + current_cpu_data.desc.waysize; \
684 unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
685 unsigned long ws_end = current_cpu_data.desc.ways << \
686 current_cpu_data.desc.waybit; \
687 unsigned long ws, addr; \
688 \
689 for (ws = 0; ws < ws_end; ws += ws_inc) \
690 for (addr = start; addr < end; addr += lsize * 32) \
691 cache##lsize##_unroll32(addr|ws, indexop); \
692}
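/*
 * CAC_BASE | nid_to_addrbase(node) forms a cached address inside the
 * given node's address window, so these indexed ops are directed at
 * that node's secondary cache rather than the local one (a sketch of
 * the intent; the exact windowing is Loongson-3 specific).
 */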
693
694__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
695__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
696__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
697__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
698
699#endif /* _ASM_R4KCACHE_H */
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Inline assembly cache operations.
7 *
8 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
9 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
10 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
11 */
12#ifndef _ASM_R4KCACHE_H
13#define _ASM_R4KCACHE_H
14
15#include <linux/stringify.h>
16
17#include <asm/asm.h>
18#include <asm/asm-eva.h>
19#include <asm/cacheops.h>
20#include <asm/compiler.h>
21#include <asm/cpu-features.h>
22#include <asm/cpu-type.h>
23#include <asm/mipsmtregs.h>
24#include <asm/mmzone.h>
25#include <asm/unroll.h>
26
27extern void r5k_sc_init(void);
28extern void rm7k_sc_init(void);
29extern int mips_sc_init(void);
30
31extern void (*r4k_blast_dcache)(void);
32extern void (*r4k_blast_icache)(void);
33
34/*
35 * This macro returns a properly sign-extended address suitable as a base address
36 * for indexed cache operations. Two issues here:
37 *
38 * - The MIPS32 and MIPS64 specs permit an implementation to directly derive
39 * the index bits from the virtual address. This breaks with tradition
40 * set by the R4000. To keep unpleasant surprises from happening we pick
41 * an address in KSEG0 / CKSEG0.
42 * - We need a properly sign extended address for 64-bit code. To get away
43 * without ifdefs we let the compiler do it by a type cast.
44 */
45#define INDEX_BASE CKSEG0
46
47#define _cache_op(insn, op, addr) \
48 __asm__ __volatile__( \
49 " .set push \n" \
50 " .set noreorder \n" \
51 " .set "MIPS_ISA_ARCH_LEVEL" \n" \
52 " " insn("%0", "%1") " \n" \
53 " .set pop \n" \
54 : \
55 : "i" (op), "R" (*(unsigned char *)(addr)))
56
57#define cache_op(op, addr) \
58 _cache_op(kernel_cache, op, addr)
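/*
 * kernel_cache()/user_cache() come from <asm/asm-eva.h>: kernel_cache()
 * always emits a plain cache instruction, while user_cache() emits
 * cachee on CONFIG_EVA kernels so the op is applied via the user
 * address space.
 */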
59
60static inline void flush_icache_line_indexed(unsigned long addr)
61{
62 cache_op(Index_Invalidate_I, addr);
63}
64
65static inline void flush_dcache_line_indexed(unsigned long addr)
66{
67 cache_op(Index_Writeback_Inv_D, addr);
68}
69
70static inline void flush_scache_line_indexed(unsigned long addr)
71{
72 cache_op(Index_Writeback_Inv_SD, addr);
73}
74
75static inline void flush_icache_line(unsigned long addr)
76{
77 switch (boot_cpu_type()) {
78 case CPU_LOONGSON2EF:
79 cache_op(Hit_Invalidate_I_Loongson2, addr);
80 break;
81
82 default:
83 cache_op(Hit_Invalidate_I, addr);
84 break;
85 }
86}
87
88static inline void flush_dcache_line(unsigned long addr)
89{
90 cache_op(Hit_Writeback_Inv_D, addr);
91}
92
93static inline void invalidate_dcache_line(unsigned long addr)
94{
95 cache_op(Hit_Invalidate_D, addr);
96}
97
98static inline void invalidate_scache_line(unsigned long addr)
99{
100 cache_op(Hit_Invalidate_SD, addr);
101}
102
103static inline void flush_scache_line(unsigned long addr)
104{
105 cache_op(Hit_Writeback_Inv_SD, addr);
106}
107
108#ifdef CONFIG_EVA
109
110#define protected_cache_op(op, addr) \
111({ \
112 int __err = 0; \
113 __asm__ __volatile__( \
114 " .set push \n" \
115 " .set noreorder \n" \
116 " .set mips0 \n" \
117 " .set eva \n" \
118 "1: cachee %1, (%2) \n" \
119 "2: .insn \n" \
120 " .set pop \n" \
121 " .section .fixup,\"ax\" \n" \
122 "3: li %0, %3 \n" \
123 " j 2b \n" \
124 " .previous \n" \
125 " .section __ex_table,\"a\" \n" \
126 " "STR(PTR_WD)" 1b, 3b \n" \
127 " .previous" \
128 : "+r" (__err) \
129 : "i" (op), "r" (addr), "i" (-EFAULT)); \
130 __err; \
131})
132#else
133
134#define protected_cache_op(op, addr) \
135({ \
136 int __err = 0; \
137 __asm__ __volatile__( \
138 " .set push \n" \
139 " .set noreorder \n" \
140 " .set "MIPS_ISA_ARCH_LEVEL" \n" \
141 "1: cache %1, (%2) \n" \
142 "2: .insn \n" \
143 " .set pop \n" \
144 " .section .fixup,\"ax\" \n" \
145 "3: li %0, %3 \n" \
146 " j 2b \n" \
147 " .previous \n" \
148 " .section __ex_table,\"a\" \n" \
149 " "STR(PTR_WD)" 1b, 3b \n" \
150 " .previous" \
151 : "+r" (__err) \
152 : "i" (op), "r" (addr), "i" (-EFAULT)); \
153 __err; \
154})
155#endif
156
157/*
158 * The next two are for badland addresses like signal trampolines.
159 */
160static inline int protected_flush_icache_line(unsigned long addr)
161{
162 switch (boot_cpu_type()) {
163 case CPU_LOONGSON2EF:
164 return protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
165
166 default:
167 return protected_cache_op(Hit_Invalidate_I, addr);
168 }
169}
170
171/*
172 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
173 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
174 * caches. We're talking about one cacheline unnecessarily getting invalidated
175 * here, so the penalty isn't overly harsh.
176 */
177static inline int protected_writeback_dcache_line(unsigned long addr)
178{
179 return protected_cache_op(Hit_Writeback_Inv_D, addr);
180}
181
182static inline int protected_writeback_scache_line(unsigned long addr)
183{
184 return protected_cache_op(Hit_Writeback_Inv_SD, addr);
185}
186
187/*
188 * This one is RM7000-specific
189 */
190static inline void invalidate_tcache_page(unsigned long addr)
191{
192 cache_op(Page_Invalidate_T, addr);
193}
194
195#define cache_unroll(times, insn, op, addr, lsize) do { \
196 int i = 0; \
197 unroll(times, _cache_op, insn, op, (addr) + (i++ * (lsize))); \
198} while (0)
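/*
 * unroll() (from <asm/unroll.h>) pastes the _cache_op() expansion
 * `times' times; since the address argument contains i++, each copy
 * targets the next cache line, so cache_unroll(32, ...) touches
 * 32 * lsize bytes starting at addr.
 */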
199
200/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
201#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra) \
202static inline void extra##blast_##pfx##cache##lsize(void) \
203{ \
204 unsigned long start = INDEX_BASE; \
205 unsigned long end = start + current_cpu_data.desc.waysize; \
206 unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
207 unsigned long ws_end = current_cpu_data.desc.ways << \
208 current_cpu_data.desc.waybit; \
209 unsigned long ws, addr; \
210 \
211 for (ws = 0; ws < ws_end; ws += ws_inc) \
212 for (addr = start; addr < end; addr += lsize * 32) \
213 cache_unroll(32, kernel_cache, indexop, \
214 addr | ws, lsize); \
215} \
216 \
217static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
218{ \
219 unsigned long start = page; \
220 unsigned long end = page + PAGE_SIZE; \
221 \
222 do { \
223 cache_unroll(32, kernel_cache, hitop, start, lsize); \
224 start += lsize * 32; \
225 } while (start < end); \
226} \
227 \
228static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
229{ \
230 unsigned long indexmask = current_cpu_data.desc.waysize - 1; \
231 unsigned long start = INDEX_BASE + (page & indexmask); \
232 unsigned long end = start + PAGE_SIZE; \
233 unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
234 unsigned long ws_end = current_cpu_data.desc.ways << \
235 current_cpu_data.desc.waybit; \
236 unsigned long ws, addr; \
237 \
238 for (ws = 0; ws < ws_end; ws += ws_inc) \
239 for (addr = start; addr < end; addr += lsize * 32) \
240 cache_unroll(32, kernel_cache, indexop, \
241 addr | ws, lsize); \
242}
243
244__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
245__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
246__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
247__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
248__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
249__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
250__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
251__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
252__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
253__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
254__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
255__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
256__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )
257
258__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
259__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
260__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
261__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
262__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
263__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
264
265#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
266static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
267{ \
268 unsigned long start = page; \
269 unsigned long end = page + PAGE_SIZE; \
270 \
271 do { \
272 cache_unroll(32, user_cache, hitop, start, lsize); \
273 start += lsize * 32; \
274 } while (start < end); \
275}
276
277__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
278 16)
279__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
280__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
281 32)
282__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
283__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
284 64)
285__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
286
287/* build blast_xxx_range, protected_blast_xxx_range */
288#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \
289static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
290 unsigned long end) \
291{ \
292 unsigned long lsize = cpu_##desc##_line_size(); \
293 unsigned long addr = start & ~(lsize - 1); \
294 unsigned long aend = (end - 1) & ~(lsize - 1); \
295 \
296 while (1) { \
297 prot##cache_op(hitop, addr); \
298 if (addr == aend) \
299 break; \
300 addr += lsize; \
301 } \
302}
303
304__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
305__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
306__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
307__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
308 protected_, loongson2_)
309__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
310__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
311__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
312/* blast_inv_dcache_range */
313__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
314__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
315
316/* Currently, this is very specific to Loongson-3 */
317#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize) \
318static inline void blast_##pfx##cache##lsize##_node(long node) \
319{ \
320 unsigned long start = CAC_BASE | nid_to_addrbase(node); \
321 unsigned long end = start + current_cpu_data.desc.waysize; \
322 unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
323 unsigned long ws_end = current_cpu_data.desc.ways << \
324 current_cpu_data.desc.waybit; \
325 unsigned long ws, addr; \
326 \
327 for (ws = 0; ws < ws_end; ws += ws_inc) \
328 for (addr = start; addr < end; addr += lsize * 32) \
329 cache_unroll(32, kernel_cache, indexop, \
330 addr | ws, lsize); \
331}
332
333__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
334__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
335__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
336__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
337
338#endif /* _ASM_R4KCACHE_H */