/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <asm/asm.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
#include <asm/uaccess.h>	/* for segment_eq() */

/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations. Two issues here:
 *
 * - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *   the index bits from the virtual address. This breaks with the tradition
 *   set by the R4000. To keep unpleasant surprises from happening we pick an
 *   address in KSEG0 / CKSEG0.
 * - We need a properly sign extended address for 64-bit code. To get away
 *   without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE	CKSEG0

#define cache_op(op, addr) \
	__asm__ __volatile__( \
	"	.set	push \n" \
	"	.set	noreorder \n" \
	"	.set	arch=r4000 \n" \
	"	cache	%0, %1 \n" \
	"	.set	pop \n" \
	: \
	: "i" (op), "R" (*(unsigned char *)(addr)))
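
/*
 * The "i" constraint requires a compile-time constant cache opcode from
 * <asm/cacheops.h>; the "R" constraint hands GCC a one-byte memory operand
 * so the cache instruction gets a single register+offset address. Typical
 * use:
 *
 *	cache_op(Hit_Writeback_Inv_D, addr);
 */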

#ifdef CONFIG_MIPS_MT
/*
 * Temporary hacks for SMTC debug. Optionally force single-threaded
 * execution during I-cache flushes.
 */

#define PROTECT_CACHE_FLUSHES 1

#ifdef PROTECT_CACHE_FLUSHES

extern int mt_protiflush;
extern int mt_protdflush;
extern void mt_cflush_lockdown(void);
extern void mt_cflush_release(void);

#define BEGIN_MT_IPROT \
	unsigned long flags = 0; \
	unsigned long mtflags = 0; \
	if (mt_protiflush) { \
		local_irq_save(flags); \
		ehb(); \
		mtflags = dvpe(); \
		mt_cflush_lockdown(); \
	}

#define END_MT_IPROT \
	if (mt_protiflush) { \
		mt_cflush_release(); \
		evpe(mtflags); \
		local_irq_restore(flags); \
	}

#define BEGIN_MT_DPROT \
	unsigned long flags = 0; \
	unsigned long mtflags = 0; \
	if (mt_protdflush) { \
		local_irq_save(flags); \
		ehb(); \
		mtflags = dvpe(); \
		mt_cflush_lockdown(); \
	}

#define END_MT_DPROT \
	if (mt_protdflush) { \
		mt_cflush_release(); \
		evpe(mtflags); \
		local_irq_restore(flags); \
	}

#else

#define BEGIN_MT_IPROT
#define BEGIN_MT_DPROT
#define END_MT_IPROT
#define END_MT_DPROT

#endif /* PROTECT_CACHE_FLUSHES */

#define __iflush_prologue \
	unsigned long redundance; \
	extern int mt_n_iflushes; \
	BEGIN_MT_IPROT \
	for (redundance = 0; redundance < mt_n_iflushes; redundance++) {

#define __iflush_epilogue \
	END_MT_IPROT \
	}

#define __dflush_prologue \
	unsigned long redundance; \
	extern int mt_n_dflushes; \
	BEGIN_MT_DPROT \
	for (redundance = 0; redundance < mt_n_dflushes; redundance++) {

#define __dflush_epilogue \
	END_MT_DPROT \
	}

#define __inv_dflush_prologue __dflush_prologue
#define __inv_dflush_epilogue __dflush_epilogue
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue __sflush_prologue
#define __inv_sflush_epilogue __sflush_epilogue

#else /* CONFIG_MIPS_MT */

#define __iflush_prologue {
#define __iflush_epilogue }
#define __dflush_prologue {
#define __dflush_epilogue }
#define __inv_dflush_prologue {
#define __inv_dflush_epilogue }
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue {
#define __inv_sflush_epilogue }

#endif /* CONFIG_MIPS_MT */
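
/*
 * Without CONFIG_MIPS_MT the prologue/epilogue pairs above degenerate to
 * plain braces, so the helpers below wrap their cache op in an ordinary
 * block scope and carry no MT locking or repetition overhead.
 */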

static inline void flush_icache_line_indexed(unsigned long addr)
{
	__iflush_prologue
	cache_op(Index_Invalidate_I, addr);
	__iflush_epilogue
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	__dflush_prologue
	cache_op(Index_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
	__iflush_prologue
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		cache_op(Hit_Invalidate_I_Loongson2, addr);
		break;

	default:
		cache_op(Hit_Invalidate_I, addr);
		break;
	}
	__iflush_epilogue
}

static inline void flush_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Invalidate_D, addr);
	__dflush_epilogue
}

static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}

#define protected_cache_op(op, addr) \
	__asm__ __volatile__( \
	"	.set	push \n" \
	"	.set	noreorder \n" \
	"	.set	arch=r4000 \n" \
	"1:	cache	%0, (%1) \n" \
	"2:	.set	pop \n" \
	"	.section __ex_table,\"a\" \n" \
	"	"STR(PTR)" 1b, 2b \n" \
	"	.previous" \
	: \
	: "i" (op), "r" (addr))

#define protected_cachee_op(op, addr) \
	__asm__ __volatile__( \
	"	.set	push \n" \
	"	.set	noreorder \n" \
	"	.set	mips0 \n" \
	"	.set	eva \n" \
	"1:	cachee	%0, (%1) \n" \
	"2:	.set	pop \n" \
	"	.section __ex_table,\"a\" \n" \
	"	"STR(PTR)" 1b, 2b \n" \
	"	.previous" \
	: \
	: "i" (op), "r" (addr))

/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
		break;

	default:
#ifdef CONFIG_EVA
		protected_cachee_op(Hit_Invalidate_I, addr);
#else
		protected_cache_op(Hit_Invalidate_I, addr);
#endif
		break;
	}
}

/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
 * caches. Only a single cache line gets needlessly invalidated, so the
 * penalty isn't severe.
 */
static inline void protected_writeback_dcache_line(unsigned long addr)
{
	protected_cache_op(Hit_Writeback_Inv_D, addr);
}

static inline void protected_writeback_scache_line(unsigned long addr)
{
	protected_cache_op(Hit_Writeback_Inv_SD, addr);
}

/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
	cache_op(Page_Invalidate_T, addr);
}

#define cache16_unroll32(base, op) \
	__asm__ __volatile__( \
	"	.set	push \n" \
	"	.set	noreorder \n" \
	"	.set	mips3 \n" \
	"	cache %1, 0x000(%0); cache %1, 0x010(%0) \n" \
	"	cache %1, 0x020(%0); cache %1, 0x030(%0) \n" \
	"	cache %1, 0x040(%0); cache %1, 0x050(%0) \n" \
	"	cache %1, 0x060(%0); cache %1, 0x070(%0) \n" \
	"	cache %1, 0x080(%0); cache %1, 0x090(%0) \n" \
	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" \
	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" \
	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" \
	"	cache %1, 0x100(%0); cache %1, 0x110(%0) \n" \
	"	cache %1, 0x120(%0); cache %1, 0x130(%0) \n" \
	"	cache %1, 0x140(%0); cache %1, 0x150(%0) \n" \
	"	cache %1, 0x160(%0); cache %1, 0x170(%0) \n" \
	"	cache %1, 0x180(%0); cache %1, 0x190(%0) \n" \
	"	cache %1, 0x1a0(%0); cache %1, 0x1b0(%0) \n" \
	"	cache %1, 0x1c0(%0); cache %1, 0x1d0(%0) \n" \
	"	cache %1, 0x1e0(%0); cache %1, 0x1f0(%0) \n" \
	"	.set	pop \n" \
	: \
	: "r" (base), \
	  "i" (op));
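
/*
 * Each *_unroll32 macro touches 32 cache lines per invocation, i.e.
 * 32 * lsize bytes: 512 bytes here, 1 KiB for cache32_unroll32, 2 KiB for
 * cache64_unroll32 and 4 KiB for cache128_unroll32. The blast loops below
 * therefore advance their address by lsize * 32 per call.
 */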

#define cache32_unroll32(base, op) \
	__asm__ __volatile__( \
	"	.set	push \n" \
	"	.set	noreorder \n" \
	"	.set	mips3 \n" \
	"	cache %1, 0x000(%0); cache %1, 0x020(%0) \n" \
	"	cache %1, 0x040(%0); cache %1, 0x060(%0) \n" \
	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" \
	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0) \n" \
	"	cache %1, 0x100(%0); cache %1, 0x120(%0) \n" \
	"	cache %1, 0x140(%0); cache %1, 0x160(%0) \n" \
	"	cache %1, 0x180(%0); cache %1, 0x1a0(%0) \n" \
	"	cache %1, 0x1c0(%0); cache %1, 0x1e0(%0) \n" \
	"	cache %1, 0x200(%0); cache %1, 0x220(%0) \n" \
	"	cache %1, 0x240(%0); cache %1, 0x260(%0) \n" \
	"	cache %1, 0x280(%0); cache %1, 0x2a0(%0) \n" \
	"	cache %1, 0x2c0(%0); cache %1, 0x2e0(%0) \n" \
	"	cache %1, 0x300(%0); cache %1, 0x320(%0) \n" \
	"	cache %1, 0x340(%0); cache %1, 0x360(%0) \n" \
	"	cache %1, 0x380(%0); cache %1, 0x3a0(%0) \n" \
	"	cache %1, 0x3c0(%0); cache %1, 0x3e0(%0) \n" \
	"	.set	pop \n" \
	: \
	: "r" (base), \
	  "i" (op));

#define cache64_unroll32(base, op) \
	__asm__ __volatile__( \
	"	.set	push \n" \
	"	.set	noreorder \n" \
	"	.set	mips3 \n" \
	"	cache %1, 0x000(%0); cache %1, 0x040(%0) \n" \
	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0) \n" \
	"	cache %1, 0x100(%0); cache %1, 0x140(%0) \n" \
	"	cache %1, 0x180(%0); cache %1, 0x1c0(%0) \n" \
	"	cache %1, 0x200(%0); cache %1, 0x240(%0) \n" \
	"	cache %1, 0x280(%0); cache %1, 0x2c0(%0) \n" \
	"	cache %1, 0x300(%0); cache %1, 0x340(%0) \n" \
	"	cache %1, 0x380(%0); cache %1, 0x3c0(%0) \n" \
	"	cache %1, 0x400(%0); cache %1, 0x440(%0) \n" \
	"	cache %1, 0x480(%0); cache %1, 0x4c0(%0) \n" \
	"	cache %1, 0x500(%0); cache %1, 0x540(%0) \n" \
	"	cache %1, 0x580(%0); cache %1, 0x5c0(%0) \n" \
	"	cache %1, 0x600(%0); cache %1, 0x640(%0) \n" \
	"	cache %1, 0x680(%0); cache %1, 0x6c0(%0) \n" \
	"	cache %1, 0x700(%0); cache %1, 0x740(%0) \n" \
	"	cache %1, 0x780(%0); cache %1, 0x7c0(%0) \n" \
	"	.set	pop \n" \
	: \
	: "r" (base), \
	  "i" (op));

#define cache128_unroll32(base, op) \
	__asm__ __volatile__( \
	"	.set	push \n" \
	"	.set	noreorder \n" \
	"	.set	mips3 \n" \
	"	cache %1, 0x000(%0); cache %1, 0x080(%0) \n" \
	"	cache %1, 0x100(%0); cache %1, 0x180(%0) \n" \
	"	cache %1, 0x200(%0); cache %1, 0x280(%0) \n" \
	"	cache %1, 0x300(%0); cache %1, 0x380(%0) \n" \
	"	cache %1, 0x400(%0); cache %1, 0x480(%0) \n" \
	"	cache %1, 0x500(%0); cache %1, 0x580(%0) \n" \
	"	cache %1, 0x600(%0); cache %1, 0x680(%0) \n" \
	"	cache %1, 0x700(%0); cache %1, 0x780(%0) \n" \
	"	cache %1, 0x800(%0); cache %1, 0x880(%0) \n" \
	"	cache %1, 0x900(%0); cache %1, 0x980(%0) \n" \
	"	cache %1, 0xa00(%0); cache %1, 0xa80(%0) \n" \
	"	cache %1, 0xb00(%0); cache %1, 0xb80(%0) \n" \
	"	cache %1, 0xc00(%0); cache %1, 0xc80(%0) \n" \
	"	cache %1, 0xd00(%0); cache %1, 0xd80(%0) \n" \
	"	cache %1, 0xe00(%0); cache %1, 0xe80(%0) \n" \
	"	cache %1, 0xf00(%0); cache %1, 0xf80(%0) \n" \
	"	.set	pop \n" \
	: \
	: "r" (base), \
	  "i" (op));

/*
 * Perform the cache operation specified by op using a user mode virtual
 * address while in kernel mode.
 */
#define cache16_unroll32_user(base, op) \
	__asm__ __volatile__( \
	"	.set	push \n" \
	"	.set	noreorder \n" \
	"	.set	mips0 \n" \
	"	.set	eva \n" \
	"	cachee %1, 0x000(%0); cachee %1, 0x010(%0) \n" \
	"	cachee %1, 0x020(%0); cachee %1, 0x030(%0) \n" \
	"	cachee %1, 0x040(%0); cachee %1, 0x050(%0) \n" \
	"	cachee %1, 0x060(%0); cachee %1, 0x070(%0) \n" \
	"	cachee %1, 0x080(%0); cachee %1, 0x090(%0) \n" \
	"	cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0) \n" \
	"	cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0) \n" \
	"	cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0) \n" \
	"	cachee %1, 0x100(%0); cachee %1, 0x110(%0) \n" \
	"	cachee %1, 0x120(%0); cachee %1, 0x130(%0) \n" \
	"	cachee %1, 0x140(%0); cachee %1, 0x150(%0) \n" \
	"	cachee %1, 0x160(%0); cachee %1, 0x170(%0) \n" \
	"	cachee %1, 0x180(%0); cachee %1, 0x190(%0) \n" \
	"	cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0) \n" \
	"	cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0) \n" \
	"	cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0) \n" \
	"	.set	pop \n" \
	: \
	: "r" (base), \
	  "i" (op));

#define cache32_unroll32_user(base, op) \
	__asm__ __volatile__( \
	"	.set	push \n" \
	"	.set	noreorder \n" \
	"	.set	mips0 \n" \
	"	.set	eva \n" \
	"	cachee %1, 0x000(%0); cachee %1, 0x020(%0) \n" \
	"	cachee %1, 0x040(%0); cachee %1, 0x060(%0) \n" \
	"	cachee %1, 0x080(%0); cachee %1, 0x0a0(%0) \n" \
	"	cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0) \n" \
	"	cachee %1, 0x100(%0); cachee %1, 0x120(%0) \n" \
	"	cachee %1, 0x140(%0); cachee %1, 0x160(%0) \n" \
	"	cachee %1, 0x180(%0); cachee %1, 0x1a0(%0) \n" \
	"	cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0) \n" \
	"	cachee %1, 0x200(%0); cachee %1, 0x220(%0) \n" \
	"	cachee %1, 0x240(%0); cachee %1, 0x260(%0) \n" \
	"	cachee %1, 0x280(%0); cachee %1, 0x2a0(%0) \n" \
	"	cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0) \n" \
	"	cachee %1, 0x300(%0); cachee %1, 0x320(%0) \n" \
	"	cachee %1, 0x340(%0); cachee %1, 0x360(%0) \n" \
	"	cachee %1, 0x380(%0); cachee %1, 0x3a0(%0) \n" \
	"	cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0) \n" \
	"	.set	pop \n" \
	: \
	: "r" (base), \
	  "i" (op));

#define cache64_unroll32_user(base, op) \
	__asm__ __volatile__( \
	"	.set	push \n" \
	"	.set	noreorder \n" \
	"	.set	mips0 \n" \
	"	.set	eva \n" \
	"	cachee %1, 0x000(%0); cachee %1, 0x040(%0) \n" \
	"	cachee %1, 0x080(%0); cachee %1, 0x0c0(%0) \n" \
	"	cachee %1, 0x100(%0); cachee %1, 0x140(%0) \n" \
	"	cachee %1, 0x180(%0); cachee %1, 0x1c0(%0) \n" \
	"	cachee %1, 0x200(%0); cachee %1, 0x240(%0) \n" \
	"	cachee %1, 0x280(%0); cachee %1, 0x2c0(%0) \n" \
	"	cachee %1, 0x300(%0); cachee %1, 0x340(%0) \n" \
	"	cachee %1, 0x380(%0); cachee %1, 0x3c0(%0) \n" \
	"	cachee %1, 0x400(%0); cachee %1, 0x440(%0) \n" \
	"	cachee %1, 0x480(%0); cachee %1, 0x4c0(%0) \n" \
	"	cachee %1, 0x500(%0); cachee %1, 0x540(%0) \n" \
	"	cachee %1, 0x580(%0); cachee %1, 0x5c0(%0) \n" \
	"	cachee %1, 0x600(%0); cachee %1, 0x640(%0) \n" \
	"	cachee %1, 0x680(%0); cachee %1, 0x6c0(%0) \n" \
	"	cachee %1, 0x700(%0); cachee %1, 0x740(%0) \n" \
	"	cachee %1, 0x780(%0); cachee %1, 0x7c0(%0) \n" \
	"	.set	pop \n" \
	: \
	: "r" (base), \
	  "i" (op));

/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra) \
static inline void extra##blast_##pfx##cache##lsize(void) \
{ \
	unsigned long start = INDEX_BASE; \
	unsigned long end = start + current_cpu_data.desc.waysize; \
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
	unsigned long ws_end = current_cpu_data.desc.ways << \
			       current_cpu_data.desc.waybit; \
	unsigned long ws, addr; \
 \
	__##pfx##flush_prologue \
 \
	for (ws = 0; ws < ws_end; ws += ws_inc) \
		for (addr = start; addr < end; addr += lsize * 32) \
			cache##lsize##_unroll32(addr|ws, indexop); \
 \
	__##pfx##flush_epilogue \
} \
 \
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
{ \
	unsigned long start = page; \
	unsigned long end = page + PAGE_SIZE; \
 \
	__##pfx##flush_prologue \
 \
	do { \
		cache##lsize##_unroll32(start, hitop); \
		start += lsize * 32; \
	} while (start < end); \
 \
	__##pfx##flush_epilogue \
} \
 \
static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{ \
	unsigned long indexmask = current_cpu_data.desc.waysize - 1; \
	unsigned long start = INDEX_BASE + (page & indexmask); \
	unsigned long end = start + PAGE_SIZE; \
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
	unsigned long ws_end = current_cpu_data.desc.ways << \
			       current_cpu_data.desc.waybit; \
	unsigned long ws, addr; \
 \
	__##pfx##flush_prologue \
 \
	for (ws = 0; ws < ws_end; ws += ws_inc) \
		for (addr = start; addr < end; addr += lsize * 32) \
			cache##lsize##_unroll32(addr|ws, indexop); \
 \
	__##pfx##flush_epilogue \
}

__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
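
/*
 * The expansions above generate functions such as blast_dcache32(),
 * blast_dcache32_page() and blast_dcache32_page_indexed(); callers (e.g.
 * arch/mips/mm/c-r4k.c) are expected to select the variant matching the
 * probed line size, along the lines of:
 *
 *	if (cpu_dcache_line_size() == 32)
 *		blast_dcache32();
 */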

#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
{ \
	unsigned long start = page; \
	unsigned long end = page + PAGE_SIZE; \
 \
	__##pfx##flush_prologue \
 \
	do { \
		cache##lsize##_unroll32_user(start, hitop); \
		start += lsize * 32; \
	} while (start < end); \
 \
	__##pfx##flush_epilogue \
}

__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 16)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 32)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 64)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)

/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \
static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
							    unsigned long end) \
{ \
	unsigned long lsize = cpu_##desc##_line_size(); \
	unsigned long addr = start & ~(lsize - 1); \
	unsigned long aend = (end - 1) & ~(lsize - 1); \
 \
	__##pfx##flush_prologue \
 \
	while (1) { \
		prot##cache_op(hitop, addr); \
		if (addr == aend) \
			break; \
		addr += lsize; \
	} \
 \
	__##pfx##flush_epilogue \
}
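
/*
 * Note the loop shape: aend is the inclusive address of the last line
 * ((end - 1) rounded down to a line boundary), so the final, possibly
 * partial line of the range is always covered before the loop exits.
 */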

#ifndef CONFIG_EVA

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )

#else

#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop) \
static inline void protected_blast_##pfx##cache##_range(unsigned long start, \
							unsigned long end) \
{ \
	unsigned long lsize = cpu_##desc##_line_size(); \
	unsigned long addr = start & ~(lsize - 1); \
	unsigned long aend = (end - 1) & ~(lsize - 1); \
 \
	__##pfx##flush_prologue \
 \
	if (segment_eq(get_fs(), USER_DS)) { \
		while (1) { \
			protected_cachee_op(hitop, addr); \
			if (addr == aend) \
				break; \
			addr += lsize; \
		} \
	} else { \
		while (1) { \
			protected_cache_op(hitop, addr); \
			if (addr == aend) \
				break; \
			addr += lsize; \
		} \
	} \
 \
	__##pfx##flush_epilogue \
}

__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)

#endif
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
	protected_, loongson2_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )

#endif /* _ASM_R4KCACHE_H */
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <linux/stringify.h>

#include <asm/asm.h>
#include <asm/asm-eva.h>
#include <asm/cacheops.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
#include <asm/mmzone.h>
#include <asm/unroll.h>
#include <linux/uaccess.h>	/* for uaccess_kernel() */

extern void (*r4k_blast_dcache)(void);
extern void (*r4k_blast_icache)(void);

/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations. Two issues here:
 *
 * - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *   the index bits from the virtual address. This breaks with the tradition
 *   set by the R4000. To keep unpleasant surprises from happening we pick an
 *   address in KSEG0 / CKSEG0.
 * - We need a properly sign extended address for 64-bit code. To get away
 *   without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE	CKSEG0

#define _cache_op(insn, op, addr) \
	__asm__ __volatile__( \
	"	.set	push \n" \
	"	.set	noreorder \n" \
	"	.set "MIPS_ISA_ARCH_LEVEL" \n" \
	"	" insn("%0", "%1") " \n" \
	"	.set	pop \n" \
	: \
	: "i" (op), "R" (*(unsigned char *)(addr)))

#define cache_op(op, addr) \
	_cache_op(kernel_cache, op, addr)
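
/*
 * kernel_cache() and user_cache() are provided by <asm/asm-eva.h>; with
 * CONFIG_EVA the user variant expands to the "cachee" instruction while
 * the kernel variant stays "cache", so the same _cache_op() body serves
 * both kernel and user address spaces.
 */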

static inline void flush_icache_line_indexed(unsigned long addr)
{
	cache_op(Index_Invalidate_I, addr);
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_D, addr);
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2EF:
		cache_op(Hit_Invalidate_I_Loongson2, addr);
		break;

	default:
		cache_op(Hit_Invalidate_I, addr);
		break;
	}
}

static inline void flush_dcache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_D, addr);
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_D, addr);
}

static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}

#define protected_cache_op(op, addr) \
({ \
	int __err = 0; \
	__asm__ __volatile__( \
	"	.set	push \n" \
	"	.set	noreorder \n" \
	"	.set "MIPS_ISA_ARCH_LEVEL" \n" \
	"1:	cache	%1, (%2) \n" \
	"2:	.insn \n" \
	"	.set	pop \n" \
	"	.section .fixup,\"ax\" \n" \
	"3:	li	%0, %3 \n" \
	"	j	2b \n" \
	"	.previous \n" \
	"	.section __ex_table,\"a\" \n" \
	"	"STR(PTR)" 1b, 3b \n" \
	"	.previous" \
	: "+r" (__err) \
	: "i" (op), "r" (addr), "i" (-EFAULT)); \
	__err; \
})
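
/*
 * Unlike cache_op(), this returns a value: 0 on success, or -EFAULT when
 * the cache instruction faults. The __ex_table entry redirects such a
 * fault to label 3, which loads -EFAULT into __err and jumps back past
 * the instruction.
 */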

#define protected_cachee_op(op, addr) \
({ \
	int __err = 0; \
	__asm__ __volatile__( \
	"	.set	push \n" \
	"	.set	noreorder \n" \
	"	.set	mips0 \n" \
	"	.set	eva \n" \
	"1:	cachee	%1, (%2) \n" \
	"2:	.insn \n" \
	"	.set	pop \n" \
	"	.section .fixup,\"ax\" \n" \
	"3:	li	%0, %3 \n" \
	"	j	2b \n" \
	"	.previous \n" \
	"	.section __ex_table,\"a\" \n" \
	"	"STR(PTR)" 1b, 3b \n" \
	"	.previous" \
	: "+r" (__err) \
	: "i" (op), "r" (addr), "i" (-EFAULT)); \
	__err; \
})

/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline int protected_flush_icache_line(unsigned long addr)
{
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2EF:
		return protected_cache_op(Hit_Invalidate_I_Loongson2, addr);

	default:
#ifdef CONFIG_EVA
		return protected_cachee_op(Hit_Invalidate_I, addr);
#else
		return protected_cache_op(Hit_Invalidate_I, addr);
#endif
	}
}

/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
 * caches. Only a single cache line gets needlessly invalidated, so the
 * penalty isn't severe.
 */
static inline int protected_writeback_dcache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
	return protected_cachee_op(Hit_Writeback_Inv_D, addr);
#else
	return protected_cache_op(Hit_Writeback_Inv_D, addr);
#endif
}

static inline int protected_writeback_scache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
	return protected_cachee_op(Hit_Writeback_Inv_SD, addr);
#else
	return protected_cache_op(Hit_Writeback_Inv_SD, addr);
#endif
}

/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
	cache_op(Page_Invalidate_T, addr);
}

#define cache_unroll(times, insn, op, addr, lsize) do { \
	int i = 0; \
	unroll(times, _cache_op, insn, op, (addr) + (i++ * (lsize))); \
} while (0)
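
/*
 * unroll() from <asm/unroll.h> repeats its function argument `times'
 * times at compile time, so cache_unroll(32, kernel_cache, op, addr,
 * lsize) emits 32 consecutive cache instructions covering addr through
 * addr + 31 * lsize. This replaces the hand-written cacheNN_unroll32
 * macros used by earlier versions of this file.
 */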

/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra) \
static inline void extra##blast_##pfx##cache##lsize(void) \
{ \
	unsigned long start = INDEX_BASE; \
	unsigned long end = start + current_cpu_data.desc.waysize; \
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
	unsigned long ws_end = current_cpu_data.desc.ways << \
			       current_cpu_data.desc.waybit; \
	unsigned long ws, addr; \
 \
	for (ws = 0; ws < ws_end; ws += ws_inc) \
		for (addr = start; addr < end; addr += lsize * 32) \
			cache_unroll(32, kernel_cache, indexop, \
				     addr | ws, lsize); \
} \
 \
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
{ \
	unsigned long start = page; \
	unsigned long end = page + PAGE_SIZE; \
 \
	do { \
		cache_unroll(32, kernel_cache, hitop, start, lsize); \
		start += lsize * 32; \
	} while (start < end); \
} \
 \
static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{ \
	unsigned long indexmask = current_cpu_data.desc.waysize - 1; \
	unsigned long start = INDEX_BASE + (page & indexmask); \
	unsigned long end = start + PAGE_SIZE; \
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
	unsigned long ws_end = current_cpu_data.desc.ways << \
			       current_cpu_data.desc.waybit; \
	unsigned long ws, addr; \
 \
	for (ws = 0; ws < ws_end; ws += ws_inc) \
		for (addr = start; addr < end; addr += lsize * 32) \
			cache_unroll(32, kernel_cache, indexop, \
				     addr | ws, lsize); \
}

__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )

#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
{ \
	unsigned long start = page; \
	unsigned long end = page + PAGE_SIZE; \
 \
	do { \
		cache_unroll(32, user_cache, hitop, start, lsize); \
		start += lsize * 32; \
	} while (start < end); \
}

__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 16)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 32)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 64)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)

/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \
static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
							    unsigned long end) \
{ \
	unsigned long lsize = cpu_##desc##_line_size(); \
	unsigned long addr = start & ~(lsize - 1); \
	unsigned long aend = (end - 1) & ~(lsize - 1); \
 \
	while (1) { \
		prot##cache_op(hitop, addr); \
		if (addr == aend) \
			break; \
		addr += lsize; \
	} \
}
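
/*
 * The prot argument is token-pasted onto cache_op, so passing
 * "protected_" selects the fault-safe protected_cache_op() while an
 * empty argument selects the plain cache_op(); extra likewise prefixes
 * the generated function name (e.g. loongson2_).
 */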

#ifndef CONFIG_EVA

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )

#else

#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop) \
static inline void protected_blast_##pfx##cache##_range(unsigned long start, \
							unsigned long end) \
{ \
	unsigned long lsize = cpu_##desc##_line_size(); \
	unsigned long addr = start & ~(lsize - 1); \
	unsigned long aend = (end - 1) & ~(lsize - 1); \
 \
	if (!uaccess_kernel()) { \
		while (1) { \
			protected_cachee_op(hitop, addr); \
			if (addr == aend) \
				break; \
			addr += lsize; \
		} \
	} else { \
		while (1) { \
			protected_cache_op(hitop, addr); \
			if (addr == aend) \
				break; \
			addr += lsize; \
		} \
	} \
}

__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)

#endif
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
	protected_, loongson2_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )

/* Currently, this is very specific to Loongson-3 */
#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize##_node(long node) \
{ \
	unsigned long start = CAC_BASE | nid_to_addrbase(node); \
	unsigned long end = start + current_cpu_data.desc.waysize; \
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
	unsigned long ws_end = current_cpu_data.desc.ways << \
			       current_cpu_data.desc.waybit; \
	unsigned long ws, addr; \
 \
	for (ws = 0; ws < ws_end; ws += ws_inc) \
		for (addr = start; addr < end; addr += lsize * 32) \
			cache_unroll(32, kernel_cache, indexop, \
				     addr | ws, lsize); \
}
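
/*
 * nid_to_addrbase() from <asm/mmzone.h> folds the node id into the
 * cached base address, so the indexed ops walk only that node's S-cache.
 * Only scache variants are instantiated below, matching the per-node
 * shared L2 on Loongson-3.
 */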

__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)

#endif /* _ASM_R4KCACHE_H */