v6.8 (arch/mips/include/asm/r4kcache.h)
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Inline assembly cache operations.
  7 *
  8 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
  9 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 10 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 11 */
 12#ifndef _ASM_R4KCACHE_H
 13#define _ASM_R4KCACHE_H
 14
 15#include <linux/stringify.h>
 16
 17#include <asm/asm.h>
 18#include <asm/asm-eva.h>
 19#include <asm/cacheops.h>
 20#include <asm/compiler.h>
 21#include <asm/cpu-features.h>
 22#include <asm/cpu-type.h>
 23#include <asm/mipsmtregs.h>
 24#include <asm/mmzone.h>
 25#include <asm/unroll.h>
 26
 27extern void r5k_sc_init(void);
 28extern void rm7k_sc_init(void);
 29extern int mips_sc_init(void);
 30
 31extern void (*r4k_blast_dcache)(void);
 32extern void (*r4k_blast_icache)(void);
 33
 34/*
  35 * This macro returns a properly sign-extended address suitable as base address
 36 * for indexed cache operations.  Two issues here:
 37 *
 38 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 39 *    the index bits from the virtual address.	This breaks with tradition
 40 *    set by the R4000.	 To keep unpleasant surprises from happening we pick
 41 *    an address in KSEG0 / CKSEG0.
 42 *  - We need a properly sign extended address for 64-bit code.	 To get away
 43 *    without ifdefs we let the compiler do it by a type cast.
 44 */
 45#define INDEX_BASE	CKSEG0
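/*
 * Illustrative note (not part of the upstream header): because CKSEG0 is a
 * sign-extended constant, the cast performed by the compiler gives the right
 * base on both ABIs, e.g. 0x80000000 on 32-bit kernels and the sign-extended
 * 0xffffffff80000000 on 64-bit kernels.
 */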
 46
 47#define _cache_op(insn, op, addr)					\
 48	__asm__ __volatile__(						\
 49	"	.set	push					\n"	\
 50	"	.set	noreorder				\n"	\
 51	"	.set "MIPS_ISA_ARCH_LEVEL"			\n"	\
 52	"	" insn("%0", "%1") "				\n"	\
 53	"	.set	pop					\n"	\
 54	:								\
 55	: "i" (op), "R" (*(unsigned char *)(addr)))
 56
 57#define cache_op(op, addr)						\
 58	_cache_op(kernel_cache, op, addr)
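/*
 * Illustrative sketch (not part of the upstream header): cache_op() emits a
 * single MIPS "cache" instruction for the line containing addr.  A caller
 * that wanted to write back and invalidate one D-cache line could use a
 * hypothetical helper such as the one below.
 */
#if 0	/* example only */
static inline void example_flush_one_dcache_line(void *p)
{
	cache_op(Hit_Writeback_Inv_D, (unsigned long)p);
}
#endif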
 59
 60static inline void flush_icache_line_indexed(unsigned long addr)
 61{
 62	cache_op(Index_Invalidate_I, addr);
 63}
 64
 65static inline void flush_dcache_line_indexed(unsigned long addr)
 66{
 67	cache_op(Index_Writeback_Inv_D, addr);
 68}
 69
 70static inline void flush_scache_line_indexed(unsigned long addr)
 71{
 72	cache_op(Index_Writeback_Inv_SD, addr);
 73}
 74
 75static inline void flush_icache_line(unsigned long addr)
 76{
 77	switch (boot_cpu_type()) {
 78	case CPU_LOONGSON2EF:
 79		cache_op(Hit_Invalidate_I_Loongson2, addr);
 80		break;
 81
 82	default:
 83		cache_op(Hit_Invalidate_I, addr);
 84		break;
 85	}
 86}
 87
 88static inline void flush_dcache_line(unsigned long addr)
 89{
 90	cache_op(Hit_Writeback_Inv_D, addr);
 91}
 92
 93static inline void invalidate_dcache_line(unsigned long addr)
 94{
 95	cache_op(Hit_Invalidate_D, addr);
 96}
 97
 98static inline void invalidate_scache_line(unsigned long addr)
 99{
100	cache_op(Hit_Invalidate_SD, addr);
101}
102
103static inline void flush_scache_line(unsigned long addr)
104{
105	cache_op(Hit_Writeback_Inv_SD, addr);
106}
107
108#ifdef CONFIG_EVA
109
110#define protected_cache_op(op, addr)				\
111({								\
112	int __err = 0;						\
113	__asm__ __volatile__(					\
114	"	.set	push			\n"		\
115	"	.set	noreorder		\n"		\
116	"	.set	mips0			\n"		\
117	"	.set	eva			\n"		\
118	"1:	cachee	%1, (%2)		\n"		\
119	"2:	.insn				\n"		\
120	"	.set	pop			\n"		\
121	"	.section .fixup,\"ax\"		\n"		\
122	"3:	li	%0, %3			\n"		\
123	"	j	2b			\n"		\
124	"	.previous			\n"		\
125	"	.section __ex_table,\"a\"	\n"		\
126	"	"STR(PTR_WD)" 1b, 3b		\n"		\
127	"	.previous"					\
128	: "+r" (__err)						\
129	: "i" (op), "r" (addr), "i" (-EFAULT));			\
130	__err;							\
131})
132#else
133
134#define protected_cache_op(op, addr)				\
135({								\
136	int __err = 0;						\
137	__asm__ __volatile__(					\
138	"	.set	push			\n"		\
139	"	.set	noreorder		\n"		\
140	"	.set "MIPS_ISA_ARCH_LEVEL"	\n"		\
141	"1:	cache	%1, (%2)		\n"		\
142	"2:	.insn				\n"		\
143	"	.set	pop			\n"		\
144	"	.section .fixup,\"ax\"		\n"		\
145	"3:	li	%0, %3			\n"		\
146	"	j	2b			\n"		\
147	"	.previous			\n"		\
148	"	.section __ex_table,\"a\"	\n"		\
149	"	"STR(PTR_WD)" 1b, 3b		\n"		\
150	"	.previous"					\
151	: "+r" (__err)						\
152	: "i" (op), "r" (addr), "i" (-EFAULT));			\
153	__err;							\
154})
155#endif
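/*
 * Illustrative note (not part of the upstream header): protected_cache_op()
 * evaluates to 0 on success.  If the cache/cachee instruction at label 1:
 * faults, the __ex_table entry redirects execution to the fixup at label 3:,
 * which loads -EFAULT into __err and jumps back to 2:.  Callers can therefore
 * treat it like a faultable user access, e.g. (hypothetical use):
 */
#if 0	/* example only */
	if (protected_cache_op(Hit_Writeback_Inv_D, user_addr))
		return -EFAULT;	/* mapping for user_addr was not present */
#endif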
156
157/*
158 * The next two are for badland addresses like signal trampolines.
159 */
160static inline int protected_flush_icache_line(unsigned long addr)
161{
162	switch (boot_cpu_type()) {
163	case CPU_LOONGSON2EF:
164		return protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
165
166	default:
167		return protected_cache_op(Hit_Invalidate_I, addr);
168	}
169}
170
171/*
172 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
173 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
174 * caches.  We're talking about one cacheline unnecessarily getting invalidated
175 * here so the penalty isn't overly severe.
176 */
177static inline int protected_writeback_dcache_line(unsigned long addr)
178{
179	return protected_cache_op(Hit_Writeback_Inv_D, addr);
180}
181
182static inline int protected_writeback_scache_line(unsigned long addr)
183{
184	return protected_cache_op(Hit_Writeback_Inv_SD, addr);
185}
186
187/*
188 * This one is RM7000-specific
189 */
190static inline void invalidate_tcache_page(unsigned long addr)
191{
192	cache_op(Page_Invalidate_T, addr);
193}
194
195#define cache_unroll(times, insn, op, addr, lsize) do {			\
196	int i = 0;							\
197	unroll(times, _cache_op, insn, op, (addr) + (i++ * (lsize)));	\
198} while (0)
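/*
 * Illustrative expansion (not part of the upstream header): with times == 32,
 * cache_unroll() is equivalent to 32 back-to-back _cache_op() invocations,
 * one per cache line:
 *
 *	_cache_op(insn, op, (addr) + 0 * (lsize));
 *	_cache_op(insn, op, (addr) + 1 * (lsize));
 *	...
 *	_cache_op(insn, op, (addr) + 31 * (lsize));
 *
 * so each call touches lsize * 32 bytes, which is why the blast loops below
 * advance their address by that amount per iteration.
 */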
199
200/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
201#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)	\
202static inline void extra##blast_##pfx##cache##lsize(void)		\
203{									\
204	unsigned long start = INDEX_BASE;				\
205	unsigned long end = start + current_cpu_data.desc.waysize;	\
206	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
207	unsigned long ws_end = current_cpu_data.desc.ways <<		\
208			       current_cpu_data.desc.waybit;		\
209	unsigned long ws, addr;						\
210									\
211	for (ws = 0; ws < ws_end; ws += ws_inc)				\
212		for (addr = start; addr < end; addr += lsize * 32)	\
213			cache_unroll(32, kernel_cache, indexop,		\
214				     addr | ws, lsize);			\
215}									\
216									\
217static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
218{									\
219	unsigned long start = page;					\
220	unsigned long end = page + PAGE_SIZE;				\
221									\
222	do {								\
223		cache_unroll(32, kernel_cache, hitop, start, lsize);	\
224		start += lsize * 32;					\
225	} while (start < end);						\
226}									\
227									\
228static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
229{									\
230	unsigned long indexmask = current_cpu_data.desc.waysize - 1;	\
231	unsigned long start = INDEX_BASE + (page & indexmask);		\
232	unsigned long end = start + PAGE_SIZE;				\
233	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
234	unsigned long ws_end = current_cpu_data.desc.ways <<		\
235			       current_cpu_data.desc.waybit;		\
236	unsigned long ws, addr;						\
237									\
238	for (ws = 0; ws < ws_end; ws += ws_inc)				\
239		for (addr = start; addr < end; addr += lsize * 32)	\
240			cache_unroll(32, kernel_cache, indexop,		\
241				     addr | ws, lsize);			\
242}
243
244__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
245__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
246__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
247__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
248__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
249__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
250__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
251__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
252__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
253__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
254__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
255__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
256__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )
257
258__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
259__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
260__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
261__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
262__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
263__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
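/*
 * Illustrative note (not part of the upstream header): each instantiation
 * above generates three inline functions.  For the plain 32-byte D-cache
 * variant, for example, the generated names are blast_dcache32(),
 * blast_dcache32_page(page) and blast_dcache32_page_indexed(page); the inv_
 * variants likewise yield blast_inv_dcache32() and friends.  Hypothetical use:
 */
#if 0	/* example only */
	blast_dcache32();			/* flush the whole L1 D-cache */
	blast_dcache32_page(page_vaddr);	/* flush one page via hit ops */
#endif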
264
265#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
266static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
267{									\
268	unsigned long start = page;					\
269	unsigned long end = page + PAGE_SIZE;				\
270									\
271	do {								\
272		cache_unroll(32, user_cache, hitop, start, lsize);	\
273		start += lsize * 32;					\
274	} while (start < end);						\
275}
276
277__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
278			 16)
279__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
280__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
281			 32)
282__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
283__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
284			 64)
285__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
286
287/* build blast_xxx_range, protected_blast_xxx_range */
288#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)	\
289static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
290						    unsigned long end)	\
291{									\
292	unsigned long lsize = cpu_##desc##_line_size();			\
293	unsigned long addr = start & ~(lsize - 1);			\
294	unsigned long aend = (end - 1) & ~(lsize - 1);			\
295									\
296	while (1) {							\
297		prot##cache_op(hitop, addr);				\
298		if (addr == aend)					\
299			break;						\
300		addr += lsize;						\
301	}								\
302}
303
304__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
305__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
306__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
307__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
308	protected_, loongson2_)
309__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
310__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
311__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
312/* blast_inv_dcache_range */
313__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
314__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
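/*
 * Illustrative note (not part of the upstream header): the range variants walk
 * from the line containing start to the line containing end - 1, issuing one
 * hit-type cache op per line.  Hypothetical use on a buffer handed to DMA:
 */
#if 0	/* example only */
	blast_dcache_range(buf_vaddr, buf_vaddr + buf_len);	/* writeback + invalidate */
	protected_blast_icache_range(user_start, user_end);	/* may fault safely */
#endif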
315
316/* Currently, this is very specific to Loongson-3 */
317#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize)	\
318static inline void blast_##pfx##cache##lsize##_node(long node)		\
319{									\
320	unsigned long start = CAC_BASE | nid_to_addrbase(node);		\
321	unsigned long end = start + current_cpu_data.desc.waysize;	\
322	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
323	unsigned long ws_end = current_cpu_data.desc.ways <<		\
324			       current_cpu_data.desc.waybit;		\
325	unsigned long ws, addr;						\
326									\
327	for (ws = 0; ws < ws_end; ws += ws_inc)				\
328		for (addr = start; addr < end; addr += lsize * 32)	\
329			cache_unroll(32, kernel_cache, indexop,		\
330				     addr | ws, lsize);			\
331}
332
333__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
334__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
335__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
336__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
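/*
 * Illustrative note (not part of the upstream header): the _node variants
 * restrict the indexed flush to the address window belonging to @node,
 * starting from CAC_BASE | nid_to_addrbase(node), so only that node's
 * secondary cache is walked.  Hypothetical use, assuming a 64-byte line:
 */
#if 0	/* example only */
	blast_scache64_node(nid);	/* flush the node-local secondary cache */
#endif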
337
338#endif /* _ASM_R4KCACHE_H */
v3.15 (arch/mips/include/asm/r4kcache.h)
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Inline assembly cache operations.
  7 *
  8 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
  9 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 10 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 11 */
 12#ifndef _ASM_R4KCACHE_H
 13#define _ASM_R4KCACHE_H
 14
 15#include <asm/asm.h>
 16#include <asm/cacheops.h>
 17#include <asm/cpu-features.h>
 18#include <asm/cpu-type.h>
 19#include <asm/mipsmtregs.h>
 20#include <asm/uaccess.h> /* for segment_eq() */
 21
 22/*
 23 * This macro returns a properly sign-extended address suitable as base address
 24 * for indexed cache operations.  Two issues here:
 25 *
 26 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 27 *    the index bits from the virtual address.	This breaks with tradition
 28 *    set by the R4000.	 To keep unpleasant surprises from happening we pick
 29 *    an address in KSEG0 / CKSEG0.
 30 *  - We need a properly sign extended address for 64-bit code.	 To get away
 31 *    without ifdefs we let the compiler do it by a type cast.
 32 */
 33#define INDEX_BASE	CKSEG0
 34
 35#define cache_op(op,addr)						\
 36	__asm__ __volatile__(						\
 37	"	.set	push					\n"	\
 38	"	.set	noreorder				\n"	\
 39	"	.set	arch=r4000				\n"	\
 40	"	cache	%0, %1					\n"	\
 41	"	.set	pop					\n"	\
 42	:								\
 43	: "i" (op), "R" (*(unsigned char *)(addr)))
 44
 45#ifdef CONFIG_MIPS_MT
 46/*
 47 * Temporary hacks for SMTC debug. Optionally force single-threaded
 48 * execution during I-cache flushes.
 49 */
 50
 51#define PROTECT_CACHE_FLUSHES 1
 52
 53#ifdef PROTECT_CACHE_FLUSHES
 54
 55extern int mt_protiflush;
 56extern int mt_protdflush;
 57extern void mt_cflush_lockdown(void);
 58extern void mt_cflush_release(void);
 59
 60#define BEGIN_MT_IPROT \
 61	unsigned long flags = 0;			\
 62	unsigned long mtflags = 0;			\
 63	if(mt_protiflush) {				\
 64		local_irq_save(flags);			\
 65		ehb();					\
 66		mtflags = dvpe();			\
 67		mt_cflush_lockdown();			\
 68	}
 69
 70#define END_MT_IPROT \
 71	if(mt_protiflush) {				\
 72		mt_cflush_release();			\
 73		evpe(mtflags);				\
 74		local_irq_restore(flags);		\
 75	}
 76
 77#define BEGIN_MT_DPROT \
 78	unsigned long flags = 0;			\
 79	unsigned long mtflags = 0;			\
 80	if(mt_protdflush) {				\
 81		local_irq_save(flags);			\
 82		ehb();					\
 83		mtflags = dvpe();			\
 84		mt_cflush_lockdown();			\
 85	}
 86
 87#define END_MT_DPROT \
 88	if(mt_protdflush) {				\
 89		mt_cflush_release();			\
 90		evpe(mtflags);				\
 91		local_irq_restore(flags);		\
 92	}
 93
 94#else
 95
 96#define BEGIN_MT_IPROT
 97#define BEGIN_MT_DPROT
 98#define END_MT_IPROT
 99#define END_MT_DPROT
100
101#endif /* PROTECT_CACHE_FLUSHES */
102
103#define __iflush_prologue						\
104	unsigned long redundance;					\
105	extern int mt_n_iflushes;					\
106	BEGIN_MT_IPROT							\
107	for (redundance = 0; redundance < mt_n_iflushes; redundance++) {
108
109#define __iflush_epilogue						\
110	END_MT_IPROT							\
111	}
112
113#define __dflush_prologue						\
114	unsigned long redundance;					\
115	extern int mt_n_dflushes;					\
116	BEGIN_MT_DPROT							\
117	for (redundance = 0; redundance < mt_n_dflushes; redundance++) {
118
119#define __dflush_epilogue \
120	END_MT_DPROT	 \
121	}
122
123#define __inv_dflush_prologue __dflush_prologue
124#define __inv_dflush_epilogue __dflush_epilogue
125#define __sflush_prologue {
126#define __sflush_epilogue }
127#define __inv_sflush_prologue __sflush_prologue
128#define __inv_sflush_epilogue __sflush_epilogue
129
130#else /* CONFIG_MIPS_MT */
131
132#define __iflush_prologue {
133#define __iflush_epilogue }
134#define __dflush_prologue {
135#define __dflush_epilogue }
136#define __inv_dflush_prologue {
137#define __inv_dflush_epilogue }
138#define __sflush_prologue {
139#define __sflush_epilogue }
140#define __inv_sflush_prologue {
141#define __inv_sflush_epilogue }
142
143#endif /* CONFIG_MIPS_MT */
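/*
 * Illustrative expansion (not part of the upstream header): with
 * CONFIG_MIPS_MT, the prologue/epilogue pairs above wrap each flush body in
 * a repeat loop (optionally under single-threaded execution).  A function
 * such as flush_dcache_line() below therefore effectively becomes:
 *
 *	unsigned long redundance;
 *	extern int mt_n_dflushes;
 *	BEGIN_MT_DPROT
 *	for (redundance = 0; redundance < mt_n_dflushes; redundance++) {
 *		cache_op(Hit_Writeback_Inv_D, addr);
 *	END_MT_DPROT
 *	}
 *
 * i.e. the op is repeated mt_n_dflushes times; without CONFIG_MIPS_MT the
 * macros collapse to plain braces.
 */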
144
145static inline void flush_icache_line_indexed(unsigned long addr)
146{
147	__iflush_prologue
148	cache_op(Index_Invalidate_I, addr);
149	__iflush_epilogue
150}
151
152static inline void flush_dcache_line_indexed(unsigned long addr)
153{
154	__dflush_prologue
155	cache_op(Index_Writeback_Inv_D, addr);
156	__dflush_epilogue
157}
158
159static inline void flush_scache_line_indexed(unsigned long addr)
160{
161	cache_op(Index_Writeback_Inv_SD, addr);
162}
163
164static inline void flush_icache_line(unsigned long addr)
165{
166	__iflush_prologue
167	switch (boot_cpu_type()) {
168	case CPU_LOONGSON2:
169		cache_op(Hit_Invalidate_I_Loongson2, addr);
170		break;
171
172	default:
173		cache_op(Hit_Invalidate_I, addr);
174		break;
175	}
176	__iflush_epilogue
177}
178
179static inline void flush_dcache_line(unsigned long addr)
180{
181	__dflush_prologue
182	cache_op(Hit_Writeback_Inv_D, addr);
183	__dflush_epilogue
184}
185
186static inline void invalidate_dcache_line(unsigned long addr)
187{
188	__dflush_prologue
189	cache_op(Hit_Invalidate_D, addr);
190	__dflush_epilogue
191}
192
193static inline void invalidate_scache_line(unsigned long addr)
194{
195	cache_op(Hit_Invalidate_SD, addr);
196}
197
198static inline void flush_scache_line(unsigned long addr)
199{
200	cache_op(Hit_Writeback_Inv_SD, addr);
201}
202
203#define protected_cache_op(op,addr)				\
204	__asm__ __volatile__(					\
205	"	.set	push			\n"		\
206	"	.set	noreorder		\n"		\
207	"	.set	arch=r4000		\n"		\
208	"1:	cache	%0, (%1)		\n"		\
209	"2:	.set	pop			\n"		\
210	"	.section __ex_table,\"a\"	\n"		\
211	"	"STR(PTR)" 1b, 2b		\n"		\
212	"	.previous"					\
213	:							\
214	: "i" (op), "r" (addr))
215
216#define protected_cachee_op(op,addr)				\
217	__asm__ __volatile__(					\
218	"	.set	push			\n"		\
219	"	.set	noreorder		\n"		\
220	"	.set	mips0			\n"		\
221	"	.set	eva			\n"		\
222	"1:	cachee	%0, (%1)		\n"		\
223	"2:	.set	pop			\n"		\
224	"	.section __ex_table,\"a\"	\n"		\
225	"	"STR(PTR)" 1b, 2b		\n"		\
226	"	.previous"					\
227	:							\
228	: "i" (op), "r" (addr))
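/*
 * Illustrative note (not part of the upstream header): unlike later kernels,
 * these protected ops do not report failure.  A faulting cache/cachee at
 * label 1: is simply skipped via the __ex_table entry that resumes at 2:,
 * so the caller receives no -EFAULT and no indication that the line was
 * not touched.
 */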
229
230/*
231 * The next two are for badland addresses like signal trampolines.
232 */
233static inline void protected_flush_icache_line(unsigned long addr)
234{
235	switch (boot_cpu_type()) {
236	case CPU_LOONGSON2:
237		protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
238		break;
239
240	default:
241#ifdef CONFIG_EVA
242		protected_cachee_op(Hit_Invalidate_I, addr);
243#else
244		protected_cache_op(Hit_Invalidate_I, addr);
245#endif
246		break;
247	}
248}
249
250/*
251 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
252 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
253 * caches.  We're talking about one cacheline unnecessarily getting invalidated
254 * here so the penalty isn't overly severe.
255 */
256static inline void protected_writeback_dcache_line(unsigned long addr)
257{
258	protected_cache_op(Hit_Writeback_Inv_D, addr);
259}
260
261static inline void protected_writeback_scache_line(unsigned long addr)
262{
263	protected_cache_op(Hit_Writeback_Inv_SD, addr);
264}
265
266/*
267 * This one is RM7000-specific
268 */
269static inline void invalidate_tcache_page(unsigned long addr)
270{
271	cache_op(Page_Invalidate_T, addr);
272}
273
274#define cache16_unroll32(base,op)					\
275	__asm__ __volatile__(						\
276	"	.set push					\n"	\
277	"	.set noreorder					\n"	\
278	"	.set mips3					\n"	\
279	"	cache %1, 0x000(%0); cache %1, 0x010(%0)	\n"	\
280	"	cache %1, 0x020(%0); cache %1, 0x030(%0)	\n"	\
281	"	cache %1, 0x040(%0); cache %1, 0x050(%0)	\n"	\
282	"	cache %1, 0x060(%0); cache %1, 0x070(%0)	\n"	\
283	"	cache %1, 0x080(%0); cache %1, 0x090(%0)	\n"	\
284	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)	\n"	\
285	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)	\n"	\
286	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)	\n"	\
287	"	cache %1, 0x100(%0); cache %1, 0x110(%0)	\n"	\
288	"	cache %1, 0x120(%0); cache %1, 0x130(%0)	\n"	\
289	"	cache %1, 0x140(%0); cache %1, 0x150(%0)	\n"	\
290	"	cache %1, 0x160(%0); cache %1, 0x170(%0)	\n"	\
291	"	cache %1, 0x180(%0); cache %1, 0x190(%0)	\n"	\
292	"	cache %1, 0x1a0(%0); cache %1, 0x1b0(%0)	\n"	\
293	"	cache %1, 0x1c0(%0); cache %1, 0x1d0(%0)	\n"	\
294	"	cache %1, 0x1e0(%0); cache %1, 0x1f0(%0)	\n"	\
295	"	.set pop					\n"	\
296		:							\
297		: "r" (base),						\
298		  "i" (op));
299
300#define cache32_unroll32(base,op)					\
301	__asm__ __volatile__(						\
302	"	.set push					\n"	\
303	"	.set noreorder					\n"	\
304	"	.set mips3					\n"	\
305	"	cache %1, 0x000(%0); cache %1, 0x020(%0)	\n"	\
306	"	cache %1, 0x040(%0); cache %1, 0x060(%0)	\n"	\
307	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0)	\n"	\
308	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)	\n"	\
309	"	cache %1, 0x100(%0); cache %1, 0x120(%0)	\n"	\
310	"	cache %1, 0x140(%0); cache %1, 0x160(%0)	\n"	\
311	"	cache %1, 0x180(%0); cache %1, 0x1a0(%0)	\n"	\
312	"	cache %1, 0x1c0(%0); cache %1, 0x1e0(%0)	\n"	\
313	"	cache %1, 0x200(%0); cache %1, 0x220(%0)	\n"	\
314	"	cache %1, 0x240(%0); cache %1, 0x260(%0)	\n"	\
315	"	cache %1, 0x280(%0); cache %1, 0x2a0(%0)	\n"	\
316	"	cache %1, 0x2c0(%0); cache %1, 0x2e0(%0)	\n"	\
317	"	cache %1, 0x300(%0); cache %1, 0x320(%0)	\n"	\
318	"	cache %1, 0x340(%0); cache %1, 0x360(%0)	\n"	\
319	"	cache %1, 0x380(%0); cache %1, 0x3a0(%0)	\n"	\
320	"	cache %1, 0x3c0(%0); cache %1, 0x3e0(%0)	\n"	\
321	"	.set pop					\n"	\
322		:							\
323		: "r" (base),						\
324		  "i" (op));
325
326#define cache64_unroll32(base,op)					\
327	__asm__ __volatile__(						\
328	"	.set push					\n"	\
329	"	.set noreorder					\n"	\
330	"	.set mips3					\n"	\
331	"	cache %1, 0x000(%0); cache %1, 0x040(%0)	\n"	\
332	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0)	\n"	\
333	"	cache %1, 0x100(%0); cache %1, 0x140(%0)	\n"	\
334	"	cache %1, 0x180(%0); cache %1, 0x1c0(%0)	\n"	\
335	"	cache %1, 0x200(%0); cache %1, 0x240(%0)	\n"	\
336	"	cache %1, 0x280(%0); cache %1, 0x2c0(%0)	\n"	\
337	"	cache %1, 0x300(%0); cache %1, 0x340(%0)	\n"	\
338	"	cache %1, 0x380(%0); cache %1, 0x3c0(%0)	\n"	\
339	"	cache %1, 0x400(%0); cache %1, 0x440(%0)	\n"	\
340	"	cache %1, 0x480(%0); cache %1, 0x4c0(%0)	\n"	\
341	"	cache %1, 0x500(%0); cache %1, 0x540(%0)	\n"	\
342	"	cache %1, 0x580(%0); cache %1, 0x5c0(%0)	\n"	\
343	"	cache %1, 0x600(%0); cache %1, 0x640(%0)	\n"	\
344	"	cache %1, 0x680(%0); cache %1, 0x6c0(%0)	\n"	\
345	"	cache %1, 0x700(%0); cache %1, 0x740(%0)	\n"	\
346	"	cache %1, 0x780(%0); cache %1, 0x7c0(%0)	\n"	\
347	"	.set pop					\n"	\
348		:							\
349		: "r" (base),						\
350		  "i" (op));
351
352#define cache128_unroll32(base,op)					\
353	__asm__ __volatile__(						\
354	"	.set push					\n"	\
355	"	.set noreorder					\n"	\
356	"	.set mips3					\n"	\
357	"	cache %1, 0x000(%0); cache %1, 0x080(%0)	\n"	\
358	"	cache %1, 0x100(%0); cache %1, 0x180(%0)	\n"	\
359	"	cache %1, 0x200(%0); cache %1, 0x280(%0)	\n"	\
360	"	cache %1, 0x300(%0); cache %1, 0x380(%0)	\n"	\
361	"	cache %1, 0x400(%0); cache %1, 0x480(%0)	\n"	\
362	"	cache %1, 0x500(%0); cache %1, 0x580(%0)	\n"	\
363	"	cache %1, 0x600(%0); cache %1, 0x680(%0)	\n"	\
364	"	cache %1, 0x700(%0); cache %1, 0x780(%0)	\n"	\
365	"	cache %1, 0x800(%0); cache %1, 0x880(%0)	\n"	\
366	"	cache %1, 0x900(%0); cache %1, 0x980(%0)	\n"	\
367	"	cache %1, 0xa00(%0); cache %1, 0xa80(%0)	\n"	\
368	"	cache %1, 0xb00(%0); cache %1, 0xb80(%0)	\n"	\
369	"	cache %1, 0xc00(%0); cache %1, 0xc80(%0)	\n"	\
370	"	cache %1, 0xd00(%0); cache %1, 0xd80(%0)	\n"	\
371	"	cache %1, 0xe00(%0); cache %1, 0xe80(%0)	\n"	\
372	"	cache %1, 0xf00(%0); cache %1, 0xf80(%0)	\n"	\
373	"	.set pop					\n"	\
374		:							\
375		: "r" (base),						\
376		  "i" (op));
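/*
 * Illustrative note (not part of the upstream header): each cacheNN_unroll32()
 * macro issues 32 "cache" instructions at an NN-byte stride, i.e. it covers
 * exactly 32 lines (NN * 32 bytes) per invocation.  The blast loops below
 * therefore advance their address by lsize * 32 after every unrolled block,
 * e.g. (hypothetical fragment for a 32-byte line size):
 */
#if 0	/* example only */
	for (addr = start; addr < end; addr += 32 * 32)
		cache32_unroll32(addr, Hit_Writeback_Inv_D);
#endif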
377
378/*
379 * Perform the cache operation specified by op using a user mode virtual
380 * address while in kernel mode.
381 */
382#define cache16_unroll32_user(base,op)					\
383	__asm__ __volatile__(						\
384	"	.set push					\n"	\
385	"	.set noreorder					\n"	\
386	"	.set mips0					\n"	\
387	"	.set eva					\n"	\
388	"	cachee %1, 0x000(%0); cachee %1, 0x010(%0)	\n"	\
389	"	cachee %1, 0x020(%0); cachee %1, 0x030(%0)	\n"	\
390	"	cachee %1, 0x040(%0); cachee %1, 0x050(%0)	\n"	\
391	"	cachee %1, 0x060(%0); cachee %1, 0x070(%0)	\n"	\
392	"	cachee %1, 0x080(%0); cachee %1, 0x090(%0)	\n"	\
393	"	cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0)	\n"	\
394	"	cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0)	\n"	\
395	"	cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0)	\n"	\
396	"	cachee %1, 0x100(%0); cachee %1, 0x110(%0)	\n"	\
397	"	cachee %1, 0x120(%0); cachee %1, 0x130(%0)	\n"	\
398	"	cachee %1, 0x140(%0); cachee %1, 0x150(%0)	\n"	\
399	"	cachee %1, 0x160(%0); cachee %1, 0x170(%0)	\n"	\
400	"	cachee %1, 0x180(%0); cachee %1, 0x190(%0)	\n"	\
401	"	cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0)	\n"	\
402	"	cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0)	\n"	\
403	"	cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0)	\n"	\
404	"	.set pop					\n"	\
405		:							\
406		: "r" (base),						\
407		  "i" (op));
408
409#define cache32_unroll32_user(base, op)					\
410	__asm__ __volatile__(						\
411	"	.set push					\n"	\
412	"	.set noreorder					\n"	\
413	"	.set mips0					\n"	\
414	"	.set eva					\n"	\
415	"	cachee %1, 0x000(%0); cachee %1, 0x020(%0)	\n"	\
416	"	cachee %1, 0x040(%0); cachee %1, 0x060(%0)	\n"	\
417	"	cachee %1, 0x080(%0); cachee %1, 0x0a0(%0)	\n"	\
418	"	cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0)	\n"	\
419	"	cachee %1, 0x100(%0); cachee %1, 0x120(%0)	\n"	\
420	"	cachee %1, 0x140(%0); cachee %1, 0x160(%0)	\n"	\
421	"	cachee %1, 0x180(%0); cachee %1, 0x1a0(%0)	\n"	\
422	"	cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0)	\n"	\
423	"	cachee %1, 0x200(%0); cachee %1, 0x220(%0)	\n"	\
424	"	cachee %1, 0x240(%0); cachee %1, 0x260(%0)	\n"	\
425	"	cachee %1, 0x280(%0); cachee %1, 0x2a0(%0)	\n"	\
426	"	cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0)	\n"	\
427	"	cachee %1, 0x300(%0); cachee %1, 0x320(%0)	\n"	\
428	"	cachee %1, 0x340(%0); cachee %1, 0x360(%0)	\n"	\
429	"	cachee %1, 0x380(%0); cachee %1, 0x3a0(%0)	\n"	\
430	"	cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0)	\n"	\
431	"	.set pop					\n"	\
432		:							\
433		: "r" (base),						\
434		  "i" (op));
435
436#define cache64_unroll32_user(base, op)					\
437	__asm__ __volatile__(						\
438	"	.set push					\n"	\
439	"	.set noreorder					\n"	\
440	"	.set mips0					\n"	\
441	"	.set eva					\n"	\
442	"	cachee %1, 0x000(%0); cachee %1, 0x040(%0)	\n"	\
443	"	cachee %1, 0x080(%0); cachee %1, 0x0c0(%0)	\n"	\
444	"	cachee %1, 0x100(%0); cachee %1, 0x140(%0)	\n"	\
445	"	cachee %1, 0x180(%0); cachee %1, 0x1c0(%0)	\n"	\
446	"	cachee %1, 0x200(%0); cachee %1, 0x240(%0)	\n"	\
447	"	cachee %1, 0x280(%0); cachee %1, 0x2c0(%0)	\n"	\
448	"	cachee %1, 0x300(%0); cachee %1, 0x340(%0)	\n"	\
449	"	cachee %1, 0x380(%0); cachee %1, 0x3c0(%0)	\n"	\
450	"	cachee %1, 0x400(%0); cachee %1, 0x440(%0)	\n"	\
451	"	cachee %1, 0x480(%0); cachee %1, 0x4c0(%0)	\n"	\
452	"	cachee %1, 0x500(%0); cachee %1, 0x540(%0)	\n"	\
453	"	cachee %1, 0x580(%0); cachee %1, 0x5c0(%0)	\n"	\
454	"	cachee %1, 0x600(%0); cachee %1, 0x640(%0)	\n"	\
455	"	cachee %1, 0x680(%0); cachee %1, 0x6c0(%0)	\n"	\
456	"	cachee %1, 0x700(%0); cachee %1, 0x740(%0)	\n"	\
457	"	cachee %1, 0x780(%0); cachee %1, 0x7c0(%0)	\n"	\
458	"	.set pop					\n"	\
459		:							\
460		: "r" (base),						\
461		  "i" (op));
462
463/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
464#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)	\
465static inline void extra##blast_##pfx##cache##lsize(void)		\
466{									\
467	unsigned long start = INDEX_BASE;				\
468	unsigned long end = start + current_cpu_data.desc.waysize;	\
469	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
470	unsigned long ws_end = current_cpu_data.desc.ways <<		\
471			       current_cpu_data.desc.waybit;		\
472	unsigned long ws, addr;						\
473									\
474	__##pfx##flush_prologue						\
475									\
476	for (ws = 0; ws < ws_end; ws += ws_inc)				\
477		for (addr = start; addr < end; addr += lsize * 32)	\
478			cache##lsize##_unroll32(addr|ws, indexop);	\
479									\
480	__##pfx##flush_epilogue						\
481}									\
482									\
483static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
484{									\
485	unsigned long start = page;					\
486	unsigned long end = page + PAGE_SIZE;				\
487									\
488	__##pfx##flush_prologue						\
489									\
490	do {								\
491		cache##lsize##_unroll32(start, hitop);			\
492		start += lsize * 32;					\
493	} while (start < end);						\
494									\
495	__##pfx##flush_epilogue						\
496}									\
497									\
498static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
499{									\
500	unsigned long indexmask = current_cpu_data.desc.waysize - 1;	\
501	unsigned long start = INDEX_BASE + (page & indexmask);		\
502	unsigned long end = start + PAGE_SIZE;				\
503	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
504	unsigned long ws_end = current_cpu_data.desc.ways <<		\
505			       current_cpu_data.desc.waybit;		\
506	unsigned long ws, addr;						\
507									\
508	__##pfx##flush_prologue						\
509									\
510	for (ws = 0; ws < ws_end; ws += ws_inc)				\
511		for (addr = start; addr < end; addr += lsize * 32)	\
512			cache##lsize##_unroll32(addr|ws, indexop);	\
513									\
514	__##pfx##flush_epilogue						\
515}
516
517__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
518__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
519__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
520__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
521__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
522__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
523__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
524__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
525__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
526__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
527__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )
528
529__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
530__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
531__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
532__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
533__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
534__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
535
536#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
537static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
538{									\
539	unsigned long start = page;					\
540	unsigned long end = page + PAGE_SIZE;				\
541									\
542	__##pfx##flush_prologue						\
543									\
544	do {								\
545		cache##lsize##_unroll32_user(start, hitop);             \
546		start += lsize * 32;					\
547	} while (start < end);						\
548									\
549	__##pfx##flush_epilogue						\
550}
551
552__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
553			 16)
554__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
555__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
556			 32)
557__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
558__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
559			 64)
560__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
561
562/* build blast_xxx_range, protected_blast_xxx_range */
563#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)	\
564static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
565						    unsigned long end)	\
566{									\
567	unsigned long lsize = cpu_##desc##_line_size();			\
568	unsigned long addr = start & ~(lsize - 1);			\
569	unsigned long aend = (end - 1) & ~(lsize - 1);			\
570									\
571	__##pfx##flush_prologue						\
572									\
573	while (1) {							\
574		prot##cache_op(hitop, addr);				\
575		if (addr == aend)					\
576			break;						\
577		addr += lsize;						\
578	}								\
579									\
580	__##pfx##flush_epilogue						\
581}
582
583#ifndef CONFIG_EVA
584
585__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
586__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
587
588#else
589
590#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop)		\
591static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
592							unsigned long end) \
593{									\
594	unsigned long lsize = cpu_##desc##_line_size();			\
595	unsigned long addr = start & ~(lsize - 1);			\
596	unsigned long aend = (end - 1) & ~(lsize - 1);			\
597									\
598	__##pfx##flush_prologue						\
599									\
600	if (segment_eq(get_fs(), USER_DS)) {				\
601		while (1) {						\
602			protected_cachee_op(hitop, addr);		\
603			if (addr == aend)				\
604				break;					\
605			addr += lsize;					\
606		}							\
607	} else {							\
608		while (1) {						\
609			protected_cache_op(hitop, addr);		\
610			if (addr == aend)				\
611				break;					\
612			addr += lsize;					\
613		}                                                       \
614									\
615	}								\
616	__##pfx##flush_epilogue						\
617}
618
619__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
620__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)
621
622#endif
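/*
 * Illustrative note (not part of the upstream header): under CONFIG_EVA the
 * protected d/i range flushes above choose the instruction flavour at run
 * time: segment_eq(get_fs(), USER_DS) means the range is a user-segment
 * address, so the EVA "cachee" form is used; otherwise the plain "cache"
 * form is used for kernel addresses.
 */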
623__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
624__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
625	protected_, loongson2_)
626__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
627__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
628__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
629/* blast_inv_dcache_range */
630__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
631__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
632
633#endif /* _ASM_R4KCACHE_H */