v3.1
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <asm/asm.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/mipsmtregs.h>

/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations.  Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address.  This breaks with the tradition
 *    set by the R4000.  To keep unpleasant surprises from happening we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign-extended address for 64-bit code.  To get away
 *    without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE	CKSEG0

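/*
 * Editor's sketch (not part of the original header; example_index_base is
 * a hypothetical name): the "type cast" trick described above.  Casting
 * the 32-bit KSEG0 constant through a signed 32-bit type makes the
 * compiler sign-extend it wherever unsigned long is 64 bits wide, yielding
 * 0x80000000 on 32-bit builds and 0xffffffff80000000 on 64-bit builds
 * without any #ifdef.
 */
static inline unsigned long example_index_base(void)
{
	return (unsigned long)(int)0x80000000;	/* sign-extends on 64-bit */
}
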
#define cache_op(op,addr)						\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	noreorder				\n"	\
	"	.set	mips3					\n"	\
	"	cache	%0, %1					\n"	\
	"	.set	pop					\n"	\
	:								\
	: "i" (op), "R" (*(unsigned char *)(addr)))

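/*
 * Editor's usage sketch (hypothetical, for illustration only): cache_op()
 * expands to a single CACHE instruction; the "R" constraint exposes the
 * target line to the assembler as an addressable memory operand.  Writing
 * back and invalidating one primary D-cache line by virtual address:
 */
static inline void example_writeback_one_dline(unsigned long vaddr)
{
	cache_op(Hit_Writeback_Inv_D, vaddr);	/* same op as flush_dcache_line() below */
}
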
#ifdef CONFIG_MIPS_MT
/*
 * Temporary hacks for SMTC debug. Optionally force single-threaded
 * execution during I-cache flushes.
 */

#define PROTECT_CACHE_FLUSHES 1

#ifdef PROTECT_CACHE_FLUSHES

extern int mt_protiflush;
extern int mt_protdflush;
extern void mt_cflush_lockdown(void);
extern void mt_cflush_release(void);

#define BEGIN_MT_IPROT \
	unsigned long flags = 0;			\
	unsigned long mtflags = 0;			\
	if (mt_protiflush) {				\
		local_irq_save(flags);			\
		ehb();					\
		mtflags = dvpe();			\
		mt_cflush_lockdown();			\
	}

#define END_MT_IPROT \
	if (mt_protiflush) {				\
		mt_cflush_release();			\
		evpe(mtflags);				\
		local_irq_restore(flags);		\
	}

#define BEGIN_MT_DPROT \
	unsigned long flags = 0;			\
	unsigned long mtflags = 0;			\
	if (mt_protdflush) {				\
		local_irq_save(flags);			\
		ehb();					\
		mtflags = dvpe();			\
		mt_cflush_lockdown();			\
	}

#define END_MT_DPROT \
	if (mt_protdflush) {				\
		mt_cflush_release();			\
		evpe(mtflags);				\
		local_irq_restore(flags);		\
	}

#else

#define BEGIN_MT_IPROT
#define BEGIN_MT_DPROT
#define END_MT_IPROT
#define END_MT_DPROT

#endif /* PROTECT_CACHE_FLUSHES */

#define __iflush_prologue						\
	unsigned long redundance;					\
	extern int mt_n_iflushes;					\
	BEGIN_MT_IPROT							\
	for (redundance = 0; redundance < mt_n_iflushes; redundance++) {

#define __iflush_epilogue						\
	END_MT_IPROT							\
	}

#define __dflush_prologue						\
	unsigned long redundance;					\
	extern int mt_n_dflushes;					\
	BEGIN_MT_DPROT							\
	for (redundance = 0; redundance < mt_n_dflushes; redundance++) {

#define __dflush_epilogue \
	END_MT_DPROT	 \
	}

#define __inv_dflush_prologue __dflush_prologue
#define __inv_dflush_epilogue __dflush_epilogue
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue __sflush_prologue
#define __inv_sflush_epilogue __sflush_epilogue

#else /* CONFIG_MIPS_MT */

#define __iflush_prologue {
#define __iflush_epilogue }
#define __dflush_prologue {
#define __dflush_epilogue }
#define __inv_dflush_prologue {
#define __inv_dflush_epilogue }
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue {
#define __inv_sflush_epilogue }

#endif /* CONFIG_MIPS_MT */

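/*
 * Editor's expansion sketch (illustrative, not from the original source):
 * with CONFIG_MIPS_MT, wrapping a line flush in the prologue/epilogue pair,
 *
 *	__dflush_prologue
 *	cache_op(Hit_Writeback_Inv_D, addr);
 *	__dflush_epilogue
 *
 * expands to roughly
 *
 *	unsigned long redundance;
 *	extern int mt_n_dflushes;
 *	BEGIN_MT_DPROT
 *	for (redundance = 0; redundance < mt_n_dflushes; redundance++) {
 *		cache_op(Hit_Writeback_Inv_D, addr);
 *	END_MT_DPROT
 *	}
 *
 * i.e. the flush repeats mt_n_dflushes times, optionally single-threaded
 * via dvpe()/evpe() while mt_protdflush is set.  Without CONFIG_MIPS_MT the
 * pair degenerates to a bare { } block around the flush.
 */
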
static inline void flush_icache_line_indexed(unsigned long addr)
{
	__iflush_prologue
	cache_op(Index_Invalidate_I, addr);
	__iflush_epilogue
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	__dflush_prologue
	cache_op(Index_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
	__iflush_prologue
	cache_op(Hit_Invalidate_I, addr);
	__iflush_epilogue
}

static inline void flush_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Invalidate_D, addr);
	__dflush_epilogue
}

static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}

#define protected_cache_op(op,addr)				\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set	mips3			\n"		\
	"1:	cache	%0, (%1)		\n"		\
	"2:	.set	pop			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 2b		\n"		\
	"	.previous"					\
	:							\
	: "i" (op), "r" (addr))

/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
	protected_cache_op(Hit_Invalidate_I, addr);
}

/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
 * caches.  We're talking about one cacheline unnecessarily getting invalidated
 * here so the penalty isn't overly hard.
 */
static inline void protected_writeback_dcache_line(unsigned long addr)
{
	protected_cache_op(Hit_Writeback_Inv_D, addr);
}

static inline void protected_writeback_scache_line(unsigned long addr)
{
	protected_cache_op(Hit_Writeback_Inv_SD, addr);
}

/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
	cache_op(Page_Invalidate_T, addr);
}

#define cache16_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x010(%0)	\n"	\
	"	cache %1, 0x020(%0); cache %1, 0x030(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x050(%0)	\n"	\
	"	cache %1, 0x060(%0); cache %1, 0x070(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x090(%0)	\n"	\
	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)	\n"	\
	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x110(%0)	\n"	\
	"	cache %1, 0x120(%0); cache %1, 0x130(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x150(%0)	\n"	\
	"	cache %1, 0x160(%0); cache %1, 0x170(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x190(%0)	\n"	\
	"	cache %1, 0x1a0(%0); cache %1, 0x1b0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1d0(%0)	\n"	\
	"	cache %1, 0x1e0(%0); cache %1, 0x1f0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache32_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x020(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x060(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x120(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x160(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1a0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1e0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x220(%0)	\n"	\
	"	cache %1, 0x240(%0); cache %1, 0x260(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2a0(%0)	\n"	\
	"	cache %1, 0x2c0(%0); cache %1, 0x2e0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x320(%0)	\n"	\
	"	cache %1, 0x340(%0); cache %1, 0x360(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3a0(%0)	\n"	\
	"	cache %1, 0x3c0(%0); cache %1, 0x3e0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache64_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x040(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x140(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1c0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x240(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2c0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x340(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3c0(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x440(%0)	\n"	\
	"	cache %1, 0x480(%0); cache %1, 0x4c0(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x540(%0)	\n"	\
	"	cache %1, 0x580(%0); cache %1, 0x5c0(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x640(%0)	\n"	\
	"	cache %1, 0x680(%0); cache %1, 0x6c0(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x740(%0)	\n"	\
	"	cache %1, 0x780(%0); cache %1, 0x7c0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache128_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x080(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x180(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x280(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x380(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x480(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x580(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x680(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x780(%0)	\n"	\
	"	cache %1, 0x800(%0); cache %1, 0x880(%0)	\n"	\
	"	cache %1, 0x900(%0); cache %1, 0x980(%0)	\n"	\
	"	cache %1, 0xa00(%0); cache %1, 0xa80(%0)	\n"	\
	"	cache %1, 0xb00(%0); cache %1, 0xb80(%0)	\n"	\
	"	cache %1, 0xc00(%0); cache %1, 0xc80(%0)	\n"	\
	"	cache %1, 0xd00(%0); cache %1, 0xd80(%0)	\n"	\
	"	cache %1, 0xe00(%0); cache %1, 0xe80(%0)	\n"	\
	"	cache %1, 0xf00(%0); cache %1, 0xf80(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize(void)			\
{									\
	unsigned long start = INDEX_BASE;				\
	unsigned long end = start + current_cpu_data.desc.waysize;	\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void blast_##pfx##cache##lsize##_page(unsigned long page)	\
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	__##pfx##flush_prologue						\
									\
	do {								\
		cache##lsize##_unroll32(start, hitop);			\
		start += lsize * 32;					\
	} while (start < end);						\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{									\
	unsigned long indexmask = current_cpu_data.desc.waysize - 1;	\
	unsigned long start = INDEX_BASE + (page & indexmask);		\
	unsigned long end = start + PAGE_SIZE;				\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
									\
	__##pfx##flush_epilogue						\
}

__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32)
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64)
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16)
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32)
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16)
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32)
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64)
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128)
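
/*
 * Editor's usage sketch (hypothetical): each __BUILD_BLAST_CACHE() line
 * above emits three inline functions; e.g. the (d, dcache, ..., 32)
 * expansion provides blast_dcache32(), blast_dcache32_page() and
 * blast_dcache32_page_indexed().
 */
static inline void example_flush_entire_dcache32(void)
{
	blast_dcache32();	/* index-writeback-invalidate every way and set */
}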

/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot) \
static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
						    unsigned long end)	\
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	__##pfx##flush_prologue						\
									\
	while (1) {							\
		prot##cache_op(hitop, addr);				\
		if (addr == aend)					\
			break;						\
		addr += lsize;						\
	}								\
									\
	__##pfx##flush_epilogue						\
}

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_)
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_)
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, )
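
/*
 * Editor's usage sketch (hypothetical): the range variants walk whole
 * cache lines covering [start, end).  A writeback-plus-invalidate of a
 * kernel buffer before DMA could look like:
 */
static inline void example_dma_sync(unsigned long start, unsigned long size)
{
	blast_dcache_range(start, start + size);
}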

#endif /* _ASM_R4KCACHE_H */
v4.17
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <linux/stringify.h>

#include <asm/asm.h>
#include <asm/cacheops.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
#include <linux/uaccess.h> /* for uaccess_kernel() */

extern void (*r4k_blast_dcache)(void);
extern void (*r4k_blast_icache)(void);

/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations.  Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address.  This breaks with the tradition
 *    set by the R4000.  To keep unpleasant surprises from happening we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign-extended address for 64-bit code.  To get away
 *    without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE	CKSEG0

#define cache_op(op,addr)						\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	noreorder				\n"	\
	"	.set "MIPS_ISA_ARCH_LEVEL"			\n"	\
	"	cache	%0, %1					\n"	\
	"	.set	pop					\n"	\
	:								\
	: "i" (op), "R" (*(unsigned char *)(addr)))

#ifdef CONFIG_MIPS_MT

#define __iflush_prologue						\
	unsigned long redundance;					\
	extern int mt_n_iflushes;					\
	for (redundance = 0; redundance < mt_n_iflushes; redundance++) {

#define __iflush_epilogue						\
	}

#define __dflush_prologue						\
	unsigned long redundance;					\
	extern int mt_n_dflushes;					\
	for (redundance = 0; redundance < mt_n_dflushes; redundance++) {

#define __dflush_epilogue \
	}

#define __inv_dflush_prologue __dflush_prologue
#define __inv_dflush_epilogue __dflush_epilogue
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue __sflush_prologue
#define __inv_sflush_epilogue __sflush_epilogue

#else /* CONFIG_MIPS_MT */

#define __iflush_prologue {
#define __iflush_epilogue }
#define __dflush_prologue {
#define __dflush_epilogue }
#define __inv_dflush_prologue {
#define __inv_dflush_epilogue }
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue {
#define __inv_sflush_epilogue }

#endif /* CONFIG_MIPS_MT */

static inline void flush_icache_line_indexed(unsigned long addr)
{
	__iflush_prologue
	cache_op(Index_Invalidate_I, addr);
	__iflush_epilogue
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	__dflush_prologue
	cache_op(Index_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
	__iflush_prologue
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		cache_op(Hit_Invalidate_I_Loongson2, addr);
		break;

	default:
		cache_op(Hit_Invalidate_I, addr);
		break;
	}
	__iflush_epilogue
}

static inline void flush_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Invalidate_D, addr);
	__dflush_epilogue
}

static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}

#define protected_cache_op(op,addr)				\
({								\
	int __err = 0;						\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set "MIPS_ISA_ARCH_LEVEL"	\n"		\
	"1:	cache	%1, (%2)		\n"		\
	"2:	.insn				\n"		\
	"	.set	pop			\n"		\
	"	.section .fixup,\"ax\"		\n"		\
	"3:	li	%0, %3			\n"		\
	"	j	2b			\n"		\
	"	.previous			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 3b		\n"		\
	"	.previous"					\
	: "+r" (__err)						\
	: "i" (op), "r" (addr), "i" (-EFAULT));			\
	__err;							\
})

#define protected_cachee_op(op,addr)				\
({								\
	int __err = 0;						\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set	mips0			\n"		\
	"	.set	eva			\n"		\
	"1:	cachee	%1, (%2)		\n"		\
	"2:	.insn				\n"		\
	"	.set	pop			\n"		\
	"	.section .fixup,\"ax\"		\n"		\
	"3:	li	%0, %3			\n"		\
	"	j	2b			\n"		\
	"	.previous			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 3b		\n"		\
	"	.previous"					\
	: "+r" (__err)						\
	: "i" (op), "r" (addr), "i" (-EFAULT));			\
	__err;							\
})

/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline int protected_flush_icache_line(unsigned long addr)
{
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		return protected_cache_op(Hit_Invalidate_I_Loongson2, addr);

	default:
#ifdef CONFIG_EVA
		return protected_cachee_op(Hit_Invalidate_I, addr);
#else
		return protected_cache_op(Hit_Invalidate_I, addr);
#endif
	}
}

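/*
 * Editor's usage sketch (hypothetical): unlike the v3.1 helpers, the
 * protected operations now return 0 or -EFAULT, so callers can propagate
 * the fault instead of silently ignoring it:
 */
static inline int example_sync_trampoline_icache(unsigned long addr)
{
	return protected_flush_icache_line(addr);	/* 0 on success, -EFAULT on fault */
}
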
/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
 * caches.  We're talking about one cacheline unnecessarily getting invalidated
 * here so the penalty isn't overly hard.
 */
static inline int protected_writeback_dcache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
	return protected_cachee_op(Hit_Writeback_Inv_D, addr);
#else
	return protected_cache_op(Hit_Writeback_Inv_D, addr);
#endif
}

static inline int protected_writeback_scache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
	return protected_cachee_op(Hit_Writeback_Inv_SD, addr);
#else
	return protected_cache_op(Hit_Writeback_Inv_SD, addr);
#endif
}

/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
	cache_op(Page_Invalidate_T, addr);
}

#ifndef CONFIG_CPU_MIPSR6
#define cache16_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x010(%0)	\n"	\
	"	cache %1, 0x020(%0); cache %1, 0x030(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x050(%0)	\n"	\
	"	cache %1, 0x060(%0); cache %1, 0x070(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x090(%0)	\n"	\
	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)	\n"	\
	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x110(%0)	\n"	\
	"	cache %1, 0x120(%0); cache %1, 0x130(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x150(%0)	\n"	\
	"	cache %1, 0x160(%0); cache %1, 0x170(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x190(%0)	\n"	\
	"	cache %1, 0x1a0(%0); cache %1, 0x1b0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1d0(%0)	\n"	\
	"	cache %1, 0x1e0(%0); cache %1, 0x1f0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache32_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x020(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x060(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x120(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x160(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1a0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1e0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x220(%0)	\n"	\
	"	cache %1, 0x240(%0); cache %1, 0x260(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2a0(%0)	\n"	\
	"	cache %1, 0x2c0(%0); cache %1, 0x2e0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x320(%0)	\n"	\
	"	cache %1, 0x340(%0); cache %1, 0x360(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3a0(%0)	\n"	\
	"	cache %1, 0x3c0(%0); cache %1, 0x3e0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache64_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x040(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x140(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1c0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x240(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2c0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x340(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3c0(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x440(%0)	\n"	\
	"	cache %1, 0x480(%0); cache %1, 0x4c0(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x540(%0)	\n"	\
	"	cache %1, 0x580(%0); cache %1, 0x5c0(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x640(%0)	\n"	\
	"	cache %1, 0x680(%0); cache %1, 0x6c0(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x740(%0)	\n"	\
	"	cache %1, 0x780(%0); cache %1, 0x7c0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache128_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x080(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x180(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x280(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x380(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x480(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x580(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x680(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x780(%0)	\n"	\
	"	cache %1, 0x800(%0); cache %1, 0x880(%0)	\n"	\
	"	cache %1, 0x900(%0); cache %1, 0x980(%0)	\n"	\
	"	cache %1, 0xa00(%0); cache %1, 0xa80(%0)	\n"	\
	"	cache %1, 0xb00(%0); cache %1, 0xb80(%0)	\n"	\
	"	cache %1, 0xc00(%0); cache %1, 0xc80(%0)	\n"	\
	"	cache %1, 0xd00(%0); cache %1, 0xd80(%0)	\n"	\
	"	cache %1, 0xe00(%0); cache %1, 0xe80(%0)	\n"	\
	"	cache %1, 0xf00(%0); cache %1, 0xf80(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#else
/*
 * MIPS R6 changed the cache opcode and moved to an 8-bit offset field.
 * This means we now need to increment the base register before we flush
 * more cache lines.
 */
#define cache16_unroll32(base,op)				\
	__asm__ __volatile__(					\
	"	.set push\n"					\
	"	.set noreorder\n"				\
	"	.set mips64r6\n"				\
	"	.set noat\n"					\
	"	cache %1, 0x000(%0); cache %1, 0x010(%0)\n"	\
	"	cache %1, 0x020(%0); cache %1, 0x030(%0)\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x050(%0)\n"	\
	"	cache %1, 0x060(%0); cache %1, 0x070(%0)\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x090(%0)\n"	\
	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n"	\
	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, %0, 0x100	\n"	\
	"	cache %1, 0x000($1); cache %1, 0x010($1)\n"	\
	"	cache %1, 0x020($1); cache %1, 0x030($1)\n"	\
	"	cache %1, 0x040($1); cache %1, 0x050($1)\n"	\
	"	cache %1, 0x060($1); cache %1, 0x070($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x090($1)\n"	\
	"	cache %1, 0x0a0($1); cache %1, 0x0b0($1)\n"	\
	"	cache %1, 0x0c0($1); cache %1, 0x0d0($1)\n"	\
	"	cache %1, 0x0e0($1); cache %1, 0x0f0($1)\n"	\
	"	.set pop\n"					\
		:						\
		: "r" (base),					\
		  "i" (op));

#define cache32_unroll32(base,op)				\
	__asm__ __volatile__(					\
	"	.set push\n"					\
	"	.set noreorder\n"				\
	"	.set mips64r6\n"				\
	"	.set noat\n"					\
	"	cache %1, 0x000(%0); cache %1, 0x020(%0)\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x060(%0)\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, %0, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x020($1)\n"	\
	"	cache %1, 0x040($1); cache %1, 0x060($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n"	\
	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x020($1)\n"	\
	"	cache %1, 0x040($1); cache %1, 0x060($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n"	\
	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100\n"	\
	"	cache %1, 0x000($1); cache %1, 0x020($1)\n"	\
	"	cache %1, 0x040($1); cache %1, 0x060($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n"	\
	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"	\
	"	.set pop\n"					\
		:						\
		: "r" (base),					\
		  "i" (op));

#define cache64_unroll32(base,op)				\
	__asm__ __volatile__(					\
	"	.set push\n"					\
	"	.set noreorder\n"				\
	"	.set mips64r6\n"				\
	"	.set noat\n"					\
	"	cache %1, 0x000(%0); cache %1, 0x040(%0)\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, %0, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	.set pop\n"					\
		:						\
		: "r" (base),					\
		  "i" (op));

#define cache128_unroll32(base,op)				\
	__asm__ __volatile__(					\
	"	.set push\n"					\
	"	.set noreorder\n"				\
	"	.set mips64r6\n"				\
	"	.set noat\n"					\
	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, %0, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	.set pop\n"					\
		:						\
		: "r" (base),					\
		  "i" (op));
#endif /* CONFIG_CPU_MIPSR6 */

/*
 * Perform the cache operation specified by op using a user mode virtual
 * address while in kernel mode.
 */
#define cache16_unroll32_user(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x010(%0)	\n"	\
	"	cachee %1, 0x020(%0); cachee %1, 0x030(%0)	\n"	\
	"	cachee %1, 0x040(%0); cachee %1, 0x050(%0)	\n"	\
	"	cachee %1, 0x060(%0); cachee %1, 0x070(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x090(%0)	\n"	\
	"	cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0)	\n"	\
	"	cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0)	\n"	\
	"	cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x110(%0)	\n"	\
	"	cachee %1, 0x120(%0); cachee %1, 0x130(%0)	\n"	\
	"	cachee %1, 0x140(%0); cachee %1, 0x150(%0)	\n"	\
	"	cachee %1, 0x160(%0); cachee %1, 0x170(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x190(%0)	\n"	\
	"	cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0)	\n"	\
	"	cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0)	\n"	\
	"	cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache32_unroll32_user(base, op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x020(%0)	\n"	\
	"	cachee %1, 0x040(%0); cachee %1, 0x060(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x0a0(%0)	\n"	\
	"	cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x120(%0)	\n"	\
	"	cachee %1, 0x140(%0); cachee %1, 0x160(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x1a0(%0)	\n"	\
	"	cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0)	\n"	\
	"	cachee %1, 0x200(%0); cachee %1, 0x220(%0)	\n"	\
	"	cachee %1, 0x240(%0); cachee %1, 0x260(%0)	\n"	\
	"	cachee %1, 0x280(%0); cachee %1, 0x2a0(%0)	\n"	\
	"	cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0)	\n"	\
	"	cachee %1, 0x300(%0); cachee %1, 0x320(%0)	\n"	\
	"	cachee %1, 0x340(%0); cachee %1, 0x360(%0)	\n"	\
	"	cachee %1, 0x380(%0); cachee %1, 0x3a0(%0)	\n"	\
	"	cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache64_unroll32_user(base, op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x040(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x0c0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x140(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x1c0(%0)	\n"	\
	"	cachee %1, 0x200(%0); cachee %1, 0x240(%0)	\n"	\
	"	cachee %1, 0x280(%0); cachee %1, 0x2c0(%0)	\n"	\
	"	cachee %1, 0x300(%0); cachee %1, 0x340(%0)	\n"	\
	"	cachee %1, 0x380(%0); cachee %1, 0x3c0(%0)	\n"	\
	"	cachee %1, 0x400(%0); cachee %1, 0x440(%0)	\n"	\
	"	cachee %1, 0x480(%0); cachee %1, 0x4c0(%0)	\n"	\
	"	cachee %1, 0x500(%0); cachee %1, 0x540(%0)	\n"	\
	"	cachee %1, 0x580(%0); cachee %1, 0x5c0(%0)	\n"	\
	"	cachee %1, 0x600(%0); cachee %1, 0x640(%0)	\n"	\
	"	cachee %1, 0x680(%0); cachee %1, 0x6c0(%0)	\n"	\
	"	cachee %1, 0x700(%0); cachee %1, 0x740(%0)	\n"	\
	"	cachee %1, 0x780(%0); cachee %1, 0x7c0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)	\
static inline void extra##blast_##pfx##cache##lsize(void)		\
{									\
	unsigned long start = INDEX_BASE;				\
	unsigned long end = start + current_cpu_data.desc.waysize;	\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	__##pfx##flush_prologue						\
									\
	do {								\
		cache##lsize##_unroll32(start, hitop);			\
		start += lsize * 32;					\
	} while (start < end);						\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{									\
	unsigned long indexmask = current_cpu_data.desc.waysize - 1;	\
	unsigned long start = INDEX_BASE + (page & indexmask);		\
	unsigned long end = start + PAGE_SIZE;				\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
									\
	__##pfx##flush_epilogue						\
}

__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )

#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	__##pfx##flush_prologue						\
									\
	do {								\
		cache##lsize##_unroll32_user(start, hitop);		\
		start += lsize * 32;					\
	} while (start < end);						\
									\
	__##pfx##flush_epilogue						\
}

__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 16)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 32)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 64)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)

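/*
 * Editor's usage sketch (hypothetical; assumes an EVA-capable toolchain,
 * since the _user_page variants are built from the cachee unrolls above):
 * flushing one user page's D-cache lines through its user virtual address:
 */
static inline void example_flush_user_dpage(unsigned long page)
{
	blast_dcache32_user_page(page);	/* writeback + invalidate one user page */
}
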
/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)	\
static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
						    unsigned long end)	\
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	__##pfx##flush_prologue						\
									\
	while (1) {							\
		prot##cache_op(hitop, addr);				\
		if (addr == aend)					\
			break;						\
		addr += lsize;						\
	}								\
									\
	__##pfx##flush_epilogue						\
}

#ifndef CONFIG_EVA

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )

#else

#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop)		\
static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
							unsigned long end) \
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	__##pfx##flush_prologue						\
									\
	if (!uaccess_kernel()) {					\
		while (1) {						\
			protected_cachee_op(hitop, addr);		\
			if (addr == aend)				\
				break;					\
			addr += lsize;					\
		}							\
	} else {							\
		while (1) {						\
			protected_cache_op(hitop, addr);		\
			if (addr == aend)				\
				break;					\
			addr += lsize;					\
		}							\
	}								\
	__##pfx##flush_epilogue						\
}

__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)

#endif
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
	protected_, loongson2_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
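
/*
 * Editor's usage sketch (hypothetical): under CONFIG_EVA the protected
 * range flush picks cachee or cache per uaccess_kernel(), so a fault-safe
 * I-cache sync over a possibly-unmapped user range is simply:
 */
static inline void example_sync_icache_range(unsigned long start,
					     unsigned long end)
{
	protected_blast_icache_range(start, end);	/* faults are fixed up */
}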

#endif /* _ASM_R4KCACHE_H */