v4.6
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Inline assembly cache operations.
  7 *
  8 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
  9 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 10 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 11 */
 12#ifndef _ASM_R4KCACHE_H
 13#define _ASM_R4KCACHE_H
 14
 15#include <linux/stringify.h>
 16
 17#include <asm/asm.h>
 18#include <asm/cacheops.h>
 19#include <asm/compiler.h>
 20#include <asm/cpu-features.h>
 21#include <asm/cpu-type.h>
 22#include <asm/mipsmtregs.h>
 23#include <asm/uaccess.h> /* for segment_eq() */
 24
 25extern void (*r4k_blast_dcache)(void);
 26extern void (*r4k_blast_icache)(void);
 27
 28/*
 29 * This macro returns a properly sign-extended address suitable as base address
 30 * for indexed cache operations.  Two issues here:
 31 *
 32 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 33 *    the index bits from the virtual address.	This breaks with tradition
 34 *    set by the R4000.	 To keep unpleasant surprises from happening we pick
 35 *    an address in KSEG0 / CKSEG0.
 36 *  - We need a properly sign extended address for 64-bit code.	 To get away
 37 *    without ifdefs we let the compiler do it by a type cast.
 38 */
 39#define INDEX_BASE	CKSEG0
 40
 41#define cache_op(op,addr)						\
 42	__asm__ __volatile__(						\
 43	"	.set	push					\n"	\
 44	"	.set	noreorder				\n"	\
 45	"	.set "MIPS_ISA_ARCH_LEVEL"			\n"	\
 46	"	cache	%0, %1					\n"	\
 47	"	.set	pop					\n"	\
 48	:								\
 49	: "i" (op), "R" (*(unsigned char *)(addr)))
 50
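A minimal usage sketch (hypothetical helper, not part of this header; a 32 KiB direct-mapped I-cache with 32-byte lines is assumed) showing cache_op() combined with INDEX_BASE for indexed operations:

	static inline void example_blast_small_icache(void)
	{
		unsigned long addr;

		/* Index-invalidate every line; basing the walk on INDEX_BASE
		 * keeps the index bits in KSEG0 / CKSEG0 as described above. */
		for (addr = INDEX_BASE; addr < INDEX_BASE + 0x8000; addr += 32)
			cache_op(Index_Invalidate_I, addr);
	}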
 51#ifdef CONFIG_MIPS_MT
 52
 53#define __iflush_prologue						\
 54	unsigned long redundance;					\
 55	extern int mt_n_iflushes;					\
 56	for (redundance = 0; redundance < mt_n_iflushes; redundance++) {
 57
 58#define __iflush_epilogue						\
 59	}
 60
 61#define __dflush_prologue						\
 62	unsigned long redundance;					\
 63	extern int mt_n_dflushes;					\
 64	for (redundance = 0; redundance < mt_n_dflushes; redundance++) {
 65
 66#define __dflush_epilogue \
 67	}
 68
 69#define __inv_dflush_prologue __dflush_prologue
 70#define __inv_dflush_epilogue __dflush_epilogue
 71#define __sflush_prologue {
 72#define __sflush_epilogue }
 73#define __inv_sflush_prologue __sflush_prologue
 74#define __inv_sflush_epilogue __sflush_epilogue
 75
 76#else /* CONFIG_MIPS_MT */
 77
 78#define __iflush_prologue {
 79#define __iflush_epilogue }
 80#define __dflush_prologue {
 81#define __dflush_epilogue }
 82#define __inv_dflush_prologue {
 83#define __inv_dflush_epilogue }
 84#define __sflush_prologue {
 85#define __sflush_epilogue }
 86#define __inv_sflush_prologue {
 87#define __inv_sflush_epilogue }
 88
 89#endif /* CONFIG_MIPS_MT */
 90
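With CONFIG_MIPS_MT the prologue/epilogue pairs above repeat each flush mt_n_iflushes / mt_n_dflushes times; without it they degenerate to plain braces. As an illustration (hypothetical function mirroring what flush_dcache_line() below expands to under CONFIG_MIPS_MT):

	static inline void example_mt_flush_dcache_line(unsigned long addr)
	{
		unsigned long redundance;
		extern int mt_n_dflushes;

		for (redundance = 0; redundance < mt_n_dflushes; redundance++) {
			cache_op(Hit_Writeback_Inv_D, addr);
		}
	}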
 91static inline void flush_icache_line_indexed(unsigned long addr)
 92{
 93	__iflush_prologue
 94	cache_op(Index_Invalidate_I, addr);
 95	__iflush_epilogue
 96}
 97
 98static inline void flush_dcache_line_indexed(unsigned long addr)
 99{
100	__dflush_prologue
101	cache_op(Index_Writeback_Inv_D, addr);
102	__dflush_epilogue
103}
104
105static inline void flush_scache_line_indexed(unsigned long addr)
106{
107	cache_op(Index_Writeback_Inv_SD, addr);
108}
109
110static inline void flush_icache_line(unsigned long addr)
111{
112	__iflush_prologue
113	switch (boot_cpu_type()) {
114	case CPU_LOONGSON2:
115		cache_op(Hit_Invalidate_I_Loongson2, addr);
116		break;
117
118	default:
119		cache_op(Hit_Invalidate_I, addr);
120		break;
121	}
122	__iflush_epilogue
123}
124
125static inline void flush_dcache_line(unsigned long addr)
126{
127	__dflush_prologue
128	cache_op(Hit_Writeback_Inv_D, addr);
129	__dflush_epilogue
130}
131
132static inline void invalidate_dcache_line(unsigned long addr)
133{
134	__dflush_prologue
135	cache_op(Hit_Invalidate_D, addr);
136	__dflush_epilogue
137}
138
139static inline void invalidate_scache_line(unsigned long addr)
140{
141	cache_op(Hit_Invalidate_SD, addr);
142}
143
144static inline void flush_scache_line(unsigned long addr)
145{
146	cache_op(Hit_Writeback_Inv_SD, addr);
147}
148
149#define protected_cache_op(op,addr)				\
150	__asm__ __volatile__(					\
151	"	.set	push			\n"		\
152	"	.set	noreorder		\n"		\
153	"	.set "MIPS_ISA_ARCH_LEVEL"	\n"		\
154	"1:	cache	%0, (%1)		\n"		\
155	"2:	.set	pop			\n"		\
156	"	.section __ex_table,\"a\"	\n"		\
157	"	"STR(PTR)" 1b, 2b		\n"		\
158	"	.previous"					\
159	:							\
160	: "i" (op), "r" (addr))
161
162#define protected_cachee_op(op,addr)				\
163	__asm__ __volatile__(					\
164	"	.set	push			\n"		\
165	"	.set	noreorder		\n"		\
166	"	.set	mips0			\n"		\
167	"	.set	eva			\n"		\
168	"1:	cachee	%0, (%1)		\n"		\
169	"2:	.set	pop			\n"		\
170	"	.section __ex_table,\"a\"	\n"		\
171	"	"STR(PTR)" 1b, 2b		\n"		\
172	"	.previous"					\
173	:							\
174	: "i" (op), "r" (addr))
175
176/*
177 * The next two are for badland addresses like signal trampolines.
178 */
179static inline void protected_flush_icache_line(unsigned long addr)
180{
181	switch (boot_cpu_type()) {
182	case CPU_LOONGSON2:
183		protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
184		break;
185
186	default:
187#ifdef CONFIG_EVA
188		protected_cachee_op(Hit_Invalidate_I, addr);
189#else
190		protected_cache_op(Hit_Invalidate_I, addr);
191#endif
192		break;
193	}
194}
195
196/*
197 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
198 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
199 * caches.  We're talking about one cacheline unnecessarily getting invalidated
 200 * here so the penalty isn't overly severe.
201 */
202static inline void protected_writeback_dcache_line(unsigned long addr)
203{
204#ifdef CONFIG_EVA
205	protected_cachee_op(Hit_Writeback_Inv_D, addr);
206#else
207	protected_cache_op(Hit_Writeback_Inv_D, addr);
208#endif
209}
210
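A typical use of the two protected helpers above (a hedged sketch; the helper name is hypothetical) is synchronizing caches after the kernel writes an instruction to a user address, e.g. a signal trampoline that fits in one cache line:

	static inline void example_sync_user_insn(unsigned long uaddr)
	{
		/* Push the new instruction out of the D-cache, then discard
		 * any stale copy in the I-cache; in this version a faulting
		 * address is silently skipped via the __ex_table entry. */
		protected_writeback_dcache_line(uaddr);
		protected_flush_icache_line(uaddr);
	}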
211static inline void protected_writeback_scache_line(unsigned long addr)
212{
213	protected_cache_op(Hit_Writeback_Inv_SD, addr);
214}
215
216/*
217 * This one is RM7000-specific
218 */
219static inline void invalidate_tcache_page(unsigned long addr)
220{
221	cache_op(Page_Invalidate_T, addr);
222}
223
224#ifndef CONFIG_CPU_MIPSR6
225#define cache16_unroll32(base,op)					\
226	__asm__ __volatile__(						\
227	"	.set push					\n"	\
228	"	.set noreorder					\n"	\
229	"	.set mips3					\n"	\
230	"	cache %1, 0x000(%0); cache %1, 0x010(%0)	\n"	\
231	"	cache %1, 0x020(%0); cache %1, 0x030(%0)	\n"	\
232	"	cache %1, 0x040(%0); cache %1, 0x050(%0)	\n"	\
233	"	cache %1, 0x060(%0); cache %1, 0x070(%0)	\n"	\
234	"	cache %1, 0x080(%0); cache %1, 0x090(%0)	\n"	\
235	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)	\n"	\
236	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)	\n"	\
237	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)	\n"	\
238	"	cache %1, 0x100(%0); cache %1, 0x110(%0)	\n"	\
239	"	cache %1, 0x120(%0); cache %1, 0x130(%0)	\n"	\
240	"	cache %1, 0x140(%0); cache %1, 0x150(%0)	\n"	\
241	"	cache %1, 0x160(%0); cache %1, 0x170(%0)	\n"	\
242	"	cache %1, 0x180(%0); cache %1, 0x190(%0)	\n"	\
243	"	cache %1, 0x1a0(%0); cache %1, 0x1b0(%0)	\n"	\
244	"	cache %1, 0x1c0(%0); cache %1, 0x1d0(%0)	\n"	\
245	"	cache %1, 0x1e0(%0); cache %1, 0x1f0(%0)	\n"	\
246	"	.set pop					\n"	\
247		:							\
248		: "r" (base),						\
249		  "i" (op));
250
251#define cache32_unroll32(base,op)					\
252	__asm__ __volatile__(						\
253	"	.set push					\n"	\
254	"	.set noreorder					\n"	\
255	"	.set mips3					\n"	\
256	"	cache %1, 0x000(%0); cache %1, 0x020(%0)	\n"	\
257	"	cache %1, 0x040(%0); cache %1, 0x060(%0)	\n"	\
258	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0)	\n"	\
259	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)	\n"	\
260	"	cache %1, 0x100(%0); cache %1, 0x120(%0)	\n"	\
261	"	cache %1, 0x140(%0); cache %1, 0x160(%0)	\n"	\
262	"	cache %1, 0x180(%0); cache %1, 0x1a0(%0)	\n"	\
263	"	cache %1, 0x1c0(%0); cache %1, 0x1e0(%0)	\n"	\
264	"	cache %1, 0x200(%0); cache %1, 0x220(%0)	\n"	\
265	"	cache %1, 0x240(%0); cache %1, 0x260(%0)	\n"	\
266	"	cache %1, 0x280(%0); cache %1, 0x2a0(%0)	\n"	\
267	"	cache %1, 0x2c0(%0); cache %1, 0x2e0(%0)	\n"	\
268	"	cache %1, 0x300(%0); cache %1, 0x320(%0)	\n"	\
269	"	cache %1, 0x340(%0); cache %1, 0x360(%0)	\n"	\
270	"	cache %1, 0x380(%0); cache %1, 0x3a0(%0)	\n"	\
271	"	cache %1, 0x3c0(%0); cache %1, 0x3e0(%0)	\n"	\
272	"	.set pop					\n"	\
273		:							\
274		: "r" (base),						\
275		  "i" (op));
276
277#define cache64_unroll32(base,op)					\
278	__asm__ __volatile__(						\
279	"	.set push					\n"	\
280	"	.set noreorder					\n"	\
281	"	.set mips3					\n"	\
282	"	cache %1, 0x000(%0); cache %1, 0x040(%0)	\n"	\
283	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0)	\n"	\
284	"	cache %1, 0x100(%0); cache %1, 0x140(%0)	\n"	\
285	"	cache %1, 0x180(%0); cache %1, 0x1c0(%0)	\n"	\
286	"	cache %1, 0x200(%0); cache %1, 0x240(%0)	\n"	\
287	"	cache %1, 0x280(%0); cache %1, 0x2c0(%0)	\n"	\
288	"	cache %1, 0x300(%0); cache %1, 0x340(%0)	\n"	\
289	"	cache %1, 0x380(%0); cache %1, 0x3c0(%0)	\n"	\
290	"	cache %1, 0x400(%0); cache %1, 0x440(%0)	\n"	\
291	"	cache %1, 0x480(%0); cache %1, 0x4c0(%0)	\n"	\
292	"	cache %1, 0x500(%0); cache %1, 0x540(%0)	\n"	\
293	"	cache %1, 0x580(%0); cache %1, 0x5c0(%0)	\n"	\
294	"	cache %1, 0x600(%0); cache %1, 0x640(%0)	\n"	\
295	"	cache %1, 0x680(%0); cache %1, 0x6c0(%0)	\n"	\
296	"	cache %1, 0x700(%0); cache %1, 0x740(%0)	\n"	\
297	"	cache %1, 0x780(%0); cache %1, 0x7c0(%0)	\n"	\
298	"	.set pop					\n"	\
299		:							\
300		: "r" (base),						\
301		  "i" (op));
302
303#define cache128_unroll32(base,op)					\
304	__asm__ __volatile__(						\
305	"	.set push					\n"	\
306	"	.set noreorder					\n"	\
307	"	.set mips3					\n"	\
308	"	cache %1, 0x000(%0); cache %1, 0x080(%0)	\n"	\
309	"	cache %1, 0x100(%0); cache %1, 0x180(%0)	\n"	\
310	"	cache %1, 0x200(%0); cache %1, 0x280(%0)	\n"	\
311	"	cache %1, 0x300(%0); cache %1, 0x380(%0)	\n"	\
312	"	cache %1, 0x400(%0); cache %1, 0x480(%0)	\n"	\
313	"	cache %1, 0x500(%0); cache %1, 0x580(%0)	\n"	\
314	"	cache %1, 0x600(%0); cache %1, 0x680(%0)	\n"	\
315	"	cache %1, 0x700(%0); cache %1, 0x780(%0)	\n"	\
316	"	cache %1, 0x800(%0); cache %1, 0x880(%0)	\n"	\
317	"	cache %1, 0x900(%0); cache %1, 0x980(%0)	\n"	\
318	"	cache %1, 0xa00(%0); cache %1, 0xa80(%0)	\n"	\
319	"	cache %1, 0xb00(%0); cache %1, 0xb80(%0)	\n"	\
320	"	cache %1, 0xc00(%0); cache %1, 0xc80(%0)	\n"	\
321	"	cache %1, 0xd00(%0); cache %1, 0xd80(%0)	\n"	\
322	"	cache %1, 0xe00(%0); cache %1, 0xe80(%0)	\n"	\
323	"	cache %1, 0xf00(%0); cache %1, 0xf80(%0)	\n"	\
324	"	.set pop					\n"	\
325		:							\
326		: "r" (base),						\
327		  "i" (op));
328
329#else
330/*
 331 * MIPS R6 changed the cache opcode and moved to an 8-bit offset field.
332 * This means we now need to increment the base register before we flush
333 * more cache lines
334 */
335#define cache16_unroll32(base,op)				\
336	__asm__ __volatile__(					\
337	"	.set push\n"					\
338	"	.set noreorder\n"				\
339	"	.set mips64r6\n"				\
340	"	.set noat\n"					\
341	"	cache %1, 0x000(%0); cache %1, 0x010(%0)\n"	\
342	"	cache %1, 0x020(%0); cache %1, 0x030(%0)\n"	\
343	"	cache %1, 0x040(%0); cache %1, 0x050(%0)\n"	\
344	"	cache %1, 0x060(%0); cache %1, 0x070(%0)\n"	\
345	"	cache %1, 0x080(%0); cache %1, 0x090(%0)\n"	\
346	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n"	\
347	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n"	\
348	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n"	\
349	"	"__stringify(LONG_ADDIU)" $1, %0, 0x100	\n"	\
350	"	cache %1, 0x000($1); cache %1, 0x010($1)\n"	\
351	"	cache %1, 0x020($1); cache %1, 0x030($1)\n"	\
352	"	cache %1, 0x040($1); cache %1, 0x050($1)\n"	\
353	"	cache %1, 0x060($1); cache %1, 0x070($1)\n"	\
354	"	cache %1, 0x080($1); cache %1, 0x090($1)\n"	\
355	"	cache %1, 0x0a0($1); cache %1, 0x0b0($1)\n"	\
356	"	cache %1, 0x0c0($1); cache %1, 0x0d0($1)\n"	\
357	"	cache %1, 0x0e0($1); cache %1, 0x0f0($1)\n"	\
358	"	.set pop\n"					\
359		:						\
360		: "r" (base),					\
361		  "i" (op));
362
363#define cache32_unroll32(base,op)				\
364	__asm__ __volatile__(					\
365	"	.set push\n"					\
366	"	.set noreorder\n"				\
367	"	.set mips64r6\n"				\
368	"	.set noat\n"					\
369	"	cache %1, 0x000(%0); cache %1, 0x020(%0)\n"	\
370	"	cache %1, 0x040(%0); cache %1, 0x060(%0)\n"	\
371	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n"	\
372	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n"	\
373	"	"__stringify(LONG_ADDIU)" $1, %0, 0x100 \n"	\
374	"	cache %1, 0x000($1); cache %1, 0x020($1)\n"	\
375	"	cache %1, 0x040($1); cache %1, 0x060($1)\n"	\
376	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n"	\
377	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"	\
378	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
379	"	cache %1, 0x000($1); cache %1, 0x020($1)\n"	\
380	"	cache %1, 0x040($1); cache %1, 0x060($1)\n"	\
381	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n"	\
382	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"	\
383	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100\n"	\
384	"	cache %1, 0x000($1); cache %1, 0x020($1)\n"	\
385	"	cache %1, 0x040($1); cache %1, 0x060($1)\n"	\
386	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n"	\
387	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"	\
388	"	.set pop\n"					\
389		:						\
390		: "r" (base),					\
391		  "i" (op));
392
393#define cache64_unroll32(base,op)				\
394	__asm__ __volatile__(					\
395	"	.set push\n"					\
396	"	.set noreorder\n"				\
397	"	.set mips64r6\n"				\
398	"	.set noat\n"					\
399	"	cache %1, 0x000(%0); cache %1, 0x040(%0)\n"	\
400	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n"	\
401	"	"__stringify(LONG_ADDIU)" $1, %0, 0x100 \n"	\
402	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
403	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
404	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
405	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
406	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
407	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
408	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
409	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
410	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
411	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
412	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
413	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
414	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
415	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
416	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
417	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
418	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
419	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
420	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
421	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
422	"	.set pop\n"					\
423		:						\
424		: "r" (base),					\
425		  "i" (op));
426
427#define cache128_unroll32(base,op)				\
428	__asm__ __volatile__(					\
429	"	.set push\n"					\
430	"	.set noreorder\n"				\
431	"	.set mips64r6\n"				\
432	"	.set noat\n"					\
433	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
434	"	"__stringify(LONG_ADDIU)" $1, %0, 0x100 \n"	\
435	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
436	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
437	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
438	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
439	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
440	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
441	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
442	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
443	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
444	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
445	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
446	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
447	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
448	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
449	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
450	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
451	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
452	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
453	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
454	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
455	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
456	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
457	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
458	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
459	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
460	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
461	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
462	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
463	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
464	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
465	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
466	"	.set pop\n"					\
467		:						\
468		: "r" (base),					\
469		  "i" (op));
470#endif /* CONFIG_CPU_MIPSR6 */
471
472/*
473 * Perform the cache operation specified by op using a user mode virtual
474 * address while in kernel mode.
475 */
476#define cache16_unroll32_user(base,op)					\
477	__asm__ __volatile__(						\
478	"	.set push					\n"	\
479	"	.set noreorder					\n"	\
480	"	.set mips0					\n"	\
481	"	.set eva					\n"	\
482	"	cachee %1, 0x000(%0); cachee %1, 0x010(%0)	\n"	\
483	"	cachee %1, 0x020(%0); cachee %1, 0x030(%0)	\n"	\
484	"	cachee %1, 0x040(%0); cachee %1, 0x050(%0)	\n"	\
485	"	cachee %1, 0x060(%0); cachee %1, 0x070(%0)	\n"	\
486	"	cachee %1, 0x080(%0); cachee %1, 0x090(%0)	\n"	\
487	"	cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0)	\n"	\
488	"	cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0)	\n"	\
489	"	cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0)	\n"	\
490	"	cachee %1, 0x100(%0); cachee %1, 0x110(%0)	\n"	\
491	"	cachee %1, 0x120(%0); cachee %1, 0x130(%0)	\n"	\
492	"	cachee %1, 0x140(%0); cachee %1, 0x150(%0)	\n"	\
493	"	cachee %1, 0x160(%0); cachee %1, 0x170(%0)	\n"	\
494	"	cachee %1, 0x180(%0); cachee %1, 0x190(%0)	\n"	\
495	"	cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0)	\n"	\
496	"	cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0)	\n"	\
497	"	cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0)	\n"	\
498	"	.set pop					\n"	\
499		:							\
500		: "r" (base),						\
501		  "i" (op));
502
503#define cache32_unroll32_user(base, op)					\
504	__asm__ __volatile__(						\
505	"	.set push					\n"	\
506	"	.set noreorder					\n"	\
507	"	.set mips0					\n"	\
508	"	.set eva					\n"	\
509	"	cachee %1, 0x000(%0); cachee %1, 0x020(%0)	\n"	\
510	"	cachee %1, 0x040(%0); cachee %1, 0x060(%0)	\n"	\
511	"	cachee %1, 0x080(%0); cachee %1, 0x0a0(%0)	\n"	\
512	"	cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0)	\n"	\
513	"	cachee %1, 0x100(%0); cachee %1, 0x120(%0)	\n"	\
514	"	cachee %1, 0x140(%0); cachee %1, 0x160(%0)	\n"	\
515	"	cachee %1, 0x180(%0); cachee %1, 0x1a0(%0)	\n"	\
516	"	cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0)	\n"	\
517	"	cachee %1, 0x200(%0); cachee %1, 0x220(%0)	\n"	\
518	"	cachee %1, 0x240(%0); cachee %1, 0x260(%0)	\n"	\
519	"	cachee %1, 0x280(%0); cachee %1, 0x2a0(%0)	\n"	\
520	"	cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0)	\n"	\
521	"	cachee %1, 0x300(%0); cachee %1, 0x320(%0)	\n"	\
522	"	cachee %1, 0x340(%0); cachee %1, 0x360(%0)	\n"	\
523	"	cachee %1, 0x380(%0); cachee %1, 0x3a0(%0)	\n"	\
524	"	cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0)	\n"	\
525	"	.set pop					\n"	\
526		:							\
527		: "r" (base),						\
528		  "i" (op));
529
530#define cache64_unroll32_user(base, op)					\
531	__asm__ __volatile__(						\
532	"	.set push					\n"	\
533	"	.set noreorder					\n"	\
534	"	.set mips0					\n"	\
535	"	.set eva					\n"	\
536	"	cachee %1, 0x000(%0); cachee %1, 0x040(%0)	\n"	\
537	"	cachee %1, 0x080(%0); cachee %1, 0x0c0(%0)	\n"	\
538	"	cachee %1, 0x100(%0); cachee %1, 0x140(%0)	\n"	\
539	"	cachee %1, 0x180(%0); cachee %1, 0x1c0(%0)	\n"	\
540	"	cachee %1, 0x200(%0); cachee %1, 0x240(%0)	\n"	\
541	"	cachee %1, 0x280(%0); cachee %1, 0x2c0(%0)	\n"	\
542	"	cachee %1, 0x300(%0); cachee %1, 0x340(%0)	\n"	\
543	"	cachee %1, 0x380(%0); cachee %1, 0x3c0(%0)	\n"	\
544	"	cachee %1, 0x400(%0); cachee %1, 0x440(%0)	\n"	\
545	"	cachee %1, 0x480(%0); cachee %1, 0x4c0(%0)	\n"	\
546	"	cachee %1, 0x500(%0); cachee %1, 0x540(%0)	\n"	\
547	"	cachee %1, 0x580(%0); cachee %1, 0x5c0(%0)	\n"	\
548	"	cachee %1, 0x600(%0); cachee %1, 0x640(%0)	\n"	\
549	"	cachee %1, 0x680(%0); cachee %1, 0x6c0(%0)	\n"	\
550	"	cachee %1, 0x700(%0); cachee %1, 0x740(%0)	\n"	\
551	"	cachee %1, 0x780(%0); cachee %1, 0x7c0(%0)	\n"	\
552	"	.set pop					\n"	\
553		:							\
554		: "r" (base),						\
555		  "i" (op));
556
557/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
558#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)	\
559static inline void extra##blast_##pfx##cache##lsize(void)		\
560{									\
561	unsigned long start = INDEX_BASE;				\
562	unsigned long end = start + current_cpu_data.desc.waysize;	\
563	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
564	unsigned long ws_end = current_cpu_data.desc.ways <<		\
565			       current_cpu_data.desc.waybit;		\
566	unsigned long ws, addr;						\
567									\
568	__##pfx##flush_prologue						\
569									\
570	for (ws = 0; ws < ws_end; ws += ws_inc)				\
571		for (addr = start; addr < end; addr += lsize * 32)	\
572			cache##lsize##_unroll32(addr|ws, indexop);	\
573									\
574	__##pfx##flush_epilogue						\
575}									\
576									\
577static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
578{									\
579	unsigned long start = page;					\
580	unsigned long end = page + PAGE_SIZE;				\
581									\
582	__##pfx##flush_prologue						\
583									\
584	do {								\
585		cache##lsize##_unroll32(start, hitop);			\
586		start += lsize * 32;					\
587	} while (start < end);						\
588									\
589	__##pfx##flush_epilogue						\
590}									\
591									\
592static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
593{									\
594	unsigned long indexmask = current_cpu_data.desc.waysize - 1;	\
595	unsigned long start = INDEX_BASE + (page & indexmask);		\
596	unsigned long end = start + PAGE_SIZE;				\
597	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
598	unsigned long ws_end = current_cpu_data.desc.ways <<		\
599			       current_cpu_data.desc.waybit;		\
600	unsigned long ws, addr;						\
601									\
602	__##pfx##flush_prologue						\
603									\
604	for (ws = 0; ws < ws_end; ws += ws_inc)				\
605		for (addr = start; addr < end; addr += lsize * 32)	\
606			cache##lsize##_unroll32(addr|ws, indexop);	\
607									\
608	__##pfx##flush_epilogue						\
609}
610
611__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
612__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
613__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
614__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
615__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
616__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
617__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
618__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
619__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
620__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
621__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
622__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
623__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )
624
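Each __BUILD_BLAST_CACHE() expansion above produces three helpers, e.g. blast_dcache32(), blast_dcache32_page() and blast_dcache32_page_indexed() for a D-cache with 32-byte lines. A hypothetical caller, assuming that line size, might flush one page like this:

	static inline void example_flush_page_dcache(unsigned long kaddr)
	{
		if (cpu_dcache_line_size() == 32)
			blast_dcache32_page(kaddr);
	}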
625__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
626__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
627__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
628__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
629__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
630__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
631
632#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
633static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
634{									\
635	unsigned long start = page;					\
636	unsigned long end = page + PAGE_SIZE;				\
637									\
638	__##pfx##flush_prologue						\
639									\
640	do {								\
641		cache##lsize##_unroll32_user(start, hitop);             \
642		start += lsize * 32;					\
643	} while (start < end);						\
644									\
645	__##pfx##flush_epilogue						\
646}
647
648__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
649			 16)
650__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
651__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
652			 32)
653__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
654__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
655			 64)
656__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
657
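The *_user_page variants above use the cachee (EVA) forms so they act on user virtual addresses while in kernel mode. A hypothetical caller, again assuming 32-byte D-cache lines:

	static inline void example_flush_user_page_dcache(unsigned long uaddr)
	{
		if (cpu_dcache_line_size() == 32)
			blast_dcache32_user_page(uaddr);
	}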
658/* build blast_xxx_range, protected_blast_xxx_range */
659#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)	\
660static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
661						    unsigned long end)	\
662{									\
663	unsigned long lsize = cpu_##desc##_line_size();			\
664	unsigned long addr = start & ~(lsize - 1);			\
665	unsigned long aend = (end - 1) & ~(lsize - 1);			\
666									\
667	__##pfx##flush_prologue						\
668									\
669	while (1) {							\
670		prot##cache_op(hitop, addr);				\
671		if (addr == aend)					\
672			break;						\
673		addr += lsize;						\
674	}								\
675									\
676	__##pfx##flush_epilogue						\
677}
678
679#ifndef CONFIG_EVA
680
681__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
682__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
683
684#else
685
686#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop)		\
687static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
688							unsigned long end) \
689{									\
690	unsigned long lsize = cpu_##desc##_line_size();			\
691	unsigned long addr = start & ~(lsize - 1);			\
692	unsigned long aend = (end - 1) & ~(lsize - 1);			\
693									\
694	__##pfx##flush_prologue						\
695									\
696	if (segment_eq(get_fs(), USER_DS)) {				\
697		while (1) {						\
698			protected_cachee_op(hitop, addr);		\
699			if (addr == aend)				\
700				break;					\
701			addr += lsize;					\
702		}							\
703	} else {							\
704		while (1) {						\
705			protected_cache_op(hitop, addr);		\
706			if (addr == aend)				\
707				break;					\
708			addr += lsize;					\
709		}                                                       \
710									\
711	}								\
712	__##pfx##flush_epilogue						\
713}
714
715__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
716__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)
717
718#endif
719__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
720__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
721	protected_, loongson2_)
722__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
723__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
724__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
725/* blast_inv_dcache_range */
726__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
727__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
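The range helpers above round an arbitrary [start, end) virtual range to whole cache lines internally. A hypothetical sketch of writing back a buffer before a device reads it by DMA:

	static inline void example_dma_writeback(unsigned long vaddr, unsigned long size)
	{
		blast_dcache_range(vaddr, vaddr + size);
	}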
728
729#endif /* _ASM_R4KCACHE_H */
v5.9
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Inline assembly cache operations.
  7 *
  8 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
  9 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 10 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 11 */
 12#ifndef _ASM_R4KCACHE_H
 13#define _ASM_R4KCACHE_H
 14
 15#include <linux/stringify.h>
 16
 17#include <asm/asm.h>
 18#include <asm/asm-eva.h>
 19#include <asm/cacheops.h>
 20#include <asm/compiler.h>
 21#include <asm/cpu-features.h>
 22#include <asm/cpu-type.h>
 23#include <asm/mipsmtregs.h>
 24#include <asm/mmzone.h>
 25#include <asm/unroll.h>
 26#include <linux/uaccess.h> /* for uaccess_kernel() */
 27
 28extern void (*r4k_blast_dcache)(void);
 29extern void (*r4k_blast_icache)(void);
 30
 31/*
 32 * This macro returns a properly sign-extended address suitable as base address
 33 * for indexed cache operations.  Two issues here:
 34 *
 35 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 36 *    the index bits from the virtual address.	This breaks with tradition
 37 *    set by the R4000.	 To keep unpleasant surprises from happening we pick
 38 *    an address in KSEG0 / CKSEG0.
 39 *  - We need a properly sign extended address for 64-bit code.	 To get away
 40 *    without ifdefs we let the compiler do it by a type cast.
 41 */
 42#define INDEX_BASE	CKSEG0
 43
 44#define _cache_op(insn, op, addr)					\
 45	__asm__ __volatile__(						\
 46	"	.set	push					\n"	\
 47	"	.set	noreorder				\n"	\
 48	"	.set "MIPS_ISA_ARCH_LEVEL"			\n"	\
 49	"	" insn("%0", "%1") "				\n"	\
 50	"	.set	pop					\n"	\
 51	:								\
 52	: "i" (op), "R" (*(unsigned char *)(addr)))
 53
 54#define cache_op(op, addr)						\
 55	_cache_op(kernel_cache, op, addr)
 56
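kernel_cache and user_cache come from <asm/asm-eva.h>; with CONFIG_EVA the user form emits cachee, otherwise both emit a plain cache instruction, and cache_op() is simply the kernel-address form. A hypothetical sketch of spelling a single user-address operation directly with _cache_op():

	static inline void example_invalidate_user_icache_line(unsigned long uaddr)
	{
		_cache_op(user_cache, Hit_Invalidate_I, uaddr);
	}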
 57static inline void flush_icache_line_indexed(unsigned long addr)
 58{
 59	cache_op(Index_Invalidate_I, addr);
 60}
 61
 62static inline void flush_dcache_line_indexed(unsigned long addr)
 63{
 64	cache_op(Index_Writeback_Inv_D, addr);
 65}
 66
 67static inline void flush_scache_line_indexed(unsigned long addr)
 68{
 69	cache_op(Index_Writeback_Inv_SD, addr);
 70}
 71
 72static inline void flush_icache_line(unsigned long addr)
 73{
 74	switch (boot_cpu_type()) {
 75	case CPU_LOONGSON2EF:
 76		cache_op(Hit_Invalidate_I_Loongson2, addr);
 77		break;
 78
 79	default:
 80		cache_op(Hit_Invalidate_I, addr);
 81		break;
 82	}
 83}
 84
 85static inline void flush_dcache_line(unsigned long addr)
 86{
 87	cache_op(Hit_Writeback_Inv_D, addr);
 88}
 89
 90static inline void invalidate_dcache_line(unsigned long addr)
 91{
 92	cache_op(Hit_Invalidate_D, addr);
 93}
 94
 95static inline void invalidate_scache_line(unsigned long addr)
 96{
 97	cache_op(Hit_Invalidate_SD, addr);
 98}
 99
100static inline void flush_scache_line(unsigned long addr)
101{
102	cache_op(Hit_Writeback_Inv_SD, addr);
103}
104
105#define protected_cache_op(op,addr)				\
106({								\
107	int __err = 0;						\
108	__asm__ __volatile__(					\
109	"	.set	push			\n"		\
110	"	.set	noreorder		\n"		\
111	"	.set "MIPS_ISA_ARCH_LEVEL"	\n"		\
112	"1:	cache	%1, (%2)		\n"		\
113	"2:	.insn				\n"		\
114	"	.set	pop			\n"		\
115	"	.section .fixup,\"ax\"		\n"		\
116	"3:	li	%0, %3			\n"		\
117	"	j	2b			\n"		\
118	"	.previous			\n"		\
119	"	.section __ex_table,\"a\"	\n"		\
120	"	"STR(PTR)" 1b, 3b		\n"		\
121	"	.previous"					\
122	: "+r" (__err)						\
123	: "i" (op), "r" (addr), "i" (-EFAULT));			\
124	__err;							\
125})
126
127
128#define protected_cachee_op(op,addr)				\
129({								\
130	int __err = 0;						\
131	__asm__ __volatile__(					\
132	"	.set	push			\n"		\
133	"	.set	noreorder		\n"		\
134	"	.set	mips0			\n"		\
135	"	.set	eva			\n"		\
136	"1:	cachee	%1, (%2)		\n"		\
137	"2:	.insn				\n"		\
138	"	.set	pop			\n"		\
139	"	.section .fixup,\"ax\"		\n"		\
140	"3:	li	%0, %3			\n"		\
141	"	j	2b			\n"		\
142	"	.previous			\n"		\
143	"	.section __ex_table,\"a\"	\n"		\
144	"	"STR(PTR)" 1b, 3b		\n"		\
145	"	.previous"					\
146	: "+r" (__err)						\
147	: "i" (op), "r" (addr), "i" (-EFAULT));			\
148	__err;							\
149})
150
151/*
152 * The next two are for badland addresses like signal trampolines.
153 */
154static inline int protected_flush_icache_line(unsigned long addr)
155{
156	switch (boot_cpu_type()) {
157	case CPU_LOONGSON2EF:
158		return protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
159
160	default:
161#ifdef CONFIG_EVA
162		return protected_cachee_op(Hit_Invalidate_I, addr);
163#else
164		return protected_cache_op(Hit_Invalidate_I, addr);
165#endif
166	}
167}
168
169/*
170 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
171 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
172 * caches.  We're talking about one cacheline unnecessarily getting invalidated
173 * here so the penalty isn't overly severe.
174 */
175static inline int protected_writeback_dcache_line(unsigned long addr)
176{
177#ifdef CONFIG_EVA
178	return protected_cachee_op(Hit_Writeback_Inv_D, addr);
179#else
180	return protected_cache_op(Hit_Writeback_Inv_D, addr);
181#endif
182}
183
184static inline int protected_writeback_scache_line(unsigned long addr)
185{
186#ifdef CONFIG_EVA
187	return protected_cachee_op(Hit_Writeback_Inv_SD, addr);
188#else
189	return protected_cache_op(Hit_Writeback_Inv_SD, addr);
190#endif
191}
192
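Unlike the older version above, these protected helpers report faults: the .fixup stub loads -EFAULT into %0 when the cache/cachee instruction faults. A hypothetical caller can therefore propagate the failure:

	static inline int example_sync_user_insn_checked(unsigned long uaddr)
	{
		int err;

		/* Write back the D-cache line, then invalidate the I-cache
		 * line; either step may fault on an unmapped user address. */
		err = protected_writeback_dcache_line(uaddr);
		if (!err)
			err = protected_flush_icache_line(uaddr);
		return err;
	}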
193/*
194 * This one is RM7000-specific
195 */
196static inline void invalidate_tcache_page(unsigned long addr)
197{
198	cache_op(Page_Invalidate_T, addr);
199}
200
201#define cache_unroll(times, insn, op, addr, lsize) do {			\
202	int i = 0;							\
203	unroll(times, _cache_op, insn, op, (addr) + (i++ * (lsize)));	\
204} while (0)
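cache_unroll() uses unroll() from <asm/unroll.h> to emit 'times' copies of _cache_op() at compile time, the post-incremented i advancing the target address by one line per copy. A minimal sketch (hypothetical helper; a 32-byte line size is assumed):

	static inline void example_wb_dcache_block(unsigned long addr)
	{
		/* Write back and invalidate 32 consecutive 32-byte D-cache
		 * lines starting at addr. */
		cache_unroll(32, kernel_cache, Hit_Writeback_Inv_D, addr, 32);
	}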
205
206/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
207#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)	\
208static inline void extra##blast_##pfx##cache##lsize(void)		\
209{									\
210	unsigned long start = INDEX_BASE;				\
211	unsigned long end = start + current_cpu_data.desc.waysize;	\
212	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
213	unsigned long ws_end = current_cpu_data.desc.ways <<		\
214			       current_cpu_data.desc.waybit;		\
215	unsigned long ws, addr;						\
216									\
217	for (ws = 0; ws < ws_end; ws += ws_inc)				\
218		for (addr = start; addr < end; addr += lsize * 32)	\
219			cache_unroll(32, kernel_cache, indexop,		\
220				     addr | ws, lsize);			\
221}									\
222									\
223static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
224{									\
225	unsigned long start = page;					\
226	unsigned long end = page + PAGE_SIZE;				\
227									\
228	do {								\
229		cache_unroll(32, kernel_cache, hitop, start, lsize);	\
230		start += lsize * 32;					\
231	} while (start < end);						\
232}									\
233									\
234static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
235{									\
236	unsigned long indexmask = current_cpu_data.desc.waysize - 1;	\
237	unsigned long start = INDEX_BASE + (page & indexmask);		\
238	unsigned long end = start + PAGE_SIZE;				\
239	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
240	unsigned long ws_end = current_cpu_data.desc.ways <<		\
241			       current_cpu_data.desc.waybit;		\
242	unsigned long ws, addr;						\
243									\
244	for (ws = 0; ws < ws_end; ws += ws_inc)				\
245		for (addr = start; addr < end; addr += lsize * 32)	\
246			cache_unroll(32, kernel_cache, indexop,		\
247				     addr | ws, lsize);			\
248}
249
250__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
251__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
252__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
253__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
254__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
255__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
256__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
257__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
258__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
259__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
260__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
261__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
262__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )
263
264__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
265__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
266__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
267__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
268__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
269__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
270
271#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
272static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
273{									\
274	unsigned long start = page;					\
275	unsigned long end = page + PAGE_SIZE;				\
276									\
277	do {								\
278		cache_unroll(32, user_cache, hitop, start, lsize);	\
279		start += lsize * 32;					\
280	} while (start < end);						\
281}
282
283__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
284			 16)
285__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
286__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
287			 32)
288__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
289__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
290			 64)
291__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
292
293/* build blast_xxx_range, protected_blast_xxx_range */
294#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)	\
295static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
296						    unsigned long end)	\
297{									\
298	unsigned long lsize = cpu_##desc##_line_size();			\
299	unsigned long addr = start & ~(lsize - 1);			\
300	unsigned long aend = (end - 1) & ~(lsize - 1);			\
301									\
302	while (1) {							\
303		prot##cache_op(hitop, addr);				\
304		if (addr == aend)					\
305			break;						\
306		addr += lsize;						\
307	}								\
308}
309
310#ifndef CONFIG_EVA
311
312__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
313__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
314
315#else
316
317#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop)		\
318static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
319							unsigned long end) \
320{									\
321	unsigned long lsize = cpu_##desc##_line_size();			\
322	unsigned long addr = start & ~(lsize - 1);			\
323	unsigned long aend = (end - 1) & ~(lsize - 1);			\
324									\
325	if (!uaccess_kernel()) {					\
326		while (1) {						\
327			protected_cachee_op(hitop, addr);		\
328			if (addr == aend)				\
329				break;					\
330			addr += lsize;					\
331		}							\
332	} else {							\
333		while (1) {						\
334			protected_cache_op(hitop, addr);		\
335			if (addr == aend)				\
336				break;					\
337			addr += lsize;					\
338		}                                                       \
339									\
340	}								\
341}
342
343__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
344__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)
345
346#endif
347__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
348__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
349	protected_, loongson2_)
350__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
351__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
352__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
353/* blast_inv_dcache_range */
354__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
355__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
356
357/* Currently, this is very specific to Loongson-3 */
358#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize)	\
359static inline void blast_##pfx##cache##lsize##_node(long node)		\
360{									\
361	unsigned long start = CAC_BASE | nid_to_addrbase(node);		\
362	unsigned long end = start + current_cpu_data.desc.waysize;	\
363	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
364	unsigned long ws_end = current_cpu_data.desc.ways <<		\
365			       current_cpu_data.desc.waybit;		\
366	unsigned long ws, addr;						\
367									\
368	for (ws = 0; ws < ws_end; ws += ws_inc)				\
369		for (addr = start; addr < end; addr += lsize * 32)	\
370			cache_unroll(32, kernel_cache, indexop,		\
371				     addr | ws, lsize);			\
372}
373
374__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
375__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
376__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
377__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
378
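The per-node variants above index a Loongson-3 node's shared scache by or-ing nid_to_addrbase(node) into CAC_BASE. A hypothetical caller, with the node count passed in and 64-byte scache lines assumed:

	static inline void example_blast_all_scaches(int nr_nodes)
	{
		int node;

		for (node = 0; node < nr_nodes; node++)
			blast_scache64_node(node);
	}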
379#endif /* _ASM_R4KCACHE_H */