v6.8
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <linux/stringify.h>

#include <asm/asm.h>
#include <asm/asm-eva.h>
#include <asm/cacheops.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
#include <asm/mmzone.h>
#include <asm/unroll.h>

extern void r5k_sc_init(void);
extern void rm7k_sc_init(void);
extern int mips_sc_init(void);

extern void (*r4k_blast_dcache)(void);
extern void (*r4k_blast_icache)(void);

/*
 * This macro expands to a properly sign-extended address suitable as the base
 * address for indexed cache operations.  Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address.  This breaks with the tradition
 *    set by the R4000.  To keep unpleasant surprises from happening we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign-extended address for 64-bit code.  To get away
 *    without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE	CKSEG0
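
/*
 * Editor's sketch (not part of the original header): on a 64-bit kernel
 * CKSEG0 is a 64-bit constant, so the assignment below yields the canonical
 * sign-extended address 0xffffffff80000000 with no #ifdef; a 32-bit kernel
 * simply gets 0x80000000.
 *
 *	unsigned long base = INDEX_BASE;
 *	flush_dcache_line_indexed(base);	// index 0, way 0
 */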

#define _cache_op(insn, op, addr)					\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	noreorder				\n"	\
	"	.set "MIPS_ISA_ARCH_LEVEL"			\n"	\
	"	" insn("%0", "%1") "				\n"	\
	"	.set	pop					\n"	\
	:								\
	: "i" (op), "R" (*(unsigned char *)(addr)))

#define cache_op(op, addr)						\
	_cache_op(kernel_cache, op, addr)
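
/*
 * Editor's sketch: the kernel_cache() helper from <asm/asm-eva.h> emits a
 * plain CACHE instruction, so
 *
 *	cache_op(Index_Writeback_Inv_D, addr);
 *
 * expands to roughly
 *
 *	__asm__ __volatile__(
 *	"	cache	%0, %1"
 *	: : "i" (Index_Writeback_Inv_D), "R" (*(unsigned char *)(addr)));
 *
 * The "i" constraint makes the cache op an immediate and the "R" constraint
 * hands the address to the instruction as a memory operand.
 */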

static inline void flush_icache_line_indexed(unsigned long addr)
{
	cache_op(Index_Invalidate_I, addr);
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_D, addr);
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2EF:
		cache_op(Hit_Invalidate_I_Loongson2, addr);
		break;

	default:
		cache_op(Hit_Invalidate_I, addr);
		break;
	}
}

static inline void flush_dcache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_D, addr);
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_D, addr);
}

static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}
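
/*
 * Editor's sketch of typical use (hypothetical helper, not part of this
 * file): write back and invalidate every dcache line backing a buffer,
 * e.g. before handing it to a device for DMA.
 */
#if 0	/* illustrative only */
static inline void example_wb_inv_buffer(unsigned long addr,
					 unsigned long size)
{
	unsigned long lsize = cpu_dcache_line_size();
	unsigned long end = addr + size;

	for (addr &= ~(lsize - 1); addr < end; addr += lsize)
		flush_dcache_line(addr);	/* Hit_Writeback_Inv_D */
}
#endif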

#ifdef CONFIG_EVA

#define protected_cache_op(op, addr)				\
({								\
	int __err = 0;						\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set	mips0			\n"		\
	"	.set	eva			\n"		\
	"1:	cachee	%1, (%2)		\n"		\
	"2:	.insn				\n"		\
	"	.set	pop			\n"		\
	"	.section .fixup,\"ax\"		\n"		\
	"3:	li	%0, %3			\n"		\
	"	j	2b			\n"		\
	"	.previous			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR_WD)" 1b, 3b		\n"		\
	"	.previous"					\
	: "+r" (__err)						\
	: "i" (op), "r" (addr), "i" (-EFAULT));			\
	__err;							\
})
#else

#define protected_cache_op(op, addr)				\
({								\
	int __err = 0;						\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set "MIPS_ISA_ARCH_LEVEL"	\n"		\
	"1:	cache	%1, (%2)		\n"		\
	"2:	.insn				\n"		\
	"	.set	pop			\n"		\
	"	.section .fixup,\"ax\"		\n"		\
	"3:	li	%0, %3			\n"		\
	"	j	2b			\n"		\
	"	.previous			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR_WD)" 1b, 3b		\n"		\
	"	.previous"					\
	: "+r" (__err)						\
	: "i" (op), "r" (addr), "i" (-EFAULT));			\
	__err;							\
})
#endif
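
/*
 * Editor's note: both variants above use the standard MIPS exception fixup
 * pattern.  If the CACHE/CACHEE at label 1: faults because the target
 * address is unmapped, the __ex_table entry redirects the fault handler to
 * label 3:, which loads -EFAULT into __err and jumps back to 2:.  A caller
 * can therefore probe an arbitrary address safely, e.g.:
 *
 *	if (protected_cache_op(Hit_Invalidate_I, addr))
 *		return -EFAULT;		// faulted, line not touched
 */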

/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline int protected_flush_icache_line(unsigned long addr)
{
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2EF:
		return protected_cache_op(Hit_Invalidate_I_Loongson2, addr);

	default:
		return protected_cache_op(Hit_Invalidate_I, addr);
	}
}

/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop, so we use Hit_Writeback_Inv_D, which is supported by all
 * R4000-style caches.  At worst one cache line gets unnecessarily
 * invalidated, so the penalty is small.
 */
static inline int protected_writeback_dcache_line(unsigned long addr)
{
	return protected_cache_op(Hit_Writeback_Inv_D, addr);
}

static inline int protected_writeback_scache_line(unsigned long addr)
{
	return protected_cache_op(Hit_Writeback_Inv_SD, addr);
}

/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
	cache_op(Page_Invalidate_T, addr);
}

#define cache_unroll(times, insn, op, addr, lsize) do {			\
	int i = 0;							\
	unroll(times, _cache_op, insn, op, (addr) + (i++ * (lsize)));	\
} while (0)
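
/*
 * Editor's sketch: unroll() from <asm/unroll.h> pastes its operation
 * 'times' times, and i++ is evaluated once per pasted copy, so
 *
 *	cache_unroll(4, kernel_cache, Hit_Writeback_Inv_D, addr, 32);
 *
 * expands to four _cache_op() invocations covering addr + 0 * 32,
 * addr + 1 * 32, addr + 2 * 32 and addr + 3 * 32.
 */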

/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)	\
static inline void extra##blast_##pfx##cache##lsize(void)		\
{									\
	unsigned long start = INDEX_BASE;				\
	unsigned long end = start + current_cpu_data.desc.waysize;	\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache_unroll(32, kernel_cache, indexop,		\
				     addr | ws, lsize);			\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	do {								\
		cache_unroll(32, kernel_cache, hitop, start, lsize);	\
		start += lsize * 32;					\
	} while (start < end);						\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{									\
	unsigned long indexmask = current_cpu_data.desc.waysize - 1;	\
	unsigned long start = INDEX_BASE + (page & indexmask);		\
	unsigned long end = start + PAGE_SIZE;				\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache_unroll(32, kernel_cache, indexop,		\
				     addr | ws, lsize);			\
}

__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
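
/*
 * Editor's note: each expansion above stamps out three helpers named after
 * the prefix and the line size, e.g. the 32-byte dcache variant provides:
 *
 *	blast_dcache32();			// whole cache, by index, all ways
 *	blast_dcache32_page(page);		// one page, by hit
 *	blast_dcache32_page_indexed(page);	// one page, by index, all ways
 */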

#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	do {								\
		cache_unroll(32, user_cache, hitop, start, lsize);	\
		start += lsize * 32;					\
	} while (start < end);						\
}

__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 16)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 32)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 64)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
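
/*
 * Editor's note: the user variants differ only in using user_cache(), which
 * emits CACHEE on EVA kernels (and plain CACHE otherwise) so the page is
 * addressed through the user mapping, e.g.:
 *
 *	blast_dcache32_user_page(page);	// Hit_Writeback_Inv_D on a user page
 */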

/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)	\
static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
						    unsigned long end)	\
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	while (1) {							\
		prot##cache_op(hitop, addr);				\
		if (addr == aend)					\
			break;						\
		addr += lsize;						\
	}								\
}
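
/*
 * Editor's sketch: the generated helpers round 'start' down and 'end - 1'
 * down to a line boundary and touch every line in between exactly once,
 * e.g.:
 *
 *	blast_dcache_range(vaddr, vaddr + len);		// writeback + inv
 *	protected_blast_icache_range(va, va + len);	// won't oops on a
 *							// bad address
 */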

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
	protected_, loongson2_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )

/* Currently, this is very specific to Loongson-3 */
#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize)	\
static inline void blast_##pfx##cache##lsize##_node(long node)		\
{									\
	unsigned long start = CAC_BASE | nid_to_addrbase(node);		\
	unsigned long end = start + current_cpu_data.desc.waysize;	\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache_unroll(32, kernel_cache, indexop,		\
				     addr | ws, lsize);			\
}

__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
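
/*
 * Editor's sketch: on Loongson-3 each node's L2 is reached through that
 * node's own cached window, so a full multi-node flush might look like
 * (assuming a 64-byte scache line):
 *
 *	for_each_online_node(nid)
 *		blast_scache64_node(nid);
 */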

#endif /* _ASM_R4KCACHE_H */
v5.9
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <linux/stringify.h>

#include <asm/asm.h>
#include <asm/asm-eva.h>
#include <asm/cacheops.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
#include <asm/mmzone.h>
#include <asm/unroll.h>
#include <linux/uaccess.h> /* for uaccess_kernel() */

extern void (*r4k_blast_dcache)(void);
extern void (*r4k_blast_icache)(void);

/*
 * This macro expands to a properly sign-extended address suitable as the base
 * address for indexed cache operations.  Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address.  This breaks with the tradition
 *    set by the R4000.  To keep unpleasant surprises from happening we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign-extended address for 64-bit code.  To get away
 *    without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE	CKSEG0

#define _cache_op(insn, op, addr)					\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	noreorder				\n"	\
	"	.set "MIPS_ISA_ARCH_LEVEL"			\n"	\
	"	" insn("%0", "%1") "				\n"	\
	"	.set	pop					\n"	\
	:								\
	: "i" (op), "R" (*(unsigned char *)(addr)))

#define cache_op(op, addr)						\
	_cache_op(kernel_cache, op, addr)

static inline void flush_icache_line_indexed(unsigned long addr)
{
	cache_op(Index_Invalidate_I, addr);
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_D, addr);
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2EF:
		cache_op(Hit_Invalidate_I_Loongson2, addr);
		break;

	default:
		cache_op(Hit_Invalidate_I, addr);
		break;
	}
}

static inline void flush_dcache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_D, addr);
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_D, addr);
}

static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}

#define protected_cache_op(op,addr)				\
({								\
	int __err = 0;						\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set "MIPS_ISA_ARCH_LEVEL"	\n"		\
	"1:	cache	%1, (%2)		\n"		\
	"2:	.insn				\n"		\
	"	.set	pop			\n"		\
	"	.section .fixup,\"ax\"		\n"		\
	"3:	li	%0, %3			\n"		\
	"	j	2b			\n"		\
	"	.previous			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 3b		\n"		\
	"	.previous"					\
	: "+r" (__err)						\
	: "i" (op), "r" (addr), "i" (-EFAULT));			\
	__err;							\
})

#define protected_cachee_op(op,addr)				\
({								\
	int __err = 0;						\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set	mips0			\n"		\
	"	.set	eva			\n"		\
	"1:	cachee	%1, (%2)		\n"		\
	"2:	.insn				\n"		\
	"	.set	pop			\n"		\
	"	.section .fixup,\"ax\"		\n"		\
	"3:	li	%0, %3			\n"		\
	"	j	2b			\n"		\
	"	.previous			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 3b		\n"		\
	"	.previous"					\
	: "+r" (__err)						\
	: "i" (op), "r" (addr), "i" (-EFAULT));			\
	__err;							\
})
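
/*
 * Editor's note: this differs from protected_cache_op() above only in
 * emitting CACHEE, the EVA form of CACHE that is translated through the
 * user address-space mapping, e.g. for an icache line at a user virtual
 * address:
 *
 *	int err = protected_cachee_op(Hit_Invalidate_I, uaddr);
 */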
 
/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline int protected_flush_icache_line(unsigned long addr)
{
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2EF:
		return protected_cache_op(Hit_Invalidate_I_Loongson2, addr);

	default:
#ifdef CONFIG_EVA
		return protected_cachee_op(Hit_Invalidate_I, addr);
#else
		return protected_cache_op(Hit_Invalidate_I, addr);
#endif
	}
}

/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop, so we use Hit_Writeback_Inv_D, which is supported by all
 * R4000-style caches.  At worst one cache line gets unnecessarily
 * invalidated, so the penalty is small.
 */
static inline int protected_writeback_dcache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
	return protected_cachee_op(Hit_Writeback_Inv_D, addr);
#else
	return protected_cache_op(Hit_Writeback_Inv_D, addr);
#endif
}

static inline int protected_writeback_scache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
	return protected_cachee_op(Hit_Writeback_Inv_SD, addr);
#else
	return protected_cache_op(Hit_Writeback_Inv_SD, addr);
#endif
}

/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
	cache_op(Page_Invalidate_T, addr);
}

#define cache_unroll(times, insn, op, addr, lsize) do {			\
	int i = 0;							\
	unroll(times, _cache_op, insn, op, (addr) + (i++ * (lsize)));	\
} while (0)

/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)	\
static inline void extra##blast_##pfx##cache##lsize(void)		\
{									\
	unsigned long start = INDEX_BASE;				\
	unsigned long end = start + current_cpu_data.desc.waysize;	\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache_unroll(32, kernel_cache, indexop,		\
				     addr | ws, lsize);			\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	do {								\
		cache_unroll(32, kernel_cache, hitop, start, lsize);	\
		start += lsize * 32;					\
	} while (start < end);						\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{									\
	unsigned long indexmask = current_cpu_data.desc.waysize - 1;	\
	unsigned long start = INDEX_BASE + (page & indexmask);		\
	unsigned long end = start + PAGE_SIZE;				\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache_unroll(32, kernel_cache, indexop,		\
				     addr | ws, lsize);			\
}

__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )

#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	do {								\
		cache_unroll(32, user_cache, hitop, start, lsize);	\
		start += lsize * 32;					\
	} while (start < end);						\
}

__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 16)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 32)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 64)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)

/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)	\
static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
						    unsigned long end)	\
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	while (1) {							\
		prot##cache_op(hitop, addr);				\
		if (addr == aend)					\
			break;						\
		addr += lsize;						\
	}								\
}

#ifndef CONFIG_EVA

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )

#else

#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop)		\
static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
							unsigned long end) \
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	if (!uaccess_kernel()) {					\
		while (1) {						\
			protected_cachee_op(hitop, addr);		\
			if (addr == aend)				\
				break;					\
			addr += lsize;					\
		}							\
	} else {							\
		while (1) {						\
			protected_cache_op(hitop, addr);		\
			if (addr == aend)				\
				break;					\
			addr += lsize;					\
		}							\
	}								\
}

__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)

#endif
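
/*
 * Editor's note: on EVA kernels the protected d/i range helpers above pick
 * the opcode at run time: with a user address space active
 * (!uaccess_kernel(), the check <linux/uaccess.h> is included for) they use
 * CACHEE so user mappings are honoured, otherwise plain CACHE, e.g.:
 *
 *	protected_blast_dcache_range(start, end);
 */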
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
	protected_, loongson2_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )

/* Currently, this is very specific to Loongson-3 */
#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize)	\
static inline void blast_##pfx##cache##lsize##_node(long node)		\
{									\
	unsigned long start = CAC_BASE | nid_to_addrbase(node);		\
	unsigned long end = start + current_cpu_data.desc.waysize;	\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache_unroll(32, kernel_cache, indexop,		\
				     addr | ws, lsize);			\
}

__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)

#endif /* _ASM_R4KCACHE_H */