/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  linux/arch/arm/mm/proc-feroceon.S: MMU functions for Feroceon
 *
 *  Heavily based on proc-arm926.S
 *  Maintainer: Assaf Hoffman <hoffman@marvell.com>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be invalidated
 * using the single invalidate entry instructions.  Anything larger
 * than this, and we go for the whole cache.
 *
 * This value should be chosen such that we choose the cheapest
 * alternative.
 */
#define CACHE_DLIMIT	16384

/*
 * the cache line size of the I and D cache
 */
#define CACHE_DLINESIZE	32

	.bss
	.align 3
__cache_params_loc:
	.space	8

	.text
__cache_params:
	.word	__cache_params_loc

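/*
 * Note: cpu_feroceon_proc_init() below fills __cache_params_loc with two
 * words describing the D cache geometry: the largest set index, kept as a
 * byte offset in bits 5 and up, and the largest way index in bits 30-31.
 * __flush_whole_cache ORs them together to drive the clean+invalidate
 * D cache set/way operation.
 */
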
/*
 * cpu_feroceon_proc_init()
 */
SYM_TYPED_FUNC_START(cpu_feroceon_proc_init)
	mrc	p15, 0, r0, c0, c0, 1		@ read cache type register
	ldr	r1, __cache_params
	mov	r2, #(16 << 5)
	tst	r0, #(1 << 16)			@ get way
	mov	r0, r0, lsr #18			@ get cache size order
	movne	r3, #((4 - 1) << 30)		@ 4-way
	and	r0, r0, #0xf
	moveq	r3, #0				@ 1-way
	mov	r2, r2, lsl r0			@ actual cache size
	movne	r2, r2, lsr #2			@ turned into # of sets
	sub	r2, r2, #(1 << 5)
	stmia	r1, {r2, r3}
#ifdef CONFIG_VFP
	mov	r1, #1				@ disable quirky VFP
	str_l	r1, VFP_arch_feroceon, r2
#endif
	ret	lr
SYM_FUNC_END(cpu_feroceon_proc_init)

/*
 * cpu_feroceon_proc_fin()
 */
SYM_TYPED_FUNC_START(cpu_feroceon_proc_fin)
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	mov	r0, #0
	mcr	p15, 1, r0, c15, c9, 0		@ clean L2
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif

	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr
SYM_FUNC_END(cpu_feroceon_proc_fin)

/*
 * cpu_feroceon_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
	.pushsection	.idmap.text, "ax"
SYM_TYPED_FUNC_START(cpu_feroceon_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0
SYM_FUNC_END(cpu_feroceon_reset)
	.popsection

/*
 * cpu_feroceon_do_idle()
 *
 * Called with IRQs disabled
 */
	.align	5
SYM_TYPED_FUNC_START(cpu_feroceon_do_idle)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ Drain write buffer
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	ret	lr
SYM_FUNC_END(cpu_feroceon_do_idle)

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
SYM_TYPED_FUNC_START(feroceon_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
SYM_FUNC_END(feroceon_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Clean and invalidate all cache entries in a particular
 *	address space.
 */
	.align	5
SYM_FUNC_ALIAS(feroceon_flush_user_cache_all, feroceon_flush_kern_cache_all)
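	@ There is no way to restrict this to a single address space on this
	@ CPU, so the user variant above is simply an alias of the kernel one.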
 
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
SYM_TYPED_FUNC_START(feroceon_flush_kern_cache_all)
	mov	r2, #VM_EXEC

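/*
 * __flush_whole_cache is entered with r2 = vm_flags: the D cache is
 * always cleaned and invalidated by set/way using the geometry stored
 * at __cache_params_loc, while the I cache is only invalidated and the
 * write buffer drained when VM_EXEC is set.
 */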
__flush_whole_cache:
	ldr	r1, __cache_params
	ldmia	r1, {r1, r3}
1:	orr	ip, r1, r3
2:	mcr	p15, 0, ip, c7, c14, 2		@ clean + invalidate D set/way
	subs	ip, ip, #(1 << 30)		@ next way
	bcs	2b
	subs	r1, r1, #(1 << 5)		@ next set
	bcs	1b

	tst	r2, #VM_EXEC
	mov	ip, #0
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(feroceon_flush_kern_cache_all)

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the
 *	specified address range.
 *
 *	- start	- start address (inclusive)
 *	- end	- end address (exclusive)
 *	- flags	- vm_flags describing address space
 */
	.align	5
SYM_TYPED_FUNC_START(feroceon_flush_user_cache_range)
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bgt	__flush_whole_cache
1:	tst	r2, #VM_EXEC
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mov	ip, #0
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(feroceon_flush_user_cache_range)

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
	.align	5
SYM_TYPED_FUNC_START(feroceon_coherent_kern_range)
#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
	b	feroceon_coherent_user_range
#endif
SYM_FUNC_END(feroceon_coherent_kern_range)

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
SYM_TYPED_FUNC_START(feroceon_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	r0, #0
	ret	lr
SYM_FUNC_END(feroceon_coherent_user_range)

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
	.align	5
SYM_TYPED_FUNC_START(feroceon_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(feroceon_flush_kern_dcache_area)

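/*
 * The "range" variants below use Feroceon-specific cp15 operations that
 * act on a whole virtual address range at once: the first mcr supplies
 * the range start and the second the inclusive top address.  Interrupts
 * are masked across the pair so that no other cache maintenance can be
 * issued in between.
 */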
	.align	5
SYM_TYPED_FUNC_START(feroceon_range_flush_kern_dcache_area)
	mrs	r2, cpsr
	add	r1, r0, #PAGE_SZ - CACHE_DLINESIZE	@ top addr is inclusive
	orr	r3, r2, #PSR_I_BIT
	msr	cpsr_c, r3			@ disable interrupts
	mcr	p15, 5, r0, c15, c15, 0		@ D clean/inv range start
	mcr	p15, 5, r1, c15, c15, 1		@ D clean/inv range top
	msr	cpsr_c, r2			@ restore interrupts
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(feroceon_range_flush_kern_dcache_area)

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
	.align	5
feroceon_dma_inv_range:
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

	.align	5
feroceon_range_dma_inv_range:
	mrs	r2, cpsr
	tst	r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
	cmp	r1, r0
	subne	r1, r1, #1			@ top address is inclusive
	orr	r3, r2, #PSR_I_BIT
	msr	cpsr_c, r3			@ disable interrupts
	mcr	p15, 5, r0, c15, c14, 0		@ D inv range start
	mcr	p15, 5, r1, c15, c14, 1		@ D inv range top
	msr	cpsr_c, r2			@ restore interrupts
	ret	lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
	.align	5
feroceon_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

	.align	5
feroceon_range_dma_clean_range:
	mrs	r2, cpsr
	cmp	r1, r0
	subne	r1, r1, #1			@ top address is inclusive
	orr	r3, r2, #PSR_I_BIT
	msr	cpsr_c, r3			@ disable interrupts
	mcr	p15, 5, r0, c15, c13, 0		@ D clean range start
	mcr	p15, 5, r1, c15, c13, 1		@ D clean range top
	msr	cpsr_c, r2			@ restore interrupts
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
	.align	5
SYM_TYPED_FUNC_START(feroceon_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(feroceon_dma_flush_range)

	.align	5
SYM_TYPED_FUNC_START(feroceon_range_dma_flush_range)
	mrs	r2, cpsr
	cmp	r1, r0
	subne	r1, r1, #1			@ top address is inclusive
	orr	r3, r2, #PSR_I_BIT
	msr	cpsr_c, r3			@ disable interrupts
	mcr	p15, 5, r0, c15, c15, 0		@ D clean/inv range start
	mcr	p15, 5, r1, c15, c15, 1		@ D clean/inv range top
	msr	cpsr_c, r2			@ restore interrupts
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(feroceon_range_dma_flush_range)

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
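	@ Dispatch on r2, the DMA direction: DMA_TO_DEVICE only needs a clean,
	@ DMA_FROM_DEVICE (and anything above it) an invalidate, and
	@ DMA_BIDIRECTIONAL falls through to a full clean+invalidate.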
SYM_TYPED_FUNC_START(feroceon_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	feroceon_dma_clean_range
	bcs	feroceon_dma_inv_range
	b	feroceon_dma_flush_range
SYM_FUNC_END(feroceon_dma_map_area)

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
SYM_TYPED_FUNC_START(feroceon_range_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	feroceon_range_dma_clean_range
	bcs	feroceon_range_dma_inv_range
	b	feroceon_range_dma_flush_range
SYM_FUNC_END(feroceon_range_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
SYM_TYPED_FUNC_START(feroceon_dma_unmap_area)
	ret	lr
SYM_FUNC_END(feroceon_dma_unmap_area)

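/*
 * cpu_feroceon_dcache_clean_area(addr, size)
 *
 * Write back (clean) the given kernel address range from the D cache,
 * and from the L2 cache as well when a write-back Feroceon L2 is
 * configured, without invalidating any entries.
 */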
	.align	5
SYM_TYPED_FUNC_START(cpu_feroceon_dcache_clean_area)
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	mov	r2, r0
	mov	r3, r1
#endif
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
1:	mcr	p15, 1, r2, c15, c9, 1		@ clean L2 entry
	add	r2, r2, #CACHE_DLINESIZE
	subs	r3, r3, #CACHE_DLINESIZE
	bhi	1b
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(cpu_feroceon_dcache_clean_area)

/* =============================== PageTable ============================== */

/*
 * cpu_feroceon_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
SYM_TYPED_FUNC_START(cpu_feroceon_switch_mm)
#ifdef CONFIG_MMU
	/*
	 * Note: we wish to call __flush_whole_cache but we need to preserve
	 * lr to do so.  The only way without touching main memory is to
	 * use r2 which is normally used to test the VM_EXEC flag, and
	 * compensate locally for the skipped ops if it is not set.
	 */
	mov	r2, lr				@ abuse r2 to preserve lr
	bl	__flush_whole_cache
	@ if r2 contains the VM_EXEC bit then the next 2 ops are done already
	tst	r2, #VM_EXEC
	mcreq	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcreq	p15, 0, ip, c7, c10, 4		@ drain WB

	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	ret	r2
#else
	ret	lr
#endif
SYM_FUNC_END(cpu_feroceon_switch_mm)

/*
 * cpu_feroceon_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
SYM_TYPED_FUNC_START(cpu_feroceon_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext wc_disable=0
	mov	r0, r0
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	mcr	p15, 1, r0, c15, c9, 1		@ clean L2 entry
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif
	ret	lr
SYM_FUNC_END(cpu_feroceon_set_pte_ext)

/* Suspend/resume support: taken from arch/arm/mm/proc-arm926.S */
.globl	cpu_feroceon_suspend_size
.equ	cpu_feroceon_suspend_size, 4 * 3
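	@ Three words of state are saved and restored: the PID, the domain
	@ access control register and the control register.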
#ifdef CONFIG_ARM_CPU_SUSPEND
SYM_TYPED_FUNC_START(cpu_feroceon_do_suspend)
	stmfd	sp!, {r4 - r6, lr}
	mrc	p15, 0, r4, c13, c0, 0	@ PID
	mrc	p15, 0, r5, c3, c0, 0	@ Domain ID
	mrc	p15, 0, r6, c1, c0, 0	@ Control register
	stmia	r0, {r4 - r6}
	ldmfd	sp!, {r4 - r6, pc}
SYM_FUNC_END(cpu_feroceon_do_suspend)

SYM_TYPED_FUNC_START(cpu_feroceon_do_resume)
	mov	ip, #0
	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I+D TLBs
	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I+D caches
	ldmia	r0, {r4 - r6}
	mcr	p15, 0, r4, c13, c0, 0	@ PID
	mcr	p15, 0, r5, c3, c0, 0	@ Domain ID
	mcr	p15, 0, r1, c2, c0, 0	@ TTB address
	mov	r0, r6			@ control register
	b	cpu_resume_mmu
SYM_FUNC_END(cpu_feroceon_do_resume)
#endif

	.type	__feroceon_setup, #function
__feroceon_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
#endif

	adr	r5, feroceon_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	bic	r0, r0, r5
	orr	r0, r0, r6
	ret	lr
	.size	__feroceon_setup, . - __feroceon_setup

	/*
	 *      B
	 *  R   P
	 * .RVI UFRS BLDP WCAM
	 * .011 .001 ..11 0101
	 *
	 */
	.type	feroceon_crval, #object
feroceon_crval:
	crval	clear=0x0000773f, mmuset=0x00003135, ucset=0x00001134
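	@ crval emits two words that __feroceon_setup loads: the control
	@ register bits to clear, then the bits to set (mmuset for MMU
	@ builds, ucset for nommu).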

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions feroceon, dabort=v5t_early_abort, pabort=legacy_pabort

	.section ".rodata"

	string	cpu_arch_name, "armv5te"
	string	cpu_elf_name, "v5"
	string	cpu_feroceon_name, "Feroceon"
	string	cpu_88fr531_name, "Feroceon 88FR531-vd"
	string	cpu_88fr571_name, "Feroceon 88FR571-vd"
	string	cpu_88fr131_name, "Feroceon 88FR131"

	.align

	.section ".proc.info.init", "a"

.macro feroceon_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache:req
	.type	__\name\()_proc_info,#object
__\name\()_proc_info:
	.long	\cpu_val
	.long	\cpu_mask
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	initfn	__feroceon_setup, __\name\()_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	\cpu_name
	.long	feroceon_processor_functions
	.long	v4wbi_tlb_fns
	.long	feroceon_user_fns
	.long	\cache
	.size	__\name\()_proc_info, . - __\name\()_proc_info
.endm
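
	@ Each invocation below emits one proc_info_list entry: the CPU ID
	@ value/mask pair matched at boot, the section mapping flags, and the
	@ processor, TLB, user and cache function tables selected for that
	@ part.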

#ifdef CONFIG_CPU_FEROCEON_OLD_ID
	feroceon_proc_info feroceon_old_id, 0x41009260, 0xff00fff0, \
		cpu_name=cpu_feroceon_name, cache=feroceon_cache_fns
#endif

	feroceon_proc_info 88fr531, 0x56055310, 0xfffffff0, cpu_88fr531_name, \
		cache=feroceon_cache_fns
	feroceon_proc_info 88fr571, 0x56155710, 0xfffffff0, cpu_88fr571_name, \
		cache=feroceon_range_cache_fns
	feroceon_proc_info 88fr131, 0x56251310, 0xfffffff0, cpu_88fr131_name, \
		cache=feroceon_range_cache_fns