/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * linux/arch/arm/mm/proc-feroceon.S: MMU functions for Feroceon
 *
 * Heavily based on proc-arm926.S
 * Maintainer: Assaf Hoffman <hoffman@marvell.com>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be invalidated
 * using the single invalidate entry instructions.  Anything larger
 * than this, and we go for the whole cache.
 *
 * This value should be chosen so that we always use the cheaper of
 * the two alternatives.
 */
#define CACHE_DLIMIT	16384

/*
 * the cache line size of the I and D cache
 */
#define CACHE_DLINESIZE	32

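/*
 * __cache_params_loc holds two words filled in at boot by
 * cpu_feroceon_proc_init(): the index of the last D cache set, already
 * shifted into bit 5 upwards, and the index of the last way in bits
 * 31:30.  Together they form the operand used by the clean+invalidate
 * by set/way loop in __flush_whole_cache.
 */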
	.bss
	.align	3
__cache_params_loc:
	.space	8

	.text
__cache_params:
	.word	__cache_params_loc

/*
 * cpu_feroceon_proc_init()
 */
ENTRY(cpu_feroceon_proc_init)
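	@ Decode the D cache geometry from the cache type register and
	@ record the set/way loop parameters: r2 becomes the last set
	@ index (<< 5) and r3 the last way index (<< 30), stored at
	@ __cache_params_loc for use by __flush_whole_cache.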
	mrc	p15, 0, r0, c0, c0, 1		@ read cache type register
	ldr	r1, __cache_params
	mov	r2, #(16 << 5)
	tst	r0, #(1 << 16)			@ get way
	mov	r0, r0, lsr #18			@ get cache size order
	movne	r3, #((4 - 1) << 30)		@ 4-way
	and	r0, r0, #0xf
	moveq	r3, #0				@ 1-way
	mov	r2, r2, lsl r0			@ actual cache size
	movne	r2, r2, lsr #2			@ turned into # of sets
	sub	r2, r2, #(1 << 5)
	stmia	r1, {r2, r3}
#ifdef CONFIG_VFP
	mov	r1, #1				@ disable quirky VFP
	str_l	r1, VFP_arch_feroceon, r2
#endif
	ret	lr

/*
 * cpu_feroceon_proc_fin()
 */
ENTRY(cpu_feroceon_proc_fin)
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	mov	r0, #0
	mcr	p15, 1, r0, c15, c9, 0		@ clean L2
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif

	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr

/*
 * cpu_feroceon_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_feroceon_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0
ENDPROC(cpu_feroceon_reset)
	.popsection

/*
 * cpu_feroceon_do_idle()
 *
 * Called with IRQs disabled
 */
	.align	5
ENTRY(cpu_feroceon_do_idle)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ Drain write buffer
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	ret	lr

/*
 * flush_icache_all()
 *
 * Unconditionally invalidate the entire icache.
 */
ENTRY(feroceon_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(feroceon_flush_icache_all)

/*
 * flush_user_cache_all()
 *
 * Clean and invalidate all cache entries in a particular
 * address space.
 */
	.align	5
ENTRY(feroceon_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(feroceon_flush_kern_cache_all)
	mov	r2, #VM_EXEC

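/*
 * Clean and invalidate the whole D cache by set/way, walking the
 * geometry recorded by cpu_feroceon_proc_init().  On entry r2 holds
 * the vm_flags: if VM_EXEC is set, the I cache is invalidated and the
 * write buffer drained as well.
 */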
__flush_whole_cache:
	ldr	r1, __cache_params
	ldmia	r1, {r1, r3}
1:	orr	ip, r1, r3
2:	mcr	p15, 0, ip, c7, c14, 2		@ clean + invalidate D set/way
	subs	ip, ip, #(1 << 30)		@ next way
	bcs	2b
	subs	r1, r1, #(1 << 5)		@ next set
	bcs	1b

	tst	r2, #VM_EXEC
	mov	ip, #0
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 * flush_user_cache_range(start, end, flags)
 *
 * Clean and invalidate a range of cache entries in the
 * specified address range.
 *
 * - start	- start address (inclusive)
 * - end	- end address (exclusive)
 * - flags	- vm_flags describing address space
 */
	.align	5
ENTRY(feroceon_flush_user_cache_range)
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bgt	__flush_whole_cache
1:	tst	r2, #VM_EXEC
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mov	ip, #0
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
	.align	5
ENTRY(feroceon_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(feroceon_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	r0, #0
	ret	lr

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr	- kernel address
 * - size	- region size
 */
	.align	5
ENTRY(feroceon_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

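/*
 * The "range" variants below use the Feroceon specific CP15 c15 range
 * operations: one MCR supplies the start address and a second supplies
 * the (inclusive) top address, and the hardware then cleans and/or
 * invalidates every D cache line in between.  Interrupts are disabled
 * around each pair so the two operations are issued back to back.
 * These variants back feroceon_range_cache_fns, used by the 88FR571
 * and 88FR131 (see the proc_info entries at the end of this file).
 */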
	.align	5
ENTRY(feroceon_range_flush_kern_dcache_area)
	mrs	r2, cpsr
	add	r1, r0, #PAGE_SZ - CACHE_DLINESIZE	@ top addr is inclusive
	orr	r3, r2, #PSR_I_BIT
	msr	cpsr_c, r3			@ disable interrupts
	mcr	p15, 5, r0, c15, c15, 0		@ D clean/inv range start
	mcr	p15, 5, r1, c15, c15, 1		@ D clean/inv range top
	msr	cpsr_c, r2			@ restore interrupts
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as v4wb)
 */
	.align	5
feroceon_dma_inv_range:
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

	.align	5
feroceon_range_dma_inv_range:
	mrs	r2, cpsr
	tst	r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
	cmp	r1, r0
	subne	r1, r1, #1			@ top address is inclusive
	orr	r3, r2, #PSR_I_BIT
	msr	cpsr_c, r3			@ disable interrupts
	mcr	p15, 5, r0, c15, c14, 0		@ D inv range start
	mcr	p15, 5, r1, c15, c14, 1		@ D inv range top
	msr	cpsr_c, r2			@ restore interrupts
	ret	lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as v4wb)
 */
	.align	5
feroceon_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

	.align	5
feroceon_range_dma_clean_range:
	mrs	r2, cpsr
	cmp	r1, r0
	subne	r1, r1, #1			@ top address is inclusive
	orr	r3, r2, #PSR_I_BIT
	msr	cpsr_c, r3			@ disable interrupts
	mcr	p15, 5, r0, c15, c13, 0		@ D clean range start
	mcr	p15, 5, r1, c15, c13, 1		@ D clean range top
	msr	cpsr_c, r2			@ restore interrupts
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
	.align	5
ENTRY(feroceon_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

	.align	5
ENTRY(feroceon_range_dma_flush_range)
	mrs	r2, cpsr
	cmp	r1, r0
	subne	r1, r1, #1			@ top address is inclusive
	orr	r3, r2, #PSR_I_BIT
	msr	cpsr_c, r3			@ disable interrupts
	mcr	p15, 5, r0, c15, c15, 0		@ D clean/inv range start
	mcr	p15, 5, r1, c15, c15, 1		@ D clean/inv range top
	msr	cpsr_c, r2			@ restore interrupts
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_map_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(feroceon_dma_map_area)
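	@ r0/r1 are turned from (start, size) into a virtual address
	@ range; r2 selects the maintenance operation: DMA_TO_DEVICE
	@ cleans, DMA_FROM_DEVICE invalidates, DMA_BIDIRECTIONAL does
	@ both (clean+invalidate).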
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	feroceon_dma_clean_range
	bcs	feroceon_dma_inv_range
	b	feroceon_dma_flush_range
ENDPROC(feroceon_dma_map_area)

/*
 * dma_map_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(feroceon_range_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	feroceon_range_dma_clean_range
	bcs	feroceon_range_dma_inv_range
	b	feroceon_range_dma_flush_range
ENDPROC(feroceon_range_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(feroceon_dma_unmap_area)
	ret	lr
ENDPROC(feroceon_dma_unmap_area)

	.globl	feroceon_flush_kern_cache_louis
	.equ	feroceon_flush_kern_cache_louis, feroceon_flush_kern_cache_all

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions feroceon

.macro range_alias basename
	.globl feroceon_range_\basename
	.type feroceon_range_\basename , %function
	.equ feroceon_range_\basename , feroceon_\basename
.endm

/*
 * Most of the cache functions are unchanged for this case.
 * Export suitable alias symbols for the unchanged functions:
 */
	range_alias flush_icache_all
	range_alias flush_user_cache_all
	range_alias flush_kern_cache_all
	range_alias flush_kern_cache_louis
	range_alias flush_user_cache_range
	range_alias coherent_kern_range
	range_alias coherent_user_range
	range_alias dma_unmap_area

	define_cache_functions feroceon_range

	.align	5
ENTRY(cpu_feroceon_dcache_clean_area)
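	@ Clean r1 bytes starting at r0 out of the D cache; when a
	@ write-back L2 is present, r2/r3 keep a copy of the original
	@ (start, size) so the same range can be cleaned out of L2
	@ afterwards.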
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	mov	r2, r0
	mov	r3, r1
#endif
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
1:	mcr	p15, 1, r2, c15, c9, 1		@ clean L2 entry
	add	r2, r2, #CACHE_DLINESIZE
	subs	r3, r3, #CACHE_DLINESIZE
	bhi	1b
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/* =============================== PageTable ============================== */

/*
 * cpu_feroceon_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_feroceon_switch_mm)
#ifdef CONFIG_MMU
	/*
	 * Note: we wish to call __flush_whole_cache but we need to preserve
	 * lr to do so.  The only way without touching main memory is to
	 * use r2 which is normally used to test the VM_EXEC flag, and
	 * compensate locally for the skipped ops if it is not set.
	 */
	mov	r2, lr				@ abuse r2 to preserve lr
	bl	__flush_whole_cache
	@ if r2 contains the VM_EXEC bit then the next 2 ops are done already
	tst	r2, #VM_EXEC
	mcreq	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcreq	p15, 0, ip, c7, c10, 4		@ drain WB

	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	ret	r2
#else
	ret	lr
#endif

/*
 * cpu_feroceon_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_feroceon_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext wc_disable=0
	mov	r0, r0
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	mcr	p15, 1, r0, c15, c9, 1		@ clean L2 entry
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif
	ret	lr

/* Suspend/resume support: taken from arch/arm/mm/proc-arm926.S */
.globl	cpu_feroceon_suspend_size
.equ	cpu_feroceon_suspend_size, 4 * 3
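@ three words are saved: PID, domain access control and the control register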
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_feroceon_do_suspend)
	stmfd	sp!, {r4 - r6, lr}
	mrc	p15, 0, r4, c13, c0, 0	@ PID
	mrc	p15, 0, r5, c3, c0, 0	@ Domain ID
	mrc	p15, 0, r6, c1, c0, 0	@ Control register
	stmia	r0, {r4 - r6}
	ldmfd	sp!, {r4 - r6, pc}
ENDPROC(cpu_feroceon_do_suspend)

ENTRY(cpu_feroceon_do_resume)
	mov	ip, #0
	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I+D TLBs
	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I+D caches
	ldmia	r0, {r4 - r6}
	mcr	p15, 0, r4, c13, c0, 0	@ PID
	mcr	p15, 0, r5, c3, c0, 0	@ Domain ID
	mcr	p15, 0, r1, c2, c0, 0	@ TTB address
	mov	r0, r6			@ control register
	b	cpu_resume_mmu
ENDPROC(cpu_feroceon_do_resume)
#endif

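/*
 * __feroceon_setup()
 *
 * Invalidate the caches, write buffer and TLBs, then compute the value
 * to be loaded into the control register: the current value is masked
 * with the 'clear' word of feroceon_crval and ORed with the 'set'
 * word, and the result is returned in r0 for the caller to write.
 */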
	.type	__feroceon_setup, #function
__feroceon_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
#endif

	adr	r5, feroceon_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	bic	r0, r0, r5
	orr	r0, r0, r6
	ret	lr
	.size	__feroceon_setup, . - __feroceon_setup

	/*
	 *  B
	 *  R P
	 * .RVI UFRS BLDP WCAM
	 * .011 .001 ..11 0101
	 *
	 */
	.type	feroceon_crval, #object
feroceon_crval:
	crval	clear=0x0000773f, mmuset=0x00003135, ucset=0x00001134

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions feroceon, dabort=v5t_early_abort, pabort=legacy_pabort

	.section ".rodata"

	string	cpu_arch_name, "armv5te"
	string	cpu_elf_name, "v5"
	string	cpu_feroceon_name, "Feroceon"
	string	cpu_88fr531_name, "Feroceon 88FR531-vd"
	string	cpu_88fr571_name, "Feroceon 88FR571-vd"
	string	cpu_88fr131_name, "Feroceon 88FR131"

	.align

	.section ".proc.info.init", "a"

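/*
 * Each invocation of this macro emits one struct proc_info_list entry
 * (see <asm/procinfo.h>); the boot code matches the main ID register
 * against cpu_val/cpu_mask to pick the entry for the running CPU.
 */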
.macro feroceon_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache:req
	.type	__\name\()_proc_info,#object
__\name\()_proc_info:
	.long	\cpu_val
	.long	\cpu_mask
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	initfn	__feroceon_setup, __\name\()_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	\cpu_name
	.long	feroceon_processor_functions
	.long	v4wbi_tlb_fns
	.long	feroceon_user_fns
	.long	\cache
	.size	__\name\()_proc_info, . - __\name\()_proc_info
.endm

#ifdef CONFIG_CPU_FEROCEON_OLD_ID
	feroceon_proc_info feroceon_old_id, 0x41009260, 0xff00fff0, \
		cpu_name=cpu_feroceon_name, cache=feroceon_cache_fns
#endif

	feroceon_proc_info 88fr531, 0x56055310, 0xfffffff0, cpu_88fr531_name, \
		cache=feroceon_cache_fns
	feroceon_proc_info 88fr571, 0x56155710, 0xfffffff0, cpu_88fr571_name, \
		cache=feroceon_range_cache_fns
	feroceon_proc_info 88fr131, 0x56251310, 0xfffffff0, cpu_88fr131_name, \
		cache=feroceon_range_cache_fns