/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * linux/arch/arm/mm/proc-feroceon.S: MMU functions for Feroceon
 *
 * Heavily based on proc-arm926.S
 * Maintainer: Assaf Hoffman <hoffman@marvell.com>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be invalidated
 * using the single invalidate entry instructions.  Anything larger
 * than this, and we go for the whole cache.
 *
 * This value should be chosen such that we choose the cheapest
 * alternative.
 */
#define CACHE_DLIMIT	16384

/*
 * the cache line size of the I and D cache
 */
#define CACHE_DLINESIZE	32

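/*
 * Cache geometry parameters, filled in by cpu_feroceon_proc_init() below:
 * the first word holds (number of D-cache sets - 1) << 5 and the second
 * (number of ways - 1) << 30, i.e. exactly the set and way fields used by
 * the "clean + invalidate D set/way" loop in __flush_whole_cache.
 */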
	.bss
	.align 3
__cache_params_loc:
	.space 8

	.text
__cache_params:
	.word __cache_params_loc

/*
 * cpu_feroceon_proc_init()
 */
ENTRY(cpu_feroceon_proc_init)
	mrc p15, 0, r0, c0, c0, 1 @ read cache type register
	ldr r1, __cache_params
	mov r2, #(16 << 5)
	tst r0, #(1 << 16) @ get way
	mov r0, r0, lsr #18 @ get cache size order
	movne r3, #((4 - 1) << 30) @ 4-way
	and r0, r0, #0xf
	moveq r3, #0 @ 1-way
	mov r2, r2, lsl r0 @ actual cache size
	movne r2, r2, lsr #2 @ turned into # of sets
	sub r2, r2, #(1 << 5)
	stmia r1, {r2, r3}
	ret lr

/*
 * cpu_feroceon_proc_fin()
 */
ENTRY(cpu_feroceon_proc_fin)
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	mov r0, #0
	mcr p15, 1, r0, c15, c9, 0 @ clean L2
	mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif

	mrc p15, 0, r0, c1, c0, 0 @ ctrl register
	bic r0, r0, #0x1000 @ ...i............
	bic r0, r0, #0x000e @ ............wca.
	mcr p15, 0, r0, c1, c0, 0 @ disable caches
	ret lr

/*
 * cpu_feroceon_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align 5
	.pushsection .idmap.text, "ax"
ENTRY(cpu_feroceon_reset)
	mov ip, #0
	mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
	mcr p15, 0, ip, c7, c10, 4 @ drain WB
#ifdef CONFIG_MMU
	mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
#endif
	mrc p15, 0, ip, c1, c0, 0 @ ctrl register
	bic ip, ip, #0x000f @ ............wcam
	bic ip, ip, #0x1100 @ ...i...s........
	mcr p15, 0, ip, c1, c0, 0 @ ctrl register
	ret r0
ENDPROC(cpu_feroceon_reset)
	.popsection

/*
 * cpu_feroceon_do_idle()
 *
 * Called with IRQs disabled
 */
	.align 5
ENTRY(cpu_feroceon_do_idle)
	mov r0, #0
	mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
	mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
	ret lr

/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
ENTRY(feroceon_flush_icache_all)
	mov r0, #0
	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
	ret lr
ENDPROC(feroceon_flush_icache_all)

/*
 * flush_user_cache_all()
 *
 * Clean and invalidate all cache entries in a particular
 * address space.
 */
	.align 5
ENTRY(feroceon_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(feroceon_flush_kern_cache_all)
	mov r2, #VM_EXEC

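/*
 * Clean and invalidate the whole D-cache by set/way: the inner loop walks
 * the ways of one set, the outer loop walks the sets, starting from the
 * limits precomputed in __cache_params_loc.
 */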
__flush_whole_cache:
	ldr r1, __cache_params
	ldmia r1, {r1, r3}
1:	orr ip, r1, r3
2:	mcr p15, 0, ip, c7, c14, 2 @ clean + invalidate D set/way
	subs ip, ip, #(1 << 30) @ next way
	bcs 2b
	subs r1, r1, #(1 << 5) @ next set
	bcs 1b

	tst r2, #VM_EXEC
	mov ip, #0
	mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
	ret lr

/*
 * flush_user_cache_range(start, end, flags)
 *
 * Clean and invalidate a range of cache entries in the
 * specified address range.
 *
 * - start - start address (inclusive)
 * - end - end address (exclusive)
 * - flags - vm_flags describing address space
 */
	.align 5
ENTRY(feroceon_flush_user_cache_range)
	sub r3, r1, r0 @ calculate total size
	cmp r3, #CACHE_DLIMIT
	bgt __flush_whole_cache
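	@ the per-line loop below is unrolled to two cache lines per iteration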
1:	tst r2, #VM_EXEC
	mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
	mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
	add r0, r0, #CACHE_DLINESIZE
	mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
	mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
	add r0, r0, #CACHE_DLINESIZE
	cmp r0, r1
	blo 1b
	tst r2, #VM_EXEC
	mov ip, #0
	mcrne p15, 0, ip, c7, c10, 4 @ drain WB
	ret lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
	.align 5
ENTRY(feroceon_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
ENTRY(feroceon_coherent_user_range)
	bic r0, r0, #CACHE_DLINESIZE - 1
1:	mcr p15, 0, r0, c7, c10, 1 @ clean D entry
	mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
	add r0, r0, #CACHE_DLINESIZE
	cmp r0, r1
	blo 1b
	mcr p15, 0, r0, c7, c10, 4 @ drain WB
	mov r0, #0
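	@ return 0 for success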
	ret lr

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr - kernel address
 * - size - region size
 */
	.align 5
ENTRY(feroceon_flush_kern_dcache_area)
	add r1, r0, r1
1:	mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
	add r0, r0, #CACHE_DLINESIZE
	cmp r0, r1
	blo 1b
	mov r0, #0
	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
	mcr p15, 0, r0, c7, c10, 4 @ drain WB
	ret lr

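/*
 * The feroceon_range_* variants below use the Feroceon-specific CP15 c15
 * range operations: a "range start" write followed by an (inclusive)
 * "range top" write covers the whole range in one go.  Interrupts are
 * disabled across each pair of writes.
 */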
	.align 5
ENTRY(feroceon_range_flush_kern_dcache_area)
	mrs r2, cpsr
	add r1, r0, #PAGE_SZ - CACHE_DLINESIZE @ top addr is inclusive
	orr r3, r2, #PSR_I_BIT
	msr cpsr_c, r3 @ disable interrupts
	mcr p15, 5, r0, c15, c15, 0 @ D clean/inv range start
	mcr p15, 5, r1, c15, c15, 1 @ D clean/inv range top
	msr cpsr_c, r2 @ restore interrupts
	mov r0, #0
	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
	mcr p15, 0, r0, c7, c10, 4 @ drain WB
	ret lr

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end - virtual end address
 *
 * (same as v4wb)
 */
	.align 5
feroceon_dma_inv_range:
	tst r0, #CACHE_DLINESIZE - 1
	bic r0, r0, #CACHE_DLINESIZE - 1
	mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
	tst r1, #CACHE_DLINESIZE - 1
	mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
1:	mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
	add r0, r0, #CACHE_DLINESIZE
	cmp r0, r1
	blo 1b
	mcr p15, 0, r0, c7, c10, 4 @ drain WB
	ret lr

	.align 5
feroceon_range_dma_inv_range:
	mrs r2, cpsr
	tst r0, #CACHE_DLINESIZE - 1
	mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
	tst r1, #CACHE_DLINESIZE - 1
	mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
	cmp r1, r0
	subne r1, r1, #1 @ top address is inclusive
	orr r3, r2, #PSR_I_BIT
	msr cpsr_c, r3 @ disable interrupts
	mcr p15, 5, r0, c15, c14, 0 @ D inv range start
	mcr p15, 5, r1, c15, c14, 1 @ D inv range top
	msr cpsr_c, r2 @ restore interrupts
	ret lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 *
 * (same as v4wb)
 */
	.align 5
feroceon_dma_clean_range:
	bic r0, r0, #CACHE_DLINESIZE - 1
1:	mcr p15, 0, r0, c7, c10, 1 @ clean D entry
	add r0, r0, #CACHE_DLINESIZE
	cmp r0, r1
	blo 1b
	mcr p15, 0, r0, c7, c10, 4 @ drain WB
	ret lr

	.align 5
feroceon_range_dma_clean_range:
	mrs r2, cpsr
	cmp r1, r0
	subne r1, r1, #1 @ top address is inclusive
	orr r3, r2, #PSR_I_BIT
	msr cpsr_c, r3 @ disable interrupts
	mcr p15, 5, r0, c15, c13, 0 @ D clean range start
	mcr p15, 5, r1, c15, c13, 1 @ D clean range top
	msr cpsr_c, r2 @ restore interrupts
	mcr p15, 0, r0, c7, c10, 4 @ drain WB
	ret lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
	.align 5
ENTRY(feroceon_dma_flush_range)
	bic r0, r0, #CACHE_DLINESIZE - 1
1:	mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
	add r0, r0, #CACHE_DLINESIZE
	cmp r0, r1
	blo 1b
	mcr p15, 0, r0, c7, c10, 4 @ drain WB
	ret lr

	.align 5
ENTRY(feroceon_range_dma_flush_range)
	mrs r2, cpsr
	cmp r1, r0
	subne r1, r1, #1 @ top address is inclusive
	orr r3, r2, #PSR_I_BIT
	msr cpsr_c, r3 @ disable interrupts
	mcr p15, 5, r0, c15, c15, 0 @ D clean/inv range start
	mcr p15, 5, r1, c15, c15, 1 @ D clean/inv range top
	msr cpsr_c, r2 @ restore interrupts
	mcr p15, 0, r0, c7, c10, 4 @ drain WB
	ret lr

/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 */
ENTRY(feroceon_dma_map_area)
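	@ turn (start, size) into (start, end), then pick the cheapest
	@ maintenance for the DMA direction: clean for DMA_TO_DEVICE,
	@ invalidate for DMA_FROM_DEVICE, clean + invalidate otherwise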
	add r1, r1, r0
	cmp r2, #DMA_TO_DEVICE
	beq feroceon_dma_clean_range
	bcs feroceon_dma_inv_range
	b feroceon_dma_flush_range
ENDPROC(feroceon_dma_map_area)

/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 */
ENTRY(feroceon_range_dma_map_area)
	add r1, r1, r0
	cmp r2, #DMA_TO_DEVICE
	beq feroceon_range_dma_clean_range
	bcs feroceon_range_dma_inv_range
	b feroceon_range_dma_flush_range
ENDPROC(feroceon_range_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 */
ENTRY(feroceon_dma_unmap_area)
	ret lr
ENDPROC(feroceon_dma_unmap_area)

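	@ there is no separate Level of Unification Inner Shareable on this
	@ CPU, so flush_kern_cache_louis is simply the whole-cache flush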
	.globl feroceon_flush_kern_cache_louis
	.equ feroceon_flush_kern_cache_louis, feroceon_flush_kern_cache_all

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions feroceon

.macro range_alias basename
	.globl feroceon_range_\basename
	.type feroceon_range_\basename , %function
	.equ feroceon_range_\basename , feroceon_\basename
.endm

/*
 * Most of the cache functions are unchanged for this case.
 * Export suitable alias symbols for the unchanged functions:
 */
	range_alias flush_icache_all
	range_alias flush_user_cache_all
	range_alias flush_kern_cache_all
	range_alias flush_kern_cache_louis
	range_alias flush_user_cache_range
	range_alias coherent_kern_range
	range_alias coherent_user_range
	range_alias dma_unmap_area

	define_cache_functions feroceon_range

	.align 5
ENTRY(cpu_feroceon_dcache_clean_area)
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	mov r2, r0
	mov r3, r1
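	@ keep the original (start, size) so the same range can be cleaned
	@ in L2 after the L1 pass below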
#endif
1:	mcr p15, 0, r0, c7, c10, 1 @ clean D entry
	add r0, r0, #CACHE_DLINESIZE
	subs r1, r1, #CACHE_DLINESIZE
	bhi 1b
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
1:	mcr p15, 1, r2, c15, c9, 1 @ clean L2 entry
	add r2, r2, #CACHE_DLINESIZE
	subs r3, r3, #CACHE_DLINESIZE
	bhi 1b
#endif
	mcr p15, 0, r0, c7, c10, 4 @ drain WB
	ret lr

/* =============================== PageTable ============================== */

/*
 * cpu_feroceon_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align 5
ENTRY(cpu_feroceon_switch_mm)
#ifdef CONFIG_MMU
	/*
	 * Note: we wish to call __flush_whole_cache but we need to preserve
	 * lr to do so.  The only way without touching main memory is to
	 * use r2 which is normally used to test the VM_EXEC flag, and
	 * compensate locally for the skipped ops if it is not set.
	 */
	mov r2, lr @ abuse r2 to preserve lr
	bl __flush_whole_cache
	@ if r2 contains the VM_EXEC bit then the next 2 ops are done already
	tst r2, #VM_EXEC
	mcreq p15, 0, ip, c7, c5, 0 @ invalidate I cache
	mcreq p15, 0, ip, c7, c10, 4 @ drain WB

	mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
	mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
	ret r2
#else
	ret lr
#endif

/*
 * cpu_feroceon_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align 5
ENTRY(cpu_feroceon_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext wc_disable=0
	mov r0, r0
	mcr p15, 0, r0, c7, c10, 1 @ clean D entry
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	mcr p15, 1, r0, c15, c9, 1 @ clean L2 entry
#endif
	mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif
	ret lr

/* Suspend/resume support: taken from arch/arm/mm/proc-arm926.S */
.globl cpu_feroceon_suspend_size
.equ cpu_feroceon_suspend_size, 4 * 3
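	@ three words are saved across suspend: PID, domain access control
	@ and the control register (see cpu_feroceon_do_suspend below)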
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_feroceon_do_suspend)
	stmfd sp!, {r4 - r6, lr}
	mrc p15, 0, r4, c13, c0, 0 @ PID
	mrc p15, 0, r5, c3, c0, 0 @ Domain ID
	mrc p15, 0, r6, c1, c0, 0 @ Control register
	stmia r0, {r4 - r6}
	ldmfd sp!, {r4 - r6, pc}
ENDPROC(cpu_feroceon_do_suspend)

ENTRY(cpu_feroceon_do_resume)
	mov ip, #0
	mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs
	mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches
	ldmia r0, {r4 - r6}
	mcr p15, 0, r4, c13, c0, 0 @ PID
	mcr p15, 0, r5, c3, c0, 0 @ Domain ID
	mcr p15, 0, r1, c2, c0, 0 @ TTB address
	mov r0, r6 @ control register
	b cpu_resume_mmu
ENDPROC(cpu_feroceon_do_resume)
#endif

	.type __feroceon_setup, #function
__feroceon_setup:
	mov r0, #0
	mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
	mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4
#endif

	adr r5, feroceon_crval
	ldmia r5, {r5, r6}
	mrc p15, 0, r0, c1, c0 @ get control register v4
	bic r0, r0, r5
	orr r0, r0, r6
	ret lr
	.size __feroceon_setup, . - __feroceon_setup

	/*
	 * B
	 * R P
	 * .RVI UFRS BLDP WCAM
	 * .011 .001 ..11 0101
	 *
	 */
	.type feroceon_crval, #object
feroceon_crval:
	crval clear=0x0000773f, mmuset=0x00003135, ucset=0x00001134
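	@ crval emits the "clear" mask followed by the mmuset (MMU) or ucset
	@ (no-MMU) value; __feroceon_setup masks the boot-time control
	@ register with the first word and ORs in the second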

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions feroceon, dabort=v5t_early_abort, pabort=legacy_pabort

	.section ".rodata"

	string cpu_arch_name, "armv5te"
	string cpu_elf_name, "v5"
	string cpu_feroceon_name, "Feroceon"
	string cpu_88fr531_name, "Feroceon 88FR531-vd"
	string cpu_88fr571_name, "Feroceon 88FR571-vd"
	string cpu_88fr131_name, "Feroceon 88FR131"

	.align

	.section ".proc.info.init", "a"

.macro feroceon_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache:req
	.type __\name\()_proc_info,#object
__\name\()_proc_info:
	.long \cpu_val
	.long \cpu_mask
	.long PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	initfn __feroceon_setup, __\name\()_proc_info
	.long cpu_arch_name
	.long cpu_elf_name
	.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long \cpu_name
	.long feroceon_processor_functions
	.long v4wbi_tlb_fns
	.long feroceon_user_fns
	.long \cache
	.size __\name\()_proc_info, . - __\name\()_proc_info
.endm

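/*
 * One proc_info record per supported CPU: at boot the kernel matches the
 * main ID register against cpu_val/cpu_mask to select an entry, then runs
 * __feroceon_setup before the MMU is enabled.
 */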
#ifdef CONFIG_CPU_FEROCEON_OLD_ID
	feroceon_proc_info feroceon_old_id, 0x41009260, 0xff00fff0, \
		cpu_name=cpu_feroceon_name, cache=feroceon_cache_fns
#endif

	feroceon_proc_info 88fr531, 0x56055310, 0xfffffff0, cpu_88fr531_name, \
		cache=feroceon_cache_fns
	feroceon_proc_info 88fr571, 0x56155710, 0xfffffff0, cpu_88fr571_name, \
		cache=feroceon_range_cache_fns
	feroceon_proc_info 88fr131, 0x56251310, 0xfffffff0, cpu_88fr131_name, \
		cache=feroceon_range_cache_fns