/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * linux/arch/arm/mm/proc-arm920.S: MMU functions for ARM920
 *
 * Copyright (C) 1999,2000 ARM Limited
 * Copyright (C) 2000 Deep Blue Solutions Ltd.
 * hacked for non-paged-MM by Hyok S. Choi, 2003.
 *
 * This is the low-level assembly code for performing the cache and TLB
 * maintenance functions on the ARM920.
 *
 * CONFIG_CPU_ARM920_CPU_IDLE -> nohlt
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	8

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 */
#define CACHE_DLIMIT	65536

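/*
 * Format of the c7, c14, 2 "clean+invalidate D index" operand used by
 * the loops below (per the ARM920T TRM): bits [31:26] select the entry
 * within a segment and bits [7:5] select the segment, which is why the
 * loops step by 1 << 26 over entries and 1 << 5 over segments.
 */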

	.text
/*
 * cpu_arm920_proc_init()
 */
ENTRY(cpu_arm920_proc_init)
	ret	lr

/*
 * cpu_arm920_proc_fin()
 */
ENTRY(cpu_arm920_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr

/*
 * cpu_arm920_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
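@ This code turns the MMU and caches off, so it lives in the
@ identity-mapped .idmap.text section, where execution can continue
@ safely across the change of address map.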
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_arm920_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0
ENDPROC(cpu_arm920_reset)
	.popsection

/*
 * cpu_arm920_do_idle()
 */
	.align	5
ENTRY(cpu_arm920_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	ret	lr


#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH

/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
ENTRY(arm920_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(arm920_flush_icache_all)

/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 */
ENTRY(arm920_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(arm920_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	mov	r1, #(CACHE_DSEGMENTS - 1) << 5	@ 8 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean+invalidate D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b				@ segments 7 to 0
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 * flush_user_cache_range(start, end, flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start	- start address (inclusive)
 * - end	- end address (exclusive)
 * - flags	- vm_flags for address space
 */
ENTRY(arm920_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm920_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm920_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	r0, #0				@ return 0 for success
	ret	lr

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr	- kernel address
 * - size	- region size
 */
ENTRY(arm920_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as v4wb)
 */
arm920_dma_inv_range:
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as v4wb)
 */
arm920_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm920_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_map_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(arm920_dma_map_area)
	add	r1, r1, r0
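	@ With the standard enum dma_data_direction values (BIDIRECTIONAL = 0,
	@ TO_DEVICE = 1, FROM_DEVICE = 2), the compare below cleans for writes
	@ to the device, invalidates for reads from it (bcs is an unsigned >=,
	@ catching FROM_DEVICE), and cleans+invalidates for the rest.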
	cmp	r2, #DMA_TO_DEVICE
	beq	arm920_dma_clean_range
	bcs	arm920_dma_inv_range
	b	arm920_dma_flush_range
ENDPROC(arm920_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(arm920_dma_unmap_area)
	ret	lr
ENDPROC(arm920_dma_unmap_area)

	.globl	arm920_flush_kern_cache_louis
	.equ	arm920_flush_kern_cache_louis, arm920_flush_kern_cache_all
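	@ The ARM920 has a single cache level, so flushing to the level of
	@ unification (louis) is the same operation as flushing the whole cache.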

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions arm920
#endif


ENTRY(cpu_arm920_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
	ret	lr

/* =============================== PageTable ============================== */

/*
 * cpu_arm920_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
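@ The ARM920's caches are virtually indexed and tagged (VIVT), so the
@ whole D-cache must be cleaned, and the I-cache and TLBs invalidated,
@ before the translation table base is switched.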
	.align	5
ENTRY(cpu_arm920_switch_mm)
#ifdef CONFIG_MMU
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
#else
@ && 'Clean & Invalidate whole DCache'
@ && Re-written to use Index Ops.
@ && Uses registers r1, r3 and ip

	mov	r1, #(CACHE_DSEGMENTS - 1) << 5	@ 8 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean & invalidate D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b				@ segments 7 to 0
#endif
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	ret	lr

/*
 * cpu_arm920_set_pte(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
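@ armv3_set_pte_ext (from proc-macros.S) stores the hardware PTE derived
@ from the Linux PTE; the clean and drain below push that write out to
@ memory so the MMU's table walks see the update.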
	.align	5
ENTRY(cpu_arm920_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext
	mov	r0, r0
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif
	ret	lr

/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl	cpu_arm920_suspend_size
.equ	cpu_arm920_suspend_size, 4 * 3
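@ Three words are saved across suspend: the PID, the domain access
@ control value and the control register, matching the stmia below.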
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_arm920_do_suspend)
	stmfd	sp!, {r4 - r6, lr}
	mrc	p15, 0, r4, c13, c0, 0	@ PID
	mrc	p15, 0, r5, c3, c0, 0	@ Domain ID
	mrc	p15, 0, r6, c1, c0, 0	@ Control register
	stmia	r0, {r4 - r6}
	ldmfd	sp!, {r4 - r6, pc}
ENDPROC(cpu_arm920_do_suspend)

ENTRY(cpu_arm920_do_resume)
	mov	ip, #0
	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I+D TLBs
	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I+D caches
	ldmia	r0, {r4 - r6}
	mcr	p15, 0, r4, c13, c0, 0	@ PID
	mcr	p15, 0, r5, c3, c0, 0	@ Domain ID
	mcr	p15, 0, r1, c2, c0, 0	@ TTB address
	mov	r0, r6			@ control register
	b	cpu_resume_mmu
ENDPROC(cpu_arm920_do_resume)
#endif

	.type	__arm920_setup, #function
__arm920_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
#endif
	adr	r5, arm920_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	bic	r0, r0, r5
	orr	r0, r0, r6
	ret	lr
	.size	__arm920_setup, . - __arm920_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * ..11 0001 ..11 0101
	 *
	 */
	.type	arm920_crval, #object
arm920_crval:
	crval	clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130
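	@ crval (see proc-macros.S) emits two words: the control-register
	@ bits to clear, then the bits to set -- "mmuset" on MMU kernels,
	@ "ucset" on !CONFIG_MMU builds.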

	__INITDATA
	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions arm920, dabort=v4t_early_abort, pabort=legacy_pabort, suspend=1

	.section ".rodata"

	string	cpu_arch_name, "armv4t"
	string	cpu_elf_name, "v4"
	string	cpu_arm920_name, "ARM920T"

	.align

	.section ".proc.info.init", "a"

	.type	__arm920_proc_info,#object
__arm920_proc_info:
	.long	0x41009200			@ CPU ID value
	.long	0xff00fff0			@ CPU ID mask
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ		@ __cpu_mm_mmu_flags
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ		@ __cpu_io_mmu_flags
	initfn	__arm920_setup, __arm920_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm920_name
	.long	arm920_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	.long	arm920_cache_fns
#else
	.long	v4wt_cache_fns
#endif
	.size	__arm920_proc_info, . - __arm920_proc_info