/*
 * linux/arch/arm/mm/arm940.S: utility functions for ARM940T
 *
 * Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/* ARM940T has a 4KB DCache comprising 256 lines of 4 words */
#define CACHE_DLINESIZE	16
#define CACHE_DSEGMENTS	4
#define CACHE_DENTRIES	64
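
/*
 * The loops below operate on the D cache by set/way ("index"): 4 segments
 * of 64 entries of 16-byte lines (4 * 64 * 16 = 4KB).  The segment number
 * is placed in bits [5:4] and the entry index in bits [31:26] of the value
 * passed to the c7 maintenance operations, hence the "<< 4" and "<< 26"
 * strides used in the clean/flush loops.
 */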

	.text
/*
 * cpu_arm940_proc_init()
 * cpu_arm940_switch_mm()
 *
 * These are not required.
 */
ENTRY(cpu_arm940_proc_init)
ENTRY(cpu_arm940_switch_mm)
	mov	pc, lr

/*
 * cpu_arm940_proc_fin()
 */
ENTRY(cpu_arm940_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x00001000		@ i-cache
	bic	r0, r0, #0x00000004		@ d-cache
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	mov	pc, lr

/*
 * cpu_arm940_reset(loc)
 * Params  : r0 = address to jump to
 * Notes   : This sets up everything for a reset
 */
ENTRY(cpu_arm940_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ flush I cache
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x00000005		@ .............c.p
	bic	ip, ip, #0x00001000		@ i-cache
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	mov	pc, r0

/*
 * cpu_arm940_do_idle()
 */
	.align	5
ENTRY(cpu_arm940_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	mov	pc, lr

/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
ENTRY(arm940_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mov	pc, lr
ENDPROC(arm940_flush_icache_all)

/*
 * flush_user_cache_all()
 */
ENTRY(arm940_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(arm940_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	/* FALLTHROUGH */

/*
 * flush_user_cache_range(start, end, flags)
 *
 * There is no efficient way to flush a range of cache entries
 * in the specified address range. Thus, flushes all.
 *
 * - start - start address (inclusive)
 * - end   - end address (exclusive)
 * - flags - vm_flags describing address space
 */
ENTRY(arm940_flush_user_cache_range)
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
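	/*
	 * A write-through D cache holds no dirty data, so invalidating
	 * the whole cache is sufficient; the write-back configuration
	 * below must instead clean and flush every line by index.
	 */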
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
#else
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
#endif
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(arm940_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(arm940_coherent_user_range)
	/* FALLTHROUGH */

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache.
 *
 * - addr  - kernel address
 * - size  - region size
 */
ENTRY(arm940_flush_kern_dcache_area)
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_inv_range(start, end)
 *
 * There is no efficient way to invalidate a specified virtual
 * address range. Thus, invalidates all.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
arm940_dma_inv_range:
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c6, 2		@ flush D entry
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_clean_range(start, end)
 *
 * There is no efficient way to clean a specified virtual
 * address range. Thus, cleans all.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
arm940_dma_clean_range:
ENTRY(cpu_arm940_dcache_clean_area)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c10, 2		@ clean D entry
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_flush_range(start, end)
 *
 * There is no efficient way to clean and invalidate a specified
 * virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(arm940_dma_flush_range)
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D entry
#else
	mcr	p15, 0, r3, c7, c6, 2		@ invalidate D entry
#endif
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
ENTRY(arm940_dma_map_area)
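	/*
	 * r2 holds the DMA direction (<linux/dma-direction.h>):
	 * DMA_TO_DEVICE (1) only needs a clean, DMA_FROM_DEVICE (2)
	 * an invalidate, and DMA_BIDIRECTIONAL (0) a full
	 * clean+invalidate.
	 */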
	add	r1, r1, r0			@ r1 = end address
	cmp	r2, #DMA_TO_DEVICE
	beq	arm940_dma_clean_range
	bcs	arm940_dma_inv_range
	b	arm940_dma_flush_range
ENDPROC(arm940_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
ENTRY(arm940_dma_unmap_area)
	mov	pc, lr
ENDPROC(arm940_dma_unmap_area)

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions arm940

	__CPUINIT

	.type	__arm940_setup, #function
__arm940_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c6, 0		@ invalidate D cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB

	mcr	p15, 0, r0, c6, c3, 0		@ disable data area 3~7
	mcr	p15, 0, r0, c6, c4, 0
	mcr	p15, 0, r0, c6, c5, 0
	mcr	p15, 0, r0, c6, c6, 0
	mcr	p15, 0, r0, c6, c7, 0

	mcr	p15, 0, r0, c6, c3, 1		@ disable instruction area 3~7
	mcr	p15, 0, r0, c6, c4, 1
	mcr	p15, 0, r0, c6, c5, 1
	mcr	p15, 0, r0, c6, c6, 1
	mcr	p15, 0, r0, c6, c7, 1

	mov	r0, #0x0000003F			@ base = 0, size = 4GB
	mcr	p15, 0, r0, c6, c0, 0		@ set area 0, default
	mcr	p15, 0, r0, c6, c0, 1
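	/*
	 * Protection region register format: bit 0 enables the region,
	 * bits [5:1] hold a size index N (region size = 2^(N+1) bytes,
	 * so 11 = 4KB and 31 = 4GB) and bits [31:12] the base address.
	 * The loops below compute N from the size in 4KB pages.
	 */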

	ldr	r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
	ldr	r1, =(CONFIG_DRAM_SIZE >> 12)	@ size of RAM (must be >= 4KB)
	mov	r2, #10				@ 11 is the minimum (4KB)
1:	add	r2, r2, #1			@ area size *= 2
	movs	r1, r1, lsr #1
	bne	1b				@ count not zero r-shift
	orr	r0, r0, r2, lsl #1		@ the area register value
	orr	r0, r0, #1			@ set enable bit
	mcr	p15, 0, r0, c6, c1, 0		@ set area 1, RAM
	mcr	p15, 0, r0, c6, c1, 1

	ldr	r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
	ldr	r1, =(CONFIG_FLASH_SIZE >> 12)	@ size of FLASH (must be >= 4KB)
	mov	r2, #10				@ 11 is the minimum (4KB)
1:	add	r2, r2, #1			@ area size *= 2
	movs	r1, r1, lsr #1
	bne	1b				@ count not zero r-shift
	orr	r0, r0, r2, lsl #1		@ the area register value
	orr	r0, r0, #1			@ set enable bit
	mcr	p15, 0, r0, c6, c2, 0		@ set area 2, ROM/FLASH
	mcr	p15, 0, r0, c6, c2, 1

	mov	r0, #0x06
	mcr	p15, 0, r0, c2, c0, 0		@ Region 1&2 cacheable
	mcr	p15, 0, r0, c2, c0, 1
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r0, #0x00			@ disable whole write buffer
#else
	mov	r0, #0x02			@ Region 1 write buffered
#endif
	mcr	p15, 0, r0, c3, c0, 0

	mov	r0, #0x10000
	sub	r0, r0, #1			@ r0 = 0xffff
	mcr	p15, 0, r0, c5, c0, 0		@ all read/write access
	mcr	p15, 0, r0, c5, c0, 1

	mrc	p15, 0, r0, c1, c0		@ get control register
	orr	r0, r0, #0x00001000		@ I-cache
	orr	r0, r0, #0x00000005		@ MPU/D-cache
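	/*
	 * The value assembled in r0 is returned to the caller, which is
	 * expected to write it back to the CP15 control register to
	 * enable the MPU and the caches.
	 */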

	mov	pc, lr

	.size	__arm940_setup, . - __arm940_setup

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions arm940, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1

	.section ".rodata"

	string	cpu_arch_name, "armv4t"
	string	cpu_elf_name, "v4"
	string	cpu_arm940_name, "ARM940T"

	.align

	.section ".proc.info.init", #alloc, #execinstr

	.type	__arm940_proc_info,#object
__arm940_proc_info:
	.long	0x41009400
	.long	0xff00fff0
	.long	0
	b	__arm940_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm940_name
	.long	arm940_processor_functions
	.long	0
	.long	0
	.long	arm940_cache_fns
	.size	__arm940_proc_info, . - __arm940_proc_info

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm/mm/arm940.S: utility functions for ARM940T
 *
 * Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com)
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/* ARM940T has a 4KB DCache comprising 256 lines of 4 words */
#define CACHE_DLINESIZE	16
#define CACHE_DSEGMENTS	4
#define CACHE_DENTRIES	64
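
/*
 * The loops below operate on the D cache by set/way ("index"): 4 segments
 * of 64 entries of 16-byte lines (4 * 64 * 16 = 4KB).  The segment number
 * is placed in bits [5:4] and the entry index in bits [31:26] of the value
 * passed to the c7 maintenance operations, hence the "<< 4" and "<< 26"
 * strides used in the clean/flush loops.
 */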

	.text
/*
 * cpu_arm940_proc_init()
 * cpu_arm940_switch_mm()
 *
 * These are not required.
 */
SYM_TYPED_FUNC_START(cpu_arm940_proc_init)
	ret	lr
SYM_FUNC_END(cpu_arm940_proc_init)

SYM_TYPED_FUNC_START(cpu_arm940_switch_mm)
	ret	lr
SYM_FUNC_END(cpu_arm940_switch_mm)

/*
 * cpu_arm940_proc_fin()
 */
SYM_TYPED_FUNC_START(cpu_arm940_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x00001000		@ i-cache
	bic	r0, r0, #0x00000004		@ d-cache
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr
SYM_FUNC_END(cpu_arm940_proc_fin)

/*
 * cpu_arm940_reset(loc)
 * Params  : r0 = address to jump to
 * Notes   : This sets up everything for a reset
 */
	.pushsection	.idmap.text, "ax"
SYM_TYPED_FUNC_START(cpu_arm940_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ flush I cache
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x00000005		@ .............c.p
	bic	ip, ip, #0x00001000		@ i-cache
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0
SYM_FUNC_END(cpu_arm940_reset)
	.popsection

/*
 * cpu_arm940_do_idle()
 */
	.align	5
SYM_TYPED_FUNC_START(cpu_arm940_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	ret	lr
SYM_FUNC_END(cpu_arm940_do_idle)

/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
SYM_TYPED_FUNC_START(arm940_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
SYM_FUNC_END(arm940_flush_icache_all)

/*
 * flush_user_cache_all()
 */
SYM_FUNC_ALIAS(arm940_flush_user_cache_all, arm940_flush_kern_cache_all)

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
SYM_TYPED_FUNC_START(arm940_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	b	arm940_flush_user_cache_range
SYM_FUNC_END(arm940_flush_kern_cache_all)

/*
 * flush_user_cache_range(start, end, flags)
 *
 * There is no efficient way to flush a range of cache entries
 * in the specified address range. Thus, flushes all.
 *
 * - start - start address (inclusive)
 * - end   - end address (exclusive)
 * - flags - vm_flags describing address space
 */
SYM_TYPED_FUNC_START(arm940_flush_user_cache_range)
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
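	/*
	 * A write-through D cache holds no dirty data, so invalidating
	 * the whole cache is sufficient; the write-back configuration
	 * below must instead clean and flush every line by index.
	 */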
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
#else
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
#endif
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(arm940_flush_user_cache_range)

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
SYM_TYPED_FUNC_START(arm940_coherent_kern_range)
	b	arm940_flush_kern_dcache_area
SYM_FUNC_END(arm940_coherent_kern_range)

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
SYM_TYPED_FUNC_START(arm940_coherent_user_range)
#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
	b	arm940_flush_kern_dcache_area
#endif
SYM_FUNC_END(arm940_coherent_user_range)

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache.
 *
 * - addr  - kernel address
 * - size  - region size
 */
SYM_TYPED_FUNC_START(arm940_flush_kern_dcache_area)
	mov	r0, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(arm940_flush_kern_dcache_area)

/*
 * dma_inv_range(start, end)
 *
 * There is no efficient way to invalidate a specified virtual
 * address range. Thus, invalidates all.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
arm940_dma_inv_range:
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c6, 2		@ flush D entry
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_clean_range(start, end)
 *
 * There is no efficient way to clean a specified virtual
 * address range. Thus, cleans all.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
arm940_dma_clean_range:
SYM_TYPED_FUNC_START(cpu_arm940_dcache_clean_area)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c10, 2		@ clean D entry
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(cpu_arm940_dcache_clean_area)

/*
 * dma_flush_range(start, end)
 *
 * There is no efficient way to clean and invalidate a specified
 * virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
SYM_TYPED_FUNC_START(arm940_dma_flush_range)
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D entry
#else
	mcr	p15, 0, r3, c7, c6, 2		@ invalidate D entry
#endif
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(arm940_dma_flush_range)

/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
SYM_TYPED_FUNC_START(arm940_dma_map_area)
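	/*
	 * r2 holds the DMA direction (<linux/dma-direction.h>):
	 * DMA_TO_DEVICE (1) only needs a clean, DMA_FROM_DEVICE (2)
	 * an invalidate, and DMA_BIDIRECTIONAL (0) a full
	 * clean+invalidate.
	 */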
	add	r1, r1, r0			@ r1 = end address
	cmp	r2, #DMA_TO_DEVICE
	beq	arm940_dma_clean_range
	bcs	arm940_dma_inv_range
	b	arm940_dma_flush_range
SYM_FUNC_END(arm940_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
SYM_TYPED_FUNC_START(arm940_dma_unmap_area)
	ret	lr
SYM_FUNC_END(arm940_dma_unmap_area)

	.type	__arm940_setup, #function
__arm940_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c6, 0		@ invalidate D cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB

	mcr	p15, 0, r0, c6, c3, 0		@ disable data area 3~7
	mcr	p15, 0, r0, c6, c4, 0
	mcr	p15, 0, r0, c6, c5, 0
	mcr	p15, 0, r0, c6, c6, 0
	mcr	p15, 0, r0, c6, c7, 0

	mcr	p15, 0, r0, c6, c3, 1		@ disable instruction area 3~7
	mcr	p15, 0, r0, c6, c4, 1
	mcr	p15, 0, r0, c6, c5, 1
	mcr	p15, 0, r0, c6, c6, 1
	mcr	p15, 0, r0, c6, c7, 1

	mov	r0, #0x0000003F			@ base = 0, size = 4GB
	mcr	p15, 0, r0, c6, c0, 0		@ set area 0, default
	mcr	p15, 0, r0, c6, c0, 1
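	/*
	 * Protection region register format: bit 0 enables the region,
	 * bits [5:1] hold a size index N (region size = 2^(N+1) bytes,
	 * so 11 = 4KB and 31 = 4GB) and bits [31:12] the base address.
	 * The pr_val helper from proc-macros.S is used below to pack a
	 * base and a size (in 4KB pages) into this format.
	 */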

	ldr	r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
	ldr	r7, =CONFIG_DRAM_SIZE >> 12	@ size of RAM (must be >= 4KB)
	pr_val	r3, r0, r7, #1
	mcr	p15, 0, r3, c6, c1, 0		@ set area 1, RAM
	mcr	p15, 0, r3, c6, c1, 1

	ldr	r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
	ldr	r7, =CONFIG_FLASH_SIZE >> 12	@ size of FLASH (must be >= 4KB)
	pr_val	r3, r0, r7, #1
	mcr	p15, 0, r3, c6, c2, 0		@ set area 2, ROM/FLASH
	mcr	p15, 0, r3, c6, c2, 1

	mov	r0, #0x06
	mcr	p15, 0, r0, c2, c0, 0		@ Region 1&2 cacheable
	mcr	p15, 0, r0, c2, c0, 1
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r0, #0x00			@ disable whole write buffer
#else
	mov	r0, #0x02			@ Region 1 write buffered
#endif
	mcr	p15, 0, r0, c3, c0, 0

	mov	r0, #0x10000
	sub	r0, r0, #1			@ r0 = 0xffff
	mcr	p15, 0, r0, c5, c0, 0		@ all read/write access
	mcr	p15, 0, r0, c5, c0, 1

	mrc	p15, 0, r0, c1, c0		@ get control register
	orr	r0, r0, #0x00001000		@ I-cache
	orr	r0, r0, #0x00000005		@ MPU/D-cache
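	/*
	 * The value assembled in r0 is returned to the caller, which is
	 * expected to write it back to the CP15 control register to
	 * enable the MPU and the caches.
	 */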

	ret	lr

	.size	__arm940_setup, . - __arm940_setup

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions arm940, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1

	.section ".rodata"

	string	cpu_arch_name, "armv4t"
	string	cpu_elf_name, "v4"
	string	cpu_arm940_name, "ARM940T"

	.align

	.section ".proc.info.init", "a"

	.type	__arm940_proc_info,#object
__arm940_proc_info:
	.long	0x41009400
	.long	0xff00fff0
	.long	0
	initfn	__arm940_setup, __arm940_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm940_name
	.long	arm940_processor_functions
	.long	0
	.long	0
	.long	arm940_cache_fns
	.size	__arm940_proc_info, . - __arm940_proc_info
