@ (stray "Loading..." page-scrape artifact — not part of the original source)
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * linux/arch/arm/mm/arm940.S: utility functions for ARM940T
4 *
5 * Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com)
6 */
7#include <linux/linkage.h>
8#include <linux/init.h>
9#include <linux/pgtable.h>
10#include <asm/assembler.h>
11#include <asm/hwcap.h>
12#include <asm/pgtable-hwdef.h>
13#include <asm/ptrace.h>
14#include "proc-macros.S"
15
16/* ARM940T has a 4KB DCache comprising 256 lines of 4 words */
17#define CACHE_DLINESIZE 16
18#define CACHE_DSEGMENTS 4
19#define CACHE_DENTRIES 64
20
21 .text
/*
 * cpu_arm940_proc_init()
 * cpu_arm940_switch_mm()
 *
 * These are not required on the ARM940T (no MMU, so no page-table
 * context to switch; no extra per-CPU init beyond __arm940_setup),
 * hence both are no-ops.
 */
ENTRY(cpu_arm940_proc_init)
ENTRY(cpu_arm940_switch_mm)
	ret	lr
31
/*
 * cpu_arm940_proc_fin()
 *
 * Shut the caches off before the processor goes away: clear the
 * I-cache (bit 12) and D-cache (bit 2) enables in the CP15 control
 * register.
 */
ENTRY(cpu_arm940_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ read ctrl register
	bic	r0, r0, #0x00001000		@ i-cache enable
	bic	r0, r0, #0x00000004		@ d-cache enable
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr
41
/*
 * cpu_arm940_reset(loc)
 * Params : r0 = address to jump to
 * Notes  : This sets up everything for a reset: caches invalidated,
 *          write buffer drained, caches and MPU disabled, then jump
 *          to the requested location.  Lives in .idmap.text so it is
 *          identity-mapped when called.
 */
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_arm940_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ flush I cache
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x00000005		@ .............c.p  (D-cache + protection unit)
	bic	ip, ip, #0x00001000		@ i-cache
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0
ENDPROC(cpu_arm940_reset)
	.popsection
60
/*
 * cpu_arm940_do_idle()
 *
 * Enter low-power state until an interrupt arrives (CP15 c7, c0, 4
 * "wait for interrupt").  Cache-line aligned so the WFI sequence sits
 * in a single I-cache line.
 */
	.align	5
ENTRY(cpu_arm940_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	ret	lr
68
/*
 * flush_icache_all()
 *
 * Unconditionally invalidate the entire icache (the ARM940T icache
 * is never dirty, so "clean" is a no-op here).
 */
ENTRY(arm940_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(arm940_flush_icache_all)
79
/*
 * flush_user_cache_all()
 */
ENTRY(arm940_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(arm940_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	/* FALLTHROUGH */

/*
 * flush_user_cache_range(start, end, flags)
 *
 * There is no efficient way to flush a range of cache entries
 * in the specified address range.  Thus, flushes all.
 *
 * - start - start address (inclusive)
 * - end   - end address (exclusive)
 * - flags - vm_flags describing address space
 */
ENTRY(arm940_flush_user_cache_range)
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
#else
	@ walk every segment/index pair of the 4KB D-cache by set/way
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4		@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26	@ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
#endif
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
122
/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(arm940_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(arm940_coherent_user_range)
	/* FALLTHROUGH */

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache.  Ranged flushing is not possible, so the whole
 * D-cache is cleaned/invalidated by set/way.
 *
 * - addr - kernel address
 * - size - region size
 */
ENTRY(arm940_flush_kern_dcache_area)
	mov	r0, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4		@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26	@ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
170
/*
 * dma_inv_range(start, end)
 *
 * There is no efficient way to invalidate a specified virtual
 * address range.  Thus, invalidates all.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
arm940_dma_inv_range:
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4		@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26	@ 64 entries
2:	mcr	p15, 0, r3, c7, c6, 2		@ flush D entry
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
191
/*
 * dma_clean_range(start, end)
 *
 * There is no efficient way to clean a specified virtual
 * address range.  Thus, cleans all.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
arm940_dma_clean_range:
ENTRY(cpu_arm940_dcache_clean_area)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	@ write-back cache: clean every set/way; write-through needs no clean
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4		@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26	@ 64 entries
2:	mcr	p15, 0, r3, c7, c10, 2		@ clean D entry
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
215
/*
 * dma_flush_range(start, end)
 *
 * There is no efficient way to clean and invalidate a specified
 * virtual address range, so the whole D-cache is processed.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(arm940_dma_flush_range)
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4		@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26	@ 64 entries
2:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D entry
#else
	mcr	p15, 0, r3, c7, c6, 2		@ invalidate D entry
#endif
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
241
/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 *
 * Dispatch on direction: TO_DEVICE -> clean, FROM_DEVICE -> invalidate,
 * BIDIRECTIONAL -> clean + invalidate.
 */
ENTRY(arm940_dma_map_area)
	add	r1, r1, r0			@ r1 = end address
	cmp	r2, #DMA_TO_DEVICE
	beq	arm940_dma_clean_range
	bcs	arm940_dma_inv_range		@ dir > TO_DEVICE: FROM_DEVICE
	b	arm940_dma_flush_range		@ dir < TO_DEVICE: BIDIRECTIONAL
ENDPROC(arm940_dma_map_area)
255
/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 *
 * Nothing to do at unmap time on this CPU; all maintenance is done
 * in dma_map_area().
 */
ENTRY(arm940_dma_unmap_area)
	ret	lr
ENDPROC(arm940_dma_unmap_area)
265
266 .globl arm940_flush_kern_cache_louis
267 .equ arm940_flush_kern_cache_louis, arm940_flush_kern_cache_all
268
269 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
270 define_cache_functions arm940
271
/*
 * __arm940_setup
 *
 * Early init: invalidate caches, program the MPU protection regions
 * (region 0 = whole 4GB default, region 1 = RAM, region 2 = FLASH),
 * set cacheability/bufferability per region, and return (in r0) the
 * control-register value for the caller to install.
 */
	.type	__arm940_setup, #function
__arm940_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c6, 0		@ invalidate D cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB

	mcr	p15, 0, r0, c6, c3, 0		@ disable data areas 3~7
	mcr	p15, 0, r0, c6, c4, 0
	mcr	p15, 0, r0, c6, c5, 0
	mcr	p15, 0, r0, c6, c6, 0
	mcr	p15, 0, r0, c6, c7, 0

	mcr	p15, 0, r0, c6, c3, 1		@ disable instruction areas 3~7
	mcr	p15, 0, r0, c6, c4, 1
	mcr	p15, 0, r0, c6, c5, 1
	mcr	p15, 0, r0, c6, c6, 1
	mcr	p15, 0, r0, c6, c7, 1

	mov	r0, #0x0000003F			@ base = 0, size = 4GB
	mcr	p15, 0, r0, c6, c0, 0		@ set area 0, default
	mcr	p15, 0, r0, c6, c0, 1

	ldr	r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
	ldr	r7, =CONFIG_DRAM_SIZE >> 12	@ size of RAM (must be >= 4KB)
	pr_val	r3, r0, r7, #1
	mcr	p15, 0, r3, c6, c1, 0		@ set area 1, RAM
	mcr	p15, 0, r3, c6, c1, 1

	ldr	r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
	ldr	r7, =CONFIG_FLASH_SIZE		@ size of FLASH (must be >= 4KB)
	@ fix: size is in r7 (r6 was never initialised, mirroring the RAM
	@ block above which uses r7)
	@ NOTE(review): RAM size is passed >> 12 but FLASH size is not —
	@ confirm the units pr_val expects for CONFIG_FLASH_SIZE
	pr_val	r3, r0, r7, #1
	mcr	p15, 0, r3, c6, c2, 0		@ set area 2, ROM/FLASH
	mcr	p15, 0, r3, c6, c2, 1

	mov	r0, #0x06
	mcr	p15, 0, r0, c2, c0, 0		@ Regions 1&2 cacheable
	mcr	p15, 0, r0, c2, c0, 1
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r0, #0x00			@ disable whole write buffer
#else
	mov	r0, #0x02			@ Region 1 write buffered
#endif
	mcr	p15, 0, r0, c3, c0, 0

	mov	r0, #0x10000
	sub	r0, r0, #1			@ r0 = 0xffff
	mcr	p15, 0, r0, c5, c0, 0		@ all read/write access
	mcr	p15, 0, r0, c5, c0, 1

	mrc	p15, 0, r0, c1, c0		@ get control register
	orr	r0, r0, #0x00001000		@ I-cache
	orr	r0, r0, #0x00000005		@ MPU/D-cache

	ret	lr

	.size	__arm940_setup, . - __arm940_setup
329
330 __INITDATA
331
332 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
333 define_processor_functions arm940, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
334
335 .section ".rodata"
336
337 string cpu_arch_name, "armv4t"
338 string cpu_elf_name, "v4"
339 string cpu_arm940_name, "ARM940T"
340
341 .align
342
343 .section ".proc.info.init", "a"
344
345 .type __arm940_proc_info,#object
346__arm940_proc_info:
347 .long 0x41009400
348 .long 0xff00fff0
349 .long 0
350 initfn __arm940_setup, __arm940_proc_info
351 .long cpu_arch_name
352 .long cpu_elf_name
353 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
354 .long cpu_arm940_name
355 .long arm940_processor_functions
356 .long 0
357 .long 0
358 .long arm940_cache_fns
359 .size __arm940_proc_info, . - __arm940_proc_info
360
1/*
2 * linux/arch/arm/mm/arm940.S: utility functions for ARM940T
3 *
4 * Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11#include <linux/linkage.h>
12#include <linux/init.h>
13#include <asm/assembler.h>
14#include <asm/hwcap.h>
15#include <asm/pgtable-hwdef.h>
16#include <asm/pgtable.h>
17#include <asm/ptrace.h>
18#include "proc-macros.S"
19
20/* ARM940T has a 4KB DCache comprising 256 lines of 4 words */
21#define CACHE_DLINESIZE 16
22#define CACHE_DSEGMENTS 4
23#define CACHE_DENTRIES 64
24
25 .text
/*
 * cpu_arm940_proc_init()
 * cpu_arm940_switch_mm()
 *
 * These are not required on the ARM940T (no MMU, so no page-table
 * context to switch; no extra per-CPU init beyond __arm940_setup),
 * hence both are no-ops.
 */
ENTRY(cpu_arm940_proc_init)
ENTRY(cpu_arm940_switch_mm)
	ret	lr
35
/*
 * cpu_arm940_proc_fin()
 *
 * Shut the caches off before the processor goes away: clear the
 * I-cache (bit 12) and D-cache (bit 2) enables in the CP15 control
 * register.
 */
ENTRY(cpu_arm940_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ read ctrl register
	bic	r0, r0, #0x00001000		@ i-cache enable
	bic	r0, r0, #0x00000004		@ d-cache enable
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr
45
/*
 * cpu_arm940_reset(loc)
 * Params : r0 = address to jump to
 * Notes  : This sets up everything for a reset: caches invalidated,
 *          write buffer drained, caches and MPU disabled, then jump
 *          to the requested location.  Lives in .idmap.text so it is
 *          identity-mapped when called.
 */
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_arm940_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ flush I cache
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x00000005		@ .............c.p  (D-cache + protection unit)
	bic	ip, ip, #0x00001000		@ i-cache
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0
ENDPROC(cpu_arm940_reset)
	.popsection
64
/*
 * cpu_arm940_do_idle()
 *
 * Enter low-power state until an interrupt arrives (CP15 c7, c0, 4
 * "wait for interrupt").  Cache-line aligned so the WFI sequence sits
 * in a single I-cache line.
 */
	.align	5
ENTRY(cpu_arm940_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	ret	lr
72
/*
 * flush_icache_all()
 *
 * Unconditionally invalidate the entire icache (the ARM940T icache
 * is never dirty, so "clean" is a no-op here).
 */
ENTRY(arm940_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(arm940_flush_icache_all)
83
/*
 * flush_user_cache_all()
 */
ENTRY(arm940_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(arm940_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	/* FALLTHROUGH */

/*
 * flush_user_cache_range(start, end, flags)
 *
 * There is no efficient way to flush a range of cache entries
 * in the specified address range.  Thus, flushes all.
 *
 * - start - start address (inclusive)
 * - end   - end address (exclusive)
 * - flags - vm_flags describing address space
 */
ENTRY(arm940_flush_user_cache_range)
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
#else
	@ walk every segment/index pair of the 4KB D-cache by set/way
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4		@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26	@ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
#endif
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
126
/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(arm940_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(arm940_coherent_user_range)
	/* FALLTHROUGH */

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache.  Ranged flushing is not possible, so the whole
 * D-cache is cleaned/invalidated by set/way.
 *
 * - addr - kernel address
 * - size - region size
 */
ENTRY(arm940_flush_kern_dcache_area)
	mov	r0, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4		@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26	@ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
174
/*
 * dma_inv_range(start, end)
 *
 * There is no efficient way to invalidate a specified virtual
 * address range.  Thus, invalidates all.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
arm940_dma_inv_range:
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4		@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26	@ 64 entries
2:	mcr	p15, 0, r3, c7, c6, 2		@ flush D entry
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
195
/*
 * dma_clean_range(start, end)
 *
 * There is no efficient way to clean a specified virtual
 * address range.  Thus, cleans all.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
arm940_dma_clean_range:
ENTRY(cpu_arm940_dcache_clean_area)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	@ write-back cache: clean every set/way; write-through needs no clean
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4		@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26	@ 64 entries
2:	mcr	p15, 0, r3, c7, c10, 2		@ clean D entry
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
219
/*
 * dma_flush_range(start, end)
 *
 * There is no efficient way to clean and invalidate a specified
 * virtual address range, so the whole D-cache is processed.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(arm940_dma_flush_range)
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4		@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26	@ 64 entries
2:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D entry
#else
	mcr	p15, 0, r3, c7, c6, 2		@ invalidate D entry
#endif
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
245
/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 *
 * Dispatch on direction: TO_DEVICE -> clean, FROM_DEVICE -> invalidate,
 * BIDIRECTIONAL -> clean + invalidate.
 */
ENTRY(arm940_dma_map_area)
	add	r1, r1, r0			@ r1 = end address
	cmp	r2, #DMA_TO_DEVICE
	beq	arm940_dma_clean_range
	bcs	arm940_dma_inv_range		@ dir > TO_DEVICE: FROM_DEVICE
	b	arm940_dma_flush_range		@ dir < TO_DEVICE: BIDIRECTIONAL
ENDPROC(arm940_dma_map_area)
259
/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 *
 * Nothing to do at unmap time on this CPU; all maintenance is done
 * in dma_map_area().
 */
ENTRY(arm940_dma_unmap_area)
	ret	lr
ENDPROC(arm940_dma_unmap_area)
269
270 .globl arm940_flush_kern_cache_louis
271 .equ arm940_flush_kern_cache_louis, arm940_flush_kern_cache_all
272
273 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
274 define_cache_functions arm940
275
/*
 * __arm940_setup
 *
 * Early init: invalidate caches, program the MPU protection regions
 * (region 0 = whole 4GB default, region 1 = RAM, region 2 = FLASH),
 * set cacheability/bufferability per region, and return (in r0) the
 * control-register value for the caller to install.
 */
	.type	__arm940_setup, #function
__arm940_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c6, 0		@ invalidate D cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB

	mcr	p15, 0, r0, c6, c3, 0		@ disable data areas 3~7
	mcr	p15, 0, r0, c6, c4, 0
	mcr	p15, 0, r0, c6, c5, 0
	mcr	p15, 0, r0, c6, c6, 0
	mcr	p15, 0, r0, c6, c7, 0

	mcr	p15, 0, r0, c6, c3, 1		@ disable instruction areas 3~7
	mcr	p15, 0, r0, c6, c4, 1
	mcr	p15, 0, r0, c6, c5, 1
	mcr	p15, 0, r0, c6, c6, 1
	mcr	p15, 0, r0, c6, c7, 1

	mov	r0, #0x0000003F			@ base = 0, size = 4GB
	mcr	p15, 0, r0, c6, c0, 0		@ set area 0, default
	mcr	p15, 0, r0, c6, c0, 1

	ldr	r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
	ldr	r7, =CONFIG_DRAM_SIZE >> 12	@ size of RAM (must be >= 4KB)
	pr_val	r3, r0, r7, #1
	mcr	p15, 0, r3, c6, c1, 0		@ set area 1, RAM
	mcr	p15, 0, r3, c6, c1, 1

	ldr	r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
	ldr	r7, =CONFIG_FLASH_SIZE		@ size of FLASH (must be >= 4KB)
	@ fix: size is in r7 (r6 was never initialised, mirroring the RAM
	@ block above which uses r7)
	@ NOTE(review): RAM size is passed >> 12 but FLASH size is not —
	@ confirm the units pr_val expects for CONFIG_FLASH_SIZE
	pr_val	r3, r0, r7, #1
	mcr	p15, 0, r3, c6, c2, 0		@ set area 2, ROM/FLASH
	mcr	p15, 0, r3, c6, c2, 1

	mov	r0, #0x06
	mcr	p15, 0, r0, c2, c0, 0		@ Regions 1&2 cacheable
	mcr	p15, 0, r0, c2, c0, 1
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r0, #0x00			@ disable whole write buffer
#else
	mov	r0, #0x02			@ Region 1 write buffered
#endif
	mcr	p15, 0, r0, c3, c0, 0

	mov	r0, #0x10000
	sub	r0, r0, #1			@ r0 = 0xffff
	mcr	p15, 0, r0, c5, c0, 0		@ all read/write access
	mcr	p15, 0, r0, c5, c0, 1

	mrc	p15, 0, r0, c1, c0		@ get control register
	orr	r0, r0, #0x00001000		@ I-cache
	orr	r0, r0, #0x00000005		@ MPU/D-cache

	ret	lr

	.size	__arm940_setup, . - __arm940_setup
333
334 __INITDATA
335
336 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
337 define_processor_functions arm940, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
338
339 .section ".rodata"
340
341 string cpu_arch_name, "armv4t"
342 string cpu_elf_name, "v4"
343 string cpu_arm940_name, "ARM940T"
344
345 .align
346
347 .section ".proc.info.init", #alloc
348
349 .type __arm940_proc_info,#object
350__arm940_proc_info:
351 .long 0x41009400
352 .long 0xff00fff0
353 .long 0
354 initfn __arm940_setup, __arm940_proc_info
355 .long cpu_arch_name
356 .long cpu_elf_name
357 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
358 .long cpu_arm940_name
359 .long arm940_processor_functions
360 .long 0
361 .long 0
362 .long arm940_cache_fns
363 .size __arm940_proc_info, . - __arm940_proc_info
364