v3.1
 
/*
 *  linux/arch/arm/mm/arm940.S: utility functions for ARM940T
 *
 *  Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/* ARM940T has a 4KB DCache comprising 256 lines of 4 words */
#define CACHE_DLINESIZE	16
#define CACHE_DSEGMENTS	4
#define CACHE_DENTRIES	64
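/*
 * That is 4 segments of 64 entries, each entry a 16-byte line:
 * 4 x 64 x 16 = 4096 bytes, the 4KB (256-line) figure quoted above.
 */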

	.text
/*
 * cpu_arm940_proc_init()
 * cpu_arm940_switch_mm()
 *
 * These are not required.
 */
ENTRY(cpu_arm940_proc_init)
ENTRY(cpu_arm940_switch_mm)
	mov	pc, lr

/*
 * cpu_arm940_proc_fin()
 */
ENTRY(cpu_arm940_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x00001000		@ i-cache
	bic	r0, r0, #0x00000004		@ d-cache
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	mov	pc, lr

/*
 * cpu_arm940_reset(loc)
 * Params  : r0 = address to jump to
 * Notes   : This sets up everything for a reset
 */
ENTRY(cpu_arm940_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ flush I cache
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x00000005		@ .............c.p
	bic	ip, ip, #0x00001000		@ i-cache
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	mov	pc, r0

/*
 * cpu_arm940_do_idle()
 */
	.align	5
ENTRY(cpu_arm940_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	mov	pc, lr

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(arm940_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mov	pc, lr
ENDPROC(arm940_flush_icache_all)

/*
 *	flush_user_cache_all()
 */
ENTRY(arm940_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(arm940_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	/* FALLTHROUGH */

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	There is no efficient way to flush a range of cache entries
 *	in the specified address range. Thus, flushes all.
 *
 *	- start	- start address (inclusive)
 *	- end	- end address (exclusive)
 *	- flags	- vm_flags describing address space
 */
ENTRY(arm940_flush_user_cache_range)
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
#else
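/*
 * The loop below (and the similar loops later in this file) walks the
 * D-cache by index rather than by address: r3 carries the entry number
 * in bits [31:26] and the segment number in bits [5:4], the layout built
 * from the CACHE_DENTRIES and CACHE_DSEGMENTS shifts for the
 * c7, c14, 2 (clean+invalidate by index) operation.
 */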
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
#endif
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm940_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm940_coherent_user_range)
	/* FALLTHROUGH */

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(arm940_flush_kern_dcache_area)
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_inv_range(start, end)
 *
 *	There is no efficient way to invalidate a specified virtual
 *	address range. Thus, invalidates all.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
arm940_dma_inv_range:
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c6, 2		@ flush D entry
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_clean_range(start, end)
 *
 *	There is no efficient way to clean a specified virtual
 *	address range. Thus, cleans all.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
arm940_dma_clean_range:
ENTRY(cpu_arm940_dcache_clean_area)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c10, 2		@ clean D entry
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_flush_range(start, end)
 *
 *	There is no efficient way to clean and invalidate a specified
 *	virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm940_dma_flush_range)
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D entry
#else
	mcr	p15, 0, r3, c7, c6, 2		@ invalidate D entry
#endif
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(arm940_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	arm940_dma_clean_range
	bcs	arm940_dma_inv_range
	b	arm940_dma_flush_range
ENDPROC(arm940_dma_map_area)
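/*
 * The dispatch above relies on the ordering of the dma_data_direction
 * values (DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2):
 * equal means a clean is sufficient for data heading to the device,
 * higher (carry set) means an invalidate is sufficient for data coming
 * from it, and anything lower falls through to the full clean+invalidate
 * for the bidirectional case.
 */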

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(arm940_dma_unmap_area)
	mov	pc, lr
ENDPROC(arm940_dma_unmap_area)

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions arm940

	__CPUINIT

	.type	__arm940_setup, #function
__arm940_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c6, 0		@ invalidate D cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB

	mcr	p15, 0, r0, c6, c3, 0		@ disable data area 3~7
	mcr	p15, 0, r0, c6, c4, 0
	mcr	p15, 0, r0, c6, c5, 0
	mcr	p15, 0, r0, c6, c6, 0
	mcr	p15, 0, r0, c6, c7, 0

	mcr	p15, 0, r0, c6, c3, 1		@ disable instruction area 3~7
	mcr	p15, 0, r0, c6, c4, 1
	mcr	p15, 0, r0, c6, c5, 1
	mcr	p15, 0, r0, c6, c6, 1
	mcr	p15, 0, r0, c6, c7, 1

	mov	r0, #0x0000003F			@ base = 0, size = 4GB
	mcr	p15, 0, r0, c6,	c0, 0		@ set area 0, default
	mcr	p15, 0, r0, c6,	c0, 1

	ldr	r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
	ldr	r1, =(CONFIG_DRAM_SIZE >> 12)	@ size of RAM (must be >= 4KB)
	mov	r2, #10				@ 11 is the minimum (4KB)
1:	add	r2, r2, #1			@ area size *= 2
	movs	r1, r1, lsr #1
	bne	1b				@ count not zero r-shift
	orr	r0, r0, r2, lsl #1		@ the area register value
	orr	r0, r0, #1			@ set enable bit
	mcr	p15, 0, r0, c6,	c1, 0		@ set area 1, RAM
	mcr	p15, 0, r0, c6,	c1, 1
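/*
 * Worked example of the size encoding above, for a hypothetical
 * CONFIG_DRAM_SIZE of 64MB: r1 starts at 64MB >> 12 = 0x4000 and needs
 * 15 right shifts to reach zero, so r2 ends up as 10 + 15 = 25 and the
 * size field written to bits [5:1] selects 2^(25+1) = 64MB.  The minimum
 * legal value, r2 = 11, selects 2^12 = 4KB, matching the comment above.
 */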

	ldr	r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
	ldr	r1, =(CONFIG_FLASH_SIZE >> 12)	@ size of FLASH (must be >= 4KB)
	mov	r2, #10				@ 11 is the minimum (4KB)
1:	add	r2, r2, #1			@ area size *= 2
	movs	r1, r1, lsr #1
	bne	1b				@ count not zero r-shift
	orr	r0, r0, r2, lsl #1		@ the area register value
	orr	r0, r0, #1			@ set enable bit
	mcr	p15, 0, r0, c6,	c2, 0		@ set area 2, ROM/FLASH
	mcr	p15, 0, r0, c6,	c2, 1

	mov	r0, #0x06
	mcr	p15, 0, r0, c2, c0, 0		@ Region 1&2 cacheable
	mcr	p15, 0, r0, c2, c0, 1
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r0, #0x00			@ disable whole write buffer
#else
	mov	r0, #0x02			@ Region 1 write buffered
#endif
	mcr	p15, 0, r0, c3, c0, 0

	mov	r0, #0x10000
	sub	r0, r0, #1			@ r0 = 0xffff
	mcr	p15, 0, r0, c5, c0, 0		@ all read/write access
	mcr	p15, 0, r0, c5, c0, 1

	mrc	p15, 0, r0, c1, c0		@ get control register
	orr	r0, r0, #0x00001000		@ I-cache
	orr	r0, r0, #0x00000005		@ MPU/D-cache

	mov	pc, lr

	.size	__arm940_setup, . - __arm940_setup

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions arm940, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1

	.section ".rodata"

	string	cpu_arch_name, "armv4t"
	string	cpu_elf_name, "v4"
	string	cpu_arm940_name, "ARM940T"

	.align

	.section ".proc.info.init", #alloc, #execinstr

	.type	__arm940_proc_info,#object
__arm940_proc_info:
	.long	0x41009400
	.long	0xff00fff0
	.long	0
	b	__arm940_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm940_name
	.long	arm940_processor_functions
	.long	0
	.long	0
	.long	arm940_cache_fns
	.size	__arm940_proc_info, . - __arm940_proc_info
v6.2
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/arm940.S: utility functions for ARM940T
 *
 *  Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com)
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/* ARM940T has a 4KB DCache comprising 256 lines of 4 words */
#define CACHE_DLINESIZE	16
#define CACHE_DSEGMENTS	4
#define CACHE_DENTRIES	64

	.text
/*
 * cpu_arm940_proc_init()
 * cpu_arm940_switch_mm()
 *
 * These are not required.
 */
ENTRY(cpu_arm940_proc_init)
ENTRY(cpu_arm940_switch_mm)
	ret	lr

/*
 * cpu_arm940_proc_fin()
 */
ENTRY(cpu_arm940_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x00001000		@ i-cache
	bic	r0, r0, #0x00000004		@ d-cache
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr

/*
 * cpu_arm940_reset(loc)
 * Params  : r0 = address to jump to
 * Notes   : This sets up everything for a reset
 */
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_arm940_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ flush I cache
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x00000005		@ .............c.p
	bic	ip, ip, #0x00001000		@ i-cache
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0
ENDPROC(cpu_arm940_reset)
	.popsection
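	@ Like the other cpu_*_reset implementations, the reset code above is
	@ placed in .idmap.text so the generic reset path can branch to it
	@ while the caches (and, on CPUs that have one, the MMU) are being
	@ turned off.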

/*
 * cpu_arm940_do_idle()
 */
	.align	5
ENTRY(cpu_arm940_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	ret	lr

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(arm940_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(arm940_flush_icache_all)

/*
 *	flush_user_cache_all()
 */
ENTRY(arm940_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(arm940_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	/* FALLTHROUGH */

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	There is no efficient way to flush a range of cache entries
 *	in the specified address range. Thus, flushes all.
 *
 *	- start	- start address (inclusive)
 *	- end	- end address (exclusive)
 *	- flags	- vm_flags describing address space
 */
ENTRY(arm940_flush_user_cache_range)
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
#else
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
#endif
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm940_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm940_coherent_user_range)
	/* FALLTHROUGH */

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(arm940_flush_kern_dcache_area)
	mov	r0, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_inv_range(start, end)
 *
 *	There is no efficient way to invalidate a specified virtual
 *	address range. Thus, invalidates all.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
arm940_dma_inv_range:
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c6, 2		@ flush D entry
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_clean_range(start, end)
 *
 *	There is no efficient way to clean a specified virtual
 *	address range. Thus, cleans all.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
arm940_dma_clean_range:
ENTRY(cpu_arm940_dcache_clean_area)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c10, 2		@ clean D entry
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	There is no efficient way to clean and invalidate a specified
 *	virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm940_dma_flush_range)
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D entry
#else
	mcr	p15, 0, r3, c7, c6, 2		@ invalidate D entry
#endif
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(arm940_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	arm940_dma_clean_range
	bcs	arm940_dma_inv_range
	b	arm940_dma_flush_range
ENDPROC(arm940_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(arm940_dma_unmap_area)
	ret	lr
ENDPROC(arm940_dma_unmap_area)

	.globl	arm940_flush_kern_cache_louis
	.equ	arm940_flush_kern_cache_louis, arm940_flush_kern_cache_all
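	@ flush_kern_cache_louis flushes to the Level of Unification Inner
	@ Shareable; with only a single cache level on this CPU it can simply
	@ alias the full flush_kern_cache_all above.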

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions arm940

	.type	__arm940_setup, #function
__arm940_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c6, 0		@ invalidate D cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB

	mcr	p15, 0, r0, c6, c3, 0		@ disable data area 3~7
	mcr	p15, 0, r0, c6, c4, 0
	mcr	p15, 0, r0, c6, c5, 0
	mcr	p15, 0, r0, c6, c6, 0
	mcr	p15, 0, r0, c6, c7, 0

	mcr	p15, 0, r0, c6, c3, 1		@ disable instruction area 3~7
	mcr	p15, 0, r0, c6, c4, 1
	mcr	p15, 0, r0, c6, c5, 1
	mcr	p15, 0, r0, c6, c6, 1
	mcr	p15, 0, r0, c6, c7, 1

	mov	r0, #0x0000003F			@ base = 0, size = 4GB
	mcr	p15, 0, r0, c6,	c0, 0		@ set area 0, default
	mcr	p15, 0, r0, c6,	c0, 1

	ldr	r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
	ldr	r7, =CONFIG_DRAM_SIZE >> 12	@ size of RAM (must be >= 4KB)
	pr_val	r3, r0, r7, #1
	mcr	p15, 0, r3, c6,	c1, 0		@ set area 1, RAM
	mcr	p15, 0, r3, c6,	c1, 1
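	@ pr_val (presumably provided by proc-macros.S) appears to pack the
	@ protection region register value from the base in r0, the size in
	@ r7 and the enable bit, replacing the open-coded size loop that the
	@ older (v3.1) copy of this file used.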

	ldr	r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
	ldr	r7, =CONFIG_FLASH_SIZE		@ size of FLASH (must be >= 4KB)
	pr_val	r3, r0, r6, #1
	mcr	p15, 0, r3, c6,	c2, 0		@ set area 2, ROM/FLASH
	mcr	p15, 0, r3, c6,	c2, 1

	mov	r0, #0x06
	mcr	p15, 0, r0, c2, c0, 0		@ Region 1&2 cacheable
	mcr	p15, 0, r0, c2, c0, 1
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r0, #0x00			@ disable whole write buffer
#else
	mov	r0, #0x02			@ Region 1 write buffered
#endif
	mcr	p15, 0, r0, c3, c0, 0

	mov	r0, #0x10000
	sub	r0, r0, #1			@ r0 = 0xffff
	mcr	p15, 0, r0, c5, c0, 0		@ all read/write access
	mcr	p15, 0, r0, c5, c0, 1

	mrc	p15, 0, r0, c1, c0		@ get control register
	orr	r0, r0, #0x00001000		@ I-cache
	orr	r0, r0, #0x00000005		@ MPU/D-cache

	ret	lr

	.size	__arm940_setup, . - __arm940_setup

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions arm940, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1

	.section ".rodata"

	string	cpu_arch_name, "armv4t"
	string	cpu_elf_name, "v4"
	string	cpu_arm940_name, "ARM940T"

	.align

	.section ".proc.info.init", "a"

	.type	__arm940_proc_info,#object
__arm940_proc_info:
	.long	0x41009400
	.long	0xff00fff0
	.long	0
	initfn	__arm940_setup, __arm940_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm940_name
	.long	arm940_processor_functions
	.long	0
	.long	0
	.long	arm940_cache_fns
	.size	__arm940_proc_info, . - __arm940_proc_info
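	@ Note that initfn records __arm940_setup as an offset relative to
	@ __arm940_proc_info, so the table holds a position-independent value
	@ rather than the embedded "b __arm940_setup" branch used in older
	@ kernels such as the v3.1 copy above.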