v6.13.7
  1/* SPDX-License-Identifier: GPL-2.0-only */
  2/*
  3 *  linux/arch/arm/mm/arm940.S: utility functions for ARM940T
  4 *
  5 *  Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com)
  6 */
  7#include <linux/linkage.h>
  8#include <linux/init.h>
  9#include <linux/cfi_types.h>
 10#include <linux/pgtable.h>
 11#include <asm/assembler.h>
 12#include <asm/hwcap.h>
 13#include <asm/pgtable-hwdef.h>
 14#include <asm/ptrace.h>
 15#include "proc-macros.S"
 16
 17/* ARM940T has a 4KB DCache comprising 256 lines of 4 words */
 18#define CACHE_DLINESIZE	16
 19#define CACHE_DSEGMENTS	4
 20#define CACHE_DENTRIES	64
 21
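The geometry above fixes the index word used by every clean/invalidate loop in this file: the entry (line) number goes in bits [31:26] and the segment number in bits [5:4], which is where the (CACHE_DENTRIES - 1) << 26 and (CACHE_DSEGMENTS - 1) << 4 constants below come from. A small, freestanding C sketch of that encoding; the helper name is illustrative, not a kernel function:

#include <assert.h>
#include <stdint.h>

#define CACHE_DLINESIZE	16	/* bytes per line */
#define CACHE_DSEGMENTS	4	/* segments */
#define CACHE_DENTRIES	64	/* lines per segment */

/* Value written by the c7 index operations: entry in [31:26], segment in [5:4]. */
static uint32_t arm940_index_word(unsigned int segment, unsigned int entry)
{
	return ((uint32_t)entry << 26) | ((uint32_t)segment << 4);
}

int main(void)
{
	/* 4 segments x 64 entries x 16-byte lines is the 4KB D-cache noted above. */
	assert(CACHE_DSEGMENTS * CACHE_DENTRIES * CACHE_DLINESIZE == 4096);
	/* The loops below count down from the highest index: (63 << 26) | (3 << 4). */
	assert(arm940_index_word(3, 63) == ((63u << 26) | (3u << 4)));
	return 0;
}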
 22	.text
 23/*
 24 * cpu_arm940_proc_init()
 25 * cpu_arm940_switch_mm()
 26 *
 27 * These are not required.
 28 */
 29SYM_TYPED_FUNC_START(cpu_arm940_proc_init)
 30	ret	lr
 31SYM_FUNC_END(cpu_arm940_proc_init)
 32
 33SYM_TYPED_FUNC_START(cpu_arm940_switch_mm)
 34	ret	lr
 35SYM_FUNC_END(cpu_arm940_switch_mm)
 36
 37/*
 38 * cpu_arm940_proc_fin()
 39 */
 40SYM_TYPED_FUNC_START(cpu_arm940_proc_fin)
 41	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
 42	bic	r0, r0, #0x00001000		@ i-cache
 43	bic	r0, r0, #0x00000004		@ d-cache
 44	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
 45	ret	lr
 46SYM_FUNC_END(cpu_arm940_proc_fin)
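cpu_arm940_proc_fin, cpu_arm940_reset and __arm940_setup below all manipulate the same CP15 control register bits: 0x00001000 enables the I-cache, 0x00000004 the D-cache and 0x00000001 the protection unit (the "c" and "p" of the ".............c.p" comment in the reset path). A C rendering of those masks and of what proc_fin does with them, for reference only; the macro names are made up for this sketch, not kernel definitions:

#include <stdint.h>

#define ARM940_CR_PU_ENABLE	(1u << 0)	/* 0x00000001: protection unit */
#define ARM940_CR_DCACHE_ENABLE	(1u << 2)	/* 0x00000004: data cache */
#define ARM940_CR_ICACHE_ENABLE	(1u << 12)	/* 0x00001000: instruction cache */

/* cpu_arm940_proc_fin: read the control register, clear both cache enables. */
static uint32_t arm940_proc_fin_cr(uint32_t cr)
{
	return cr & ~(ARM940_CR_ICACHE_ENABLE | ARM940_CR_DCACHE_ENABLE);
}

int main(void)
{
	uint32_t cr = ARM940_CR_PU_ENABLE | ARM940_CR_DCACHE_ENABLE |
		      ARM940_CR_ICACHE_ENABLE;	/* caches and MPU on */

	cr = arm940_proc_fin_cr(cr);		/* caches off, MPU still on */
	return cr == ARM940_CR_PU_ENABLE ? 0 : 1;
}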
 47
 48/*
 49 * cpu_arm940_reset(loc)
 50 * Params  : r0 = address to jump to
 51 * Notes   : This sets up everything for a reset
 52 */
 53	.pushsection	.idmap.text, "ax"
 54SYM_TYPED_FUNC_START(cpu_arm940_reset)
 55	mov	ip, #0
 56	mcr	p15, 0, ip, c7, c5, 0		@ flush I cache
 57	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
 58	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
 59	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
 60	bic	ip, ip, #0x00000005		@ .............c.p
 61	bic	ip, ip, #0x00001000		@ i-cache
 62	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 63	ret	r0
 64SYM_FUNC_END(cpu_arm940_reset)
 65	.popsection
 66
 67/*
 68 * cpu_arm940_do_idle()
 69 */
 70	.align	5
 71SYM_TYPED_FUNC_START(cpu_arm940_do_idle)
 72	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
 73	ret	lr
 74SYM_FUNC_END(cpu_arm940_do_idle)
 75
 76/*
 77 *	flush_icache_all()
 78 *
 79 *	Unconditionally clean and invalidate the entire icache.
 80 */
 81SYM_TYPED_FUNC_START(arm940_flush_icache_all)
 82	mov	r0, #0
 83	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
 84	ret	lr
 85SYM_FUNC_END(arm940_flush_icache_all)
 86
 87/*
 88 *	flush_user_cache_all()
 89 */
 90SYM_FUNC_ALIAS(arm940_flush_user_cache_all, arm940_flush_kern_cache_all)
 91
 92/*
 93 *	flush_kern_cache_all()
 94 *
 95 *	Clean and invalidate the entire cache.
 96 */
 97SYM_TYPED_FUNC_START(arm940_flush_kern_cache_all)
 98	mov	r2, #VM_EXEC
 99	b	arm940_flush_user_cache_range
100SYM_FUNC_END(arm940_flush_kern_cache_all)
101
102/*
103 *	flush_user_cache_range(start, end, flags)
104 *
105 *	There is no efficient way to flush a range of cache entries
 106 *	in the specified address range. Thus, the entire cache is flushed.
107 *
108 *	- start	- start address (inclusive)
109 *	- end	- end address (exclusive)
110 *	- flags	- vm_flags describing address space
111 */
112SYM_TYPED_FUNC_START(arm940_flush_user_cache_range)
113	mov	ip, #0
114#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
115	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
116#else
117	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1181:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
1192:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
120	subs	r3, r3, #1 << 26
121	bcs	2b				@ entries 63 to 0
122	subs	r1, r1, #1 << 4
123	bcs	1b				@ segments 3 to 0
124#endif
125	tst	r2, #VM_EXEC
126	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
127	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
128	ret	lr
129SYM_FUNC_END(arm940_flush_user_cache_range)
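As the comment says, the ARM940T offers no cheap way to operate on just a range, so the routine walks every segment/entry pair with the c7, c14, 2 clean-and-invalidate-by-index operation, then optionally invalidates the I-cache and drains the write buffer. The nested countdown is equivalent to the following C sketch, where clean_and_invalidate_dcache_index() stands in for the coprocessor write and is not a real kernel helper:

#include <stdint.h>

#define CACHE_DSEGMENTS	4
#define CACHE_DENTRIES	64

/* Placeholder for: mcr p15, 0, index, c7, c14, 2 (clean+invalidate D line by index). */
static void clean_and_invalidate_dcache_index(uint32_t index)
{
	(void)index;
}

/* Same traversal order as the assembly: segments 3..0, entries 63..0 in each. */
static void arm940_clean_invalidate_dcache_all(void)
{
	for (int seg = CACHE_DSEGMENTS - 1; seg >= 0; seg--)
		for (int entry = CACHE_DENTRIES - 1; entry >= 0; entry--)
			clean_and_invalidate_dcache_index(((uint32_t)entry << 26) |
							  ((uint32_t)seg << 4));
}

int main(void)
{
	arm940_clean_invalidate_dcache_all();
	return 0;
}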
130
131/*
132 *	coherent_kern_range(start, end)
133 *
134 *	Ensure coherency between the Icache and the Dcache in the
135 *	region described by start, end.  If you have non-snooping
136 *	Harvard caches, you need to implement this function.
137 *
138 *	- start	- virtual start address
139 *	- end	- virtual end address
140 */
141SYM_TYPED_FUNC_START(arm940_coherent_kern_range)
142	b	arm940_flush_kern_dcache_area
143SYM_FUNC_END(arm940_coherent_kern_range)
144
145/*
146 *	coherent_user_range(start, end)
147 *
148 *	Ensure coherency between the Icache and the Dcache in the
149 *	region described by start, end.  If you have non-snooping
150 *	Harvard caches, you need to implement this function.
151 *
152 *	- start	- virtual start address
153 *	- end	- virtual end address
154 */
155SYM_TYPED_FUNC_START(arm940_coherent_user_range)
156#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
157	b	arm940_flush_kern_dcache_area
158#endif
159SYM_FUNC_END(arm940_coherent_user_range)
160
161/*
162 *	flush_kern_dcache_area(void *addr, size_t size)
163 *
164 *	Ensure no D cache aliasing occurs, either with itself or
165 *	the I cache
166 *
167 *	- addr	- kernel address
168 *	- size	- region size
169 */
170SYM_TYPED_FUNC_START(arm940_flush_kern_dcache_area)
171	mov	r0, #0
172	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1731:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
1742:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
175	subs	r3, r3, #1 << 26
176	bcs	2b				@ entries 63 to 0
177	subs	r1, r1, #1 << 4
 178	bcs	1b				@ segments 3 to 0
179	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
180	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
181	ret	lr
182SYM_FUNC_END(arm940_flush_kern_dcache_area)
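flush_kern_dcache_area is what keeps newly written code coherent on this non-snooping Harvard core: dirty data is pushed out of the D-cache, the I-cache is invalidated so stale instructions cannot be fetched, and the write buffer is drained. arm940_coherent_kern_range branches here, and arm940_coherent_user_range either branches or falls through to it depending on CONFIG_CFI_CLANG. The sequence, outlined in C with placeholder helpers standing in for the cp15 operations (none of these are kernel APIs):

/* Placeholders for the cp15 operations used by arm940_flush_kern_dcache_area. */
static void dcache_clean_and_invalidate_all(void) { }	/* c7, c14, 2 loop */
static void icache_invalidate_all(void)           { }	/* c7, c5, 0       */
static void drain_write_buffer(void)              { }	/* c7, c10, 4      */

/* Mirrors the assembly: clean/invalidate D, invalidate I, drain the write buffer. */
static void arm940_make_code_visible(void)
{
	dcache_clean_and_invalidate_all();
	icache_invalidate_all();
	drain_write_buffer();
}

int main(void)
{
	arm940_make_code_visible();
	return 0;
}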
183
184/*
185 *	dma_inv_range(start, end)
186 *
 187 *	There is no efficient way to invalidate a specified virtual
 188 *	address range. Thus, the entire cache is invalidated.
189 *
190 *	- start	- virtual start address
191 *	- end	- virtual end address
192 */
193arm940_dma_inv_range:
194	mov	ip, #0
195	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1961:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
1972:	mcr	p15, 0, r3, c7, c6, 2		@ invalidate D entry
198	subs	r3, r3, #1 << 26
199	bcs	2b				@ entries 63 to 0
200	subs	r1, r1, #1 << 4
 201	bcs	1b				@ segments 3 to 0
202	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
203	ret	lr
204
205/*
206 *	dma_clean_range(start, end)
207 *
 208 *	There is no efficient way to clean a specified virtual
 209 *	address range. Thus, the entire cache is cleaned.
210 *
211 *	- start	- virtual start address
212 *	- end	- virtual end address
213 */
214arm940_dma_clean_range:
215SYM_TYPED_FUNC_START(cpu_arm940_dcache_clean_area)
216	mov	ip, #0
217#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
218	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
2191:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2202:	mcr	p15, 0, r3, c7, c10, 2		@ clean D entry
221	subs	r3, r3, #1 << 26
222	bcs	2b				@ entries 63 to 0
223	subs	r1, r1, #1 << 4
 224	bcs	1b				@ segments 3 to 0
225#endif
226	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
227	ret	lr
228SYM_FUNC_END(cpu_arm940_dcache_clean_area)
229
230/*
231 *	dma_flush_range(start, end)
232 *
 233 *	There is no efficient way to clean and invalidate a specified
234 *	virtual address range.
235 *
236 *	- start	- virtual start address
237 *	- end	- virtual end address
238 */
239SYM_TYPED_FUNC_START(arm940_dma_flush_range)
240	mov	ip, #0
241	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
2421:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2432:
244#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
245	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D entry
246#else
247	mcr	p15, 0, r3, c7, c6, 2		@ invalidate D entry
248#endif
249	subs	r3, r3, #1 << 26
250	bcs	2b				@ entries 63 to 0
251	subs	r1, r1, #1 << 4
 252	bcs	1b				@ segments 3 to 0
253	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
254	ret	lr
255SYM_FUNC_END(arm940_dma_flush_range)
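The #ifdef above encodes a simple rule: with a write-through D-cache (CONFIG_CPU_DCACHE_WRITETHROUGH) memory is never stale, so a DMA flush only needs to invalidate; with a write-back cache, dirty lines have to be cleaned out first, hence the combined clean-and-invalidate operation. The same choice in C, with placeholder helpers standing in for the two c7 index operations:

/* Placeholders for: mcr p15, 0, idx, c7, c6, 2 and mcr p15, 0, idx, c7, c14, 2. */
static void dcache_invalidate_index(unsigned int index)           { (void)index; }
static void dcache_clean_and_invalidate_index(unsigned int index) { (void)index; }

static void arm940_dma_flush_index(unsigned int index)
{
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	/* Write-through: RAM already holds the data, just drop the line. */
	dcache_invalidate_index(index);
#else
	/* Write-back: push dirty data to RAM, then drop the line. */
	dcache_clean_and_invalidate_index(index);
#endif
}

int main(void)
{
	arm940_dma_flush_index(0);
	return 0;
}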
256
257/*
258 *	dma_map_area(start, size, dir)
259 *	- start	- kernel virtual start address
260 *	- size	- size of region
261 *	- dir	- DMA direction
262 */
263SYM_TYPED_FUNC_START(arm940_dma_map_area)
264	add	r1, r1, r0
265	cmp	r2, #DMA_TO_DEVICE
266	beq	arm940_dma_clean_range
267	bcs	arm940_dma_inv_range
268	b	arm940_dma_flush_range
269SYM_FUNC_END(arm940_dma_map_area)
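The cmp/beq/bcs sequence above is a three-way dispatch on the DMA direction: DMA_TO_DEVICE (1) matches the beq and only cleans, anything greater (DMA_FROM_DEVICE, 2) takes the bcs and only invalidates, and DMA_BIDIRECTIONAL (0) falls through to the full clean-and-invalidate. In C it reads roughly as below; the enum values are the kernel's dma_data_direction values, the helper functions are placeholders:

enum dma_data_direction {		/* values as in <linux/dma-direction.h> */
	DMA_BIDIRECTIONAL = 0,
	DMA_TO_DEVICE = 1,
	DMA_FROM_DEVICE = 2,
};

static void arm940_dma_clean_all(void)      { }	/* arm940_dma_clean_range */
static void arm940_dma_invalidate_all(void) { }	/* arm940_dma_inv_range   */
static void arm940_dma_flush_all(void)      { }	/* arm940_dma_flush_range */

static void arm940_dma_map(enum dma_data_direction dir)
{
	if (dir == DMA_TO_DEVICE)
		arm940_dma_clean_all();		/* beq */
	else if (dir > DMA_TO_DEVICE)
		arm940_dma_invalidate_all();	/* bcs: unsigned higher-or-same */
	else
		arm940_dma_flush_all();		/* fall through to the b */
}

int main(void)
{
	arm940_dma_map(DMA_FROM_DEVICE);
	return 0;
}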
270
271/*
272 *	dma_unmap_area(start, size, dir)
273 *	- start	- kernel virtual start address
274 *	- size	- size of region
275 *	- dir	- DMA direction
276 */
277SYM_TYPED_FUNC_START(arm940_dma_unmap_area)
278	ret	lr
279SYM_FUNC_END(arm940_dma_unmap_area)
280
281	.type	__arm940_setup, #function
282__arm940_setup:
283	mov	r0, #0
284	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
285	mcr	p15, 0, r0, c7, c6, 0		@ invalidate D cache
286	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
287
288	mcr	p15, 0, r0, c6, c3, 0		@ disable data area 3~7
289	mcr	p15, 0, r0, c6, c4, 0
290	mcr	p15, 0, r0, c6, c5, 0
291	mcr	p15, 0, r0, c6, c6, 0
292	mcr	p15, 0, r0, c6, c7, 0
293
294	mcr	p15, 0, r0, c6, c3, 1		@ disable instruction area 3~7
295	mcr	p15, 0, r0, c6, c4, 1
296	mcr	p15, 0, r0, c6, c5, 1
297	mcr	p15, 0, r0, c6, c6, 1
298	mcr	p15, 0, r0, c6, c7, 1
299
300	mov	r0, #0x0000003F			@ base = 0, size = 4GB
301	mcr	p15, 0, r0, c6,	c0, 0		@ set area 0, default
302	mcr	p15, 0, r0, c6,	c0, 1
303
304	ldr	r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
305	ldr	r7, =CONFIG_DRAM_SIZE >> 12	@ size of RAM (must be >= 4KB)
306	pr_val	r3, r0, r7, #1
307	mcr	p15, 0, r3, c6,	c1, 0		@ set area 1, RAM
308	mcr	p15, 0, r3, c6,	c1, 1
309
310	ldr	r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
 311	ldr	r7, =CONFIG_FLASH_SIZE >> 12	@ size of FLASH (must be >= 4KB)
 312	pr_val	r3, r0, r7, #1
313	mcr	p15, 0, r3, c6,	c2, 0		@ set area 2, ROM/FLASH
314	mcr	p15, 0, r3, c6,	c2, 1
315
316	mov	r0, #0x06
317	mcr	p15, 0, r0, c2, c0, 0		@ Region 1&2 cacheable
318	mcr	p15, 0, r0, c2, c0, 1
319#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
320	mov	r0, #0x00			@ disable whole write buffer
321#else
 322	mov	r0, #0x02			@ Region 1 write buffered
323#endif
324	mcr	p15, 0, r0, c3, c0, 0
325
326	mov	r0, #0x10000
327	sub	r0, r0, #1			@ r0 = 0xffff
328	mcr	p15, 0, r0, c5, c0, 0		@ all read/write access
329	mcr	p15, 0, r0, c5, c0, 1
330
331	mrc	p15, 0, r0, c1, c0		@ get control register
332	orr	r0, r0, #0x00001000		@ I-cache
333	orr	r0, r0, #0x00000005		@ MPU/D-cache
334
335	ret	lr
336
337	.size	__arm940_setup, . - __arm940_setup
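__arm940_setup programs the ARM940T protection unit: each c6 region register takes a 4KB-aligned base in bits [31:12], a size field in bits [5:1] equal to log2(size) - 1, and an enable bit 0, which is why 0x0000003F means base 0, 4GB, enabled for the default region. Regions 1 and 2 are then made cacheable (the 0x06 bitmap in c2), region 1 optionally bufferable (0x02 in c3), and all regions get read/write permission (0xffff in c5, two bits per region). The pr_val macro from proc-macros.S is expected to build the same region value as this freestanding C sketch, but the sketch only illustrates the encoding, it is not the macro's definition:

#include <assert.h>
#include <stdint.h>

/* c6 region register: [31:12] base, [5:1] size field = log2(size) - 1, [0] enable. */
static uint32_t arm940_region_val(uint32_t base, uint64_t size_bytes)
{
	uint32_t field = 0;

	while ((1ull << (field + 1)) < size_bytes)
		field++;			/* field 11 = 4KB ... field 31 = 4GB */
	return (base & 0xFFFFF000u) | (field << 1) | 1u;
}

int main(void)
{
	/* Default region 0: base 0, 4GB -> 0x0000003F, as in the code above. */
	assert(arm940_region_val(0, 1ull << 32) == 0x0000003Fu);
	/* The minimum 4KB region uses the minimum size field of 11. */
	assert(((arm940_region_val(0, 4096) >> 1) & 0x1F) == 11);
	return 0;
}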
338
339	__INITDATA
340
341	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
342	define_processor_functions arm940, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
343
344	.section ".rodata"
345
346	string	cpu_arch_name, "armv4t"
347	string	cpu_elf_name, "v4"
348	string	cpu_arm940_name, "ARM940T"
349
350	.align
351
352	.section ".proc.info.init", "a"
353
354	.type	__arm940_proc_info,#object
355__arm940_proc_info:
356	.long	0x41009400
357	.long	0xff00fff0
358	.long	0
359	initfn	__arm940_setup, __arm940_proc_info
360	.long	cpu_arch_name
361	.long	cpu_elf_name
362	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
363	.long	cpu_arm940_name
364	.long	arm940_processor_functions
365	.long	0
366	.long	0
367	.long	arm940_cache_fns
368	.size	__arm940_proc_info, . - __arm940_proc_info
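The __arm940_proc_info record is matched against the CPU ID (value 0x41009400 under mask 0xff00fff0) by the early boot code, which calls the setup function and later uses the remaining pointers. Read back as a C structure, the record corresponds field-for-field to something like the sketch below; this mirrors the layout emitted here and is only an approximation of the kernel's struct proc_info_list in <asm/procinfo.h>:

/* Approximate view of the record above; field names are descriptive, not the
 * kernel's exact ones, and the two zero words are unused slots (TLB and
 * user-copy function tables are not needed on this nommu part). */
struct arm940_proc_info_sketch {
	unsigned int	cpu_val;	/* 0x41009400 */
	unsigned int	cpu_mask;	/* 0xff00fff0 */
	unsigned long	mmu_flags;	/* 0: no MMU */
	unsigned long	init;		/* __arm940_setup, stored as an offset by initfn */
	const char	*arch_name;	/* "armv4t" */
	const char	*elf_name;	/* "v4" */
	unsigned int	elf_hwcap;	/* HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB */
	const char	*cpu_name;	/* "ARM940T" */
	const void	*proc;		/* arm940_processor_functions */
	const void	*tlb;		/* 0 */
	const void	*user;		/* 0 */
	const void	*cache;		/* arm940_cache_fns */
};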
369
v3.15
 
  1/*
  2 *  linux/arch/arm/mm/arm940.S: utility functions for ARM940T
  3 *
  4 *  Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com)
  5 *
  6 * This program is free software; you can redistribute it and/or modify
  7 * it under the terms of the GNU General Public License version 2 as
  8 * published by the Free Software Foundation.
  9 *
 10 */
 11#include <linux/linkage.h>
 12#include <linux/init.h>
 13#include <asm/assembler.h>
 14#include <asm/hwcap.h>
 15#include <asm/pgtable-hwdef.h>
 16#include <asm/pgtable.h>
 17#include <asm/ptrace.h>
 18#include "proc-macros.S"
 19
 20/* ARM940T has a 4KB DCache comprising 256 lines of 4 words */
 21#define CACHE_DLINESIZE	16
 22#define CACHE_DSEGMENTS	4
 23#define CACHE_DENTRIES	64
 24
 25	.text
 26/*
 27 * cpu_arm940_proc_init()
 28 * cpu_arm940_switch_mm()
 29 *
 30 * These are not required.
 31 */
 32ENTRY(cpu_arm940_proc_init)
 33ENTRY(cpu_arm940_switch_mm)
 34	mov	pc, lr
 35
 36/*
 37 * cpu_arm940_proc_fin()
 38 */
 39ENTRY(cpu_arm940_proc_fin)
 40	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
 41	bic	r0, r0, #0x00001000		@ i-cache
 42	bic	r0, r0, #0x00000004		@ d-cache
 43	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
 44	mov	pc, lr
 45
 46/*
 47 * cpu_arm940_reset(loc)
 48 * Params  : r0 = address to jump to
 49 * Notes   : This sets up everything for a reset
 50 */
 51	.pushsection	.idmap.text, "ax"
 52ENTRY(cpu_arm940_reset)
 53	mov	ip, #0
 54	mcr	p15, 0, ip, c7, c5, 0		@ flush I cache
 55	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
 56	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
 57	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
 58	bic	ip, ip, #0x00000005		@ .............c.p
 59	bic	ip, ip, #0x00001000		@ i-cache
 60	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
 61	mov	pc, r0
 62ENDPROC(cpu_arm940_reset)
 63	.popsection
 64
 65/*
 66 * cpu_arm940_do_idle()
 67 */
 68	.align	5
 69ENTRY(cpu_arm940_do_idle)
 70	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
 71	mov	pc, lr
 72
 73/*
 74 *	flush_icache_all()
 75 *
 76 *	Unconditionally clean and invalidate the entire icache.
 77 */
 78ENTRY(arm940_flush_icache_all)
 79	mov	r0, #0
 80	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
 81	mov	pc, lr
 82ENDPROC(arm940_flush_icache_all)
 83
 84/*
 85 *	flush_user_cache_all()
 86 */
 87ENTRY(arm940_flush_user_cache_all)
 88	/* FALLTHROUGH */
 89
 90/*
 91 *	flush_kern_cache_all()
 92 *
 93 *	Clean and invalidate the entire cache.
 94 */
 95ENTRY(arm940_flush_kern_cache_all)
 96	mov	r2, #VM_EXEC
 97	/* FALLTHROUGH */
 98
 99/*
100 *	flush_user_cache_range(start, end, flags)
101 *
102 *	There is no efficient way to flush a range of cache entries
 103 *	in the specified address range. Thus, the entire cache is flushed.
104 *
105 *	- start	- start address (inclusive)
106 *	- end	- end address (exclusive)
107 *	- flags	- vm_flags describing address space
108 */
109ENTRY(arm940_flush_user_cache_range)
110	mov	ip, #0
111#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
112	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
113#else
114	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1151:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
1162:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
117	subs	r3, r3, #1 << 26
118	bcs	2b				@ entries 63 to 0
119	subs	r1, r1, #1 << 4
120	bcs	1b				@ segments 3 to 0
121#endif
122	tst	r2, #VM_EXEC
123	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
124	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
125	mov	pc, lr
126
127/*
128 *	coherent_kern_range(start, end)
129 *
130 *	Ensure coherency between the Icache and the Dcache in the
131 *	region described by start, end.  If you have non-snooping
132 *	Harvard caches, you need to implement this function.
133 *
134 *	- start	- virtual start address
135 *	- end	- virtual end address
136 */
137ENTRY(arm940_coherent_kern_range)
138	/* FALLTHROUGH */
139
140/*
141 *	coherent_user_range(start, end)
142 *
143 *	Ensure coherency between the Icache and the Dcache in the
144 *	region described by start, end.  If you have non-snooping
145 *	Harvard caches, you need to implement this function.
146 *
147 *	- start	- virtual start address
148 *	- end	- virtual end address
149 */
150ENTRY(arm940_coherent_user_range)
151	/* FALLTHROUGH */
152
153/*
154 *	flush_kern_dcache_area(void *addr, size_t size)
155 *
156 *	Ensure no D cache aliasing occurs, either with itself or
157 *	the I cache
158 *
159 *	- addr	- kernel address
160 *	- size	- region size
161 */
162ENTRY(arm940_flush_kern_dcache_area)
163	mov	r0, #0
164	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1651:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
1662:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
167	subs	r3, r3, #1 << 26
168	bcs	2b				@ entries 63 to 0
169	subs	r1, r1, #1 << 4
 170	bcs	1b				@ segments 3 to 0
171	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
172	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
173	mov	pc, lr
174
175/*
176 *	dma_inv_range(start, end)
177 *
 178 *	There is no efficient way to invalidate a specified virtual
 179 *	address range. Thus, the entire cache is invalidated.
180 *
181 *	- start	- virtual start address
182 *	- end	- virtual end address
183 */
184arm940_dma_inv_range:
185	mov	ip, #0
186	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1871:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
1882:	mcr	p15, 0, r3, c7, c6, 2		@ invalidate D entry
189	subs	r3, r3, #1 << 26
190	bcs	2b				@ entries 63 to 0
191	subs	r1, r1, #1 << 4
 192	bcs	1b				@ segments 3 to 0
193	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
194	mov	pc, lr
195
196/*
197 *	dma_clean_range(start, end)
198 *
 199 *	There is no efficient way to clean a specified virtual
 200 *	address range. Thus, the entire cache is cleaned.
201 *
202 *	- start	- virtual start address
203 *	- end	- virtual end address
204 */
205arm940_dma_clean_range:
206ENTRY(cpu_arm940_dcache_clean_area)
207	mov	ip, #0
208#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
209	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
2101:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2112:	mcr	p15, 0, r3, c7, c10, 2		@ clean D entry
212	subs	r3, r3, #1 << 26
213	bcs	2b				@ entries 63 to 0
214	subs	r1, r1, #1 << 4
 215	bcs	1b				@ segments 3 to 0
216#endif
217	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
218	mov	pc, lr
219
220/*
221 *	dma_flush_range(start, end)
222 *
 223 *	There is no efficient way to clean and invalidate a specified
224 *	virtual address range.
225 *
226 *	- start	- virtual start address
227 *	- end	- virtual end address
228 */
229ENTRY(arm940_dma_flush_range)
230	mov	ip, #0
231	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
2321:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2332:
234#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
235	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D entry
236#else
237	mcr	p15, 0, r3, c7, c6, 2		@ invalidate D entry
238#endif
239	subs	r3, r3, #1 << 26
240	bcs	2b				@ entries 63 to 0
241	subs	r1, r1, #1 << 4
 242	bcs	1b				@ segments 3 to 0
243	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
244	mov	pc, lr
245
246/*
247 *	dma_map_area(start, size, dir)
248 *	- start	- kernel virtual start address
249 *	- size	- size of region
250 *	- dir	- DMA direction
251 */
252ENTRY(arm940_dma_map_area)
253	add	r1, r1, r0
254	cmp	r2, #DMA_TO_DEVICE
255	beq	arm940_dma_clean_range
256	bcs	arm940_dma_inv_range
257	b	arm940_dma_flush_range
258ENDPROC(arm940_dma_map_area)
259
260/*
261 *	dma_unmap_area(start, size, dir)
262 *	- start	- kernel virtual start address
263 *	- size	- size of region
264 *	- dir	- DMA direction
265 */
266ENTRY(arm940_dma_unmap_area)
267	mov	pc, lr
268ENDPROC(arm940_dma_unmap_area)
269
270	.globl	arm940_flush_kern_cache_louis
271	.equ	arm940_flush_kern_cache_louis, arm940_flush_kern_cache_all
272
273	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
274	define_cache_functions arm940
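define_cache_functions (from proc-macros.S) gathers the arm940_* entry points defined above into the arm940_cache_fns table that __arm940_proc_info references, with the _louis slot aliased to flush_kern_cache_all by the .equ just before it. The table has roughly the shape sketched below; the entry order is what matters here, and the argument lists are approximations rather than copies of <asm/cacheflush.h>:

/* Rough shape of the cpu_cache_fns table built by define_cache_functions arm940. */
struct arm940_cache_fns_sketch {
	void (*flush_icache_all)(void);
	void (*flush_kern_all)(void);
	void (*flush_kern_louis)(void);		/* same as flush_kern_all for ARM940 */
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long start, unsigned long end,
				 unsigned int flags);
	void (*coherent_kern_range)(unsigned long start, unsigned long end);
	int (*coherent_user_range)(unsigned long start, unsigned long end);
	void (*flush_kern_dcache_area)(void *addr, unsigned long size);
	void (*dma_map_area)(const void *start, unsigned long size, int dir);
	void (*dma_unmap_area)(const void *start, unsigned long size, int dir);
	void (*dma_flush_range)(const void *start, const void *end);
};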
275
276	.type	__arm940_setup, #function
277__arm940_setup:
278	mov	r0, #0
279	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
280	mcr	p15, 0, r0, c7, c6, 0		@ invalidate D cache
281	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
282
283	mcr	p15, 0, r0, c6, c3, 0		@ disable data area 3~7
284	mcr	p15, 0, r0, c6, c4, 0
285	mcr	p15, 0, r0, c6, c5, 0
286	mcr	p15, 0, r0, c6, c6, 0
287	mcr	p15, 0, r0, c6, c7, 0
288
289	mcr	p15, 0, r0, c6, c3, 1		@ disable instruction area 3~7
290	mcr	p15, 0, r0, c6, c4, 1
291	mcr	p15, 0, r0, c6, c5, 1
292	mcr	p15, 0, r0, c6, c6, 1
293	mcr	p15, 0, r0, c6, c7, 1
294
295	mov	r0, #0x0000003F			@ base = 0, size = 4GB
296	mcr	p15, 0, r0, c6,	c0, 0		@ set area 0, default
297	mcr	p15, 0, r0, c6,	c0, 1
298
299	ldr	r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
300	ldr	r1, =(CONFIG_DRAM_SIZE >> 12)	@ size of RAM (must be >= 4KB)
301	mov	r2, #10				@ 11 is the minimum (4KB)
3021:	add	r2, r2, #1			@ area size *= 2
 303	movs	r1, r1, lsr #1			@ halve the page count, setting flags for bne
304	bne	1b				@ count not zero r-shift
305	orr	r0, r0, r2, lsl #1		@ the area register value
306	orr	r0, r0, #1			@ set enable bit
307	mcr	p15, 0, r0, c6,	c1, 0		@ set area 1, RAM
308	mcr	p15, 0, r0, c6,	c1, 1
309
310	ldr	r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
311	ldr	r1, =(CONFIG_FLASH_SIZE >> 12)	@ size of FLASH (must be >= 4KB)
312	mov	r2, #10				@ 11 is the minimum (4KB)
3131:	add	r2, r2, #1			@ area size *= 2
 314	movs	r1, r1, lsr #1			@ halve the page count, setting flags for bne
315	bne	1b				@ count not zero r-shift
316	orr	r0, r0, r2, lsl #1		@ the area register value
317	orr	r0, r0, #1			@ set enable bit
318	mcr	p15, 0, r0, c6,	c2, 0		@ set area 2, ROM/FLASH
319	mcr	p15, 0, r0, c6,	c2, 1
320
321	mov	r0, #0x06
322	mcr	p15, 0, r0, c2, c0, 0		@ Region 1&2 cacheable
323	mcr	p15, 0, r0, c2, c0, 1
324#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
325	mov	r0, #0x00			@ disable whole write buffer
326#else
 327	mov	r0, #0x02			@ Region 1 write buffered
328#endif
329	mcr	p15, 0, r0, c3, c0, 0
330
331	mov	r0, #0x10000
332	sub	r0, r0, #1			@ r0 = 0xffff
333	mcr	p15, 0, r0, c5, c0, 0		@ all read/write access
334	mcr	p15, 0, r0, c5, c0, 1
335
336	mrc	p15, 0, r0, c1, c0		@ get control register
337	orr	r0, r0, #0x00001000		@ I-cache
338	orr	r0, r0, #0x00000005		@ MPU/D-cache
339
340	mov	pc, lr
341
342	.size	__arm940_setup, . - __arm940_setup
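The two shift-and-count loops in this older __arm940_setup compute the region size field by hand (the job the pr_val macro does in later kernels): r2 starts at 10 and is bumped once per bit of the size in pages, so a one-page (4KB) region produces the documented minimum of 11 and every doubling adds one, giving log2(size) - 1. A direct C transcription of that loop, as a sketch rather than kernel code:

#include <assert.h>
#include <stdint.h>

/* r2 = 10; 1: r2++; shift the page count right; loop while it is non-zero. */
static unsigned int arm940_size_field(uint32_t size_pages)
{
	unsigned int field = 10;	/* 11 is the minimum (4KB) */

	do {
		field++;		/* area size *= 2 */
		size_pages >>= 1;
	} while (size_pages);
	return field;
}

int main(void)
{
	assert(arm940_size_field(1) == 11);		/* 4KB          */
	assert(arm940_size_field(0x8000) == 26);	/* 128MB of RAM */
	return 0;
}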
343
344	__INITDATA
345
346	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
347	define_processor_functions arm940, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
348
349	.section ".rodata"
350
351	string	cpu_arch_name, "armv4t"
352	string	cpu_elf_name, "v4"
353	string	cpu_arm940_name, "ARM940T"
354
355	.align
356
357	.section ".proc.info.init", #alloc, #execinstr
358
359	.type	__arm940_proc_info,#object
360__arm940_proc_info:
361	.long	0x41009400
362	.long	0xff00fff0
363	.long	0
364	b	__arm940_setup
365	.long	cpu_arch_name
366	.long	cpu_elf_name
367	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
368	.long	cpu_arm940_name
369	.long	arm940_processor_functions
370	.long	0
371	.long	0
372	.long	arm940_cache_fns
373	.size	__arm940_proc_info, . - __arm940_proc_info
374