Linux v4.6: arch/arm/mm/cache-v4wb.S

/*
 *  linux/arch/arm/mm/cache-v4wb.S
 *
 *  Copyright (C) 1997-2002 Russell king
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The total size of the data cache.
 */
#if defined(CONFIG_CPU_SA110)
# define CACHE_DSIZE	16384
#elif defined(CONFIG_CPU_SA1100)
# define CACHE_DSIZE	8192
#else
# error Unknown cache size
#endif

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 *  Size  Clean (ticks) Dirty (ticks)
 *   4096   21  20  21    53  55  54
 *   8192   40  41  40   106 100 102
 *  16384   77  77  76   140 140 138
 *  32768  150 149 150   214 216 212 <---
 *  65536  296 297 296   351 358 361
 * 131072  591 591 591   656 657 651
 *  Whole  132 136 132   221 217 207 <---
 */
#define CACHE_DLIMIT	(CACHE_DSIZE * 4)

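As a rough illustration of the trade-off documented in the table above, the following C sketch shows the size check the range routines below perform against CACHE_DLIMIT. It is hypothetical illustration only; flush_strategy() is not a kernel function.

#include <stddef.h>
#include <stdio.h>

#define CACHE_DSIZE	16384			/* SA-110 data cache size */
#define CACHE_DLIMIT	(CACHE_DSIZE * 4)	/* 64 KiB break-even point */

/* Which strategy a range operation would use for [start, end). */
static const char *flush_strategy(size_t start, size_t end)
{
	size_t size = end - start;

	/* Beyond the limit, one pass over the whole cache is cheaper than
	 * issuing clean/invalidate MCRs for every 32-byte line in the range. */
	if (size >= CACHE_DLIMIT)
		return "flush whole D cache";
	return "clean/invalidate line by line";
}

int main(void)
{
	printf("%6d bytes: %s\n", 4096,   flush_strategy(0, 4096));
	printf("%6d bytes: %s\n", 131072, flush_strategy(0, 131072));
	return 0;
}
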
	.data
flush_base:
	.long	FLUSH_BASE
	.text

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(v4wb_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(v4wb_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Clean and invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(v4wb_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4wb_flush_kern_cache_all)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
__flush_whole_cache:
	ldr	r3, =flush_base
	ldr	r1, [r3, #0]
	eor	r1, r1, #CACHE_DSIZE
	str	r1, [r3, #0]
	add	r2, r1, #CACHE_DSIZE
1:	ldr	r3, [r1], #32
	cmp	r1, r2
	blo	1b
#ifdef FLUSH_BASE_MINICACHE
	add	r2, r2, #FLUSH_BASE_MINICACHE - FLUSH_BASE
	sub	r1, r2, #512			@ only 512 bytes
1:	ldr	r3, [r1], #32
	cmp	r1, r2
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain write buffer
	ret	lr

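The __flush_whole_cache sequence above has no "clean entire D cache" coprocessor operation to fall back on, so it reads through a reserved cacheable window at FLUSH_BASE, displacing every line of the direct-mapped write-back cache and thereby writing all dirty data out. A hedged C sketch of that trick follows; an ordinary static buffer stands in for the FLUSH_BASE window purely so the sketch compiles and runs, which is an assumption made for illustration only.

#include <stdint.h>
#include <stddef.h>

#define CACHE_DSIZE	16384
#define CACHE_DLINESIZE	32

/* Stand-in for the FLUSH_BASE window: two cache-sized halves. */
static uint32_t flush_window[2 * CACHE_DSIZE / sizeof(uint32_t)];
static size_t flush_offset;	/* mirrors the flush_base word kept in .data */

static void flush_whole_dcache_by_reading(void)
{
	/* Alternate between the two halves, as the EOR with CACHE_DSIZE does,
	 * so consecutive flushes always read addresses that miss the cache. */
	flush_offset ^= CACHE_DSIZE;

	volatile uint32_t *p = flush_window + flush_offset / sizeof(uint32_t);
	volatile uint32_t *end = p + CACHE_DSIZE / sizeof(uint32_t);

	/* One load per 32-byte line: in a direct-mapped write-back cache this
	 * displaces every line, forcing dirty data back to memory. */
	while (p < end) {
		(void)*p;
		p += CACHE_DLINESIZE / sizeof(*p);
	}
}

int main(void)
{
	flush_whole_dcache_by_reading();
	flush_whole_dcache_by_reading();	/* second call uses the other half */
	return 0;
}
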
/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (inclusive, page aligned)
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vma_area_struct flags describing address space
 */
ENTRY(v4wb_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	tst	r2, #VM_EXEC			@ executable region?
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache

	cmp	r3, #CACHE_DLIMIT		@ total size >= limit?
	bhs	__flush_whole_cache		@ flush whole D cache

1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v4wb_flush_kern_dcache_area)
	add	r1, r0, r1
	/* fall through */

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wb_coherent_kern_range)
	/* fall through */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wb_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

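For readers more comfortable with C, the loop in v4wb_coherent_user_range can be sketched as below. The four helper functions are hypothetical stand-ins for the MCR instructions above, stubbed out so the sketch compiles and runs.

#include <stdint.h>

#define CACHE_DLINESIZE	32

static void clean_dcache_line(uintptr_t addr)      { (void)addr; }	/* c7, c10, 1 */
static void invalidate_dcache_line(uintptr_t addr) { (void)addr; }	/* c7, c6, 1  */
static void invalidate_icache(void)                { }			/* c7, c5, 0  */
static void drain_write_buffer(void)               { }			/* c7, c10, 4 */

/* Make code written through the D cache visible to instruction fetches. */
static void coherent_range(uintptr_t start, uintptr_t end)
{
	start &= ~(uintptr_t)(CACHE_DLINESIZE - 1);	/* bic: align to a line */
	for (; start < end; start += CACHE_DLINESIZE) {
		clean_dcache_line(start);		/* push new code out to memory */
		invalidate_dcache_line(start);
	}
	invalidate_icache();				/* drop any stale instructions */
	drain_write_buffer();
}

int main(void)
{
	coherent_range(0x8004, 0x8100);	/* illustrative addresses only */
	return 0;
}
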
/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
v4wb_dma_inv_range:
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

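A similar hedged C sketch of v4wb_dma_inv_range shows why lines that straddle an unaligned start or end are cleaned before the range is invalidated; again the helpers are hypothetical stubs standing in for the coprocessor operations.

#include <stdint.h>

#define CACHE_DLINESIZE	32

static void clean_dcache_line(uintptr_t addr)      { (void)addr; }
static void invalidate_dcache_line(uintptr_t addr) { (void)addr; }
static void drain_write_buffer(void)               { }

static void dma_inv_range(uintptr_t start, uintptr_t end)
{
	/* Partially covered lines at either edge may hold unrelated dirty
	 * data, so write those lines back before discarding anything. */
	if (start & (CACHE_DLINESIZE - 1))
		clean_dcache_line(start);
	if (end & (CACHE_DLINESIZE - 1))
		clean_dcache_line(end);

	start &= ~(uintptr_t)(CACHE_DLINESIZE - 1);
	for (; start < end; start += CACHE_DLINESIZE)
		invalidate_dcache_line(start);	/* discard stale lines before DMA data lands */

	drain_write_buffer();
}

int main(void)
{
	dma_inv_range(0x8004, 0x80fc);	/* deliberately unaligned, illustrative only */
	return 0;
}
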
/*
 *	dma_clean_range(start, end)
 *
 *	Clean (write back) the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
v4wb_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 *
 *	This is actually the same as v4wb_coherent_kern_range()
 */
	.globl	v4wb_dma_flush_range
	.set	v4wb_dma_flush_range, v4wb_coherent_kern_range

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4wb_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	v4wb_dma_clean_range
	bcs	v4wb_dma_inv_range
	b	v4wb_dma_flush_range
ENDPROC(v4wb_dma_map_area)

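The three-way branch in v4wb_dma_map_area keys off enum dma_data_direction (DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2). A hedged C rendering of that dispatch follows, with the range routines stubbed out as hypothetical stand-ins for the assembly above.

enum dma_data_direction {
	DMA_BIDIRECTIONAL = 0,
	DMA_TO_DEVICE = 1,
	DMA_FROM_DEVICE = 2,
};

static void dma_clean_range(unsigned long s, unsigned long e) { (void)s; (void)e; }
static void dma_inv_range(unsigned long s, unsigned long e)   { (void)s; (void)e; }
static void dma_flush_range(unsigned long s, unsigned long e) { (void)s; (void)e; }

static void dma_map_area(unsigned long start, unsigned long size,
			 enum dma_data_direction dir)
{
	unsigned long end = start + size;	/* add r1, r1, r0 */

	if (dir == DMA_TO_DEVICE)		/* beq: CPU filled the buffer, write it back */
		dma_clean_range(start, end);
	else if (dir > DMA_TO_DEVICE)		/* bcs: device will write, discard CPU copies */
		dma_inv_range(start, end);
	else					/* DMA_BIDIRECTIONAL: clean and invalidate */
		dma_flush_range(start, end);
}

int main(void)
{
	dma_map_area(0x8000, 256, DMA_TO_DEVICE);	/* illustrative values */
	return 0;
}
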
/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4wb_dma_unmap_area)
	ret	lr
ENDPROC(v4wb_dma_unmap_area)

	.globl	v4wb_flush_kern_cache_louis
	.equ	v4wb_flush_kern_cache_louis, v4wb_flush_kern_cache_all

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v4wb
Linux v6.8: arch/arm/mm/cache-v4wb.S

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v4wb.S
 *
 *  Copyright (C) 1997-2002 Russell king
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The total size of the data cache.
 */
#if defined(CONFIG_CPU_SA110)
# define CACHE_DSIZE	16384
#elif defined(CONFIG_CPU_SA1100)
# define CACHE_DSIZE	8192
#else
# error Unknown cache size
#endif

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 *  Size  Clean (ticks) Dirty (ticks)
 *   4096   21  20  21    53  55  54
 *   8192   40  41  40   106 100 102
 *  16384   77  77  76   140 140 138
 *  32768  150 149 150   214 216 212 <---
 *  65536  296 297 296   351 358 361
 * 131072  591 591 591   656 657 651
 *  Whole  132 136 132   221 217 207 <---
 */
#define CACHE_DLIMIT	(CACHE_DSIZE * 4)

	.data
	.align	2
flush_base:
	.long	FLUSH_BASE
	.text

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(v4wb_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(v4wb_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Clean and invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(v4wb_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4wb_flush_kern_cache_all)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
__flush_whole_cache:
	ldr	r3, =flush_base
	ldr	r1, [r3, #0]
	eor	r1, r1, #CACHE_DSIZE
	str	r1, [r3, #0]
	add	r2, r1, #CACHE_DSIZE
1:	ldr	r3, [r1], #32
	cmp	r1, r2
	blo	1b
#ifdef FLUSH_BASE_MINICACHE
	add	r2, r2, #FLUSH_BASE_MINICACHE - FLUSH_BASE
	sub	r1, r2, #512			@ only 512 bytes
1:	ldr	r3, [r1], #32
	cmp	r1, r2
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (inclusive, page aligned)
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vma_area_struct flags describing address space
 */
ENTRY(v4wb_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	tst	r2, #VM_EXEC			@ executable region?
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache

	cmp	r3, #CACHE_DLIMIT		@ total size >= limit?
	bhs	__flush_whole_cache		@ flush whole D cache

1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v4wb_flush_kern_dcache_area)
	add	r1, r0, r1
	/* fall through */

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wb_coherent_kern_range)
	/* fall through */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wb_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr


/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
v4wb_dma_inv_range:
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean (write back) the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
v4wb_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 *
 *	This is actually the same as v4wb_coherent_kern_range()
 */
	.globl	v4wb_dma_flush_range
	.set	v4wb_dma_flush_range, v4wb_coherent_kern_range

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4wb_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	v4wb_dma_clean_range
	bcs	v4wb_dma_inv_range
	b	v4wb_dma_flush_range
ENDPROC(v4wb_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4wb_dma_unmap_area)
	ret	lr
ENDPROC(v4wb_dma_unmap_area)

	.globl	v4wb_flush_kern_cache_louis
	.equ	v4wb_flush_kern_cache_louis, v4wb_flush_kern_cache_all

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v4wb