Linux v4.6, arch/arm/mm/cache-v4wb.S (StrongARM write-back cache maintenance):
 
/*
 *  linux/arch/arm/mm/cache-v4wb.S
 *
 *  Copyright (C) 1997-2002 Russell king
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The total size of the data cache.
 */
#if defined(CONFIG_CPU_SA110)
# define CACHE_DSIZE	16384
#elif defined(CONFIG_CPU_SA1100)
# define CACHE_DSIZE	8192
#else
# error Unknown cache size
#endif

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 *  Size  Clean (ticks) Dirty (ticks)
 *   4096   21  20  21    53  55  54
 *   8192   40  41  40   106 100 102
 *  16384   77  77  76   140 140 138
 *  32768  150 149 150   214 216 212 <---
 *  65536  296 297 296   351 358 361
 * 131072  591 591 591   656 657 651
 *  Whole  132 136 132   221 217 207 <---
 */
#define CACHE_DLIMIT	(CACHE_DSIZE * 4)

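Editor's note: the timing table above is the justification for CACHE_DLIMIT. Line-by-line maintenance cost grows roughly linearly with the size of the range, while flushing the whole cache (the "Whole" row) costs a near-constant number of ticks, so beyond about four times the data cache size it is cheaper to give up on individual lines. A minimal C sketch of that decision follows; flush_whole_dcache(), clean_dline() and inv_dline() are hypothetical stand-ins for __flush_whole_cache and the per-line MCR loops later in this file, not kernel APIs.

/* Illustrative sketch only, not kernel code. */
#define CACHE_DLINESIZE	32
#define CACHE_DSIZE	8192			/* SA-1100 value */
#define CACHE_DLIMIT	(CACHE_DSIZE * 4)

static void flush_whole_dcache(void) { /* stand-in for __flush_whole_cache */ }
static void clean_dline(unsigned long a) { (void)a; /* MCR p15, 0, a, c7, c10, 1 */ }
static void inv_dline(unsigned long a)   { (void)a; /* MCR p15, 0, a, c7, c6, 1 */ }

static void flush_dcache_range(unsigned long start, unsigned long end)
{
	if (end - start >= CACHE_DLIMIT) {	/* large range: dump everything */
		flush_whole_dcache();
		return;
	}
	for (unsigned long a = start & ~(CACHE_DLINESIZE - 1UL); a < end;
	     a += CACHE_DLINESIZE) {
		clean_dline(a);			/* small range: per-line ops */
		inv_dline(a);
	}
}
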
	.data
flush_base:
	.long	FLUSH_BASE
	.text

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(v4wb_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(v4wb_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Clean and invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(v4wb_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4wb_flush_kern_cache_all)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
__flush_whole_cache:
	ldr	r3, =flush_base
	ldr	r1, [r3, #0]
	eor	r1, r1, #CACHE_DSIZE
	str	r1, [r3, #0]
	add	r2, r1, #CACHE_DSIZE
1:	ldr	r3, [r1], #32
	cmp	r1, r2
	blo	1b
#ifdef FLUSH_BASE_MINICACHE
	add	r2, r2, #FLUSH_BASE_MINICACHE - FLUSH_BASE
	sub	r1, r2, #512			@ only 512 bytes
1:	ldr	r3, [r1], #32
	cmp	r1, r2
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain write buffer
	ret	lr

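Editor's note: the StrongARM has no coprocessor operation that cleans the entire data cache, so __flush_whole_cache uses the read-to-evict trick: it loads CACHE_DSIZE bytes from a dedicated, otherwise unused cacheable window at FLUSH_BASE, and every line brought in displaces (and therefore writes back) a potentially dirty line. The eor with CACHE_DSIZE alternates between two halves of that window so the next flush does not simply re-hit lines that are already resident; the FLUSH_BASE_MINICACHE block does the same for the 512-byte minicache. A hedged C sketch of the idea, reusing the CACHE_* constants from the earlier sketch; flush_window is a hypothetical pointer playing the role of FLUSH_BASE.

/* Sketch of the read-to-evict whole-cache clean (illustrative only). */
static volatile unsigned char *flush_window;	/* 2 * CACHE_DSIZE cacheable bytes */
static unsigned long flush_offset;		/* flip-flops between the halves */

static void flush_whole_dcache_sketch(void)
{
	flush_offset ^= CACHE_DSIZE;		/* alternate halves, like the eor */
	const volatile unsigned char *p = flush_window + flush_offset;

	for (unsigned long i = 0; i < CACHE_DSIZE; i += CACHE_DLINESIZE)
		(void)p[i];			/* each load evicts one line, writing back dirty data */
	/* the assembly then drains the write buffer (MCR c7, c10, 4) */
}
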
/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (inclusive, page aligned)
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vm_area_struct flags describing address space
 */
ENTRY(v4wb_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	tst	r2, #VM_EXEC			@ executable region?
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache

	cmp	r3, #CACHE_DLIMIT		@ total size >= limit?
	bhs	__flush_whole_cache		@ flush whole D cache

1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain write buffer
	ret	lr

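Editor's note: for readers who do not follow ARM assembly, the routine above reads roughly as the following C. The helper names are the illustrative stand-ins introduced in the earlier sketches (not kernel APIs); the VM_EXEC value matches <linux/mm.h>.

/* Rough C rendering of v4wb_flush_user_cache_range (illustrative only). */
#define VM_EXEC	0x00000004			/* as in <linux/mm.h> */

static void invalidate_icache(void)   { /* MCR p15, 0, rX, c7, c5, 0 */ }
static void drain_write_buffer(void)  { /* MCR p15, 0, rX, c7, c10, 4 */ }

static void flush_user_range(unsigned long start, unsigned long end,
			     unsigned int vm_flags)
{
	if (vm_flags & VM_EXEC)			/* only executable mappings */
		invalidate_icache();		/* need the I cache touched  */

	if (end - start >= CACHE_DLIMIT) {
		flush_whole_dcache();		/* __flush_whole_cache path,   */
		return;				/* which drains the WB itself  */
	}

	for (unsigned long a = start; a < end; a += CACHE_DLINESIZE) {
		clean_dline(a);
		inv_dline(a);
	}
	if (vm_flags & VM_EXEC)
		drain_write_buffer();
}
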
/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v4wb_flush_kern_dcache_area)
	add	r1, r0, r1
	/* fall through */

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wb_coherent_kern_range)
	/* fall through */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wb_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

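Editor's note: the three entry points above cooperate. flush_kern_dcache_area turns its (addr, size) pair into an end address and falls through, and both coherent variants then walk the range cleaning and invalidating D lines before invalidating the whole I cache, which is what makes freshly written instructions (module text, breakpoints, signal trampolines) visible to the instruction fetch side. In terms of the stand-in helpers from the earlier sketches, the sequence is roughly:

/* Rough C rendering of v4wb_coherent_user_range (illustrative only). */
static int coherent_range(unsigned long start, unsigned long end)
{
	for (unsigned long a = start & ~(CACHE_DLINESIZE - 1UL); a < end;
	     a += CACHE_DLINESIZE) {
		clean_dline(a);			/* push new instructions to memory */
		inv_dline(a);			/* drop the stale D-side copy */
	}
	invalidate_icache();			/* force refetch from memory */
	drain_write_buffer();			/* MCR c7, c10, 4 in the assembly */
	return 0;				/* the assembly returns 0 in r0 */
}
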
/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
v4wb_dma_inv_range:
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

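Editor's note: the interesting detail in v4wb_dma_inv_range is the treatment of partial lines. If start or end is not cache-line aligned, the first or last line also holds bytes that do not belong to the DMA buffer, so that line is cleaned (written back) before the range is invalidated; otherwise a neighbour's dirty data would be silently discarded. Roughly, in C, using the same illustrative stand-ins as before:

/* Rough C rendering of v4wb_dma_inv_range (illustrative only). */
static void dma_inv_range_sketch(unsigned long start, unsigned long end)
{
	if (start & (CACHE_DLINESIZE - 1))	/* partial first line: write back */
		clean_dline(start);		/* the neighbouring dirty bytes   */
	if (end & (CACHE_DLINESIZE - 1))	/* partial last line, likewise */
		clean_dline(end);

	for (unsigned long a = start & ~(CACHE_DLINESIZE - 1UL); a < end;
	     a += CACHE_DLINESIZE)
		inv_dline(a);			/* discard without writing back */

	drain_write_buffer();
}
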
/*
 *	dma_clean_range(start, end)
 *
 *	Clean (write back) the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
v4wb_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 *
 *	This is actually the same as v4wb_coherent_kern_range()
 */
	.globl	v4wb_dma_flush_range
	.set	v4wb_dma_flush_range, v4wb_coherent_kern_range

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4wb_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	v4wb_dma_clean_range
	bcs	v4wb_dma_inv_range
	b	v4wb_dma_flush_range
ENDPROC(v4wb_dma_map_area)

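Editor's note: the cmp/beq/bcs sequence above leans on the numeric values of enum dma_data_direction (DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2). A buffer headed to the device only needs its dirty lines cleaned, a buffer coming from the device only needs its stale lines invalidated, and a bidirectional buffer needs both. In C the dispatch reads roughly as the sketch below; dma_clean_range_sketch and dma_flush_range_sketch are illustrative stand-ins for the v4wb_dma_* routines above.

/* Rough C rendering of v4wb_dma_map_area's dispatch (illustrative only). */
enum { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2 };	/* as in <linux/dma-direction.h> */

static void dma_clean_range_sketch(unsigned long s, unsigned long e) { (void)s; (void)e; }
static void dma_flush_range_sketch(unsigned long s, unsigned long e) { (void)s; (void)e; }

static void dma_map_area_sketch(unsigned long start, unsigned long size, int dir)
{
	unsigned long end = start + size;

	if (dir == DMA_TO_DEVICE)		/* beq: the device will read it */
		dma_clean_range_sketch(start, end);
	else if (dir > DMA_TO_DEVICE)		/* bcs: DMA_FROM_DEVICE */
		dma_inv_range_sketch(start, end);
	else					/* DMA_BIDIRECTIONAL */
		dma_flush_range_sketch(start, end);
}
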
/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4wb_dma_unmap_area)
	ret	lr
ENDPROC(v4wb_dma_unmap_area)

	.globl	v4wb_flush_kern_cache_louis
	.equ	v4wb_flush_kern_cache_louis, v4wb_flush_kern_cache_all

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v4wb
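Editor's note: define_cache_functions (from proc-macros.S) collects the v4wb_* entry points above into the per-CPU cache operations table used when the kernel is built with support for multiple cache types; the otherwise odd flush_kern_cache_louis alias exists because this CPU has no distinct "level of unification" to flush to, so it simply reuses the whole-cache routine. The table is struct cpu_cache_fns from <asm/cacheflush.h>; its exact member list varies between kernel versions, but it is approximately the following (an approximation for orientation, not a copy of the header):

/* Approximate shape of struct cpu_cache_fns; members and types vary by version. */
#include <stddef.h>
struct cpu_cache_fns {
	void (*flush_icache_all)(void);
	void (*flush_kern_all)(void);
	void (*flush_kern_louis)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
	void (*coherent_kern_range)(unsigned long, unsigned long);
	int  (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_area)(void *, size_t);
	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);
	void (*dma_flush_range)(const void *, const void *);
};
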
The same file at Linux v5.4 is unchanged apart from two details: the GPL paragraph in the header comment is replaced by an SPDX-License-Identifier: GPL-2.0-only line, and the flush_base word in the .data section gains an explicit .align 2 before the label.