arch/arm/mm/cache-v4wt.S at kernel v3.1:
 
/*
 *  linux/arch/arm/mm/cache-v4wt.S
 *
 *  Copyright (C) 1997-2002 Russell king
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  ARMv4 write through cache operations support.
 *
 *  We assume that the write buffer is not enabled.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	8

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 * *** This needs benchmarking
 */
#define CACHE_DLIMIT	16384

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(v4wt_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mov	pc, lr
ENDPROC(v4wt_flush_icache_all)
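For readers less used to CP15 mnemonics: "mcr p15, 0, r0, c7, c5, 0" above writes r0 (the value is ignored, zero by convention) to coprocessor 15's cache-operation register c7, CRm c5, opcode2 0, which on ARMv4 invalidates the entire instruction cache. A C view of the same operation using GCC inline assembly is sketched below; the wrapper name is invented for illustration, the real entry point is the assembly above.

/* Illustration only: issue the same CP15 "invalidate entire I cache"
 * operation as v4wt_flush_icache_all.  The function name is made up. */
static inline void sketch_invalidate_icache_all(void)
{
	unsigned long zero = 0;

	/* MCR p15, 0, <Rd>, c7, c5, 0 */
	asm volatile("mcr p15, 0, %0, c7, c5, 0" : : "r" (zero) : "memory");
}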

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(v4wt_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4wt_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
	mov	pc, lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (inclusive, page aligned)
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vma_area_struct flags describing address space
 */
ENTRY(v4wt_flush_user_cache_range)
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	pc, lr
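The routine above is where CACHE_DLIMIT matters: for ranges of 16 KiB (CACHE_DLIMIT) or more it branches to __flush_whole_cache rather than walking the range one line at a time. A C-level sketch of that control flow follows; the function name is hypothetical, VM_EXEC comes from <linux/mm.h>, and the CP15 accesses simply mirror the assembly above.

#include <linux/mm.h>		/* VM_EXEC */

#define CACHE_DLINESIZE	32	/* as defined in the listing */
#define CACHE_DLIMIT	16384

/* Sketch of v4wt_flush_user_cache_range()'s decision and loop. */
static void sketch_flush_user_cache_range(unsigned long start,
					  unsigned long end,
					  unsigned int flags)
{
	if (end - start >= CACHE_DLIMIT) {
		/* Big range: cheaper to invalidate the whole cache. */
		if (flags & VM_EXEC)
			asm volatile("mcr p15, 0, %0, c7, c5, 0" : : "r" (0) : "memory");
		asm volatile("mcr p15, 0, %0, c7, c6, 0" : : "r" (0) : "memory");
		return;
	}

	for (; start < end; start += CACHE_DLINESIZE) {
		/* Invalidate one D cache line; also the I line for executable VMAs. */
		asm volatile("mcr p15, 0, %0, c7, c6, 1" : : "r" (start) : "memory");
		if (flags & VM_EXEC)
			asm volatile("mcr p15, 0, %0, c7, c5, 1" : : "r" (start) : "memory");
	}
}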
 
/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wt_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wt_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	pc, lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v4wt_flush_kern_dcache_area)
	mov	r2, #0
	mcr	p15, 0, r2, c7, c5, 0		@ invalidate I cache
	add	r1, r0, r1
	/* fallthrough */

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
v4wt_dma_inv_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	pc, lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
	.globl	v4wt_dma_flush_range
	.equ	v4wt_dma_flush_range, v4wt_dma_inv_range

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4wt_dma_unmap_area)
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	bne	v4wt_dma_inv_range
	/* FALLTHROUGH */

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4wt_dma_map_area)
	mov	pc, lr
ENDPROC(v4wt_dma_unmap_area)
ENDPROC(v4wt_dma_map_area)

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v4wt
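The closing "define_cache_functions v4wt" (a macro from proc-macros.S) emits the per-CPU table of cache operations that generic ARM code calls through; it corresponds to a struct cpu_cache_fns from <asm/cacheflush.h> filled in with the v4wt entry points above. The sketch below is an abridged C view of that table: the member list is shortened and the exact prototypes should be taken from the header, not from here.

#include <linux/types.h>	/* size_t */

/* Abridged sketch of the function table define_cache_functions builds;
 * see arch/arm/include/asm/cacheflush.h for the real cpu_cache_fns. */
struct cpu_cache_fns_sketch {
	void (*flush_icache_all)(void);
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long start, unsigned long end,
				 unsigned int flags);
	void (*coherent_kern_range)(unsigned long start, unsigned long end);
	void (*coherent_user_range)(unsigned long start, unsigned long end);
	void (*flush_kern_dcache_area)(void *addr, size_t size);
	/* ... dma_map_area, dma_unmap_area, dma_flush_range ... */
};

Roughly speaking, on multi-cache kernels the matching table is picked out of the processor's proc_info entry at boot and copied into the live ops, which is why it can sit after __INITDATA.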
The same file at kernel v6.13.7:
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v4wt.S
 *
 *  Copyright (C) 1997-2002 Russell king
 *
 *  ARMv4 write through cache operations support.
 *
 *  We assume that the write buffer is not enabled.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	8

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 * *** This needs benchmarking
 */
#define CACHE_DLIMIT	16384

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
SYM_TYPED_FUNC_START(v4wt_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
SYM_FUNC_END(v4wt_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 */
SYM_FUNC_ALIAS(v4wt_flush_user_cache_all, v4wt_flush_kern_cache_all)

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
SYM_TYPED_FUNC_START(v4wt_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
	ret	lr
SYM_FUNC_END(v4wt_flush_kern_cache_all)

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (inclusive, page aligned)
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vma_area_struct flags describing address space
 */
SYM_TYPED_FUNC_START(v4wt_flush_user_cache_range)
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	ret	lr
SYM_FUNC_END(v4wt_flush_user_cache_range)

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
SYM_TYPED_FUNC_START(v4wt_coherent_kern_range)
#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
	b	v4wt_coherent_user_range
#endif
SYM_FUNC_END(v4wt_coherent_kern_range)
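One real difference from the v3.1 version shows up right here: these entry points are reached indirectly through the cpu_cache_fns table, so they are declared with SYM_TYPED_FUNC_START, which under CONFIG_CFI_CLANG emits a kCFI type identifier ahead of each function. That emitted data is also why coherent_kern_range can no longer simply fall through into coherent_user_range and instead branches explicitly when CFI is enabled. Below is a minimal C picture of the kind of indirect, type-checked call being protected; the names are hypothetical.

/* Hypothetical ops table and call site: with kCFI the compiler checks a
 * type hash at the indirect call, so the target must be a typed symbol. */
struct cache_ops_sketch {
	void (*coherent_kern_range)(unsigned long start, unsigned long end);
};

static void sketch_sync_icache(const struct cache_ops_sketch *ops,
			       unsigned long start, unsigned long end)
{
	ops->coherent_kern_range(start, end);	/* indirect, CFI-checked */
}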

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
SYM_TYPED_FUNC_START(v4wt_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	ret	lr
SYM_FUNC_END(v4wt_coherent_user_range)

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
SYM_TYPED_FUNC_START(v4wt_flush_kern_dcache_area)
	mov	r2, #0
	mcr	p15, 0, r2, c7, c5, 0		@ invalidate I cache
	add	r1, r0, r1
	b	v4wt_dma_inv_range
SYM_FUNC_END(v4wt_flush_kern_dcache_area)

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
v4wt_dma_inv_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
SYM_TYPED_FUNC_START(v4wt_dma_flush_range)
	b	v4wt_dma_inv_range
SYM_FUNC_END(v4wt_dma_flush_range)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
SYM_TYPED_FUNC_START(v4wt_dma_unmap_area)
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	bne	v4wt_dma_inv_range
	ret	lr
SYM_FUNC_END(v4wt_dma_unmap_area)

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
SYM_TYPED_FUNC_START(v4wt_dma_map_area)
	ret	lr
SYM_FUNC_END(v4wt_dma_map_area)
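Finally, the DMA helpers that close the file reflect the write-through nature of this cache: there are never dirty lines to clean, so dma_map_area is a plain return, and dma_unmap_area only invalidates when the device may have written to memory, i.e. for any direction other than DMA_TO_DEVICE. Roughly, in C terms; the helper names below are invented for illustration and the CP15 access mirrors v4wt_dma_inv_range above.

#include <linux/types.h>
#include <linux/dma-direction.h>	/* enum dma_data_direction */

#define CACHE_DLINESIZE	32		/* as in the listing */

/* Per-line invalidate, as v4wt_dma_inv_range does above. */
static void sketch_dma_inv_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_DLINESIZE - 1UL);
	for (; start < end; start += CACHE_DLINESIZE)
		asm volatile("mcr p15, 0, %0, c7, c6, 1" : : "r" (start) : "memory");
}

/* Write-through cache: nothing to clean before the device reads. */
static void sketch_dma_map_area(const void *start, size_t size,
				enum dma_data_direction dir)
{
	/* intentionally empty */
}

/* Discard stale lines if the device may have written to this memory. */
static void sketch_dma_unmap_area(const void *start, size_t size,
				  enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE)
		sketch_dma_inv_range((unsigned long)start,
				     (unsigned long)start + size);
}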