v3.1
/*
 *  linux/arch/arm/mm/cache-v4wt.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  ARMv4 write through cache operations support.
 *
 *  We assume that the write buffer is not enabled.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	8

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 * *** This needs benchmarking
 */
#define CACHE_DLIMIT	16384
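
/*
 * Note that the geometry defined above multiplies out to exactly this
 * limit: CACHE_DSEGMENTS * CACHE_DENTRIES * CACHE_DLINESIZE
 * = 8 * 64 * 32 = 16384 bytes.  In other words, CACHE_DLIMIT is the
 * total D-cache size, the natural break-even point beyond which a
 * line-by-line loop can never beat a whole-cache operation.
 */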

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(v4wt_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mov	pc, lr
ENDPROC(v4wt_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(v4wt_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4wt_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
	mov	pc, lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (inclusive, page aligned)
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vm_area_struct flags describing address space
 */
ENTRY(v4wt_flush_user_cache_range)
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	pc, lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wt_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wt_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	pc, lr
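
/*
 * Because the D-cache is write-through (and the header assumes the
 * write buffer is disabled), main memory always holds the data the CPU
 * has written.  Making the I-cache coherent with the D-cache therefore
 * only requires discarding stale I-cache lines, which is why the loop
 * above touches c7, c5 (I entry) and leaves the D-cache alone.
 */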
/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v4wt_flush_kern_dcache_area)
	mov	r2, #0
	mcr	p15, 0, r2, c7, c5, 0		@ invalidate I cache
	add	r1, r0, r1
	/* fallthrough */

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
v4wt_dma_inv_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	pc, lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
	.globl	v4wt_dma_flush_range
	.equ	v4wt_dma_flush_range, v4wt_dma_inv_range
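
/*
 * On a write-through cache, "clean" is a no-op: the D-cache can never
 * hold dirty lines, so clean+invalidate of a range degenerates to a
 * plain invalidate.  That is why dma_flush_range can simply be made an
 * assembly-time alias of dma_inv_range above.
 */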

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4wt_dma_unmap_area)
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	bne	v4wt_dma_inv_range
	/* FALLTHROUGH */

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4wt_dma_map_area)
	mov	pc, lr
ENDPROC(v4wt_dma_unmap_area)
ENDPROC(v4wt_dma_map_area)
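
/*
 * Mapping a buffer for DMA needs no cache maintenance here: with a
 * write-through D-cache (and the write buffer assumed off, per the file
 * header), memory is already consistent for device reads, so
 * dma_map_area just returns.  On unmap, only buffers the device may
 * have written to (any direction other than DMA_TO_DEVICE) need their
 * now-stale D-cache lines invalidating, hence the teq/bne above.
 */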
	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v4wt
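
/*
 * For reference, define_cache_functions (from proc-macros.S) emits the
 * per-CPU method table that <asm/cacheflush.h> dispatches through.
 * Roughly, and with the exact field list depending on the kernel
 * version, the macro expands to something like:
 *
 *	.align	2
 *	.type	v4wt_cache_fns, #object
 *	ENTRY(v4wt_cache_fns)
 *	.long	v4wt_flush_icache_all
 *	.long	v4wt_flush_kern_cache_all
 *	.long	v4wt_flush_user_cache_all
 *	.long	v4wt_flush_user_cache_range
 *	.long	v4wt_coherent_kern_range
 *	.long	v4wt_coherent_user_range
 *	.long	v4wt_flush_kern_dcache_area
 *	.long	v4wt_dma_map_area
 *	.long	v4wt_dma_unmap_area
 *	.long	v4wt_dma_flush_range
 *	.size	v4wt_cache_fns, . - v4wt_cache_fns
 *
 * i.e. one function pointer per struct cpu_cache_fns member, in
 * declaration order.
 */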
v6.2
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v4wt.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 *  ARMv4 write through cache operations support.
 *
 *  We assume that the write buffer is not enabled.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	8

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 * *** This needs benchmarking
 */
#define CACHE_DLIMIT	16384

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(v4wt_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(v4wt_flush_icache_all)
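
/*
 * Note the main mechanical change since v3.1: every "mov pc, lr" has
 * become "ret lr".  ret is a macro from <asm/assembler.h> (now included
 * above) that emits "bx lr" where the architecture supports
 * interworking and plain "mov pc, lr" on older parts, so on an ARMv4
 * core like this it still assembles to the original instruction.
 */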

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(v4wt_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4wt_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
	ret	lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (inclusive, page aligned)
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vm_area_struct flags describing address space
 */
ENTRY(v4wt_flush_user_cache_range)
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	ret	lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wt_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wt_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	ret	lr
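
/*
 * Another difference from v3.1: coherent_user_range now returns 0 in
 * r0.  In later kernels this cache op returns an int so that
 * implementations which may fault on user addresses can report an
 * error; this one cannot fault, so it unconditionally reports success.
 */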

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v4wt_flush_kern_dcache_area)
	mov	r2, #0
	mcr	p15, 0, r2, c7, c5, 0		@ invalidate I cache
	add	r1, r0, r1
	/* fallthrough */

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
v4wt_dma_inv_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
	.globl	v4wt_dma_flush_range
	.equ	v4wt_dma_flush_range, v4wt_dma_inv_range

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4wt_dma_unmap_area)
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	bne	v4wt_dma_inv_range
	/* FALLTHROUGH */

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4wt_dma_map_area)
	ret	lr
ENDPROC(v4wt_dma_unmap_area)
ENDPROC(v4wt_dma_map_area)

	.globl	v4wt_flush_kern_cache_louis
	.equ	v4wt_flush_kern_cache_louis, v4wt_flush_kern_cache_all
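
/*
 * flush_kern_cache_louis is also new relative to v3.1: it flushes the
 * cache only to the Level of Unification Inner Shareable, a distinction
 * that matters on multi-level ARMv7 cache hierarchies.  With the single
 * cache level here there is nothing to distinguish, so it simply
 * aliases the whole-cache flush.
 */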

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v4wt