/* SPDX-License-Identifier: GPL-2.0 */
/* Linux v6.8 */
#ifndef _M68K_CACHEFLUSH_H
#define _M68K_CACHEFLUSH_H

#include <linux/mm.h>
#ifdef CONFIG_COLDFIRE
#include <asm/mcfsim.h>
#endif

/*
 * Cache control bits in the 68020/030 CACR register: FLUSH_I is the CI
 * (clear instruction cache) bit; FLUSH_I_AND_D additionally sets the
 * 68030-only CD (clear data cache) bit.
 */
#define FLUSH_I_AND_D	(0x00000808)
#define FLUSH_I		(0x00000008)

#ifndef ICACHE_MAX_ADDR
#define ICACHE_MAX_ADDR	0
#define ICACHE_SET_MASK	0
#define DCACHE_MAX_ADDR	0
#define DCACHE_SET_MASK	0
#endif
#ifndef CACHE_MODE
#define	CACHE_MODE	0
#define	CACR_ICINVA	0
#define	CACR_DCINVA	0
#define	CACR_BCINVA	0
#endif

/*
 * The ColdFire architecture has no way to clear individual cache lines,
 * so we are stuck invalidating all the cache entries when we want a
 * clear operation.
 */
static inline void clear_cf_icache(unsigned long start, unsigned long end)
{
	__asm__ __volatile__ (
		"movec	%0,%%cacr\n\t"
		"nop"
		:
		: "r" (CACHE_MODE | CACR_ICINVA | CACR_BCINVA));
}

static inline void clear_cf_dcache(unsigned long start, unsigned long end)
{
	__asm__ __volatile__ (
		"movec	%0,%%cacr\n\t"
		"nop"
		:
		: "r" (CACHE_MODE | CACR_DCINVA));
}

static inline void clear_cf_bcache(unsigned long start, unsigned long end)
{
	__asm__ __volatile__ (
		"movec	%0,%%cacr\n\t"
		"nop"
		:
		: "r" (CACHE_MODE | CACR_ICINVA | CACR_BCINVA | CACR_DCINVA));
}
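
/*
 * Note that the start/end arguments above exist only for interface
 * symmetry with the flush_cf_*() helpers below; the single CACR write
 * invalidates the whole of the selected cache(s) regardless. The
 * trailing "nop" is the usual ColdFire idiom for letting the CACR
 * write take effect (nop synchronizes the pipeline on these cores).
 */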

/*
 * Use the ColdFire cpushl instruction to push (and invalidate) cache lines.
 * The start and end addresses are cache line numbers, not memory addresses.
 * Each pass of the loops below pushes four consecutive line indexes, which
 * (on the 4-way ColdFire caches) covers the ways of one set: the inline
 * "addq #1" steps have already advanced the index by 3 when the loop
 * increment runs, so stepping by (0x10 - 3) gives a net stride of 0x10
 * on to the next group.
 */
static inline void flush_cf_icache(unsigned long start, unsigned long end)
{
	unsigned long set;

	for (set = start; set <= end; set += (0x10 - 3)) {
		__asm__ __volatile__ (
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)"
			: "=a" (set)
			: "a" (set));
	}
}

static inline void flush_cf_dcache(unsigned long start, unsigned long end)
{
	unsigned long set;

	for (set = start; set <= end; set += (0x10 - 3)) {
		__asm__ __volatile__ (
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)"
			: "=a" (set)
			: "a" (set));
	}
}

static inline void flush_cf_bcache(unsigned long start, unsigned long end)
{
	unsigned long set;

	for (set = start; set <= end; set += (0x10 - 3)) {
		__asm__ __volatile__ (
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)"
			: "=a" (set)
			: "a" (set));
	}
}
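
/*
 * In this file these helpers are either swept over the whole cache, e.g.
 *
 *	flush_cf_icache(0, ICACHE_MAX_ADDR);
 *
 * or over a sub-range computed with ICACHE_SET_MASK (see
 * __flush_pages_to_ram() below). The MAX_ADDR and SET_MASK constants
 * come from the platform cache headers (the m5xxxacr.h family) on
 * ColdFire builds.
 */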

/*
 * Cache handling functions
 */

static inline void flush_icache(void)
{
	if (CPU_IS_COLDFIRE) {
		flush_cf_icache(0, ICACHE_MAX_ADDR);
	} else if (CPU_IS_040_OR_060) {
		asm volatile (	"nop\n"
			"	.chip	68040\n"
			"	cpusha	%bc\n"
			"	.chip	68k");
	} else {
		unsigned long tmp;
		asm volatile (	"movec	%%cacr,%0\n"
			"	or.w	%1,%0\n"
			"	movec	%0,%%cacr"
			: "=&d" (tmp)
			: "id" (FLUSH_I));
	}
}
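
/*
 * A note on the 040/060 path above: cpusha %bc pushes dirty lines and
 * invalidates both caches, and the .chip 68040/.chip 68k directives
 * simply tell the assembler to accept the 68040-only instruction in an
 * otherwise generic 68k build. The leading nop serializes the pipeline
 * before the cache operation (a common idiom on these parts).
 */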

/*
 * Invalidate the cache for the specified memory range: the range
 * starts at the given physical address and covers "len" bytes.
 */
extern void cache_clear(unsigned long paddr, int len);
/*
 * Push any dirty cache lines in the specified memory range: the range
 * starts at the given physical address and covers "len" bytes.
 */
extern void cache_push(unsigned long paddr, int len);

/*
 * Push and invalidate pages in the specified user virtual memory range.
 */
extern void cache_push_v(unsigned long vaddr, int len);
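
/*
 * Illustrative only: a typical DMA-style sequence over a buffer
 * described by paddr/len would be
 *
 *	cache_push(paddr, len);		write back dirty lines before
 *					a device reads the buffer
 *	cache_clear(paddr, len);	toss stale lines before the CPU
 *					reads data a device has written
 */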

/* This is needed whenever the virtual mapping of the current
   process changes.  */
#define __flush_cache_all()					\
({								\
	if (CPU_IS_COLDFIRE) {					\
		flush_cf_dcache(0, DCACHE_MAX_ADDR);		\
	} else if (CPU_IS_040_OR_060) {				\
		__asm__ __volatile__("nop\n\t"			\
				     ".chip 68040\n\t"		\
				     "cpusha %dc\n\t"		\
				     ".chip 68k");		\
	} else {						\
		unsigned long _tmp;				\
		__asm__ __volatile__("movec %%cacr,%0\n\t"	\
				     "orw %1,%0\n\t"		\
				     "movec %0,%%cacr"		\
				     : "=&d" (_tmp)		\
				     : "di" (FLUSH_I_AND_D));	\
	}							\
})

#define __flush_cache_030()					\
({								\
	if (CPU_IS_020_OR_030) {				\
		unsigned long _tmp;				\
		__asm__ __volatile__("movec %%cacr,%0\n\t"	\
				     "orw %1,%0\n\t"		\
				     "movec %0,%%cacr"		\
				     : "=&d" (_tmp)		\
				     : "di" (FLUSH_I_AND_D));	\
	}							\
})
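
/*
 * __flush_cache_030() is a no-op on everything except the 68020/030.
 * The mm/range/page flushes below rely on this: the 68040/060 caches
 * are physically mapped, so changes to virtual mappings do not require
 * a flush on those parts.
 */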

#define flush_cache_all() __flush_cache_all()

#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vmap_early(start, end)	do { } while (0)
#define flush_cache_vunmap(start, end)		flush_cache_all()

static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (mm == current->mm)
		__flush_cache_030();
}

#define flush_cache_dup_mm(mm)			flush_cache_mm(mm)

/*
 * Historically flush_cache_range/flush_cache_page had to be macros to
 * avoid a circular dependency on linux/mm.h (which includes this file);
 * they are plain inline functions now.
 */
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
	if (vma->vm_mm == current->mm)
		__flush_cache_030();
}

static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr, unsigned long pfn)
{
	if (vma->vm_mm == current->mm)
		__flush_cache_030();
}

/*
 * Push the pages at the kernel virtual address and clear the icache.
 * RZ: use cpush %bc instead of cpush %dc, cinv %ic
 */
static inline void __flush_pages_to_ram(void *vaddr, unsigned int nr)
{
	if (CPU_IS_COLDFIRE) {
		unsigned long addr, start, end;
		/* Round down to the start of the first page */
		addr = ((unsigned long) vaddr) & ~(PAGE_SIZE - 1);
		start = addr & ICACHE_SET_MASK;
		end = (addr + nr * PAGE_SIZE - 1) & ICACHE_SET_MASK;
		/*
		 * The line index wraps around within the cache, so a
		 * range that crosses the top is swept in two passes.
		 */
		if (start > end) {
			flush_cf_bcache(0, end);
			end = ICACHE_MAX_ADDR;
		}
		flush_cf_bcache(start, end);
	} else if (CPU_IS_040_OR_060) {
		unsigned long paddr = __pa(vaddr);

		/* cpushp pushes and invalidates one whole page per operation */
		do {
			__asm__ __volatile__("nop\n\t"
					     ".chip 68040\n\t"
					     "cpushp %%bc,(%0)\n\t"
					     ".chip 68k"
					     : : "a" (paddr));
			paddr += PAGE_SIZE;
		} while (--nr);
	} else {
		unsigned long _tmp;
		__asm__ __volatile__("movec %%cacr,%0\n\t"
				     "orw %1,%0\n\t"
				     "movec %0,%%cacr"
				     : "=&d" (_tmp)
				     : "di" (FLUSH_I));
	}
}

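/*
 * Setting ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE to 1 signals the generic
 * cache-flushing code that this architecture provides real
 * flush_dcache_page()/flush_dcache_folio() implementations instead of
 * the empty defaults.
 */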
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
#define flush_dcache_page(page)	__flush_pages_to_ram(page_address(page), 1)
#define flush_dcache_folio(folio)		\
	__flush_pages_to_ram(folio_address(folio), folio_nr_pages(folio))
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_icache_pages(vma, page, nr)	\
	__flush_pages_to_ram(page_address(page), nr)

extern void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
				   unsigned long addr, int len);
extern void flush_icache_range(unsigned long address, unsigned long endaddr);
extern void flush_icache_user_range(unsigned long address,
				    unsigned long endaddr);
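
/*
 * Illustrative only: flush_icache_range() is the interface callers such
 * as the module loader use after writing instructions, e.g.
 *
 *	memcpy(dst, insns, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 */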

static inline void copy_to_user_page(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, void *src, int len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	flush_icache_user_page(vma, page, vaddr, len);
}

static inline void copy_from_user_page(struct vm_area_struct *vma,
				       struct page *page, unsigned long vaddr,
				       void *dst, void *src, int len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
}
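
/*
 * copy_to_user_page() is what access_process_vm() and ptrace use when
 * writing into another process's memory, e.g. a debugger planting a
 * breakpoint: the final flush_icache_user_page() keeps the icache
 * coherent with the newly written instructions. Reads through
 * copy_from_user_page() only need the cache pushed beforehand.
 */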

#endif /* _M68K_CACHEFLUSH_H */