Linux Audio

Check our new training course

Loading...
Linux v6.13.7 — include/asm-generic/cacheflush.h (listing with original line numbers):
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef _ASM_GENERIC_CACHEFLUSH_H
  3#define _ASM_GENERIC_CACHEFLUSH_H
  4
  5#include <linux/instrumented.h>
  6
  7struct mm_struct;
  8struct vm_area_struct;
  9struct page;
 10struct address_space;
 11
 12/*
 13 * The cache doesn't need to be flushed when TLB entries change when
 14 * the cache is mapped to physical memory, not virtual memory
 15 */
   /*
    * Every helper below is a no-op default for architectures whose caches
    * are physically indexed/tagged (per the comment above).  An arch with
    * virtually indexed caches supplies a real implementation and #defines
    * the name, so the #ifndef guard skips the stub here.  Call-site
    * semantics are specified in Documentation/core-api/cachetlb.rst.
    */
   /* Flush the entire CPU cache. */
 16#ifndef flush_cache_all
 17static inline void flush_cache_all(void)
 18{
 19}
 20#endif
 21
   /* Flush all user cache state for address space @mm. */
 22#ifndef flush_cache_mm
 23static inline void flush_cache_mm(struct mm_struct *mm)
 24{
 25}
 26#endif
 27
   /* Variant of flush_cache_mm() used when an mm is being duplicated. */
 28#ifndef flush_cache_dup_mm
 29static inline void flush_cache_dup_mm(struct mm_struct *mm)
 30{
 31}
 32#endif
 33
   /* Flush cached user mappings in the range start..end of @vma. */
 34#ifndef flush_cache_range
 35static inline void flush_cache_range(struct vm_area_struct *vma,
 36				     unsigned long start,
 37				     unsigned long end)
 38{
 39}
 40#endif
 41
   /* Flush the single user page at @vmaddr in @vma, backed by @pfn. */
 42#ifndef flush_cache_page
 43static inline void flush_cache_page(struct vm_area_struct *vma,
 44				    unsigned long vmaddr,
 45				    unsigned long pfn)
 46{
 47}
 48#endif
 49
   /*
    * flush_dcache_page() defaults to a no-op.  ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
    * lets generic code test whether a real implementation exists: an arch
    * that provides one defines the symbol to 1; otherwise this block
    * supplies the stub and defines it to 0.
    */
 50#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
 51static inline void flush_dcache_page(struct page *page)
 52{
 53}
 54
 55#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
 56#endif
 57
   /* Lock/unlock hooks bracketing dcache flushing of @mapping; no-ops here. */
 58#ifndef flush_dcache_mmap_lock
 59static inline void flush_dcache_mmap_lock(struct address_space *mapping)
 60{
 61}
 62#endif
 63
 64#ifndef flush_dcache_mmap_unlock
 65static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
 66{
 67}
 68#endif
 69
   /* Make the icache coherent with the dcache for the range start..end. */
 70#ifndef flush_icache_range
 71static inline void flush_icache_range(unsigned long start, unsigned long end)
 72{
 73}
 74#endif
 75
   /* By default the user-address variant is identical to flush_icache_range(). */
 76#ifndef flush_icache_user_range
 77#define flush_icache_user_range flush_icache_range
 78#endif
 79
   /*
    * Flush the icache after @len bytes of a user page were written through a
    * kernel mapping — used by copy_to_user_page() below.  No-op by default.
    */
 80#ifndef flush_icache_user_page
 81static inline void flush_icache_user_page(struct vm_area_struct *vma,
 82					   struct page *page,
 83					   unsigned long addr, int len)
 84{
 85}
 86#endif
 87
   /*
    * Cache maintenance hooks around creating (vmap) and tearing down
    * (vunmap) kernel virtual mappings for start..end; all no-ops by
    * default.  flush_cache_vmap_early() is the variant usable before the
    * normal one is safe — NOTE(review): exact early-boot constraint is
    * defined by the arch overrides, not visible here.
    */
 88#ifndef flush_cache_vmap
 89static inline void flush_cache_vmap(unsigned long start, unsigned long end)
 90{
 91}
 92#endif
 93
 94#ifndef flush_cache_vmap_early
 95static inline void flush_cache_vmap_early(unsigned long start, unsigned long end)
 96{
 97}
 98#endif
 99
100#ifndef flush_cache_vunmap
101static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
102{
103}
104#endif
105
   /*
    * Copy kernel data into a user page that may be executed: tell the
    * sanitizers about the user-bound write (<linux/instrumented.h>), do
    * the copy, then flush the icache via flush_icache_user_page() so the
    * new contents are fetchable as instructions.
    */
106#ifndef copy_to_user_page
107#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
108	do { \
109		instrument_copy_to_user((void __user *)dst, src, len); \
110		memcpy(dst, src, len); \
111		flush_icache_user_page(vma, page, vaddr, len); \
112	} while (0)
113#endif
114
115
   /*
    * Copy data out of a user page: sanitizer hooks before and after a
    * plain memcpy — no cache maintenance is done on the read side.
    */
116#ifndef copy_from_user_page
117#define copy_from_user_page(vma, page, vaddr, dst, src, len)		  \
118	do {								  \
119		instrument_copy_from_user_before(dst, (void __user *)src, \
120						 len);			  \
121		memcpy(dst, src, len);					  \
122		instrument_copy_from_user_after(dst, (void __user *)src, len, \
123						0);			  \
124	} while (0)
125#endif
 
126
127#endif /* _ASM_GENERIC_CACHEFLUSH_H */
Linux v4.6 — the same header, include/asm-generic/cacheflush.h, at that release:
 1#ifndef __ASM_CACHEFLUSH_H
 2#define __ASM_CACHEFLUSH_H
 
 3
 4/* Keep includes the same across arches.  */
 5#include <linux/mm.h>
 
 
 
 
 6
 7/*
 8 * The cache doesn't need to be flushed when TLB entries change when
 9 * the cache is mapped to physical memory, not virtual memory
10 */
  /*
   * Physically mapped caches: every cache-flush hook collapses to an
   * empty statement.  The do { } while (0) form keeps each macro usable
   * as a single statement (e.g. in an unbraced if/else).
   */
11#define flush_cache_all()			do { } while (0)
12#define flush_cache_mm(mm)			do { } while (0)
13#define flush_cache_dup_mm(mm)			do { } while (0)
14#define flush_cache_range(vma, start, end)	do { } while (0)
15#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  /*
   * dcache/icache and vmap hooks, all no-ops in this generic header.
   * ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE is 0 so generic code knows
   * flush_dcache_page() does nothing here.
   */
16#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
17#define flush_dcache_page(page)			do { } while (0)
18#define flush_dcache_mmap_lock(mapping)		do { } while (0)
19#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
20#define flush_icache_range(start, end)		do { } while (0)
21#define flush_icache_page(vma,pg)		do { } while (0)
22#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
23#define flush_cache_vmap(start, end)		do { } while (0)
24#define flush_cache_vunmap(start, end)		do { } while (0)
 
 
 
 
 
25
26#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27	do { \
 
28		memcpy(dst, src, len); \
29		flush_icache_user_range(vma, page, vaddr, len); \
 
 
 
 
 
 
 
 
 
 
 
 
30	} while (0)
  /* Copy out of a user page: a bare memcpy, no cache maintenance on reads. */
31#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
32	memcpy(dst, src, len)
33
34#endif /* __ASM_CACHEFLUSH_H */