Linux kernel source listing: asm-generic cacheflush.h — two kernel versions follow.
v5.4
 1/* SPDX-License-Identifier: GPL-2.0 */
 2#ifndef __ASM_CACHEFLUSH_H
 3#define __ASM_CACHEFLUSH_H
 4
 5/* Keep includes the same across arches.  */
 6#include <linux/mm.h>
 7
 8#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
 9
10/*
11 * The cache doesn't need to be flushed when TLB entries change when
12 * the cache is mapped to physical memory, not virtual memory
13 */
 
/* Flush the whole CPU cache: no-op — the cache here is physically indexed. */
static inline void flush_cache_all(void)
{
}
/* Flush cache lines belonging to an address space: no-op on this arch. */
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
/* Cache maintenance when duplicating an mm (fork path): no-op on this arch. */
static inline void flush_cache_dup_mm(struct mm_struct *mm)
{
}
/* Flush a user virtual-address range of a VMA: no-op on this arch. */
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
}
/* Flush a single user page of a VMA: no-op on this arch. */
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr,
				    unsigned long pfn)
{
}
/*
 * D-cache maintenance for a page-cache page: no-op, consistent with
 * ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE being 0 in this header.
 */
static inline void flush_dcache_page(struct page *page)
{
}
/* Lock side of the flush_dcache serialization pair: nothing to do here. */
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}
/* Unlock side of the flush_dcache serialization pair: nothing to do here. */
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}
/* Sync the I-cache with the D-cache for a kernel address range: no-op. */
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
}
/* Per-page I-cache sync hook: no-op on this arch. */
static inline void flush_icache_page(struct vm_area_struct *vma,
				     struct page *page)
{
}
/* I-cache sync after writing into a user page: no-op on this arch. */
static inline void flush_icache_user_range(struct vm_area_struct *vma,
					   struct page *page,
					   unsigned long addr, int len)
{
}
/* Cache maintenance before a new vmalloc/vmap mapping is used: no-op. */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}
/* Cache maintenance before a vmalloc/vmap mapping is torn down: no-op. */
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
/*
 * Copy data into a user page, then sync the I-cache for the written bytes.
 * The do { } while (0) wrapper keeps the macro statement-safe.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do { \
		memcpy(dst, src, len); \
		flush_icache_user_range(vma, page, vaddr, len); \
	} while (0)
78#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
79	memcpy(dst, src, len)
 
80
81#endif /* __ASM_CACHEFLUSH_H */
v5.14.15
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef _ASM_GENERIC_CACHEFLUSH_H
  3#define _ASM_GENERIC_CACHEFLUSH_H
  4
  5struct mm_struct;
  6struct vm_area_struct;
  7struct page;
  8struct address_space;
  9
 10/*
 11 * The cache doesn't need to be flushed when TLB entries change when
 12 * the cache is mapped to physical memory, not virtual memory
 13 */
 14#ifndef flush_cache_all
 15static inline void flush_cache_all(void)
 16{
 17}
 18#endif
 19
 20#ifndef flush_cache_mm
 21static inline void flush_cache_mm(struct mm_struct *mm)
 22{
 23}
 24#endif
 25
 26#ifndef flush_cache_dup_mm
 27static inline void flush_cache_dup_mm(struct mm_struct *mm)
 28{
 29}
 30#endif
 31
 32#ifndef flush_cache_range
 33static inline void flush_cache_range(struct vm_area_struct *vma,
 34				     unsigned long start,
 35				     unsigned long end)
 36{
 37}
 38#endif
 39
 40#ifndef flush_cache_page
 41static inline void flush_cache_page(struct vm_area_struct *vma,
 42				    unsigned long vmaddr,
 43				    unsigned long pfn)
 44{
 45}
 46#endif
 47
 48#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
 49static inline void flush_dcache_page(struct page *page)
 50{
 51}
 52#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
 53#endif
 54
 55
 56#ifndef flush_dcache_mmap_lock
 57static inline void flush_dcache_mmap_lock(struct address_space *mapping)
 58{
 59}
 60#endif
 61
 62#ifndef flush_dcache_mmap_unlock
 63static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
 64{
 65}
 66#endif
 67
 68#ifndef flush_icache_range
 69static inline void flush_icache_range(unsigned long start, unsigned long end)
 70{
 71}
 72#endif
 73
 74#ifndef flush_icache_user_range
 75#define flush_icache_user_range flush_icache_range
 76#endif
 77
 78#ifndef flush_icache_page
 79static inline void flush_icache_page(struct vm_area_struct *vma,
 80				     struct page *page)
 81{
 82}
 83#endif
 84
 85#ifndef flush_icache_user_page
 86static inline void flush_icache_user_page(struct vm_area_struct *vma,
 87					   struct page *page,
 88					   unsigned long addr, int len)
 89{
 90}
 91#endif
 92
 93#ifndef flush_cache_vmap
 94static inline void flush_cache_vmap(unsigned long start, unsigned long end)
 95{
 96}
 97#endif
 98
 99#ifndef flush_cache_vunmap
100static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
101{
102}
103#endif
104
#ifndef copy_to_user_page
/*
 * Copy data into a user page, then sync the I-cache for the written bytes.
 * The do { } while (0) wrapper keeps the macro statement-safe.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do { \
		memcpy(dst, src, len); \
		flush_icache_user_page(vma, page, vaddr, len); \
	} while (0)
#endif
113#ifndef copy_from_user_page
114#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
115	memcpy(dst, src, len)
116#endif
117
118#endif /* _ASM_GENERIC_CACHEFLUSH_H */