/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_CACHEFLUSH_H
#define _ASM_GENERIC_CACHEFLUSH_H

#include <linux/instrumented.h>

struct mm_struct;
struct vm_area_struct;
struct page;
struct address_space;

/*
 * The cache does not need to be flushed when TLB entries change,
 * because these defaults assume the cache is indexed by physical
 * address rather than virtual address.
 */
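
/*
 * Every stub below may be overridden: an architecture with virtually
 * indexed caches provides its own versions in <asm/cacheflush.h> and
 * then includes this file to pick up the remaining defaults.
 */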
#ifndef flush_cache_all
static inline void flush_cache_all(void)
{
}
#endif

#ifndef flush_cache_mm
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
#endif

#ifndef flush_cache_dup_mm
static inline void flush_cache_dup_mm(struct mm_struct *mm)
{
}
#endif

#ifndef flush_cache_range
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
}
#endif

#ifndef flush_cache_page
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr,
				    unsigned long pfn)
{
}
#endif

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
static inline void flush_dcache_page(struct page *page)
{
}

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#endif
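
/*
 * ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE lets generic code test at compile
 * time whether flush_dcache_page() actually does anything; an
 * architecture that supplies a real implementation defines it to 1.
 */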

#ifndef flush_dcache_mmap_lock
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}
#endif

#ifndef flush_dcache_mmap_unlock
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}
#endif

#ifndef flush_icache_range
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
}
#endif

#ifndef flush_icache_user_range
#define flush_icache_user_range flush_icache_range
#endif
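
/*
 * By default, flushing the instruction cache for a user-space range is
 * the same operation as flushing it for a kernel range; architectures
 * that must treat user mappings differently override the alias above.
 */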

#ifndef flush_icache_user_page
static inline void flush_icache_user_page(struct vm_area_struct *vma,
					  struct page *page,
					  unsigned long addr, int len)
{
}
#endif
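
/*
 * flush_icache_user_page() is called after the kernel writes
 * instructions into a user page, e.g. from copy_to_user_page() below,
 * so that a core with an incoherent I-cache sees the new code.
 */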

#ifndef flush_cache_vmap
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}
#endif

#ifndef flush_cache_vmap_early
static inline void flush_cache_vmap_early(unsigned long start, unsigned long end)
{
}
#endif

#ifndef flush_cache_vunmap
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
#endif
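
/*
 * The vmap/vunmap hooks let a virtually indexed cache resolve aliases
 * when kernel virtual mappings are created or torn down.
 * flush_cache_vmap_early() is, as far as this editor can tell, the
 * variant intended for early boot paths such as the initial per-CPU
 * area setup, where the normal flush machinery is not yet available.
 */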

#ifndef copy_to_user_page
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do { \
		instrument_copy_to_user((void __user *)dst, src, len); \
		memcpy(dst, src, len); \
		flush_icache_user_page(vma, page, vaddr, len); \
	} while (0)
#endif
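
/*
 * Illustrative use (a simplified sketch of the pattern in generic code
 * such as __access_remote_vm(), not a copy of it): writing into another
 * process's page, e.g. for a ptrace code poke:
 *
 *	maddr = kmap_local_page(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, len);
 *	set_page_dirty_lock(page);
 *	kunmap_local(maddr);
 */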
#ifndef copy_from_user_page
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do { \
		instrument_copy_from_user_before(dst, (void __user *)src, \
						 len); \
		memcpy(dst, src, len); \
		instrument_copy_from_user_after(dst, (void __user *)src, len, \
						0); \
	} while (0)
#endif
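
/*
 * The instrument_copy_*() hooks come from <linux/instrumented.h> and
 * tell sanitizers such as KASAN and KMSAN about these transfers, since
 * the raw memcpy() bypasses the usual uaccess instrumentation.
 */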

#endif /* _ASM_GENERIC_CACHEFLUSH_H */