/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009, Wind River Systems Inc
 * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/cpuinfo.h>

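/*
 * Write back and invalidate the data cache over [start, end).  The Nios II
 * flushd instruction operates on the line that the address indexes and
 * ignores the tag, so walking one dcache-size worth of addresses touches
 * every line; ranges larger than the cache are clamped accordingly.
 */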
static void __flush_dcache(unsigned long start, unsigned long end)
{
        unsigned long addr;

        start &= ~(cpuinfo.dcache_line_size - 1);
        end += (cpuinfo.dcache_line_size - 1);
        end &= ~(cpuinfo.dcache_line_size - 1);

        if (end > start + cpuinfo.dcache_size)
                end = start + cpuinfo.dcache_size;

        for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
                __asm__ __volatile__ (" flushd 0(%0)\n"
                                        : /* Outputs */
                                        : /* Inputs */ "r"(addr)
                                        /* : No clobber */);
        }
}

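/*
 * Invalidate, without writeback, the data cache over [start, end).  Unlike
 * flushd, the initda instruction checks the tag and acts only on a hit, so
 * the whole range must be walked and no clamping to the cache size is
 * possible.  Dirty data in the range is discarded.
 */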
static void __invalidate_dcache(unsigned long start, unsigned long end)
{
        unsigned long addr;

        start &= ~(cpuinfo.dcache_line_size - 1);
        end += (cpuinfo.dcache_line_size - 1);
        end &= ~(cpuinfo.dcache_line_size - 1);

        for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
                __asm__ __volatile__ (" initda 0(%0)\n"
                                        : /* Outputs */
                                        : /* Inputs */ "r"(addr)
                                        /* : No clobber */);
        }
}

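/*
 * Invalidate the instruction cache over [start, end).  flushi, like flushd,
 * works by cache index and ignores the tag, so the range is clamped to one
 * icache size.  The trailing flushp drains the pipeline so that stale
 * instructions already fetched are discarded.
 */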
static void __flush_icache(unsigned long start, unsigned long end)
{
        unsigned long addr;

        start &= ~(cpuinfo.icache_line_size - 1);
        end += (cpuinfo.icache_line_size - 1);
        end &= ~(cpuinfo.icache_line_size - 1);

        if (end > start + cpuinfo.icache_size)
                end = start + cpuinfo.icache_size;

        for (addr = start; addr < end; addr += cpuinfo.icache_line_size) {
                __asm__ __volatile__ (" flushi %0\n"
                                        : /* Outputs */
                                        : /* Inputs */ "r"(addr)
                                        /* : No clobber */);
        }
        __asm__ __volatile__ (" flushp\n");
}

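/*
 * Flush the user-space aliases of @folio in the current mm.  A virtually
 * indexed cache can hold separate lines for separate virtual mappings of
 * the same physical page, so every shared VMA that maps the folio gets
 * its range flushed via flush_cache_range().
 */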
static void flush_aliases(struct address_space *mapping, struct folio *folio)
{
        struct mm_struct *mm = current->active_mm;
        struct vm_area_struct *vma;
        unsigned long flags;
        pgoff_t pgoff;
        unsigned long nr = folio_nr_pages(folio);

        pgoff = folio->index;

        flush_dcache_mmap_lock_irqsave(mapping, flags);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {
                unsigned long start;

                if (vma->vm_mm != mm)
                        continue;
                if (!(vma->vm_flags & VM_MAYSHARE))
                        continue;

                start = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
                flush_cache_range(vma, start, start + nr * PAGE_SIZE);
        }
        flush_dcache_mmap_unlock_irqrestore(mapping, flags);
}

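/*
 * flush_cache_all() flushes both caches in their entirety.  Cache lines
 * cannot be attributed to a single address space, so the per-mm hooks
 * below simply fall back to it.
 */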
void flush_cache_all(void)
{
        __flush_dcache(0, cpuinfo.dcache_size);
        __flush_icache(0, cpuinfo.icache_size);
}

void flush_cache_mm(struct mm_struct *mm)
{
        flush_cache_all();
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
        flush_cache_all();
}

void flush_icache_range(unsigned long start, unsigned long end)
{
        __flush_dcache(start, end);
        __flush_icache(start, end);
}

void flush_dcache_range(unsigned long start, unsigned long end)
{
        __flush_dcache(start, end);
        __flush_icache(start, end);
}
EXPORT_SYMBOL(flush_dcache_range);

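/*
 * Discard cached data over a range without writing it back, typically for
 * a buffer that a device has just written via non-coherent DMA.
 */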
void invalidate_dcache_range(unsigned long start, unsigned long end)
{
        __invalidate_dcache(start, end);
}
EXPORT_SYMBOL(invalidate_dcache_range);

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                        unsigned long end)
{
        __flush_dcache(start, end);
        if (vma == NULL || (vma->vm_flags & VM_EXEC))
                __flush_icache(start, end);
}

void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
                        unsigned int nr)
{
        unsigned long start = (unsigned long) page_address(page);
        unsigned long end = start + nr * PAGE_SIZE;

        __flush_dcache(start, end);
        __flush_icache(start, end);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
                        unsigned long pfn)
{
        unsigned long start = vmaddr;
        unsigned long end = start + PAGE_SIZE;

        __flush_dcache(start, end);
        if (vma->vm_flags & VM_EXEC)
                __flush_icache(start, end);
}

static void __flush_dcache_folio(struct folio *folio)
{
        /*
         * Writeback any data associated with the kernel mapping of this
         * folio. This ensures that data in the physical page is mutually
         * coherent with the kernel's mapping.
         */
        unsigned long start = (unsigned long)folio_address(folio);

        __flush_dcache(start, start + folio_size(folio));
}

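/*
 * Called when the kernel writes to a page-cache folio.  If the folio has a
 * mapping but is not yet mapped into user space, the flush is deferred:
 * PG_dcache_clean is cleared and update_mmu_cache_range() completes the
 * flush when the first user mapping is installed.  Otherwise the kernel
 * alias is written back now and any user aliases are flushed too.
 */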
void flush_dcache_folio(struct folio *folio)
{
        struct address_space *mapping;

        /*
         * The zero page is never written to, so never has any dirty
         * cache lines, and therefore never needs to be flushed.
         */
        if (is_zero_pfn(folio_pfn(folio)))
                return;

        mapping = folio_flush_mapping(folio);

        /* If nothing maps the folio yet, defer to update_mmu_cache_range(). */
        if (mapping && !mapping_mapped(mapping)) {
                clear_bit(PG_dcache_clean, &folio->flags);
        } else {
                __flush_dcache_folio(folio);
                if (mapping) {
                        unsigned long start = (unsigned long)folio_address(folio);
                        flush_aliases(mapping, folio);
                        flush_icache_range(start, start + folio_size(folio));
                }
                set_bit(PG_dcache_clean, &folio->flags);
        }
}
EXPORT_SYMBOL(flush_dcache_folio);

void flush_dcache_page(struct page *page)
{
        flush_dcache_folio(page_folio(page));
}
EXPORT_SYMBOL(flush_dcache_page);

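/*
 * Called after a PTE covering @address has been installed or updated.
 * Reload the TLB entry and finish any flush that flush_dcache_folio()
 * deferred while the folio had no user mappings.
 */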
void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
                unsigned long address, pte_t *ptep, unsigned int nr)
{
        pte_t pte = *ptep;
        unsigned long pfn = pte_pfn(pte);
        struct folio *folio;
        struct address_space *mapping;

        reload_tlb_page(vma, address, pte);

        if (!pfn_valid(pfn))
                return;

        /*
         * The zero page is never written to, so never has any dirty
         * cache lines, and therefore never needs to be flushed.
         */
        if (is_zero_pfn(pfn))
                return;

        folio = page_folio(pfn_to_page(pfn));
        if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
                __flush_dcache_folio(folio);

        mapping = folio_flush_mapping(folio);
        if (mapping) {
                flush_aliases(mapping, folio);
                if (vma->vm_flags & VM_EXEC)
                        flush_icache_pages(vma, &folio->page,
                                           folio_nr_pages(folio));
        }
}

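/*
 * copy_user_page() and clear_user_page() write through the kernel mapping
 * of a page that user space will read through @vaddr.  Flushing the user
 * alias before and the kernel alias after keeps the two views coherent.
 */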
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
                        struct page *to)
{
        __flush_dcache(vaddr, vaddr + PAGE_SIZE);
        __flush_icache(vaddr, vaddr + PAGE_SIZE);
        copy_page(vto, vfrom);
        __flush_dcache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
        __flush_icache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
}

void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
        __flush_dcache(vaddr, vaddr + PAGE_SIZE);
        __flush_icache(vaddr, vaddr + PAGE_SIZE);
        clear_page(addr);
        __flush_dcache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
        __flush_icache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
}

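/*
 * Read from a user-visible page through its kernel mapping: @src is the
 * kernel address within @page, @user_vaddr its address in the target
 * process (used e.g. by access_process_vm()).  The user alias is flushed
 * first so the kernel mapping sees current data.
 */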
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                        unsigned long user_vaddr,
                        void *dst, void *src, int len)
{
        flush_cache_page(vma, user_vaddr, page_to_pfn(page));
        memcpy(dst, src, len);
        __flush_dcache((unsigned long)src, (unsigned long)src + len);
        if (vma->vm_flags & VM_EXEC)
                __flush_icache((unsigned long)src, (unsigned long)src + len);
}

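/*
 * Write into a user-visible page through its kernel mapping (@dst), e.g.
 * when ptrace plants a breakpoint in another process's text.  The dcache
 * is written back and, for executable mappings, the icache invalidated so
 * the target fetches the new instructions.
 */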
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                        unsigned long user_vaddr,
                        void *dst, void *src, int len)
{
        flush_cache_page(vma, user_vaddr, page_to_pfn(page));
        memcpy(dst, src, len);
        __flush_dcache((unsigned long)dst, (unsigned long)dst + len);
        if (vma->vm_flags & VM_EXEC)
                __flush_icache((unsigned long)dst, (unsigned long)dst + len);
}