/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009, Wind River Systems Inc
 * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/cpuinfo.h>

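/*
 * Write back and invalidate the data cache lines covering [start, end).
 * The Nios II flushd instruction operates on a single cache line, so the
 * bounds are first aligned to line boundaries. A range larger than the
 * cache itself is clamped to one cache-size worth of lines, which already
 * touches every line in the cache.
 */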
static void __flush_dcache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.dcache_line_size - 1);
	end += (cpuinfo.dcache_line_size - 1);
	end &= ~(cpuinfo.dcache_line_size - 1);

	if (end > start + cpuinfo.dcache_size)
		end = start + cpuinfo.dcache_size;

	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
		__asm__ __volatile__ ("   flushd 0(%0)\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
}

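/*
 * Invalidate data cache lines over [start, end) without writing dirty
 * data back: initda simply discards each line. Callers must ensure any
 * cached data in the range is disposable, e.g. a buffer about to be
 * refilled by device DMA.
 */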
static void __invalidate_dcache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.dcache_line_size - 1);
	end += (cpuinfo.dcache_line_size - 1);
	end &= ~(cpuinfo.dcache_line_size - 1);

	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
		__asm__ __volatile__ ("   initda 0(%0)\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
}

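/*
 * Invalidate the instruction cache over [start, end), clamped to one
 * icache-size worth of lines, then issue flushp so that instructions
 * already fetched into the pipeline are discarded as well.
 */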
static void __flush_icache(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(cpuinfo.icache_line_size - 1);
	end += (cpuinfo.icache_line_size - 1);
	end &= ~(cpuinfo.icache_line_size - 1);

	if (end > start + cpuinfo.icache_size)
		end = start + cpuinfo.icache_size;

	for (addr = start; addr < end; addr += cpuinfo.icache_line_size) {
		__asm__ __volatile__ ("   flushi %0\n"
					: /* Outputs */
					: /* Inputs  */ "r"(addr)
					/* : No clobber */);
	}
	__asm__ __volatile__ (" flushp\n");
}

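/*
 * A physical folio mapped at more than one user virtual address may be
 * cached under multiple aliases. Walk every shared VMA in the owning
 * address_space that maps this folio within the current mm and flush
 * each alias.
 */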
static void flush_aliases(struct address_space *mapping, struct folio *folio)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *vma;
	unsigned long flags;
	pgoff_t pgoff;
	unsigned long nr = folio_nr_pages(folio);

	pgoff = folio->index;

	flush_dcache_mmap_lock_irqsave(mapping, flags);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {
		unsigned long start;

		if (vma->vm_mm != mm)
			continue;
		if (!(vma->vm_flags & VM_MAYSHARE))
			continue;

		start = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		flush_cache_range(vma, start, start + nr * PAGE_SIZE);
	}
	flush_dcache_mmap_unlock_irqrestore(mapping, flags);
}

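/*
 * Without per-mm cache tags there is no cheap way to flush a single
 * address space, so the mm-wide hooks below fall back to a full flush
 * of both caches.
 */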
void flush_cache_all(void)
{
	__flush_dcache(0, cpuinfo.dcache_size);
	__flush_icache(0, cpuinfo.icache_size);
}

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

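/*
 * Range helpers used after the kernel writes instructions (module load,
 * code patching) or data that a non-coherent device will read. Both
 * conservatively flush the dcache first and then the icache, so an
 * icache refill always sees the freshly written bytes in memory.
 */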
void flush_icache_range(unsigned long start, unsigned long end)
{
	__flush_dcache(start, end);
	__flush_icache(start, end);
}

void flush_dcache_range(unsigned long start, unsigned long end)
{
	__flush_dcache(start, end);
	__flush_icache(start, end);
}
EXPORT_SYMBOL(flush_dcache_range);

void invalidate_dcache_range(unsigned long start, unsigned long end)
{
	__invalidate_dcache(start, end);
}
EXPORT_SYMBOL(invalidate_dcache_range);
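/*
 * A minimal driver-side usage sketch (hypothetical buffer "buf" of
 * "len" bytes, device not cache-coherent):
 *
 *	flush_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 *	// ... start DMA that reads buf ...
 *	invalidate_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 *	// ... the CPU may now read data the device wrote into buf ...
 */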

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	__flush_dcache(start, end);
	if (vma == NULL || (vma->vm_flags & VM_EXEC))
		__flush_icache(start, end);
}

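/*
 * Flush nr pages' worth of the kernel mapping of @page; called for
 * executable user mappings so the icache never holds stale lines for
 * freshly written code.
 */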
void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
			unsigned int nr)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned long end = start + nr * PAGE_SIZE;

	__flush_dcache(start, end);
	__flush_icache(start, end);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
			unsigned long pfn)
{
	unsigned long start = vmaddr;
	unsigned long end = start + PAGE_SIZE;

	__flush_dcache(start, end);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache(start, end);
}

static void __flush_dcache_folio(struct folio *folio)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page. This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	unsigned long start = (unsigned long)folio_address(folio);

	__flush_dcache(start, start + folio_size(folio));
}

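/*
 * Lazy flush protocol: when the kernel dirties a page-cache folio that
 * currently has no user mappings, just clear PG_dcache_clean and defer
 * the work until update_mmu_cache_range() installs a user PTE.
 * Otherwise write back the kernel alias now and flush any user aliases
 * along with it.
 */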
void flush_dcache_folio(struct folio *folio)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (is_zero_pfn(folio_pfn(folio)))
		return;

	mapping = folio_flush_mapping(folio);

	/* Flush this page if there are aliases. */
	if (mapping && !mapping_mapped(mapping)) {
		clear_bit(PG_dcache_clean, &folio->flags);
	} else {
		__flush_dcache_folio(folio);
		if (mapping) {
			unsigned long start = (unsigned long)folio_address(folio);
			flush_aliases(mapping, folio);
			flush_icache_range(start, start + folio_size(folio));
		}
		set_bit(PG_dcache_clean, &folio->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_folio);

void flush_dcache_page(struct page *page)
{
	flush_dcache_folio(page_folio(page));
}
EXPORT_SYMBOL(flush_dcache_page);

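/*
 * Called after nr PTEs have been installed. Reload the TLB entry, then
 * settle any flush deferred by flush_dcache_folio(): if PG_dcache_clean
 * was clear, write back the kernel alias, flush remaining user aliases,
 * and invalidate the icache when the new mapping is executable.
 */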
void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
		unsigned long address, pte_t *ptep, unsigned int nr)
{
	pte_t pte = *ptep;
	unsigned long pfn = pte_pfn(pte);
	struct folio *folio;
	struct address_space *mapping;

	reload_tlb_page(vma, address, pte);

	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (is_zero_pfn(pfn))
		return;

	folio = page_folio(pfn_to_page(pfn));
	if (!test_and_set_bit(PG_dcache_clean, &folio->flags))
		__flush_dcache_folio(folio);

	mapping = folio_flush_mapping(folio);
	if (mapping) {
		flush_aliases(mapping, folio);
		if (vma->vm_flags & VM_EXEC)
			flush_icache_pages(vma, &folio->page,
					   folio_nr_pages(folio));
	}
}

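/*
 * The user-page copy/clear helpers flush the user alias (vaddr) before
 * touching the page and the kernel alias afterwards, so neither mapping
 * is left holding stale lines.
 */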
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *to)
{
	__flush_dcache(vaddr, vaddr + PAGE_SIZE);
	__flush_icache(vaddr, vaddr + PAGE_SIZE);
	copy_page(vto, vfrom);
	__flush_dcache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
	__flush_icache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
}

void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
	__flush_dcache(vaddr, vaddr + PAGE_SIZE);
	__flush_icache(vaddr, vaddr + PAGE_SIZE);
	clear_page(addr);
	__flush_dcache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
	__flush_icache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
}

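/*
 * Used by access_process_vm() and ptrace to read from or write into
 * another process's page through its kernel mapping. The user alias is
 * flushed first and the kernel-side range afterwards; the icache is only
 * invalidated for executable VMAs (e.g. when a debugger plants a
 * breakpoint).
 */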
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long user_vaddr,
			 void *dst, void *src, int len)
{
	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	__flush_dcache((unsigned long)src, (unsigned long)src + len);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache((unsigned long)src, (unsigned long)src + len);
}

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long user_vaddr,
		       void *dst, void *src, int len)
{
	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	__flush_dcache((unsigned long)dst, (unsigned long)dst + len);
	if (vma->vm_flags & VM_EXEC)
		__flush_icache((unsigned long)dst, (unsigned long)dst + len);
}