v4.17
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm/shmparam.h>
#include <asm/cache_info.h>

extern struct cache_info L1_cache_info[2];

#ifndef CONFIG_CPU_CACHE_ALIASING
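/*
 * Non-aliasing configuration: the cache index presumably never exceeds the
 * page size, so a physical page has a single cache colour and no alias
 * flushing is needed. update_mmu_cache() only preloads the TLB entry and
 * keeps the I-cache coherent for executable or PG_dcache_dirty pages.
 */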
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
		      pte_t * pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(*pte);
	unsigned long flags;

	if (!pfn_valid(pfn))
		return;

	if (vma->vm_mm == current->active_mm) {
		local_irq_save(flags);
		__nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
		__nds32__tlbop_rwr(*pte);
		__nds32__isb();
		local_irq_restore(flags);
	}
	page = pfn_to_page(pfn);

	if ((test_and_clear_bit(PG_dcache_dirty, &page->flags)) ||
	    (vma->vm_flags & VM_EXEC)) {

		if (!PageHighMem(page)) {
			cpu_cache_wbinval_page((unsigned long)
					       page_address(page),
					       vma->vm_flags & VM_EXEC);
		} else {
			unsigned long kaddr = (unsigned long)kmap_atomic(page);
			cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
			kunmap_atomic((void *)kaddr);
		}
	}
}
#else
extern pte_t va_present(struct mm_struct *mm, unsigned long addr);

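/*
 * Nonzero when two virtual addresses fall in different cache colours within
 * the SHMLBA window. E.g., assuming SHMLBA = 16KB and 4KB pages, addresses
 * 0x1000 and 0x3000 give ((0x1000 ^ 0x3000) & 0x3fff) = 0x2000, i.e. they
 * map to different colours and need an alias flush.
 */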
static inline unsigned long aliasing(unsigned long addr, unsigned long page)
{
	return ((addr & PAGE_MASK) ^ page) & (SHMLBA - 1);
}

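/*
 * kremap0()/kremap1() install a locked TLB entry at a fixed kernel window
 * (BASE_ADDR0/BASE_ADDR1), offset so the mapping shares the user address's
 * cache colour; the kernel can then touch the page without creating a new
 * alias. kunmap01() unlocks and invalidates such an entry.
 */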
static inline unsigned long kremap0(unsigned long uaddr, unsigned long pa)
{
	unsigned long kaddr, pte;

#define BASE_ADDR0 0xffffc000
	kaddr = BASE_ADDR0 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
	pte = (pa | PAGE_KERNEL);
	__nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
	__nds32__tlbop_rwlk(pte);
	__nds32__isb();
	return kaddr;
}

static inline void kunmap01(unsigned long kaddr)
{
	__nds32__tlbop_unlk(kaddr);
	__nds32__tlbop_inv(kaddr);
	__nds32__isb();
}

static inline unsigned long kremap1(unsigned long uaddr, unsigned long pa)
{
	unsigned long kaddr, pte;

#define BASE_ADDR1 0xffff8000
	kaddr = BASE_ADDR1 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
	pte = (pa | PAGE_KERNEL);
	__nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
	__nds32__tlbop_rwlk(pte);
	__nds32__isb();
	return kaddr;
}

void flush_cache_mm(struct mm_struct *mm)
{
	unsigned long flags;

	local_irq_save(flags);
	cpu_dcache_wbinval_all();
	cpu_icache_inval_all();
	local_irq_restore(flags);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
}

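/*
 * Beyond 8 pages a full D-cache writeback/invalidate is taken to be cheaper
 * than flushing page by page; on the per-page path only pages that are
 * actually present get flushed.
 */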
void flush_cache_range(struct vm_area_struct *vma,
		       unsigned long start, unsigned long end)
{
	unsigned long flags;

	if ((end - start) > 8 * PAGE_SIZE) {
		cpu_dcache_wbinval_all();
		if (vma->vm_flags & VM_EXEC)
			cpu_icache_inval_all();
		return;
	}
	local_irq_save(flags);
	while (start < end) {
		if (va_present(vma->vm_mm, start))
			cpu_cache_wbinval_page(start, vma->vm_flags & VM_EXEC);
		start += PAGE_SIZE;
	}
	local_irq_restore(flags);
	return;
}

void flush_cache_page(struct vm_area_struct *vma,
		      unsigned long addr, unsigned long pfn)
{
	unsigned long vto, flags;

	local_irq_save(flags);
	vto = kremap0(addr, pfn << PAGE_SHIFT);
	cpu_cache_wbinval_page(vto, vma->vm_flags & VM_EXEC);
	kunmap01(vto);
	local_irq_restore(flags);
}

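/*
 * vmap/vunmap may establish kernel mappings of any colour, so the safe
 * choice here is to write back and invalidate everything.
 */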
void flush_cache_vmap(unsigned long start, unsigned long end)
{
	cpu_dcache_wbinval_all();
	cpu_icache_inval_all();
}

void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	cpu_dcache_wbinval_all();
	cpu_icache_inval_all();
}

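/*
 * Non-highmem copy/clear: flush the user alias (vaddr) first, operate on
 * the kernel mapping, then flush the kernel alias so no stale lines survive
 * in either colour.
 */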
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *to)
{
	cpu_dcache_wbinval_page((unsigned long)vaddr);
	cpu_icache_inval_page((unsigned long)vaddr);
	copy_page(vto, vfrom);
	cpu_dcache_wbinval_page((unsigned long)vto);
	cpu_icache_inval_page((unsigned long)vto);
}

void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
	cpu_dcache_wbinval_page((unsigned long)vaddr);
	cpu_icache_inval_page((unsigned long)vaddr);
	clear_page(addr);
	cpu_dcache_wbinval_page((unsigned long)addr);
	cpu_icache_inval_page((unsigned long)addr);
}

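/*
 * Highmem-capable variants: the physical pages are mapped at colour-matched
 * kernel windows via kremap0()/kremap1(), so the copy or clear happens in
 * the same cache colour the user mapping will use.
 */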
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned long vto, vfrom, flags, kto, kfrom, pfrom, pto;
	kto = ((unsigned long)page_address(to) & PAGE_MASK);
	kfrom = ((unsigned long)page_address(from) & PAGE_MASK);
	pto = page_to_phys(to);
	pfrom = page_to_phys(from);

	local_irq_save(flags);
	if (aliasing(vaddr, (unsigned long)kfrom))
		cpu_dcache_wb_page((unsigned long)kfrom);
	vto = kremap0(vaddr, pto);
	vfrom = kremap1(vaddr, pfrom);
	copy_page((void *)vto, (void *)vfrom);
	kunmap01(vfrom);
	kunmap01(vto);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(copy_user_highpage);

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	unsigned long vto, flags, kto;

	kto = ((unsigned long)page_address(page) & PAGE_MASK);

	local_irq_save(flags);
	if (aliasing(kto, vaddr) && kto != 0) {
		cpu_dcache_inval_page(kto);
		cpu_icache_inval_page(kto);
	}
	vto = kremap0(vaddr, page_to_phys(page));
	clear_page((void *)vto);
	kunmap01(vto);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(clear_user_highpage);

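/*
 * If the page sits in the page cache but has no user mappings, the flush is
 * deferred by setting PG_dcache_dirty; update_mmu_cache() completes it when
 * a user mapping appears. Otherwise the kernel alias is written back now,
 * plus a colour-matched alias when the user colour differs.
 */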
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else {
		unsigned long kaddr, flags;

		kaddr = (unsigned long)page_address(page);
		local_irq_save(flags);
		cpu_dcache_wbinval_page(kaddr);
		if (mapping) {
			unsigned long vaddr, kto;

			vaddr = page->index << PAGE_SHIFT;
			if (aliasing(vaddr, kaddr)) {
				kto = kremap0(vaddr, page_to_phys(page));
				cpu_dcache_wbinval_page(kto);
				kunmap01(kto);
			}
		}
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

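/*
 * copy_to_user_page()/copy_from_user_page() (used e.g. by ptrace) go
 * through a colour-matched temporary mapping; a write into an executable
 * VMA additionally flushes the touched cache lines so the I-cache sees the
 * new instructions.
 */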
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, void *src, int len)
{
	unsigned long line_size, start, end, vto, flags;

	local_irq_save(flags);
	vto = kremap0(vaddr, page_to_phys(page));
	dst = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
	memcpy(dst, src, len);
	if (vma->vm_flags & VM_EXEC) {
		line_size = L1_cache_info[DCACHE].line_size;
		start = (unsigned long)dst & ~(line_size - 1);
		end = ((unsigned long)dst + len + line_size - 1) &
		      ~(line_size - 1);
		cpu_cache_wbinval_range(start, end, 1);
	}
	kunmap01(vto);
	local_irq_restore(flags);
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, void *src, int len)
{
	unsigned long vto, flags;

	local_irq_save(flags);
	vto = kremap0(vaddr, page_to_phys(page));
	src = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
	memcpy(dst, src, len);
	kunmap01(vto);
	local_irq_restore(flags);
}

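/*
 * Anonymous pages can carry dirty lines in the user colour; before the
 * kernel accesses such a page, write them back through a colour-matched
 * temporary mapping and, for executable VMAs, invalidate the stale I-cache
 * page.
 */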
void flush_anon_page(struct vm_area_struct *vma,
		     struct page *page, unsigned long vaddr)
{
	unsigned long kaddr, flags, ktmp;
	if (!PageAnon(page))
		return;

	if (vma->vm_mm != current->active_mm)
		return;

	local_irq_save(flags);
	if (vma->vm_flags & VM_EXEC)
		cpu_icache_inval_page(vaddr & PAGE_MASK);
	kaddr = (unsigned long)page_address(page);
	if (aliasing(vaddr, kaddr)) {
		ktmp = kremap0(vaddr, page_to_phys(page));
		cpu_dcache_wbinval_page(ktmp);
		kunmap01(ktmp);
	}
	local_irq_restore(flags);
}

void flush_kernel_dcache_page(struct page *page)
{
	unsigned long flags;
	local_irq_save(flags);
	cpu_dcache_wbinval_page((unsigned long)page_address(page));
	local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

void flush_kernel_vmap_range(void *addr, int size)
{
	unsigned long flags;
	local_irq_save(flags);
	cpu_dcache_wb_range((unsigned long)addr, (unsigned long)addr + size);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *addr, int size)
{
	unsigned long flags;
	local_irq_save(flags);
	cpu_dcache_inval_range((unsigned long)addr, (unsigned long)addr + size);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);

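/*
 * The range is aligned to the D-cache line size before the combined
 * writeback/invalidate; the final argument of cpu_cache_wbinval_range()
 * presumably requests I-cache invalidation as well.
 */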
void flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long line_size, flags;
	line_size = L1_cache_info[DCACHE].line_size;
	start = start & ~(line_size - 1);
	end = (end + line_size - 1) & ~(line_size - 1);
	local_irq_save(flags);
	cpu_cache_wbinval_range(start, end, 1);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_icache_range);

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long flags;
	local_irq_save(flags);
	cpu_cache_wbinval_page((unsigned long)page_address(page),
			       vma->vm_flags & VM_EXEC);
	local_irq_restore(flags);
}

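/*
 * Aliasing variant of update_mmu_cache(): the same TLB preload as above,
 * but the flush goes through page_address() directly; no highmem path is
 * handled here.
 */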
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
		      pte_t * pte)
{
	struct page *page;
	unsigned long flags;
	unsigned long pfn = pte_pfn(*pte);

	if (!pfn_valid(pfn))
		return;

	if (vma->vm_mm == current->active_mm) {
		local_irq_save(flags);
		__nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
		__nds32__tlbop_rwr(*pte);
		__nds32__isb();
		local_irq_restore(flags);
	}

	page = pfn_to_page(pfn);
	if (test_and_clear_bit(PG_dcache_dirty, &page->flags) ||
	    (vma->vm_flags & VM_EXEC)) {
		local_irq_save(flags);
		cpu_dcache_wbinval_page((unsigned long)page_address(page));
		local_irq_restore(flags);
	}
}
#endif
v5.9
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm/shmparam.h>
#include <asm/cache_info.h>

extern struct cache_info L1_cache_info[2];

void flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long line_size, flags;
	line_size = L1_cache_info[DCACHE].line_size;
	start = start & ~(line_size - 1);
	end = (end + line_size - 1) & ~(line_size - 1);
	local_irq_save(flags);
	cpu_cache_wbinval_range(start, end, 1);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_icache_range);

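/*
 * Unlike the v4.17 version, the page is mapped with kmap_atomic(), so the
 * flush also works for highmem pages.
 */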
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long flags;
	unsigned long kaddr;
	local_irq_save(flags);
	kaddr = (unsigned long)kmap_atomic(page);
	cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
	kunmap_atomic((void *)kaddr);
	local_irq_restore(flags);
}

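/*
 * New in this version: flushes only the len bytes just written into the
 * page (e.g. when instructions are patched via ptrace), not the whole page.
 */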
void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
			    unsigned long addr, int len)
{
	unsigned long kaddr;
	kaddr = (unsigned long)kmap_atomic(page) + (addr & ~PAGE_MASK);
	flush_icache_range(kaddr, kaddr + len);
	kunmap_atomic((void *)kaddr);
}

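/*
 * In v5.9 a single update_mmu_cache() serves both cache configurations;
 * the kmap_atomic() path replaces the former lowmem/highmem branches.
 */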
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
		      pte_t * pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(*pte);
	unsigned long flags;

	if (!pfn_valid(pfn))
		return;

	if (vma->vm_mm == current->active_mm) {
		local_irq_save(flags);
		__nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
		__nds32__tlbop_rwr(*pte);
		__nds32__isb();
		local_irq_restore(flags);
	}
	page = pfn_to_page(pfn);

	if ((test_and_clear_bit(PG_dcache_dirty, &page->flags)) ||
	    (vma->vm_flags & VM_EXEC)) {
		unsigned long kaddr;
		local_irq_save(flags);
		kaddr = (unsigned long)kmap_atomic(page);
		cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
		kunmap_atomic((void *)kaddr);
		local_irq_restore(flags);
	}
}
#ifdef CONFIG_CPU_CACHE_ALIASING
extern pte_t va_present(struct mm_struct *mm, unsigned long addr);

static inline unsigned long aliasing(unsigned long addr, unsigned long page)
{
	return ((addr & PAGE_MASK) ^ page) & (SHMLBA - 1);
}

static inline unsigned long kremap0(unsigned long uaddr, unsigned long pa)
{
	unsigned long kaddr, pte;

#define BASE_ADDR0 0xffffc000
	kaddr = BASE_ADDR0 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
	pte = (pa | PAGE_KERNEL);
	__nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
	__nds32__tlbop_rwlk(pte);
	__nds32__isb();
	return kaddr;
}

static inline void kunmap01(unsigned long kaddr)
{
	__nds32__tlbop_unlk(kaddr);
	__nds32__tlbop_inv(kaddr);
	__nds32__isb();
}

static inline unsigned long kremap1(unsigned long uaddr, unsigned long pa)
{
	unsigned long kaddr, pte;

#define BASE_ADDR1 0xffff8000
	kaddr = BASE_ADDR1 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
	pte = (pa | PAGE_KERNEL);
	__nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
	__nds32__tlbop_rwlk(pte);
	__nds32__isb();
	return kaddr;
}

void flush_cache_mm(struct mm_struct *mm)
{
	unsigned long flags;

	local_irq_save(flags);
	cpu_dcache_wbinval_all();
	cpu_icache_inval_all();
	local_irq_restore(flags);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
}

void flush_cache_range(struct vm_area_struct *vma,
		       unsigned long start, unsigned long end)
{
	unsigned long flags;

	if ((end - start) > 8 * PAGE_SIZE) {
		cpu_dcache_wbinval_all();
		if (vma->vm_flags & VM_EXEC)
			cpu_icache_inval_all();
		return;
	}
	local_irq_save(flags);
	while (start < end) {
		if (va_present(vma->vm_mm, start))
			cpu_cache_wbinval_page(start, vma->vm_flags & VM_EXEC);
		start += PAGE_SIZE;
	}
	local_irq_restore(flags);
	return;
}

void flush_cache_page(struct vm_area_struct *vma,
		      unsigned long addr, unsigned long pfn)
{
	unsigned long vto, flags;

	local_irq_save(flags);
	vto = kremap0(addr, pfn << PAGE_SHIFT);
	cpu_cache_wbinval_page(vto, vma->vm_flags & VM_EXEC);
	kunmap01(vto);
	local_irq_restore(flags);
}

void flush_cache_vmap(unsigned long start, unsigned long end)
{
	cpu_dcache_wbinval_all();
	cpu_icache_inval_all();
}

void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	cpu_dcache_wbinval_all();
	cpu_icache_inval_all();
}

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *to)
{
	cpu_dcache_wbinval_page((unsigned long)vaddr);
	cpu_icache_inval_page((unsigned long)vaddr);
	copy_page(vto, vfrom);
	cpu_dcache_wbinval_page((unsigned long)vto);
	cpu_icache_inval_page((unsigned long)vto);
}

void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
{
	cpu_dcache_wbinval_page((unsigned long)vaddr);
	cpu_icache_inval_page((unsigned long)vaddr);
	clear_page(addr);
	cpu_dcache_wbinval_page((unsigned long)addr);
	cpu_icache_inval_page((unsigned long)addr);
}

void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned long vto, vfrom, flags, kto, kfrom, pfrom, pto;
	kto = ((unsigned long)page_address(to) & PAGE_MASK);
	kfrom = ((unsigned long)page_address(from) & PAGE_MASK);
	pto = page_to_phys(to);
	pfrom = page_to_phys(from);

	local_irq_save(flags);
	if (aliasing(vaddr, (unsigned long)kfrom))
		cpu_dcache_wb_page((unsigned long)kfrom);
	vto = kremap0(vaddr, pto);
	vfrom = kremap1(vaddr, pfrom);
	copy_page((void *)vto, (void *)vfrom);
	kunmap01(vfrom);
	kunmap01(vto);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(copy_user_highpage);

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	unsigned long vto, flags, kto;

	kto = ((unsigned long)page_address(page) & PAGE_MASK);

	local_irq_save(flags);
	if (aliasing(kto, vaddr) && kto != 0) {
		cpu_dcache_inval_page(kto);
		cpu_icache_inval_page(kto);
	}
	vto = kremap0(vaddr, page_to_phys(page));
	clear_page((void *)vto);
	kunmap01(vto);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(clear_user_highpage);

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else {
		unsigned long kaddr, flags;

		kaddr = (unsigned long)page_address(page);
		local_irq_save(flags);
		cpu_dcache_wbinval_page(kaddr);
		if (mapping) {
			unsigned long vaddr, kto;

			vaddr = page->index << PAGE_SHIFT;
			if (aliasing(vaddr, kaddr)) {
				kto = kremap0(vaddr, page_to_phys(page));
				cpu_dcache_wbinval_page(kto);
				kunmap01(kto);
			}
		}
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, void *src, int len)
{
	unsigned long line_size, start, end, vto, flags;

	local_irq_save(flags);
	vto = kremap0(vaddr, page_to_phys(page));
	dst = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
	memcpy(dst, src, len);
	if (vma->vm_flags & VM_EXEC) {
		line_size = L1_cache_info[DCACHE].line_size;
		start = (unsigned long)dst & ~(line_size - 1);
		end = ((unsigned long)dst + len + line_size - 1) &
		      ~(line_size - 1);
		cpu_cache_wbinval_range(start, end, 1);
	}
	kunmap01(vto);
	local_irq_restore(flags);
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, void *src, int len)
{
	unsigned long vto, flags;

	local_irq_save(flags);
	vto = kremap0(vaddr, page_to_phys(page));
	src = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
	memcpy(dst, src, len);
	kunmap01(vto);
	local_irq_restore(flags);
}

void flush_anon_page(struct vm_area_struct *vma,
		     struct page *page, unsigned long vaddr)
{
	unsigned long kaddr, flags, ktmp;
	if (!PageAnon(page))
		return;

	if (vma->vm_mm != current->active_mm)
		return;

	local_irq_save(flags);
	if (vma->vm_flags & VM_EXEC)
		cpu_icache_inval_page(vaddr & PAGE_MASK);
	kaddr = (unsigned long)page_address(page);
	if (aliasing(vaddr, kaddr)) {
		ktmp = kremap0(vaddr, page_to_phys(page));
		cpu_dcache_wbinval_page(ktmp);
		kunmap01(ktmp);
	}
	local_irq_restore(flags);
}

void flush_kernel_dcache_page(struct page *page)
{
	unsigned long flags;
	local_irq_save(flags);
	cpu_dcache_wbinval_page((unsigned long)page_address(page));
	local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

void flush_kernel_vmap_range(void *addr, int size)
{
	unsigned long flags;
	local_irq_save(flags);
	cpu_dcache_wb_range((unsigned long)addr, (unsigned long)addr + size);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

void invalidate_kernel_vmap_range(void *addr, int size)
{
	unsigned long flags;
	local_irq_save(flags);
	cpu_dcache_inval_range((unsigned long)addr, (unsigned long)addr + size);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
#endif