/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/setup.h>

/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
EXPORT_SYMBOL_GPL(__flush_cache_all);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(flush_icache_range);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(local_flush_icache_range);
void (*__flush_icache_user_range)(unsigned long start, unsigned long end);
void (*__local_flush_icache_user_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(__local_flush_icache_user_range);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);

/* MIPS specific cache operations */
void (*local_flush_data_cache_page)(void * addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);

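/*
 * On a system with noncoherent DMA the CPU caches must be written back
 * and/or invalidated by hand around DMA transfers; the hooks below are
 * filled in by the CPU-specific cache init routines in cpu_cache_init().
 */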
#ifdef CONFIG_DMA_NONCOHERENT

/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#endif /* CONFIG_DMA_NONCOHERENT */

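/*
 * cacheflush(2): make the user range [addr, addr + bytes) coherent
 * between the D-cache and the I-cache.  The cache argument nominally
 * selects ICACHE, DCACHE or BCACHE (see <asm/cachectl.h>), but as the
 * comment below notes, every request is treated as BCACHE.  A userspace
 * caller (a hedged sketch, not part of this file) might do:
 *
 *	#include <sys/cachectl.h>
 *	cacheflush(jit_buf, jit_len, BCACHE);	/- after writing JIT code -/
 */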
/*
 * We could optimize the case where the cache argument is not BCACHE but
 * that seems very atypical use ...
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok((void __user *) addr, bytes))
		return -EFAULT;

	__flush_icache_user_range(addr, addr + bytes);

	return 0;
}

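/*
 * Flush a page's kernel mapping from the D-cache.  If the page belongs
 * to a file mapping that is not currently mapped into any address space,
 * the flush is deferred: the page is only marked dirty here and flushed
 * later by __update_cache() when it is actually mapped.
 */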
void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);
	unsigned long addr;

	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certainly going to
	 * get faulted into the tlb (and thus flushed) anyways.
	 */
	if (PageHighMem(page))
		addr = (unsigned long)kmap_atomic(page);
	else
		addr = (unsigned long)page_address(page);

	flush_data_cache_page(addr);

	if (PageHighMem(page))
		kunmap_atomic((void *)addr);
}

EXPORT_SYMBOL(__flush_dcache_page);

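/*
 * Anonymous pages can be mapped at a user address whose cache colour
 * differs from that of the kernel's own mapping of the page.  If the
 * page is mapped and has no dirty flush pending, flush it through a
 * temporary kernel mapping at the user colour (kmap_coherent());
 * otherwise flush the kernel mapping directly.
 */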
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapcount(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else {
			flush_data_cache_page(addr);
		}
	}
}

EXPORT_SYMBOL(__flush_anon_page);

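/*
 * Called once a PTE has been installed (via update_mmu_cache()): if the
 * page has a D-cache flush pending from __flush_dcache_page() above,
 * carry it out now so the new user mapping sees coherent data and, for
 * executable mappings, so the I-cache does not fetch stale lines.
 */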
void __update_cache(unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (Page_dcache_dirty(page)) {
		if (PageHighMem(page))
			addr = (unsigned long)kmap_atomic(page);
		else
			addr = (unsigned long)page_address(page);

		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);

		if (PageHighMem(page))
			kunmap_atomic((void *)addr);

		ClearPageDcacheDirty(page);
	}
}

unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);

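/*
 * protection_map[] is indexed by the VM_READ, VM_WRITE, VM_EXEC and
 * VM_SHARED bits of a VMA; entries 0-7 cover private (copy-on-write)
 * mappings and entries 8-15 their shared counterparts.  With RIXI the
 * hardware read-inhibit/execute-inhibit bits let each combination be
 * expressed exactly; without it we fall back to the classic protections.
 */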
static inline void setup_protection_map(void)
{
	if (cpu_has_rixi) {
		protection_map[0] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT);

		protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
		protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
		protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
		protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);

	} else {
		protection_map[0] = PAGE_NONE;
		protection_map[1] = PAGE_READONLY;
		protection_map[2] = PAGE_COPY;
		protection_map[3] = PAGE_COPY;
		protection_map[4] = PAGE_READONLY;
		protection_map[5] = PAGE_READONLY;
		protection_map[6] = PAGE_COPY;
		protection_map[7] = PAGE_COPY;
		protection_map[8] = PAGE_NONE;
		protection_map[9] = PAGE_READONLY;
		protection_map[10] = PAGE_SHARED;
		protection_map[11] = PAGE_SHARED;
		protection_map[12] = PAGE_READONLY;
		protection_map[13] = PAGE_READONLY;
		protection_map[14] = PAGE_SHARED;
		protection_map[15] = PAGE_SHARED;
	}
}

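/*
 * Pick the cache support matching the CPU and let its (weakly declared)
 * init routine fill in the indirection pointers above; variants that are
 * not built into this kernel are never called.
 */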
void cpu_cache_init(void)
{
	if (cpu_has_3k_cache) {
		extern void __weak r3k_cache_init(void);

		r3k_cache_init();
	}
	if (cpu_has_6k_cache) {
		extern void __weak r6k_cache_init(void);

		r6k_cache_init();
	}
	if (cpu_has_4k_cache) {
		extern void __weak r4k_cache_init(void);

		r4k_cache_init();
	}
	if (cpu_has_8k_cache) {
		extern void __weak r8k_cache_init(void);

		r8k_cache_init();
	}
	if (cpu_has_tx39_cache) {
		extern void __weak tx39_cache_init(void);

		tx39_cache_init();
	}

	if (cpu_has_octeon_cache) {
		extern void __weak octeon_cache_init(void);

		octeon_cache_init();
	}

	setup_protection_map();
}

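/*
 * Weak default used to decide whether a physical address should be
 * mapped uncached for /dev/mem style accesses: uncached if the file was
 * opened with O_DSYNC or if the address lies beyond the RAM covered by
 * high_memory.  Platforms may override this.
 */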
int __weak __uncached_access(struct file *file, unsigned long addr)
{
	if (file->f_flags & O_DSYNC)
		return 1;

	return addr >= __pa(high_memory);
}