/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/highmem.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>

/*
 * Cache operations.  These function pointers are filled in at boot by
 * the CPU-specific cache_init routine (see cpu_cache_init() below); the
 * rest of the kernel calls through them for cache maintenance.
 */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(flush_icache_range);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(local_flush_icache_range);
void (*__flush_icache_user_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(__flush_icache_user_range);
void (*__local_flush_icache_user_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(__local_flush_icache_user_range);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);
void (*__invalidate_kernel_vmap_range)(unsigned long vaddr, int size);

/*
 * MIPS specific cache operations.  flush_cache_sigtramp makes a freshly
 * written signal-return trampoline visible to the I-cache; the remaining
 * hooks are the per-page and whole-I-cache primitives that the generic
 * operations above are built from.
 */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void *addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);

#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)

/*
 * DMA cache operations: write back before a device reads a buffer,
 * invalidate before the CPU reads data a device has written, and
 * write back + invalidate for bidirectional buffers.
 */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

EXPORT_SYMBOL(_dma_cache_wback_inv);

#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */

/*
 * We could optimize the case where the cache argument is not BCACHE but
 * that seems like a very atypical use ...
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
		return -EFAULT;

	__flush_icache_user_range(addr, addr + bytes);

	return 0;
}

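/*
 * Illustrative user-space example (not part of this file): a JIT that
 * has just emitted instructions would typically reach this syscall
 * through the cacheflush(2) libc wrapper:
 *
 *	#include <sys/cachectl.h>
 *
 *	if (cacheflush(code_buf, code_len, BCACHE) < 0)
 *		perror("cacheflush");
 *
 * BCACHE is ICACHE | DCACHE; as the comment above notes, the kernel
 * currently flushes both regardless of the cache argument.
 */
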
void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certainly going to
	 * get faulted into the tlb (and thus flushed) anyways.
	 */
	if (PageHighMem(page))
		addr = (unsigned long)kmap_atomic(page);	/* no permanent mapping */
	else
		addr = (unsigned long)page_address(page);

	flush_data_cache_page(addr);

	if (PageHighMem(page))
		__kunmap_atomic((void *)addr);
}

EXPORT_SYMBOL(__flush_dcache_page);

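/*
 * With virtually indexed caches an anonymous page can hold dirty lines
 * under a user-space virtual address whose cache colour differs from the
 * kernel's linear mapping of the same page.  kmap_coherent() creates a
 * temporary kernel mapping with the same colour as @vmaddr, so the flush
 * below hits exactly the lines user space wrote.
 */
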
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapcount(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
}

EXPORT_SYMBOL(__flush_anon_page);

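/*
 * Called once a new PTE has been installed (set_pte_at() ends up here on
 * MIPS).  If __flush_dcache_page() deferred a flush for this page, do it
 * now so the new user mapping sees consistent data; executable mappings
 * also need the writeback on cores whose I-cache does not snoop the
 * D-cache, so that instruction fetches see the new contents.
 */
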
void __update_cache(unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (Page_dcache_dirty(page)) {
		if (PageHighMem(page))
			addr = (unsigned long)kmap_atomic(page);
		else
			addr = (unsigned long)page_address(page);

		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);

		if (PageHighMem(page))
			__kunmap_atomic((void *)addr);

		ClearPageDcacheDirty(page);
	}
}

unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);

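/*
 * protection_map[] is indexed by a VMA's VM_READ/VM_WRITE/VM_EXEC/
 * VM_SHARED bits: entries 0-7 are the private (copy-on-write) protection
 * variants, entries 8-15 the shared ones.  Cores with the RIXI extension
 * can express "not executable" and "not readable" directly in the PTE,
 * so the map is built from the raw page bits; everything else uses the
 * classic predefined page protections.
 */
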
static inline void setup_protection_map(void)
{
	if (cpu_has_rixi) {
		protection_map[0] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT);

		protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
		protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
		protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
		protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);

	} else {
		protection_map[0] = PAGE_NONE;
		protection_map[1] = PAGE_READONLY;
		protection_map[2] = PAGE_COPY;
		protection_map[3] = PAGE_COPY;
		protection_map[4] = PAGE_READONLY;
		protection_map[5] = PAGE_READONLY;
		protection_map[6] = PAGE_COPY;
		protection_map[7] = PAGE_COPY;
		protection_map[8] = PAGE_NONE;
		protection_map[9] = PAGE_READONLY;
		protection_map[10] = PAGE_SHARED;
		protection_map[11] = PAGE_SHARED;
		protection_map[12] = PAGE_READONLY;
		protection_map[13] = PAGE_READONLY;
		protection_map[14] = PAGE_SHARED;
		protection_map[15] = PAGE_SHARED;
	}
}

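/*
 * Each variant's cache_init routine is declared __weak so this file
 * still links when that variant is not compiled in; the call is only
 * ever reached when the matching cpu_has_* predicate is true, which
 * implies the real implementation was built.
 */
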
void cpu_cache_init(void)
{
	if (cpu_has_3k_cache) {
		extern void __weak r3k_cache_init(void);

		r3k_cache_init();
	}
	if (cpu_has_6k_cache) {
		extern void __weak r6k_cache_init(void);

		r6k_cache_init();
	}
	if (cpu_has_4k_cache) {
		extern void __weak r4k_cache_init(void);

		r4k_cache_init();
	}
	if (cpu_has_8k_cache) {
		extern void __weak r8k_cache_init(void);

		r8k_cache_init();
	}
	if (cpu_has_tx39_cache) {
		extern void __weak tx39_cache_init(void);

		tx39_cache_init();
	}

	if (cpu_has_octeon_cache) {
		extern void __weak octeon_cache_init(void);

		octeon_cache_init();
	}

	setup_protection_map();
}

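/*
 * Default rule for deciding whether a mapping of physical memory (for
 * example via /dev/mem) must be uncached: O_DSYNC requests it explicitly,
 * and anything past the end of cached low memory cannot be reached
 * through the cacheable linear mapping.  Platforms may override this
 * weak default.
 */
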
int __weak __uncached_access(struct file *file, unsigned long addr)
{
	if (file->f_flags & O_DSYNC)
		return 1;

	return addr >= __pa(high_memory);
}