v4.17
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/highmem.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/setup.h>

/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
EXPORT_SYMBOL_GPL(__flush_cache_all);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(flush_icache_range);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(local_flush_icache_range);
void (*__flush_icache_user_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(__flush_icache_user_range);
void (*__local_flush_icache_user_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(__local_flush_icache_user_range);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);

/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void * addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);

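All of these hooks are populated at boot by whichever per-CPU cache driver cpu_cache_init() selects below. As a hedged illustration (names recalled from arch/mips/mm/c-r4k.c, not taken from this page), r4k_cache_init() ends up wiring them roughly like so:

	/* excerpt-style sketch of r4k_cache_init(), from memory */
	flush_cache_all		= cache_noop;
	__flush_cache_all	= r4k___flush_cache_all;
	flush_cache_mm		= r4k_flush_cache_mm;
	flush_cache_page	= r4k_flush_cache_page;
	flush_cache_range	= r4k_flush_cache_range;
	flush_icache_range	= r4k_flush_icache_range;
	local_flush_icache_range = local_r4k_flush_icache_range;
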
#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)

/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

EXPORT_SYMBOL(_dma_cache_wback_inv);

#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
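
On cache-noncoherent platforms these three hooks implement the architecture side of the streaming-DMA sync operations. A minimal sketch of the usual direction mapping (the helper name example_dma_sync is ours, not kernel code):

#include <linux/dma-direction.h>

static void example_dma_sync(unsigned long addr, size_t size,
			     enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		_dma_cache_wback(addr, size);	/* write dirty lines to RAM */
		break;
	case DMA_FROM_DEVICE:
		_dma_cache_inv(addr, size);	/* discard stale lines */
		break;
	default:				/* DMA_BIDIRECTIONAL */
		_dma_cache_wback_inv(addr, size);
		break;
	}
}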

/*
 * We could optimize the case where the cache argument is not BCACHE, but
 * that seems a very atypical use ...
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
		return -EFAULT;

	__flush_icache_user_range(addr, addr + bytes);

	return 0;
}

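Userspace reaches this handler through the MIPS cacheflush(2) wrapper declared in <sys/cachectl.h>; note that the code above flushes both caches whatever the cache argument says. A minimal illustrative caller (the function name is ours) for publishing JIT-emitted code:

#include <stdio.h>
#include <sys/cachectl.h>

static void commit_jit_code(void *buf, int len)
{
	/* BCACHE == ICACHE | DCACHE; fails with EFAULT if the range is
	 * inaccessible, matching the access_ok() check in the kernel. */
	if (cacheflush(buf, len, BCACHE) != 0)
		perror("cacheflush");
}
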
void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);
	unsigned long addr;

	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certainly going to
	 * get faulted into the tlb (and thus flushed) anyways.
	 */
	if (PageHighMem(page))
		addr = (unsigned long)kmap_atomic(page);
	else
		addr = (unsigned long)page_address(page);

	flush_data_cache_page(addr);

	if (PageHighMem(page))
		__kunmap_atomic((void *)addr);
}

EXPORT_SYMBOL(__flush_dcache_page);

void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapcount(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
}

EXPORT_SYMBOL(__flush_anon_page);

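pages_do_alias() is the predicate doing the real work here: with a virtually indexed cache whose way size exceeds PAGE_SIZE, two virtual mappings of the same physical page can occupy different cache sets. Recalled from arch/mips/include/asm/page.h (paraphrased, not copied from this page), it is essentially:

static inline unsigned long pages_do_alias(unsigned long addr1,
	unsigned long addr2)
{
	/* nonzero iff the two VAs index different cache sets;
	 * shm_align_mask holds the aliasing bits above PAGE_SHIFT */
	return (addr1 ^ addr2) & shm_align_mask;
}
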
void __update_cache(unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (Page_dcache_dirty(page)) {
		if (PageHighMem(page))
			addr = (unsigned long)kmap_atomic(page);
		else
			addr = (unsigned long)page_address(page);

		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);

		if (PageHighMem(page))
			__kunmap_atomic((void *)addr);

		ClearPageDcacheDirty(page);
	}
}

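This is the deferred half of the SetPageDcacheDirty() scheme in __flush_dcache_page() above: the flush skipped there happens once the page is actually mapped. The usual entry point is update_mmu_cache() in arch/mips/include/asm/pgtable.h, which at this version reads roughly as follows (simplified from memory; details such as hardware page-table-walker fencing are omitted):

static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;

	__update_tlb(vma, address, pte);
	__update_cache(address, pte);	/* executability comes from the PTE */
}
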
unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);

static inline void setup_protection_map(void)
{
	if (cpu_has_rixi) {
		protection_map[0]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[1]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[2]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[3]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[4]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[5]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[6]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[7]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);

		protection_map[8]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[9]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
		protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
		protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
		protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);

	} else {
		protection_map[0] = PAGE_NONE;
		protection_map[1] = PAGE_READONLY;
		protection_map[2] = PAGE_COPY;
		protection_map[3] = PAGE_COPY;
		protection_map[4] = PAGE_READONLY;
		protection_map[5] = PAGE_READONLY;
		protection_map[6] = PAGE_COPY;
		protection_map[7] = PAGE_COPY;
		protection_map[8] = PAGE_NONE;
		protection_map[9] = PAGE_READONLY;
		protection_map[10] = PAGE_SHARED;
		protection_map[11] = PAGE_SHARED;
		protection_map[12] = PAGE_READONLY;
		protection_map[13] = PAGE_READONLY;
		protection_map[14] = PAGE_SHARED;
		protection_map[15] = PAGE_SHARED;
	}
}

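The sixteen protection_map slots are indexed by the low vm_flags bits: bit 0 is VM_READ, bit 1 VM_WRITE, bit 2 VM_EXEC, bit 3 VM_SHARED, so entries 0-7 describe private mappings and 8-15 their shared counterparts. Generic mm code consumes the table through vm_get_page_prot(); simplified from mm/mmap.c (the real function also folds in arch hooks):

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return protection_map[vm_flags &
			      (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}
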
void cpu_cache_init(void)
{
	if (cpu_has_3k_cache) {
		extern void __weak r3k_cache_init(void);

		r3k_cache_init();
	}
	if (cpu_has_6k_cache) {
		extern void __weak r6k_cache_init(void);

		r6k_cache_init();
	}
	if (cpu_has_4k_cache) {
		extern void __weak r4k_cache_init(void);

		r4k_cache_init();
	}
	if (cpu_has_8k_cache) {
		extern void __weak r8k_cache_init(void);

		r8k_cache_init();
	}
	if (cpu_has_tx39_cache) {
		extern void __weak tx39_cache_init(void);

		tx39_cache_init();
	}

	if (cpu_has_octeon_cache) {
		extern void __weak octeon_cache_init(void);

		octeon_cache_init();
	}

	setup_protection_map();
}

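The extern __weak declarations are what make this construct link: the cpu_has_* predicates are compile-time constants on most platforms, so the calls to unconfigured variants are optimized away, and the weak binding keeps the linker from complaining about the then-undefined symbols. A hypothetical miniature of the pattern (not kernel code):

extern void __weak optional_init(void);

void init_all(void)
{
	if (IS_ENABLED(CONFIG_OPTIONAL))	/* constant-folded guard */
		optional_init();		/* reference vanishes when 0 */
}
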
int __weak __uncached_access(struct file *file, unsigned long addr)
{
	if (file->f_flags & O_DSYNC)
		return 1;

	return addr >= __pa(high_memory);
}
v3.15
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>

/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(flush_icache_range);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);
void (*__invalidate_kernel_vmap_range)(unsigned long vaddr, int size);

/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void * addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);

#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)

/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

EXPORT_SYMBOL(_dma_cache_wback_inv);

#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */

/*
 * We could optimize the case where the cache argument is not BCACHE, but
 * that seems a very atypical use ...
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
		return -EFAULT;

	flush_icache_range(addr, addr + bytes);

	return 0;
}

void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (PageHighMem(page))
		return;
	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certainly going to
	 * get faulted into the tlb (and thus flushed) anyways.
	 */
	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}

EXPORT_SYMBOL(__flush_dcache_page);

void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapped(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
}

EXPORT_SYMBOL(__flush_anon_page);

void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (page_mapping(page) && Page_dcache_dirty(page)) {
		addr = (unsigned long) page_address(page);
		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);
		ClearPageDcacheDirty(page);
	}
}

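Unlike the v4.17 version above, __update_cache() here still takes the VMA and derives executability from vm_flags rather than from the PTE, so its caller in arch/mips/include/asm/pgtable.h passes the VMA through; roughly (again simplified from memory):

static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;

	__update_tlb(vma, address, pte);
	__update_cache(vma, address, pte);
}
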
unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);

static inline void setup_protection_map(void)
{
	if (cpu_has_rixi) {
		protection_map[0]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[1]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[2]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[3]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[4]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
		protection_map[5]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[6]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
		protection_map[7]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);

		protection_map[8]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[9]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
		protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
		protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
		protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ);
		protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);

	} else {
		protection_map[0] = PAGE_NONE;
		protection_map[1] = PAGE_READONLY;
		protection_map[2] = PAGE_COPY;
		protection_map[3] = PAGE_COPY;
		protection_map[4] = PAGE_READONLY;
		protection_map[5] = PAGE_READONLY;
		protection_map[6] = PAGE_COPY;
		protection_map[7] = PAGE_COPY;
		protection_map[8] = PAGE_NONE;
		protection_map[9] = PAGE_READONLY;
		protection_map[10] = PAGE_SHARED;
		protection_map[11] = PAGE_SHARED;
		protection_map[12] = PAGE_READONLY;
		protection_map[13] = PAGE_READONLY;
		protection_map[14] = PAGE_SHARED;
		protection_map[15] = PAGE_SHARED;
	}
}

void cpu_cache_init(void)
{
	if (cpu_has_3k_cache) {
		extern void __weak r3k_cache_init(void);

		r3k_cache_init();
	}
	if (cpu_has_6k_cache) {
		extern void __weak r6k_cache_init(void);

		r6k_cache_init();
	}
	if (cpu_has_4k_cache) {
		extern void __weak r4k_cache_init(void);

		r4k_cache_init();
	}
	if (cpu_has_8k_cache) {
		extern void __weak r8k_cache_init(void);

		r8k_cache_init();
	}
	if (cpu_has_tx39_cache) {
		extern void __weak tx39_cache_init(void);

		tx39_cache_init();
	}

	if (cpu_has_octeon_cache) {
		extern void __weak octeon_cache_init(void);

		octeon_cache_init();
	}

	setup_protection_map();
}

int __weak __uncached_access(struct file *file, unsigned long addr)
{
	if (file->f_flags & O_DSYNC)
		return 1;

	return addr >= __pa(high_memory);
}