v6.8 (arch/mips/mm/cache.c)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

#include <asm/bcache.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/setup.h>
#include <asm/pgtable.h>

/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
EXPORT_SYMBOL_GPL(__flush_cache_all);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(flush_icache_range);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(local_flush_icache_range);
void (*__flush_icache_user_range)(unsigned long start, unsigned long end);
void (*__local_flush_icache_user_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(__local_flush_icache_user_range);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);

/* MIPS specific cache operations */
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);

/*
 * Dummy cache handling routine
 */

void cache_noop(void) {}

#ifdef CONFIG_BOARD_SCACHE

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;
#endif

#ifdef CONFIG_DMA_NONCOHERENT

/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * We could optimize the case where the cache argument is not BCACHE but
 * that seems very atypical use ...
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok((void __user *) addr, bytes))
		return -EFAULT;

	__flush_icache_user_range(addr, addr + bytes);

	return 0;
}
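
/*
 * Editor's example (a sketch, not part of this file): one plausible
 * userspace use of the cacheflush(2) syscall defined above, e.g. after
 * emitting code at runtime. It assumes the MIPS glibc wrapper and the
 * ICACHE/DCACHE/BCACHE constants from <sys/cachectl.h>; on other libcs
 * a raw syscall(__NR_cacheflush, addr, bytes, cache) may be needed.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <sys/cachectl.h>	/* cacheflush(), ICACHE/DCACHE/BCACHE */

int jit_flush_demo(void)
{
	size_t len = 4096;
	unsigned char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_EXEC,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return -1;

	/* ... write instructions into buf here ... */

	/*
	 * Publish the new instructions: the kernel flushes [addr, addr + bytes).
	 * bytes == 0 succeeds immediately and an inaccessible range fails with
	 * -EFAULT, mirroring the checks in the handler above. Note that this
	 * v6.8 implementation ignores the cache argument and always flushes
	 * the user icache range.
	 */
	if (cacheflush(buf, (int)len, BCACHE) != 0)
		perror("cacheflush");

	munmap(buf, len);
	return 0;
}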

void __flush_dcache_pages(struct page *page, unsigned int nr)
{
	struct folio *folio = page_folio(page);
	struct address_space *mapping = folio_flush_mapping(folio);
	unsigned long addr;
	unsigned int i;

	if (mapping && !mapping_mapped(mapping)) {
		folio_set_dcache_dirty(folio);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certainly going to
	 * get faulted into the tlb (and thus flushed) anyways.
	 */
	for (i = 0; i < nr; i++) {
		addr = (unsigned long)kmap_local_page(nth_page(page, i));
		flush_data_cache_page(addr);
		kunmap_local((void *)addr);
	}
}
EXPORT_SYMBOL(__flush_dcache_pages);
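
/*
 * Editor's note (a sketch, not part of this file): generic mm code reaches
 * the helper above through flush_dcache_page()/flush_dcache_folio(), which
 * arch/mips/include/asm/cacheflush.h defines roughly as below. The flush
 * only happens eagerly when the D-cache is virtually aliased; otherwise the
 * folio is merely marked dirty and flushed later by __update_cache().
 */
static inline void flush_dcache_page(struct page *page)
{
	if (cpu_has_dc_aliases)
		__flush_dcache_pages(page, 1);
	else if (!cpu_has_ic_fills_f_dc)
		folio_set_dcache_dirty(page_folio(page));
}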
 
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);
	struct folio *folio = page_folio(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (folio_mapped(folio) && !folio_test_dcache_dirty(folio)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
}

EXPORT_SYMBOL(__flush_anon_page);

void __update_cache(unsigned long address, pte_t pte)
{
	struct folio *folio;
	unsigned long pfn, addr;
	int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;
	unsigned int i;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;

	folio = page_folio(pfn_to_page(pfn));
	address &= PAGE_MASK;
	address -= offset_in_folio(folio, pfn << PAGE_SHIFT);

	if (folio_test_dcache_dirty(folio)) {
		for (i = 0; i < folio_nr_pages(folio); i++) {
			addr = (unsigned long)kmap_local_folio(folio, i);

			if (exec || pages_do_alias(addr, address))
				flush_data_cache_page(addr);
			kunmap_local((void *)addr);
			address += PAGE_SIZE;
		}
		folio_clear_dcache_dirty(folio);
	}
}

unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);

#define PM(p)	__pgprot(_page_cachable_default | (p))

static pgprot_t protection_map[16] __ro_after_init;
DECLARE_VM_GET_PAGE_PROT

static inline void setup_protection_map(void)
{
	protection_map[0]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
	protection_map[1]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
	protection_map[2]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
	protection_map[3]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
	protection_map[4]  = PM(_PAGE_PRESENT);
	protection_map[5]  = PM(_PAGE_PRESENT);
	protection_map[6]  = PM(_PAGE_PRESENT);
	protection_map[7]  = PM(_PAGE_PRESENT);

	protection_map[8]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
	protection_map[9]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
	protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
				_PAGE_NO_READ);
	protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
	protection_map[12] = PM(_PAGE_PRESENT);
	protection_map[13] = PM(_PAGE_PRESENT);
	protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
	protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
}
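
/*
 * Editor's note (a sketch, not part of this file): DECLARE_VM_GET_PAGE_PROT
 * above expands to the generic lookup from include/linux/pgtable.h, so the
 * sixteen entries are indexed by the low vm_flags bits -- roughly:
 */
pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return protection_map[vm_flags &
			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}
EXPORT_SYMBOL(vm_get_page_prot);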

#undef PM

void cpu_cache_init(void)
{
	if (IS_ENABLED(CONFIG_CPU_R3000) && cpu_has_3k_cache)
		r3k_cache_init();
	if (IS_ENABLED(CONFIG_CPU_R4K_CACHE_TLB) && cpu_has_4k_cache)
		r4k_cache_init();

	if (IS_ENABLED(CONFIG_CPU_CAVIUM_OCTEON) && cpu_has_octeon_cache)
		octeon_cache_init();

	setup_protection_map();
}
v4.6 (arch/mips/mm/cache.c)
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>

/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(flush_icache_range);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(local_flush_icache_range);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);
void (*__invalidate_kernel_vmap_range)(unsigned long vaddr, int size);

/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void * addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);

#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)

/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

EXPORT_SYMBOL(_dma_cache_wback_inv);

#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */

/*
 * We could optimize the case where the cache argument is not BCACHE but
 * that seems very atypical use ...
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
		return -EFAULT;

	flush_icache_range(addr, addr + bytes);

	return 0;
}

void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (PageHighMem(page))
		return;
	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certainly going to
	 * get faulted into the tlb (and thus flushed) anyways.
	 */
	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}

EXPORT_SYMBOL(__flush_dcache_page);

void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapcount(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
}

EXPORT_SYMBOL(__flush_anon_page);

void __flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long addr;

	if (PageHighMem(page))
		return;

	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}
EXPORT_SYMBOL_GPL(__flush_icache_page);

void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (page_mapping(page) && Page_dcache_dirty(page)) {
		addr = (unsigned long) page_address(page);
		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);
		ClearPageDcacheDirty(page);
	}
}

unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);

static inline void setup_protection_map(void)
{
	if (cpu_has_rixi) {
		protection_map[0]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[1]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[2]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[3]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[4]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[5]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[6]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[7]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);

		protection_map[8]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[9]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
		protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
		protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
		protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);

	} else {
		protection_map[0] = PAGE_NONE;
		protection_map[1] = PAGE_READONLY;
		protection_map[2] = PAGE_COPY;
		protection_map[3] = PAGE_COPY;
		protection_map[4] = PAGE_READONLY;
		protection_map[5] = PAGE_READONLY;
		protection_map[6] = PAGE_COPY;
		protection_map[7] = PAGE_COPY;
		protection_map[8] = PAGE_NONE;
		protection_map[9] = PAGE_READONLY;
		protection_map[10] = PAGE_SHARED;
		protection_map[11] = PAGE_SHARED;
		protection_map[12] = PAGE_READONLY;
		protection_map[13] = PAGE_READONLY;
		protection_map[14] = PAGE_SHARED;
		protection_map[15] = PAGE_SHARED;
	}
}

void cpu_cache_init(void)
{
	if (cpu_has_3k_cache) {
		extern void __weak r3k_cache_init(void);

		r3k_cache_init();
	}
	if (cpu_has_6k_cache) {
		extern void __weak r6k_cache_init(void);

		r6k_cache_init();
	}
	if (cpu_has_4k_cache) {
		extern void __weak r4k_cache_init(void);

		r4k_cache_init();
	}
	if (cpu_has_8k_cache) {
		extern void __weak r8k_cache_init(void);

		r8k_cache_init();
	}
	if (cpu_has_tx39_cache) {
		extern void __weak tx39_cache_init(void);

		tx39_cache_init();
	}

	if (cpu_has_octeon_cache) {
		extern void __weak octeon_cache_init(void);

		octeon_cache_init();
	}

	setup_protection_map();
}

int __weak __uncached_access(struct file *file, unsigned long addr)
{
	if (file->f_flags & O_DSYNC)
		return 1;

	return addr >= __pa(high_memory);
}
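
/*
 * Editor's note (a sketch, not part of this file): in kernels of this era
 * the weak default above could be overridden per platform, and its caller
 * lived in drivers/char/mem.c, which used it to decide whether an mmap()
 * of /dev/mem on MIPS must be performed uncached -- roughly:
 */
static inline int uncached_access(struct file *file, phys_addr_t addr)
{
	extern int __uncached_access(struct file *file, unsigned long addr);

	return __uncached_access(file, addr);
}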