arch/mips/include/asm/cacheflush.h
v4.17
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H

/* Keep includes the same across arches.  */
#include <linux/mm.h>
#include <asm/cpu-features.h>

/* Cache flushing:
 *
 *  - flush_cache_all() flushes the entire cache
 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_dup_mm(mm) handles cache flushing when forking
 *  - flush_cache_page(vma, vmaddr, pfn) flushes a single page
 *  - flush_cache_range(vma, start, end) flushes a range of pages
 *  - flush_icache_range(start, end) flushes a range of instructions
 *  - flush_dcache_page(pg) flushes (writes back & invalidates) a page for the dcache
 *
 * MIPS specific flush operations:
 *
 *  - flush_cache_sigtramp() flushes the signal trampoline
 *  - flush_icache_all() flushes the entire instruction cache
 *  - flush_data_cache_page() flushes a page from the data cache
 *  - __flush_icache_user_range(start, end) flushes a range of user instructions
 */
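
/*
 * Usage sketch (editor's illustration, not part of this header): code that
 * generates or patches instructions, e.g. a JIT, stores them through the
 * dcache and must call flush_icache_range() on the written range before
 * executing it. The buffer below is hypothetical:
 *
 *	u32 *code = executable_buffer;		// hypothetical buffer
 *	code[0] = insn;				// store new instruction
 *	flush_icache_range((unsigned long)code,
 *			   (unsigned long)(code + 1));
 *	// the icache now sees the new instruction
 */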

/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty			PG_arch_1

#define Page_dcache_dirty(page)		\
	test_bit(PG_dcache_dirty, &(page)->flags)
#define SetPageDcacheDirty(page)	\
	set_bit(PG_dcache_dirty, &(page)->flags)
#define ClearPageDcacheDirty(page)	\
	clear_bit(PG_dcache_dirty, &(page)->flags)

extern void (*flush_cache_all)(void);
extern void (*__flush_cache_all)(void);
extern void (*flush_cache_mm)(struct mm_struct *mm);
#define flush_cache_dup_mm(mm)	do { (void) (mm); } while (0)
extern void (*flush_cache_range)(struct vm_area_struct *vma,
	unsigned long start, unsigned long end);
extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
extern void __flush_dcache_page(struct page *page);

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_page(struct page *page)
{
	if (cpu_has_dc_aliases)
		__flush_dcache_page(page);
	else if (!cpu_has_ic_fills_f_dc)
		SetPageDcacheDirty(page);
}
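
/*
 * Consumer-side sketch (editor's illustration, simplified from the
 * __update_cache() pattern in arch/mips/mm/cache.c): a page that
 * flush_dcache_page() marked dirty gets its deferred flush when it is
 * finally mapped into user space, and the flag is then cleared:
 *
 *	if (Page_dcache_dirty(page)) {
 *		flush_data_cache_page((unsigned long)page_address(page));
 *		ClearPageDcacheDirty(page);
 *	}
 */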

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

#define ARCH_HAS_FLUSH_ANON_PAGE
extern void __flush_anon_page(struct page *, unsigned long);
static inline void flush_anon_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vmaddr)
{
	if (cpu_has_dc_aliases && PageAnon(page))
		__flush_anon_page(page, vmaddr);
}

static inline void flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
}

extern void (*flush_icache_range)(unsigned long start, unsigned long end);
extern void (*local_flush_icache_range)(unsigned long start, unsigned long end);
extern void (*__flush_icache_user_range)(unsigned long start,
					 unsigned long end);
extern void (*__local_flush_icache_user_range)(unsigned long start,
					       unsigned long end);

extern void (*__flush_cache_vmap)(void);

static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (cpu_has_dc_aliases)
		__flush_cache_vmap();
}

extern void (*__flush_cache_vunmap)(void);

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (cpu_has_dc_aliases)
		__flush_cache_vunmap();
}

extern void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

extern void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

extern void (*flush_cache_sigtramp)(unsigned long addr);
extern void (*flush_icache_all)(void);
extern void (*local_flush_data_cache_page)(void * addr);
extern void (*flush_data_cache_page)(unsigned long addr);

/* Run kernel code uncached, useful for cache probing functions. */
unsigned long run_uncached(void *func);
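
/*
 * Usage sketch (editor's illustration; the probe function is hypothetical):
 * run_uncached() executes a function with the caches out of the picture,
 * so setup code can size or test a cache without polluting it:
 *
 *	static unsigned long probe_l2_size(void);	// hypothetical probe
 *	unsigned long l2_size = run_uncached(probe_l2_size);
 */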

extern void *kmap_coherent(struct page *page, unsigned long addr);
extern void kunmap_coherent(void);
extern void *kmap_noncoherent(struct page *page, unsigned long addr);

static inline void kunmap_noncoherent(void)
{
	kunmap_coherent();
}
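
/*
 * Usage sketch (editor's illustration): kmap_coherent() maps a page at a
 * kernel address that does not alias the given user address, so the kernel
 * can write the page without leaving stale dcache lines behind. This is
 * roughly the pattern copy_to_user_page() relies on internally:
 *
 *	void *vto = kmap_coherent(page, vaddr);
 *	memcpy(vto + (vaddr & ~PAGE_MASK), src, len);
 *	kunmap_coherent();
 */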

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
	BUG_ON(cpu_has_dc_aliases && PageHighMem(page));
	flush_dcache_page(page);
}

/*
 * For now flush_kernel_vmap_range and invalidate_kernel_vmap_range both do a
 * cache writeback and invalidate operation.
 */
extern void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);

static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
	if (cpu_has_dc_aliases)
		__flush_kernel_vmap_range((unsigned long) vaddr, size);
}

static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	if (cpu_has_dc_aliases)
		__flush_kernel_vmap_range((unsigned long) vaddr, size);
}
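
/*
 * Usage sketch (editor's illustration, following the convention described
 * in Documentation/core-api/cachetlb.rst): a driver doing I/O on memory it
 * also touches through a vmap() alias brackets the I/O with these calls:
 *
 *	void *buf = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 *	memset(buf, 0, len);			// dirty data via the alias
 *	flush_kernel_vmap_range(buf, len);	// write back before the device reads
 *	// ... device DMA to/from the underlying pages ...
 *	invalidate_kernel_vmap_range(buf, len);	// drop stale lines before CPU reads
 */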

#endif /* _ASM_CACHEFLUSH_H */
v5.14.15
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H

/* Keep includes the same across arches.  */
#include <linux/mm.h>
#include <asm/cpu-features.h>

/* Cache flushing:
 *
 *  - flush_cache_all() flushes the entire cache
 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_dup_mm(mm) handles cache flushing when forking
 *  - flush_cache_page(vma, vmaddr, pfn) flushes a single page
 *  - flush_cache_range(vma, start, end) flushes a range of pages
 *  - flush_icache_range(start, end) flushes a range of instructions
 *  - flush_dcache_page(pg) flushes (writes back & invalidates) a page for the dcache
 *
 * MIPS specific flush operations:
 *
 *  - flush_icache_all() flushes the entire instruction cache
 *  - flush_data_cache_page() flushes a page from the data cache
 *  - __flush_icache_user_range(start, end) flushes a range of user instructions
 */

/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty			PG_arch_1

#define Page_dcache_dirty(page)		\
	test_bit(PG_dcache_dirty, &(page)->flags)
#define SetPageDcacheDirty(page)	\
	set_bit(PG_dcache_dirty, &(page)->flags)
#define ClearPageDcacheDirty(page)	\
	clear_bit(PG_dcache_dirty, &(page)->flags)

extern void (*flush_cache_all)(void);
extern void (*__flush_cache_all)(void);
extern void (*flush_cache_mm)(struct mm_struct *mm);
#define flush_cache_dup_mm(mm)	do { (void) (mm); } while (0)
extern void (*flush_cache_range)(struct vm_area_struct *vma,
	unsigned long start, unsigned long end);
extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
extern void __flush_dcache_page(struct page *page);

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_page(struct page *page)
{
	if (cpu_has_dc_aliases)
		__flush_dcache_page(page);
	else if (!cpu_has_ic_fills_f_dc)
		SetPageDcacheDirty(page);
}

#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

#define ARCH_HAS_FLUSH_ANON_PAGE
extern void __flush_anon_page(struct page *, unsigned long);
static inline void flush_anon_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vmaddr)
{
	if (cpu_has_dc_aliases && PageAnon(page))
		__flush_anon_page(page, vmaddr);
}
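
/*
 * Usage sketch (editor's illustration): code about to read an anonymous
 * page through its kernel mapping, as the get_user_pages() path does,
 * flushes the user-space alias first:
 *
 *	flush_anon_page(vma, page, address);	// write back the user alias
 *	flush_dcache_page(page);		// then the kernel-side state
 *	// the kernel mapping of the page is now coherent
 */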

static inline void flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
}

extern void (*flush_icache_range)(unsigned long start, unsigned long end);
extern void (*local_flush_icache_range)(unsigned long start, unsigned long end);
extern void (*__flush_icache_user_range)(unsigned long start,
					 unsigned long end);
extern void (*__local_flush_icache_user_range)(unsigned long start,
					       unsigned long end);

extern void (*__flush_cache_vmap)(void);

static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (cpu_has_dc_aliases)
		__flush_cache_vmap();
}

extern void (*__flush_cache_vunmap)(void);

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (cpu_has_dc_aliases)
		__flush_cache_vunmap();
}

extern void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

extern void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len);

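/*
 * Usage sketch (editor's illustration, modeled on the access_process_vm()
 * path in mm/memory.c): ptrace-style writes into another process go through
 * copy_to_user_page() so both the dcache and the icache stay coherent:
 *
 *	maddr = kmap(page);
 *	copy_to_user_page(vma, page, addr,
 *			  maddr + offset, buf, bytes);
 *	set_page_dirty_lock(page);
 *	kunmap(page);
 */
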
extern void (*flush_icache_all)(void);
extern void (*local_flush_data_cache_page)(void * addr);
extern void (*flush_data_cache_page)(unsigned long addr);

/* Run kernel code uncached, useful for cache probing functions. */
unsigned long run_uncached(void *func);

extern void *kmap_coherent(struct page *page, unsigned long addr);
extern void kunmap_coherent(void);
extern void *kmap_noncoherent(struct page *page, unsigned long addr);

static inline void kunmap_noncoherent(void)
{
	kunmap_coherent();
}

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
	BUG_ON(cpu_has_dc_aliases && PageHighMem(page));
	flush_dcache_page(page);
}

/*
 * For now flush_kernel_vmap_range and invalidate_kernel_vmap_range both do a
 * cache writeback and invalidate operation.
 */
extern void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);

static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
	if (cpu_has_dc_aliases)
		__flush_kernel_vmap_range((unsigned long) vaddr, size);
}

static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	if (cpu_has_dc_aliases)
		__flush_kernel_vmap_range((unsigned long) vaddr, size);
}

#endif /* _ASM_CACHEFLUSH_H */