/* NOTE(review): this file contains two concatenated revisions of the sparc32
 * cacheflush.h header; because both use the same include guard, the second
 * block is skipped by the preprocessor. Split into separate files if needed. */
1#ifndef _SPARC_CACHEFLUSH_H
2#define _SPARC_CACHEFLUSH_H
3
4#include <linux/mm.h> /* Common for other includes */
5// #include <linux/kernel.h> from pgalloc.h
6// #include <linux/sched.h> from pgalloc.h
7
8// #include <asm/page.h>
9#include <asm/btfixup.h>
10
11/*
12 * Fine grained cache flushing.
13 */
14#ifdef CONFIG_SMP
15
16BTFIXUPDEF_CALL(void, local_flush_cache_all, void)
17BTFIXUPDEF_CALL(void, local_flush_cache_mm, struct mm_struct *)
18BTFIXUPDEF_CALL(void, local_flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
19BTFIXUPDEF_CALL(void, local_flush_cache_page, struct vm_area_struct *, unsigned long)
20
21#define local_flush_cache_all() BTFIXUP_CALL(local_flush_cache_all)()
22#define local_flush_cache_mm(mm) BTFIXUP_CALL(local_flush_cache_mm)(mm)
23#define local_flush_cache_range(vma,start,end) BTFIXUP_CALL(local_flush_cache_range)(vma,start,end)
24#define local_flush_cache_page(vma,addr) BTFIXUP_CALL(local_flush_cache_page)(vma,addr)
25
26BTFIXUPDEF_CALL(void, local_flush_page_to_ram, unsigned long)
27BTFIXUPDEF_CALL(void, local_flush_sig_insns, struct mm_struct *, unsigned long)
28
29#define local_flush_page_to_ram(addr) BTFIXUP_CALL(local_flush_page_to_ram)(addr)
30#define local_flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(local_flush_sig_insns)(mm,insn_addr)
31
32extern void smp_flush_cache_all(void);
33extern void smp_flush_cache_mm(struct mm_struct *mm);
34extern void smp_flush_cache_range(struct vm_area_struct *vma,
35 unsigned long start,
36 unsigned long end);
37extern void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
38
39extern void smp_flush_page_to_ram(unsigned long page);
40extern void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
41
42#endif /* CONFIG_SMP */
43
44BTFIXUPDEF_CALL(void, flush_cache_all, void)
45BTFIXUPDEF_CALL(void, flush_cache_mm, struct mm_struct *)
46BTFIXUPDEF_CALL(void, flush_cache_range, struct vm_area_struct *, unsigned long, unsigned long)
47BTFIXUPDEF_CALL(void, flush_cache_page, struct vm_area_struct *, unsigned long)
48
49#define flush_cache_all() BTFIXUP_CALL(flush_cache_all)()
50#define flush_cache_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
51#define flush_cache_dup_mm(mm) BTFIXUP_CALL(flush_cache_mm)(mm)
52#define flush_cache_range(vma,start,end) BTFIXUP_CALL(flush_cache_range)(vma,start,end)
53#define flush_cache_page(vma,addr,pfn) BTFIXUP_CALL(flush_cache_page)(vma,addr)
54#define flush_icache_range(start, end) do { } while (0)
55#define flush_icache_page(vma, pg) do { } while (0)
56
57#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
58
59#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
60 do { \
61 flush_cache_page(vma, vaddr, page_to_pfn(page));\
62 memcpy(dst, src, len); \
63 } while (0)
64#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
65 do { \
66 flush_cache_page(vma, vaddr, page_to_pfn(page));\
67 memcpy(dst, src, len); \
68 } while (0)
69
70BTFIXUPDEF_CALL(void, __flush_page_to_ram, unsigned long)
71BTFIXUPDEF_CALL(void, flush_sig_insns, struct mm_struct *, unsigned long)
72
73#define __flush_page_to_ram(addr) BTFIXUP_CALL(__flush_page_to_ram)(addr)
74#define flush_sig_insns(mm,insn_addr) BTFIXUP_CALL(flush_sig_insns)(mm,insn_addr)
75
76extern void sparc_flush_page_to_ram(struct page *page);
77
78#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
79#define flush_dcache_page(page) sparc_flush_page_to_ram(page)
80#define flush_dcache_mmap_lock(mapping) do { } while (0)
81#define flush_dcache_mmap_unlock(mapping) do { } while (0)
82
83#define flush_cache_vmap(start, end) flush_cache_all()
84#define flush_cache_vunmap(start, end) flush_cache_all()
85
86#endif /* _SPARC_CACHEFLUSH_H */
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _SPARC_CACHEFLUSH_H
3#define _SPARC_CACHEFLUSH_H
4
5#include <asm/cachetlb_32.h>
6
7#define flush_cache_all() \
8 sparc32_cachetlb_ops->cache_all()
9#define flush_cache_mm(mm) \
10 sparc32_cachetlb_ops->cache_mm(mm)
11#define flush_cache_dup_mm(mm) \
12 sparc32_cachetlb_ops->cache_mm(mm)
13#define flush_cache_range(vma,start,end) \
14 sparc32_cachetlb_ops->cache_range(vma, start, end)
15#define flush_cache_page(vma,addr,pfn) \
16 sparc32_cachetlb_ops->cache_page(vma, addr)
17#define flush_icache_range(start, end) do { } while (0)
18#define flush_icache_page(vma, pg) do { } while (0)
19
20#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
21 do { \
22 flush_cache_page(vma, vaddr, page_to_pfn(page));\
23 memcpy(dst, src, len); \
24 } while (0)
25#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
26 do { \
27 flush_cache_page(vma, vaddr, page_to_pfn(page));\
28 memcpy(dst, src, len); \
29 } while (0)
30
31#define __flush_page_to_ram(addr) \
32 sparc32_cachetlb_ops->page_to_ram(addr)
33#define flush_sig_insns(mm,insn_addr) \
34 sparc32_cachetlb_ops->sig_insns(mm, insn_addr)
35#define flush_page_for_dma(addr) \
36 sparc32_cachetlb_ops->page_for_dma(addr)
37
38void sparc_flush_page_to_ram(struct page *page);
39
40#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
41#define flush_dcache_page(page) sparc_flush_page_to_ram(page)
42#define flush_dcache_mmap_lock(mapping) do { } while (0)
43#define flush_dcache_mmap_unlock(mapping) do { } while (0)
44
45#define flush_cache_vmap(start, end) flush_cache_all()
46#define flush_cache_vunmap(start, end) flush_cache_all()
47
48/* When a context switch happens we must flush all user windows so that
49 * the windows of the current process are flushed onto its stack. This
50 * way the windows are all clean for the next process and the stack
51 * frames are up to date.
52 */
53void flush_user_windows(void);
54void kill_user_windows(void);
55void flushw_all(void);
56
57#endif /* _SPARC_CACHEFLUSH_H */