/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_CACHEFLUSH_H
#define _ASM_POWERPC_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/cputable.h>
#include <asm/cpu_has_feature.h>

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1
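/*
 * Lifecycle sketch (illustrative, simplified from the powerpc pte-setting
 * path): flush_dcache_folio() below clears PG_dcache_clean; when the folio
 * is later mapped into a user process, the arch code notices the cleared
 * bit, flushes the d-cache lines to the i-cache and re-marks the folio:
 *
 *	if (!test_bit(PG_dcache_clean, &folio->flags)) {
 *		flush_dcache_icache_folio(folio);
 *		set_bit(PG_dcache_clean, &folio->flags);
 *	}
 */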

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Book3s has no ptesync after setting a pte, so without this ptesync it's
 * possible for a kernel virtual mapping access to return a spurious fault
 * if it's accessed right after the pte is set. The page fault handler does
 * not expect this type of fault. flush_cache_vmap is not exactly the right
 * place to put this, but it seems to work well enough.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	asm volatile("ptesync" ::: "memory");
}
#define flush_cache_vmap flush_cache_vmap
#endif /* CONFIG_PPC_BOOK3S_64 */
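
/*
 * Illustrative ordering sketch (simplified): the generic vmalloc code
 * calls flush_cache_vmap() after installing the ptes for a new mapping,
 * so the ptesync above orders the pte stores before any access through
 * the new virtual address:
 *
 *	vmap_pages_range(addr, addr + size, prot, pages, shift);
 *	flush_cache_vmap(addr, addr + size);	// ptesync
 *	*(volatile int *)addr;			// no spurious fault
 */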

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean. We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
static inline void flush_dcache_folio(struct folio *folio)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_dcache_clean, &folio->flags))
		clear_bit(PG_dcache_clean, &folio->flags);
}
#define flush_dcache_folio flush_dcache_folio

static inline void flush_dcache_page(struct page *page)
{
	flush_dcache_folio(page_folio(page));
}
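
/*
 * Typical caller sketch (illustrative): after writing a page through a
 * kernel mapping, mark it so the i-cache gets flushed before userspace
 * can execute from it:
 *
 *	void *dst = kmap_local_page(page);
 *	memcpy(dst, src, len);
 *	kunmap_local(dst);
 *	flush_dcache_page(page);
 */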

void flush_icache_range(unsigned long start, unsigned long stop);
#define flush_icache_range flush_icache_range
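
/*
 * Illustrative use (hypothetical code-patching step): after copying
 * instructions into kernel memory, make them visible to instruction
 * fetch before jumping to them:
 *
 *	memcpy(dst, insns, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 */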

void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
		unsigned long addr, int len);
#define flush_icache_user_page flush_icache_user_page

void flush_dcache_icache_folio(struct folio *folio);

/**
 * flush_dcache_range(): Write any modified data cache blocks out to memory and
 * invalidate them. Does not invalidate the corresponding instruction cache
 * blocks.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 */
static inline void flush_dcache_range(unsigned long start, unsigned long stop)
{
	unsigned long shift = l1_dcache_shift();
	unsigned long bytes = l1_dcache_bytes();
	void *addr = (void *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	if (IS_ENABLED(CONFIG_PPC64))
		mb();	/* sync */

	for (i = 0; i < size >> shift; i++, addr += bytes)
		dcbf(addr);
	mb();	/* sync */
}
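
/*
 * Illustrative use (hypothetical non-coherent device hand-off): push
 * CPU-written data out to memory and drop the cached copies before a
 * device accesses the buffer:
 *
 *	flush_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 */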

/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 */
static inline void clean_dcache_range(unsigned long start, unsigned long stop)
{
	unsigned long shift = l1_dcache_shift();
	unsigned long bytes = l1_dcache_bytes();
	void *addr = (void *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> shift; i++, addr += bytes)
		dcbst(addr);
	mb();	/* sync */
}
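
/*
 * Illustrative use (hypothetical descriptor hand-off): the CPU builds a
 * descriptor in memory, then writes it back so a non-coherent device
 * reads the up-to-date bytes; the cached copy stays valid for the CPU:
 *
 *	setup_descriptor(desc);		// hypothetical helper
 *	clean_dcache_range((unsigned long)desc,
 *			   (unsigned long)desc + sizeof(*desc));
 */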

/*
 * Like above, but invalidate the D-cache. This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 */
static inline void invalidate_dcache_range(unsigned long start,
					   unsigned long stop)
{
	unsigned long shift = l1_dcache_shift();
	unsigned long bytes = l1_dcache_bytes();
	void *addr = (void *)(start & ~(bytes - 1));
	unsigned long size = stop - (unsigned long)addr + (bytes - 1);
	unsigned long i;

	for (i = 0; i < size >> shift; i++, addr += bytes)
		dcbi(addr);
	mb();	/* sync */
}
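
/*
 * Illustrative use (hypothetical 8xx CPM receive buffer): discard any
 * stale cached lines before the core reads data the CPM has written to
 * memory behind the cache's back:
 *
 *	invalidate_dcache_range((unsigned long)rx_buf,
 *				(unsigned long)rx_buf + rx_len);
 */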

#ifdef CONFIG_4xx
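/*
 * On 4xx a single iccci invalidates the entire instruction cache, so the
 * flush can be done inline; other platforms use the out-of-line version.
 */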
static inline void flush_instruction_cache(void)
{
	iccci((void *)KERNELBASE);
	isync();
}
#else
void flush_instruction_cache(void);
#endif

#include <asm-generic/cacheflush.h>

#endif /* _ASM_POWERPC_CACHEFLUSH_H */