/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_CACHE_H
#define _ASM_POWERPC_CACHE_H

#ifdef __KERNEL__


/* bytes per L1 cache line */
#if defined(CONFIG_PPC_8xx)
#define L1_CACHE_SHIFT		4
#define MAX_COPY_PREFETCH	1
#define IFETCH_ALIGN_SHIFT	2
#elif defined(CONFIG_PPC_E500MC)
#define L1_CACHE_SHIFT		6
#define MAX_COPY_PREFETCH	4
#define IFETCH_ALIGN_SHIFT	3
#elif defined(CONFIG_PPC32)
#define MAX_COPY_PREFETCH	4
#define IFETCH_ALIGN_SHIFT	3	/* 603 fetches 2 insn at a time */
#if defined(CONFIG_PPC_47x)
#define L1_CACHE_SHIFT		7
#else
#define L1_CACHE_SHIFT		5
#endif
#else /* CONFIG_PPC64 */
#define L1_CACHE_SHIFT		7
#define IFETCH_ALIGN_SHIFT	4	/* POWER8,9 */
#endif

#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

#define SMP_CACHE_BYTES		L1_CACHE_BYTES

#define IFETCH_ALIGN_BYTES	(1 << IFETCH_ALIGN_SHIFT)
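
/*
 * Illustrative arithmetic: with L1_CACHE_SHIFT == 7 on 64-bit parts,
 * L1_CACHE_BYTES comes out to 1 << 7 == 128 bytes per line and
 * IFETCH_ALIGN_BYTES to 1 << 4 == 16 bytes, while an 8xx core has only
 * 1 << 4 == 16-byte lines.
 */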

#ifdef CONFIG_NOT_COHERENT_CACHE
#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
#endif
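
/*
 * Rationale: without hardware coherency, a DMA buffer that shares a
 * cache line with unrelated data can be corrupted by a line invalidate
 * or writeback racing the transfer, so ARCH_DMA_MINALIGN rounds DMA
 * allocations up to a full cache line.
 */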

#if !defined(__ASSEMBLY__)
#ifdef CONFIG_PPC64

struct ppc_cache_info {
	u32 size;
	u32 line_size;
	u32 block_size;	/* L1 only */
	u32 log_block_size;
	u32 blocks_per_page;
	u32 sets;
	u32 assoc;
};

struct ppc64_caches {
	struct ppc_cache_info l1d;
	struct ppc_cache_info l1i;
	struct ppc_cache_info l2;
	struct ppc_cache_info l3;
};

extern struct ppc64_caches ppc64_caches;
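
/*
 * ppc64_caches is filled in early in boot (from the device tree) so
 * that code can query the CPU's real line and block sizes rather than
 * relying on the compile-time constants above.
 */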

static inline u32 l1_dcache_shift(void)
{
	return ppc64_caches.l1d.log_block_size;
}

static inline u32 l1_dcache_bytes(void)
{
	return ppc64_caches.l1d.block_size;
}

static inline u32 l1_icache_shift(void)
{
	return ppc64_caches.l1i.log_block_size;
}

static inline u32 l1_icache_bytes(void)
{
	return ppc64_caches.l1i.block_size;
}
#else
static inline u32 l1_dcache_shift(void)
{
	return L1_CACHE_SHIFT;
}

static inline u32 l1_dcache_bytes(void)
{
	return L1_CACHE_BYTES;
}

static inline u32 l1_icache_shift(void)
{
	return L1_CACHE_SHIFT;
}

static inline u32 l1_icache_bytes(void)
{
	return L1_CACHE_BYTES;
}

#endif

#define __read_mostly __section(".data..read_mostly")
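
/*
 * Grouping rarely-written data into .data..read_mostly keeps it from
 * sharing, and ping-ponging, cache lines with frequently-written data.
 */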

/*
 * L2CR/L3CR are the L2/L3 cache control registers found on 75x/74xx
 * (G3/G4) class CPUs; elsewhere they read back as zero and writes are
 * no-ops.
 */
#ifdef CONFIG_PPC_BOOK3S_32
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR()	0L
#define _get_L3CR()	0L
#define _set_L2CR(val)	do { } while (0)
#define _set_L3CR(val)	do { } while (0)
#endif
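
/*
 * A minimal usage sketch, assuming a 750/74xx-class CPU where these
 * helpers reach the real SPR (0x80000000 being the L2CR[L2E] enable
 * bit on those parts):
 *
 *	unsigned long l2cr = _get_L2CR();
 *	if (!(l2cr & 0x80000000))
 *		_set_L2CR(l2cr | 0x80000000);
 */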

/* dcbz: Data Cache Block Zero - zero out a whole cache line */
static inline void dcbz(void *addr)
{
	__asm__ __volatile__ ("dcbz 0, %0" : : "r"(addr) : "memory");
}

/* dcbi: Data Cache Block Invalidate - discard a line without writeback */
static inline void dcbi(void *addr)
{
	__asm__ __volatile__ ("dcbi 0, %0" : : "r"(addr) : "memory");
}

/* dcbf: Data Cache Block Flush - write back if dirty, then invalidate */
static inline void dcbf(void *addr)
{
	__asm__ __volatile__ ("dcbf 0, %0" : : "r"(addr) : "memory");
}

/* dcbst: Data Cache Block Store - write back without invalidating */
static inline void dcbst(void *addr)
{
	__asm__ __volatile__ ("dcbst 0, %0" : : "r"(addr) : "memory");
}

/* icbi: Instruction Cache Block Invalidate */
static inline void icbi(void *addr)
{
	asm volatile ("icbi 0, %0" : : "r"(addr) : "memory");
}

/* iccci: Instruction Cache Congruence Class Invalidate (4xx-class cores) */
static inline void iccci(void *addr)
{
	asm volatile ("iccci 0, %0" : : "r"(addr) : "memory");
}
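
/*
 * A minimal sketch (the helper name is an illustrative assumption) of
 * how the primitives above compose into the classic icache-coherency
 * sequence: clean the data cache to memory, order the stores, then
 * toss the stale instruction-cache lines.
 */
static inline void example_sync_icache_range(void *start, void *stop)
{
	unsigned long dbytes = l1_dcache_bytes();
	unsigned long ibytes = l1_icache_bytes();
	void *p;

	for (p = (void *)((unsigned long)start & ~(dbytes - 1));
	     p < stop; p += dbytes)
		dcbst(p);			/* write dirty lines back */
	__asm__ __volatile__ ("sync" : : : "memory");	/* order dcbst vs. icbi */
	for (p = (void *)((unsigned long)start & ~(ibytes - 1));
	     p < stop; p += ibytes)
		icbi(p);			/* drop stale icache lines */
	__asm__ __volatile__ ("sync; isync" : : : "memory");	/* resync ifetch */
}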

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CACHE_H */