/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_CACHE_H
#define _ASM_POWERPC_CACHE_H

#ifdef __KERNEL__


/* bytes per L1 cache line */
#if defined(CONFIG_PPC_8xx)
#define L1_CACHE_SHIFT		4
#define MAX_COPY_PREFETCH	1
#define IFETCH_ALIGN_SHIFT	2
#elif defined(CONFIG_PPC_E500MC)
#define L1_CACHE_SHIFT		6
#define MAX_COPY_PREFETCH	4
#define IFETCH_ALIGN_SHIFT	3
#elif defined(CONFIG_PPC32)
#define MAX_COPY_PREFETCH	4
#define IFETCH_ALIGN_SHIFT	3	/* 603 fetches 2 insn at a time */
#if defined(CONFIG_PPC_47x)
#define L1_CACHE_SHIFT		7
#else
#define L1_CACHE_SHIFT		5
#endif
#else /* CONFIG_PPC64 */
#define L1_CACHE_SHIFT		7
#define IFETCH_ALIGN_SHIFT	4	/* POWER8,9 */
#endif

#define	L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

#define	SMP_CACHE_BYTES		L1_CACHE_BYTES

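/*
 * Alignment for code the front end should fetch efficiently, used e.g.
 * as ".align IFETCH_ALIGN_SHIFT" on hot assembly entry points and loops.
 */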
#define IFETCH_ALIGN_BYTES	(1 << IFETCH_ALIGN_SHIFT)

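/*
 * On platforms where the data cache is not DMA-coherent, kmalloc()
 * buffers must be cache-line aligned so a DMA transfer cannot share a
 * line with (and corrupt) unrelated data.
 */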
#ifdef CONFIG_NOT_COHERENT_CACHE
#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
#endif

#if !defined(__ASSEMBLY__)
#ifdef CONFIG_PPC64

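/*
 * Geometry of one level of the cache hierarchy. On 64-bit these are
 * filled in at boot from device tree information.
 */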
struct ppc_cache_info {
	u32 size;
	u32 line_size;
	u32 block_size;	/* L1 only */
	u32 log_block_size;
	u32 blocks_per_page;
	u32 sets;
	u32 assoc;
};

struct ppc64_caches {
	struct ppc_cache_info l1d;
	struct ppc_cache_info l1i;
	struct ppc_cache_info l2;
	struct ppc_cache_info l3;
};

extern struct ppc64_caches ppc64_caches;

static inline u32 l1_dcache_shift(void)
{
	return ppc64_caches.l1d.log_block_size;
}

static inline u32 l1_dcache_bytes(void)
{
	return ppc64_caches.l1d.block_size;
}

static inline u32 l1_icache_shift(void)
{
	return ppc64_caches.l1i.log_block_size;
}

static inline u32 l1_icache_bytes(void)
{
	return ppc64_caches.l1i.block_size;
}
#else
static inline u32 l1_dcache_shift(void)
{
	return L1_CACHE_SHIFT;
}

static inline u32 l1_dcache_bytes(void)
{
	return L1_CACHE_BYTES;
}

static inline u32 l1_icache_shift(void)
{
	return L1_CACHE_SHIFT;
}

static inline u32 l1_icache_bytes(void)
{
	return L1_CACHE_BYTES;
}

#endif

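/*
 * Group data that is mostly read, rarely written, into its own section
 * so it does not share cache lines with frequently written data.
 */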
#define __read_mostly __section(".data..read_mostly")

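/*
 * L2CR/L3CR are the L2/L3 cache control SPRs found on 75x/74xx
 * ("G3"/"G4") CPUs; everywhere else they read back as zero and the
 * setters are no-ops.
 */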
#ifdef CONFIG_PPC_BOOK3S_32
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR()	0L
#define _get_L3CR()	0L
#define _set_L2CR(val)	do { } while(0)
#define _set_L3CR(val)	do { } while(0)
#endif

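/* Zero the data cache block containing addr without fetching it from memory. */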
static inline void dcbz(void *addr)
{
	__asm__ __volatile__ ("dcbz 0, %0" : : "r"(addr) : "memory");
}

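/* Invalidate the data cache block containing addr; dirty data is discarded. */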
static inline void dcbi(void *addr)
{
	__asm__ __volatile__ ("dcbi 0, %0" : : "r"(addr) : "memory");
}

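/* Flush: write the data cache block back if dirty, then invalidate it. */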
static inline void dcbf(void *addr)
{
	__asm__ __volatile__ ("dcbf 0, %0" : : "r"(addr) : "memory");
}

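/* Store: write the data cache block back if dirty, but keep it valid. */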
static inline void dcbst(void *addr)
{
	__asm__ __volatile__ ("dcbst 0, %0" : : "r"(addr) : "memory");
}

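/*
 * Invalidate the instruction cache block containing addr; paired with
 * sync/isync when exposing newly written instructions.
 */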
static inline void icbi(void *addr)
{
	asm volatile ("icbi 0, %0" : : "r"(addr) : "memory");
}

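/*
 * Invalidate the instruction cache (4xx/Book-E only); on most such
 * cores this invalidates the whole I-cache and the address is ignored.
 */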
static inline void iccci(void *addr)
{
	asm volatile ("iccci 0, %0" : : "r"(addr) : "memory");
}

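/*
 * Illustrative sketch only (the real helpers live in asm/cacheflush.h):
 * pushing a buffer out to memory one cache block at a time. The names
 * below are hypothetical and not part of this header.
 *
 *	static void flush_buf(void *start, unsigned long len)
 *	{
 *		unsigned long bytes = l1_dcache_bytes();
 *		unsigned long p = (unsigned long)start & ~(bytes - 1UL);
 *		unsigned long end = (unsigned long)start + len;
 *
 *		for (; p < end; p += bytes)
 *			dcbf((void *)p);
 *		mb();	// order the flushes before later accesses
 *	}
 */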
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CACHE_H */