// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mm/cache-feroceon-l2.c - Feroceon L2 cache controller support
 *
 * Copyright (C) 2008 Marvell Semiconductor
 *
 * References:
 * - Unified Layer 2 Cache for Feroceon CPU Cores,
 *   Document ID MV-S104858-00, Rev. A, October 23 2007.
 */

#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/hardware/cache-feroceon-l2.h>

#define L2_WRITETHROUGH_KIRKWOOD	BIT(4)

/*
 * Low-level cache maintenance operations.
 *
 * As well as the regular 'clean/invalidate/flush L2 cache line by
 * MVA' instructions, the Feroceon L2 cache controller also features
 * 'clean/invalidate L2 range by MVA' operations.
 *
 * Cache range operations are initiated by writing the start and
 * end addresses to successive cp15 registers, and process every
 * cache line whose first byte address lies in the inclusive range
 * [start:end].
 *
 * The cache range operations stall the CPU pipeline until completion.
 *
 * The range operations require two successive cp15 writes, in
 * between which we don't want to be preempted.
 */

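/*
 * Worked example (editor's illustration, not from the datasheet):
 * with 32-byte lines, a range op given start=0x1000 and end=0x105f
 * processes the lines based at 0x1000, 0x1020 and 0x1040 -- exactly
 * those whose first byte falls inside the inclusive [start:end].
 */
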
static inline unsigned long l2_get_va(unsigned long paddr)
{
#ifdef CONFIG_HIGHMEM
	/*
	 * Because range ops can't be done on physical addresses,
	 * we simply install a virtual mapping for the page just so
	 * the TLB lookup can succeed. The mapping itself is never
	 * accessed, hence there is no need to flush it afterwards
	 * (note: a cache flush may happen in some circumstances
	 * depending on the path taken in kunmap_atomic).
	 */
	void *vaddr = kmap_atomic_pfn(paddr >> PAGE_SHIFT);
	return (unsigned long)vaddr + (paddr & ~PAGE_MASK);
#else
	return __phys_to_virt(paddr);
#endif
}

static inline void l2_put_va(unsigned long vaddr)
{
#ifdef CONFIG_HIGHMEM
	kunmap_atomic((void *)vaddr);
#endif
}
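
/*
 * Note (editor's addition): every l2_get_va() must be paired with an
 * l2_put_va() on the address it returned; with CONFIG_HIGHMEM the pair
 * brackets an atomic kmap, so the caller must not sleep in between.
 */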

static inline void l2_clean_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c15, c9, 3" : : "r" (addr));
}

static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
{
	unsigned long va_start, va_end, flags;

	/*
	 * Make sure 'start' and 'end' reference the same page, as
	 * L2 is PIPT and range operations only do a TLB lookup on
	 * the start address.
	 */
	BUG_ON((start ^ end) >> PAGE_SHIFT);

	va_start = l2_get_va(start);
	va_end = va_start + (end - start);
	raw_local_irq_save(flags);
	__asm__("mcr p15, 1, %0, c15, c9, 4\n\t"
		"mcr p15, 1, %1, c15, c9, 5"
		: : "r" (va_start), "r" (va_end));
	raw_local_irq_restore(flags);
	l2_put_va(va_start);
}

static inline void l2_clean_inv_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c15, c10, 3" : : "r" (addr));
}

static inline void l2_inv_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c15, c11, 3" : : "r" (addr));
}

static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
{
	unsigned long va_start, va_end, flags;

	/*
	 * Make sure 'start' and 'end' reference the same page, as
	 * L2 is PIPT and range operations only do a TLB lookup on
	 * the start address.
	 */
	BUG_ON((start ^ end) >> PAGE_SHIFT);

	va_start = l2_get_va(start);
	va_end = va_start + (end - start);
	raw_local_irq_save(flags);
	__asm__("mcr p15, 1, %0, c15, c11, 4\n\t"
		"mcr p15, 1, %1, c15, c11, 5"
		: : "r" (va_start), "r" (va_end));
	raw_local_irq_restore(flags);
	l2_put_va(va_start);
}

static inline void l2_inv_all(void)
{
	__asm__("mcr p15, 1, %0, c15, c11, 0" : : "r" (0));
}
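
/*
 * Note (editor's addition): invalidate-all discards any dirty lines,
 * so it is only used below while the L2 is still disabled, in
 * enable_l2(), to start from a known-clean state.
 */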

/*
 * Linux primitives.
 *
 * Note that the end addresses passed to the Linux primitives are
 * non-inclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
#define CACHE_LINE_SIZE		32
#define MAX_RANGE_SIZE		1024	/* bytes per hardware range op */
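
/*
 * Illustration (editor's sketch): to operate on the non-inclusive
 * range [0x1000, 0x1080) with 32-byte lines, the hardware must be
 * given the inclusive pair (0x1000, 0x1060), i.e. the base address
 * of the last line -- hence the 'range_end - CACHE_LINE_SIZE'
 * adjustment at every range-op call site below.
 */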

/* Set when the L2 is forced write-through; a WT L2 is always clean. */
static int l2_wt_override;

static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	unsigned long range_end;

	BUG_ON(start & (CACHE_LINE_SIZE - 1));
	BUG_ON(end & (CACHE_LINE_SIZE - 1));

	/*
	 * Try to process all cache lines between 'start' and 'end'.
	 */
	range_end = end;

	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (range_end > start + MAX_RANGE_SIZE)
		range_end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (range_end > (start | (PAGE_SIZE - 1)) + 1)
		range_end = (start | (PAGE_SIZE - 1)) + 1;

	return range_end;
}

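/*
 * Worked example (editor's illustration, 4 KiB pages): starting from
 * calc_range_end(0x0fe0, 0x2000), the page-boundary clamp wins and
 * 0x1000 is returned; the caller then iterates from 0x1000, getting
 * 0x1400, 0x1800, 0x1c00 and finally 0x2000 (MAX_RANGE_SIZE chunks).
 */
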
static void feroceon_l2_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * Clean and invalidate partial first cache line.
	 */
	if (start & (CACHE_LINE_SIZE - 1)) {
		l2_clean_inv_pa(start & ~(CACHE_LINE_SIZE - 1));
		start = (start | (CACHE_LINE_SIZE - 1)) + 1;
	}

	/*
	 * Clean and invalidate partial last cache line.
	 */
	if (start < end && end & (CACHE_LINE_SIZE - 1)) {
		l2_clean_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
		end &= ~(CACHE_LINE_SIZE - 1);
	}

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		l2_inv_pa_range(start, range_end - CACHE_LINE_SIZE);
		start = range_end;
	}

	dsb();
}

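/*
 * Worked example (editor's illustration): feroceon_l2_inv_range(0x1010,
 * 0x1070) cleans+invalidates the partially covered lines at 0x1000 and
 * 0x1060, then invalidates the fully covered lines at 0x1020 and 0x1040
 * with a single range op over (0x1020, 0x1040) inclusive.
 */
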
static void feroceon_l2_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = (end + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			l2_clean_pa_range(start, range_end - CACHE_LINE_SIZE);
			start = range_end;
		}
	}

	dsb();
}

static void feroceon_l2_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = (end + CACHE_LINE_SIZE - 1) & ~(CACHE_LINE_SIZE - 1);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		if (!l2_wt_override)
			l2_clean_pa_range(start, range_end - CACHE_LINE_SIZE);
		l2_inv_pa_range(start, range_end - CACHE_LINE_SIZE);
		start = range_end;
	}

	dsb();
}
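
/*
 * Note (editor's addition): in WT override mode the clean step above is
 * skipped, since a write-through L2 never holds dirty data; flush then
 * degenerates to a pure invalidate.
 */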

/*
 * Routines to disable and re-enable the D-cache and I-cache at run
 * time. These are necessary because the L2 cache can only be enabled
 * or disabled while the L1 D-cache and I-cache are both disabled.
 */
static int __init flush_and_disable_dcache(void)
{
	u32 cr;

	cr = get_cr();
	if (cr & CR_C) {
		unsigned long flags;

		raw_local_irq_save(flags);
		flush_cache_all();
		set_cr(cr & ~CR_C);
		raw_local_irq_restore(flags);
		return 1;
	}
	return 0;
}
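
/*
 * Note (editor's addition): interrupts are held off above so that no
 * new dirty L1 lines can be allocated between flush_cache_all() and
 * the write that clears CR_C.
 */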

static void __init enable_dcache(void)
{
	u32 cr;

	cr = get_cr();
	set_cr(cr | CR_C);
}

static void __init __invalidate_icache(void)
{
	__asm__("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));
}

static int __init invalidate_and_disable_icache(void)
{
	u32 cr;

	cr = get_cr();
	if (cr & CR_I) {
		set_cr(cr & ~CR_I);
		__invalidate_icache();
		return 1;
	}
	return 0;
}

static void __init enable_icache(void)
{
	u32 cr;

	cr = get_cr();
	set_cr(cr | CR_I);
}

static inline u32 read_extra_features(void)
{
	u32 u;

	__asm__("mrc p15, 1, %0, c15, c1, 0" : "=r" (u));

	return u;
}

static inline void write_extra_features(u32 u)
{
	__asm__("mcr p15, 1, %0, c15, c1, 0" : : "r" (u));
}

static void __init disable_l2_prefetch(void)
{
	u32 u;

	/*
	 * Read the CPU Extra Features register and make sure that the
	 * Disable L2 Prefetch bit (bit 24) is set.
	 */
	u = read_extra_features();
	if (!(u & 0x01000000)) {
		pr_info("Feroceon L2: Disabling L2 prefetch.\n");
		write_extra_features(u | 0x01000000);
	}
}

static void __init enable_l2(void)
{
	u32 u;

	u = read_extra_features();
	if (!(u & 0x00400000)) {	/* L2 Cache Enable (bit 22) */
		int i, d;

		pr_info("Feroceon L2: Enabling L2\n");

		d = flush_and_disable_dcache();
		i = invalidate_and_disable_icache();
		l2_inv_all();
		write_extra_features(u | 0x00400000);
		if (i)
			enable_icache();
		if (d)
			enable_dcache();
	} else
		pr_err(FW_BUG
		       "Feroceon L2: bootloader left the L2 cache on!\n");
}

void __init feroceon_l2_init(int __l2_wt_override)
{
	l2_wt_override = __l2_wt_override;

	disable_l2_prefetch();

	outer_cache.inv_range = feroceon_l2_inv_range;
	outer_cache.clean_range = feroceon_l2_clean_range;
	outer_cache.flush_range = feroceon_l2_flush_range;

	enable_l2();

	pr_info("Feroceon L2: Cache support initialised%s.\n",
		l2_wt_override ? ", in WT override mode" : "");
}
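
/*
 * Usage sketch (editor's addition, hypothetical caller): a non-DT board
 * would call this directly from its machine init code, e.g.:
 *
 *	feroceon_l2_init(IS_ENABLED(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH));
 */
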
#ifdef CONFIG_OF
static const struct of_device_id feroceon_ids[] __initconst = {
	{ .compatible = "marvell,kirkwood-cache"},
	{ .compatible = "marvell,feroceon-cache"},
	{}
};

int __init feroceon_of_init(void)
{
	struct device_node *node;
	void __iomem *base;
	bool l2_wt_override = false;

#if defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	l2_wt_override = true;
#endif

	node = of_find_matching_node(NULL, feroceon_ids);
	if (node && of_device_is_compatible(node, "marvell,kirkwood-cache")) {
		base = of_iomap(node, 0);
		if (!base)
			return -ENOMEM;

		if (l2_wt_override)
			writel(readl(base) | L2_WRITETHROUGH_KIRKWOOD, base);
		else
			writel(readl(base) & ~L2_WRITETHROUGH_KIRKWOOD, base);
	}

	feroceon_l2_init(l2_wt_override);

	return 0;
}
#endif
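
/*
 * Example DT node (editor's sketch; the register offset is illustrative,
 * in the style of Kirkwood dtsi files):
 *
 *	l2: l2-cache@20128 {
 *		compatible = "marvell,kirkwood-cache";
 *		reg = <0x20128 0x4>;
 *	};
 */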