  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Broadcom Brahma-B15 CPU read-ahead cache management functions
  4 *
  5 * Copyright (C) 2015-2016 Broadcom
  6 */
  7
  8#include <linux/cfi_types.h>
  9#include <linux/err.h>
 10#include <linux/spinlock.h>
 11#include <linux/io.h>
 12#include <linux/bitops.h>
 13#include <linux/of_address.h>
 14#include <linux/notifier.h>
 15#include <linux/cpu.h>
 16#include <linux/syscore_ops.h>
 17#include <linux/reboot.h>
 18
 19#include <asm/cacheflush.h>
 20#include <asm/hardware/cache-b15-rac.h>
 21
 22extern void v7_flush_kern_cache_all(void);
 23
 24/* RAC register offsets, relative to the HIF_CPU_BIUCTRL register base */
 25#define RAC_CONFIG0_REG			(0x78)
 26#define  RACENPREF_MASK			(0x3)
 27#define  RACPREFINST_SHIFT		(0)
 28#define  RACENINST_SHIFT		(2)
 29#define  RACPREFDATA_SHIFT		(4)
 30#define  RACENDATA_SHIFT		(6)
 31#define  RAC_CPU_SHIFT			(8)
 32#define  RACCFG_MASK			(0xff)
 33#define RAC_CONFIG1_REG			(0x7c)
 34/* Brahma-B15 is a quad-core only design */
 35#define B15_RAC_FLUSH_REG		(0x80)
 36/* Brahma-B53 is an octo-core design */
 37#define B53_RAC_FLUSH_REG		(0x84)
 38#define  FLUSH_RAC			(1 << 0)
 39
 40/* Bitmask to enable instruction and data prefetching with a 256-bytes stride */
 41#define RAC_DATA_INST_EN_MASK		(1 << RACPREFINST_SHIFT | \
 42					 RACENPREF_MASK << RACENINST_SHIFT | \
 43					 1 << RACPREFDATA_SHIFT | \
 44					 RACENPREF_MASK << RACENDATA_SHIFT)
 45
 46#define RAC_ENABLED			0
 47/* Special state where we want to bypass the spinlock and call directly
 48 * into the v7 cache maintenance operations during suspend/resume
 49 */
 50#define RAC_SUSPENDED			1
 51
 52static void __iomem *b15_rac_base;
 53static DEFINE_SPINLOCK(rac_lock);
 54
 55static u32 rac_config0_reg;
 56static u32 rac_flush_offset;
 57
 58/* Initialization flag to avoid checking for b15_rac_base, and to prevent
 59 * multi-platform kernels from crashing here as well.
 60 */
 61static unsigned long b15_rac_flags;
 62
/* Read the current RAC configuration, then disable the RAC by clearing
 * RAC_CONFIG0_REG.  The dmb() orders the disabling write before any
 * subsequent memory accesses.  Returns the previous configuration so it
 * can later be restored with __b15_rac_enable().
 */
static inline u32 __b15_rac_disable(void)
{
	u32 val = __raw_readl(b15_rac_base + RAC_CONFIG0_REG);
	__raw_writel(0, b15_rac_base + RAC_CONFIG0_REG);
	dmb();
	return val;
}
 70
/* Kick off a flush of the read-ahead cache and busy-wait until the
 * hardware clears the FLUSH_RAC bit, signalling completion.
 */
static inline void __b15_rac_flush(void)
{
	u32 reg;

	__raw_writel(FLUSH_RAC, b15_rac_base + rac_flush_offset);
	do {
		/* This dmb() is required to force the Bus Interface Unit
		 * to clean outstanding writes, and forces an idle cycle
		 * to be inserted.
		 */
		dmb();
		reg = __raw_readl(b15_rac_base + rac_flush_offset);
	} while (reg & FLUSH_RAC);
}
 85
/* Disable the RAC and flush it.  Returns the RAC_CONFIG0_REG value that
 * was in effect before disabling, for a later re-enable.
 */
static inline u32 b15_rac_disable_and_flush(void)
{
	u32 reg;

	reg = __b15_rac_disable();
	__b15_rac_flush();
	return reg;
}
 94
/* Write a RAC configuration (typically one previously returned by
 * __b15_rac_disable()) into RAC_CONFIG0_REG, re-enabling the RAC.
 */
static inline void __b15_rac_enable(u32 val)
{
	__raw_writel(val, b15_rac_base + RAC_CONFIG0_REG);
	/* dsb() is required here to be consistent with __flush_icache_all() */
	dsb();
}
101
/* Generate b15_flush_<name>(), wrapping v7_flush_<name>() with RAC
 * management: when the RAC is enabled it is disabled and flushed before
 * the v7 maintenance operation runs, and restored afterwards.  "bar" is
 * an optional trailing barrier statement, emitted only when the RAC was
 * not re-enabled.  While suspended (RAC_SUSPENDED set) the spinlock is
 * bypassed and the v7 operation is called directly.
 */
#define BUILD_RAC_CACHE_OP(name, bar)				\
void b15_flush_##name(void)					\
{								\
	unsigned int do_flush;					\
	u32 val = 0;						\
								\
	if (test_bit(RAC_SUSPENDED, &b15_rac_flags)) {		\
		v7_flush_##name();				\
		bar;						\
		return;						\
	}							\
								\
	spin_lock(&rac_lock);					\
	do_flush = test_bit(RAC_ENABLED, &b15_rac_flags);	\
	if (do_flush)						\
		val = b15_rac_disable_and_flush();		\
	v7_flush_##name();					\
	if (!do_flush)						\
		bar;						\
	else							\
		__b15_rac_enable(val);				\
	spin_unlock(&rac_lock);					\
}

/* Expands to nothing: used as the "bar" argument when no barrier is needed */
#define nobarrier
127
/* The readahead cache present in the Brahma-B15 CPU is a special piece of
 * hardware after the integrated L2 cache of the B15 CPU complex whose purpose
 * is to prefetch instruction and/or data with a line size of either 64 bytes
 * or 256 bytes. The rationale is that the data-bus of the CPU interface is
 * optimized for 256-byte transactions, and enabling the readahead cache
 * provides a significant performance boost, so we want it enabled (typically
 * twice the performance for a memcpy benchmark application).
 *
 * The readahead cache is transparent for Modified Virtual Addresses
 * cache maintenance operations: ICIMVAU, DCIMVAC, DCCMVAC, DCCMVAU and
 * DCCIMVAC.
 *
 * It is however not transparent for the following cache maintenance
 * operations: DCISW, DCCSW, DCCISW, ICIALLUIS and ICIALLU, which is precisely
 * what we are patching here with our BUILD_RAC_CACHE_OP.
 */
BUILD_RAC_CACHE_OP(kern_cache_all, nobarrier);
145
146static void b15_rac_enable(void)
147{
148	unsigned int cpu;
149	u32 enable = 0;
150
151	for_each_possible_cpu(cpu)
152		enable |= (RAC_DATA_INST_EN_MASK << (cpu * RAC_CPU_SHIFT));
153
154	b15_rac_disable_and_flush();
155	__b15_rac_enable(enable);
156}
157
158static int b15_rac_reboot_notifier(struct notifier_block *nb,
159				   unsigned long action,
160				   void *data)
161{
162	/* During kexec, we are not yet migrated on the boot CPU, so we need to
163	 * make sure we are SMP safe here. Once the RAC is disabled, flag it as
164	 * suspended such that the hotplug notifier returns early.
165	 */
166	if (action == SYS_RESTART) {
167		spin_lock(&rac_lock);
168		b15_rac_disable_and_flush();
169		clear_bit(RAC_ENABLED, &b15_rac_flags);
170		set_bit(RAC_SUSPENDED, &b15_rac_flags);
171		spin_unlock(&rac_lock);
172	}
173
174	return NOTIFY_DONE;
175}
176
/* Registered in b15_rac_init() to disable the RAC across reboot/kexec */
static struct notifier_block b15_rac_reboot_nb = {
	.notifier_call	= b15_rac_reboot_notifier,
};
180
181/* The CPU hotplug case is the most interesting one, we basically need to make
182 * sure that the RAC is disabled for the entire system prior to having a CPU
183 * die, in particular prior to this dying CPU having exited the coherency
184 * domain.
185 *
186 * Once this CPU is marked dead, we can safely re-enable the RAC for the
187 * remaining CPUs in the system which are still online.
188 *
189 * Offlining a CPU is the problematic case, onlining a CPU is not much of an
190 * issue since the CPU and its cache-level hierarchy will start filling with
191 * the RAC disabled, so L1 and L2 only.
192 *
193 * In this function, we should NOT have to verify any unsafe setting/condition
194 * b15_rac_base:
195 *
196 *   It is protected by the RAC_ENABLED flag which is cleared by default, and
197 *   being cleared when initial procedure is done. b15_rac_base had been set at
198 *   that time.
199 *
200 * RAC_ENABLED:
 *   There is a small timing window, in b15_rac_init(), between
202 *      cpuhp_setup_state_*()
203 *      ...
204 *      set RAC_ENABLED
205 *   However, there is no hotplug activity based on the Linux booting procedure.
206 *
 * Since we have to disable RAC for all cores, we keep RAC on as long as
208 * possible (disable it as late as possible) to gain the cache benefit.
209 *
210 * Thus, dying/dead states are chosen here
211 *
 * We are choosing not to disable the RAC on a per-CPU basis, here; if we did
213 * we would want to consider disabling it as early as possible to benefit the
214 * other active CPUs.
215 */
216
/* Running on the dying CPU: disable the RAC system-wide before this CPU
 * exits the coherency domain.  The saved configuration is restored by
 * b15_rac_dead_cpu() once the CPU has been marked dead.
 */
static int b15_rac_dying_cpu(unsigned int cpu)
{
	/* During kexec/reboot, the RAC is disabled via the reboot notifier;
	 * return early here.
	 */
	if (test_bit(RAC_SUSPENDED, &b15_rac_flags))
		return 0;

	spin_lock(&rac_lock);

	/* Indicate that we are starting a hotplug procedure */
	__clear_bit(RAC_ENABLED, &b15_rac_flags);

	/* Disable the readahead cache and save its value to a global */
	rac_config0_reg = b15_rac_disable_and_flush();

	spin_unlock(&rac_lock);

	return 0;
}
238
/* Running on a non-dying CPU, after the hotplugged CPU is marked dead:
 * re-enable the RAC for the remaining online CPUs using the config saved
 * by b15_rac_dying_cpu().
 */
static int b15_rac_dead_cpu(unsigned int cpu)
{
	/* During kexec/reboot, the RAC is disabled via the reboot notifier;
	 * return early here.
	 */
	if (test_bit(RAC_SUSPENDED, &b15_rac_flags))
		return 0;

	spin_lock(&rac_lock);

	/* Re-enable the RAC with the configuration saved when disabling */
	__b15_rac_enable(rac_config0_reg);
	__set_bit(RAC_ENABLED, &b15_rac_flags);

	spin_unlock(&rac_lock);

	return 0;
}
258
static int b15_rac_suspend(void)
{
	/* Suspend the read-ahead cache operations, forcing our cache
	 * implementation to fallback to the regular ARMv7 calls.
	 *
	 * We are guaranteed to be running on the boot CPU at this point and
	 * with every other CPU quiesced, so setting RAC_SUSPENDED is not racy
	 * here.
	 */
	rac_config0_reg = b15_rac_disable_and_flush();
	set_bit(RAC_SUSPENDED, &b15_rac_flags);

	return 0;
}
273
static void b15_rac_resume(void)
{
	/* Coming out of a S3 suspend/resume cycle, the read-ahead cache
	 * register RAC_CONFIG0_REG will be restored to its default value, make
	 * sure we re-enable it and set the enable flag, we are also guaranteed
	 * to run on the boot CPU, so not racy again.
	 */
	__b15_rac_enable(rac_config0_reg);
	clear_bit(RAC_SUSPENDED, &b15_rac_flags);
}
284
/* System-core hooks: disable the RAC across S3 suspend/resume */
static struct syscore_ops b15_rac_syscore_ops = {
	.suspend	= b15_rac_suspend,
	.resume		= b15_rac_resume,
};
289
290static int __init b15_rac_init(void)
291{
292	struct device_node *dn, *cpu_dn;
293	int ret = 0, cpu;
294	u32 reg, en_mask = 0;
295
296	dn = of_find_compatible_node(NULL, NULL, "brcm,brcmstb-cpu-biu-ctrl");
297	if (!dn)
298		return -ENODEV;
299
300	if (WARN(num_possible_cpus() > 4, "RAC only supports 4 CPUs\n"))
301		goto out;
302
303	b15_rac_base = of_iomap(dn, 0);
304	if (!b15_rac_base) {
305		pr_err("failed to remap BIU control base\n");
306		ret = -ENOMEM;
307		goto out;
308	}
309
310	cpu_dn = of_get_cpu_node(0, NULL);
311	if (!cpu_dn) {
312		ret = -ENODEV;
313		goto out;
314	}
315
316	if (of_device_is_compatible(cpu_dn, "brcm,brahma-b15"))
317		rac_flush_offset = B15_RAC_FLUSH_REG;
318	else if (of_device_is_compatible(cpu_dn, "brcm,brahma-b53"))
319		rac_flush_offset = B53_RAC_FLUSH_REG;
320	else {
321		pr_err("Unsupported CPU\n");
322		of_node_put(cpu_dn);
323		ret = -EINVAL;
324		goto out;
325	}
326	of_node_put(cpu_dn);
327
328	ret = register_reboot_notifier(&b15_rac_reboot_nb);
329	if (ret) {
330		pr_err("failed to register reboot notifier\n");
331		iounmap(b15_rac_base);
332		goto out;
333	}
334
335	if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
336		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
337					"arm/cache-b15-rac:dead",
338					NULL, b15_rac_dead_cpu);
339		if (ret)
340			goto out_unmap;
341
342		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
343					"arm/cache-b15-rac:dying",
344					NULL, b15_rac_dying_cpu);
345		if (ret)
346			goto out_cpu_dead;
347	}
348
349	if (IS_ENABLED(CONFIG_PM_SLEEP))
350		register_syscore_ops(&b15_rac_syscore_ops);
351
352	spin_lock(&rac_lock);
353	reg = __raw_readl(b15_rac_base + RAC_CONFIG0_REG);
354	for_each_possible_cpu(cpu)
355		en_mask |= ((1 << RACPREFDATA_SHIFT) << (cpu * RAC_CPU_SHIFT));
356	WARN(reg & en_mask, "Read-ahead cache not previously disabled\n");
357
358	b15_rac_enable();
359	set_bit(RAC_ENABLED, &b15_rac_flags);
360	spin_unlock(&rac_lock);
361
362	pr_info("%pOF: Broadcom Brahma-B15 readahead cache\n", dn);
363
364	goto out;
365
366out_cpu_dead:
367	cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DYING);
368out_unmap:
369	unregister_reboot_notifier(&b15_rac_reboot_nb);
370	iounmap(b15_rac_base);
371out:
372	of_node_put(dn);
373	return ret;
374}
375arch_initcall(b15_rac_init);