arch/arm/mm/cache-l2x0.c (Linux v3.5.6)

/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

struct l2x0_of_data {
	void (*setup)(const struct device_node *, u32 *, u32 *);
	void (*save)(void);
	void (*resume)(void);
};

static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	cache_wait_way
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}
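
/*
 * Note: sync_reg_offset normally points at L2X0_CACHE_SYNC; when the
 * PL310 errata 753970 workaround is built in, l2x0_init() below
 * redirects it to L2X0_DUMMY_REG, so the sync is issued by writing to
 * that (otherwise unused) register instead.
 */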

static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug)
		outer_cache.set_debug(val);
}

static void pl310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define pl310_set_debug	NULL
#endif
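
/*
 * Note: the debug_writel(0x03)/debug_writel(0x00) pairs used below
 * bracket clean/invalidate sequences when the 588369 or 727915 errata
 * workarounds are enabled; writing 0x03 via pl310_set_debug() sets the
 * PL310 debug-control bits that hold off write-back and linefills
 * while the maintenance operation is in flight.
 */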

#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a no-no */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
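
/*
 * Note: the range operations here walk the range in blocks of at most
 * 4096 bytes, dropping and re-taking l2x0_lock between blocks so that
 * interrupts are not held off for the whole of a large flush.
 */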

static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_unlock(u32 cache_id)
{
	int lockregs;
	int i;

	if (cache_id == L2X0_CACHE_ID_PART_L310)
		lockregs = 8;
	else
		/* L210 and unknown types */
		lockregs = 1;

	for (i = 0; i < lockregs; i++) {
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	u32 aux;
	u32 cache_id;
	u32 way_size = 0;
	int ways;
	const char *type;

	l2x0_base = base;

	cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
#ifdef CONFIG_PL310_ERRATA_753970
		/* Unmapped register. */
		sync_reg_offset = L2X0_DUMMY_REG;
#endif
		outer_cache.set_debug = pl310_set_debug;
		break;
	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache Size =  Way size * Number of ways
	 */
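	/*
	 * The way size field read from AUX_CTRL below is decoded as
	 * (1 << (value + 3)) KB, i.e. 16KB for a field value of 1,
	 * 32KB for 2, and so on.
	 */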
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + 3);
	l2x0_size = ways * way_size * SZ_1K;

	/*
	 * Check if the l2x0 controller is already enabled.
	 * If you are booting from non-secure mode,
	 * accessing the registers below will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* Make sure that I&D is not locked down when starting */
		l2x0_unlock(cache_id);

		/* l2x0 controller is disabled */
		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

		l2x0_saved_regs.aux_ctrl = aux;

		l2x0_inv_all();

		/* enable L2X0 */
		writel_relaxed(1, l2x0_base + L2X0_CTRL);
	}

	outer_cache.inv_range = l2x0_inv_range;
	outer_cache.clean_range = l2x0_clean_range;
	outer_cache.flush_range = l2x0_flush_range;
	outer_cache.sync = l2x0_cache_sync;
	outer_cache.flush_all = l2x0_flush_all;
	outer_cache.inv_all = l2x0_inv_all;
	outer_cache.disable = l2x0_disable;

	printk(KERN_INFO "%s cache controller enabled\n", type);
	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
			ways, cache_id, aux, l2x0_size);
}

#ifdef CONFIG_OF
static void __init l2x0_of_setup(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}
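
/*
 * Note: l2x0_init() applies these as aux = (hw_value & *aux_mask) | *aux_val,
 * so clearing the bits in *aux_mask above makes the device-tree supplied
 * latencies replace whatever value the boot firmware left in AUX_CTRL.
 */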

static void __init pl310_of_setup(const struct device_node *np,
				  u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}

static void __init pl310_save(void)
{
	u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
		L2X0_CACHE_ID_RTL_MASK;

	l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_START);

	if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
		/*
		 * From r2p0 there is a Prefetch offset/control register
		 */
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
			L2X0_PREFETCH_CTRL);
		/*
		 * From r3p0 there is a Power control register
		 */
		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
			l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
				L2X0_POWER_CTRL);
	}
}

static void l2x0_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* restore aux ctrl and enable l2 */
		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));

		writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
			L2X0_AUX_CTRL);

		l2x0_inv_all();

		writel_relaxed(1, l2x0_base + L2X0_CTRL);
	}
}

static void pl310_resume(void)
{
	u32 l2x0_revision;

	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			l2x0_base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			l2x0_base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			l2x0_base + L2X0_ADDR_FILTER_START);

		l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				l2x0_base + L2X0_PREFETCH_CTRL);
			if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
				writel_relaxed(l2x0_saved_regs.pwr_ctrl,
					l2x0_base + L2X0_POWER_CTRL);
		}
	}

	l2x0_resume();
}

static const struct l2x0_of_data pl310_data = {
	pl310_of_setup,
	pl310_save,
	pl310_resume,
};

static const struct l2x0_of_data l2x0_data = {
	l2x0_of_setup,
	NULL,
	l2x0_resume,
};

static const struct of_device_id l2x0_ids[] __initconst = {
	{ .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
	{ .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
	{ .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
	{}
};

int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	struct device_node *np;
	struct l2x0_of_data *data;
	struct resource res;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		if (data->setup)
			data->setup(np, &aux_val, &aux_mask);
	}

	if (data->save)
		data->save();

	l2x0_init(l2x0_base, aux_val, aux_mask);

	outer_cache.resume = data->resume;
	return 0;
}
#endif