v3.1
/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_SPINLOCK(l2x0_lock);
static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
static uint32_t l2x0_size;

static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		;
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	cache_wait_way
#endif
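
/*
 * ARM erratum 753970 (PL310 r3p0): a write to the Cache Sync register may
 * not complete correctly, so affected configurations post the drain through
 * the unmapped dummy register instead; hence the CONFIG_ARM_ERRATA_753970
 * branch below.
 */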
static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

#ifdef CONFIG_ARM_ERRATA_753970
	/* write to an unmapped register */
	writel_relaxed(0, base + L2X0_DUMMY_REG);
#else
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
#endif
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

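/*
 * PL310 errata 588369 and 727915 are worked around by writing the Debug
 * Control register around the affected maintenance operations:
 * debug_writel(0x03) sets the "disable write-back" and "disable cache
 * linefill" bits, debug_writel(0x00) restores normal operation.
 */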
#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
#define debug_writel(val)	outer_cache.set_debug(val)

static void l2x0_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define l2x0_set_debug	NULL
#endif

#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a no-no */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
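
/*
 * The range operations below work on blocks of at most 4K at a time: the
 * lock is dropped and re-taken between blocks so that interrupts are not
 * held off for the whole of a large range.
 */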
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
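
/*
 * For a range at least as large as the cache itself it is cheaper to
 * operate on the whole cache by way than to walk the range line by line,
 * hence the l2x0_clean_all()/l2x0_flush_all() short cuts below.
 */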
static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __init l2x0_unlock(__u32 cache_id)
{
	int lockregs;
	int i;

	if ((cache_id & L2X0_CACHE_ID_PART_MASK) == L2X0_CACHE_ID_PART_L310)
		lockregs = 8;
	else
		/* L210 and unknown types */
		lockregs = 1;

	for (i = 0; i < lockregs; i++) {
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
	__u32 aux;
	__u32 cache_id;
	__u32 way_size = 0;
	int ways;
	const char *type;

	l2x0_base = base;

	cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
		break;
	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

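	/*
	 * AUX_CTRL[19:17] holds the way-size field n, giving 1 << (n + 3) KB
	 * per way: e.g. n = 2 means 32 KB ways, so 8 ways give a 256 KB cache.
	 */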
	/*
	 * L2 cache Size =  Way size * Number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + 3);
	l2x0_size = ways * way_size * SZ_1K;

	/*
	 * Check if the l2x0 controller is already enabled.
	 * If you are booting from non-secure mode, accessing
	 * the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* Make sure that I&D is not locked down when starting */
		l2x0_unlock(cache_id);

		/* l2x0 controller is disabled */
		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

		l2x0_inv_all();

		/* enable L2X0 */
		writel_relaxed(1, l2x0_base + L2X0_CTRL);
	}

	outer_cache.inv_range = l2x0_inv_range;
	outer_cache.clean_range = l2x0_clean_range;
	outer_cache.flush_range = l2x0_flush_range;
	outer_cache.sync = l2x0_cache_sync;
	outer_cache.flush_all = l2x0_flush_all;
	outer_cache.inv_all = l2x0_inv_all;
	outer_cache.disable = l2x0_disable;
	outer_cache.set_debug = l2x0_set_debug;

	printk(KERN_INFO "%s cache controller enabled\n", type);
	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
			ways, cache_id, aux, l2x0_size);
}
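
/*
 * Typical use from a platform's early init code; the base address and the
 * aux_val/aux_mask values below are illustrative only:
 *
 *	void __iomem *l2cc = ioremap(0xfff20000, SZ_4K);
 *
 *	if (l2cc)
 *		l2x0_init(l2cc, 0x00400000, 0xfe0fffff);
 */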
v3.15
/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

/*
 * Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree.
 */
static u32 cache_id_part_number_from_dt;

struct l2x0_regs l2x0_saved_regs;

struct l2x0_of_data {
	void (*setup)(const struct device_node *, u32 *, u32 *);
	void (*save)(void);
	struct outer_cache_fns outer_cache;
};

static bool of_init = false;

static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	cache_wait_way
#endif
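
/*
 * sync_reg_offset defaults to L2X0_CACHE_SYNC; l2x0_init() redirects it to
 * the dummy register on L310 when CONFIG_PL310_ERRATA_753970 is set, and
 * to AURORA_SYNC_REG on Aurora, so cache_sync() needs no #ifdefs here.
 */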
static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug)
		outer_cache.set_debug(val);
}

static void pl310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define pl310_set_debug	NULL
#endif

#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a no-no */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_unlock(u32 cache_id)
{
	int lockregs;
	int i;

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		lockregs = 8;
		break;
	case AURORA_CACHE_ID:
		lockregs = 4;
		break;
	default:
		/* L210 and unknown types */
		lockregs = 1;
		break;
	}

	for (i = 0; i < lockregs; i++) {
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	u32 aux;
	u32 cache_id;
	u32 way_size = 0;
	int ways;
	int way_size_shift = L2X0_WAY_SIZE_SHIFT;
	const char *type;

	l2x0_base = base;
	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
#ifdef CONFIG_PL310_ERRATA_753970
		/* Unmapped register. */
		sync_reg_offset = L2X0_DUMMY_REG;
#endif
		if ((cache_id & L2X0_CACHE_ID_RTL_MASK) <= L2X0_CACHE_ID_RTL_R3P0)
			outer_cache.set_debug = pl310_set_debug;
		break;
	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;

	case AURORA_CACHE_ID:
		sync_reg_offset = AURORA_SYNC_REG;
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
		way_size_shift = AURORA_WAY_SIZE_SHIFT;
		type = "Aurora";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache Size =  Way size * Number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + way_size_shift);

	l2x0_size = ways * way_size * SZ_1K;

	/*
	 * Check if the l2x0 controller is already enabled.
	 * If you are booting from non-secure mode, accessing
	 * the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* Make sure that I&D is not locked down when starting */
		l2x0_unlock(cache_id);

		/* l2x0 controller is disabled */
		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

		l2x0_inv_all();

		/* enable L2X0 */
		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
	}

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	/* Save the value for resuming. */
	l2x0_saved_regs.aux_ctrl = aux;

	if (!of_init) {
		outer_cache.inv_range = l2x0_inv_range;
		outer_cache.clean_range = l2x0_clean_range;
		outer_cache.flush_range = l2x0_flush_range;
		outer_cache.sync = l2x0_cache_sync;
		outer_cache.flush_all = l2x0_flush_all;
		outer_cache.inv_all = l2x0_inv_all;
		outer_cache.disable = l2x0_disable;
	}

	pr_info("%s cache controller enabled\n", type);
	pr_info("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d kB\n",
		ways, cache_id, aux, l2x0_size >> 10);
}

#ifdef CONFIG_OF
static int l2_wt_override;

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}
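
/*
 * Example: with 4K pages a block starting at 0x1f40 is clipped to end at
 * 0x2000, the next page boundary (assuming MAX_RANGE_SIZE does not clip
 * it sooner); the caller then continues from start = 0x2000.
 */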

/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			unsigned long offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
	writel_relaxed(end, l2x0_base + offset);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	cache_sync();
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * Round the start address down and the end address up to
	 * cache line size.
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
				AURORA_INVAL_RANGE_REG);
		start = range_end;
	}
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
							AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
							AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}

/*
 * For certain Broadcom SoCs, depending on the address range, different
 * offsets need to be added to the address before passing it to L2 for
 * invalidation/clean/flush
 *
 * Section Address Range              Offset        EMI
 *   1     0x00000000 - 0x3FFFFFFF    0x80000000    VC
 *   2     0x40000000 - 0xBFFFFFFF    0x40000000    SYS
 *   3     0xC0000000 - 0xFFFFFFFF    0x80000000    VC
 *
 * When the start and end addresses fall in two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, to invalidate addresses starting at 0xBFFF0000 and ending
 * at 0xC0001000, we invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and
 * 2) 0xC0000000 - 0xC0001000 separately.
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer
 * some performance hit, but keep in mind the cross-section case is very
 * rare.
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid
 * use case.
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2. Because
 * of that the code does not need to handle section 1 at all.
 */
#define BCM_SYS_EMI_START_ADDR        0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR    0xC0000000UL

#define BCM_SYS_EMI_OFFSET            0x40000000UL
#define BCM_VC_EMI_OFFSET             0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}
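
/*
 * Example: 0x50000000 lies in the SYS EMI section, so it is presented to
 * the L2 as 0x50000000 + BCM_SYS_EMI_OFFSET = 0x90000000.
 */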

static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_inv_range(new_start, new_end);
		return;
	}

	/*
	 * They cross sections, so it can only be a cross from section
	 * 2 to section 3.
	 */
	l2x0_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_clean_range(new_start, new_end);
		return;
	}

	/*
	 * They cross sections, so it can only be a cross from section
	 * 2 to section 3.
	 */
	l2x0_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_flush_range(new_start, new_end);
		return;
	}

	/*
	 * They cross sections, so it can only be a cross from section
	 * 2 to section 3.
	 */
	l2x0_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void __init l2x0_of_setup(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static void __init pl310_of_setup(const struct device_node *np,
				  u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}
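
/*
 * The properties read above correspond to a device-tree node along these
 * lines (addresses and latency values are illustrative only):
 *
 *	l2-cache@fff20000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xfff20000 0x1000>;
 *		cache-unified;
 *		cache-level = <2>;
 *		arm,tag-latency = <2 2 1>;
 *		arm,data-latency = <3 2 2>;
 *	};
 */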

static void __init pl310_save(void)
{
	u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
		L2X0_CACHE_ID_RTL_MASK;

	l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_START);

	if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
		/*
		 * From r2p0, there is Prefetch offset/control register
		 */
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
			L2X0_PREFETCH_CTRL);
		/*
		 * From r3p0, there is Power control register
		 */
		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
			l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
				L2X0_POWER_CTRL);
	}
}

static void aurora_save(void)
{
	l2x0_saved_regs.ctrl = readl_relaxed(l2x0_base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
}

static void __init tauros3_save(void)
{
	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(l2x0_base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL);
}

static void l2x0_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore aux ctrl and enable l2 */
		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));

		writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
			L2X0_AUX_CTRL);

		l2x0_inv_all();

		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
	}
}

static void pl310_resume(void)
{
	u32 l2x0_revision;

	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			l2x0_base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			l2x0_base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			l2x0_base + L2X0_ADDR_FILTER_START);

		l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				l2x0_base + L2X0_PREFETCH_CTRL);
			if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
				writel_relaxed(l2x0_saved_regs.pwr_ctrl,
					l2x0_base + L2X0_POWER_CTRL);
		}
	}

	l2x0_resume();
}

static void aurora_resume(void)
{
	if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl,
				l2x0_base + L2X0_AUX_CTRL);
		writel_relaxed(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL);
	}
}

static void tauros3_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
			       l2x0_base + TAUROS3_AUX2_CTRL);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       l2x0_base + L2X0_PREFETCH_CTRL);
	}

	l2x0_resume();
}

static void __init aurora_broadcast_l2_commands(void)
{
	__u32 u;
	/* Enable broadcasting of cache commands to L2 */
	__asm__ __volatile__("mrc p15, 1, %0, c15, c2, 0" : "=r"(u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	__asm__ __volatile__("mcr p15, 1, %0, c15, c2, 0\n" : : "r"(u));
	isb();
}

static void __init aurora_of_setup(const struct device_node *np,
				u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			&cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2x0_of_data pl310_data = {
	.setup = pl310_of_setup,
	.save  = pl310_save,
	.outer_cache = {
		.resume      = pl310_resume,
		.inv_range   = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.sync        = l2x0_cache_sync,
		.flush_all   = l2x0_flush_all,
		.inv_all     = l2x0_inv_all,
		.disable     = l2x0_disable,
	},
};

static const struct l2x0_of_data l2x0_data = {
	.setup = l2x0_of_setup,
	.save  = NULL,
	.outer_cache = {
		.resume      = l2x0_resume,
		.inv_range   = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.sync        = l2x0_cache_sync,
		.flush_all   = l2x0_flush_all,
		.inv_all     = l2x0_inv_all,
		.disable     = l2x0_disable,
	},
};

static const struct l2x0_of_data aurora_with_outer_data = {
	.setup = aurora_of_setup,
	.save  = aurora_save,
	.outer_cache = {
		.resume      = aurora_resume,
		.inv_range   = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.sync        = l2x0_cache_sync,
		.flush_all   = l2x0_flush_all,
		.inv_all     = l2x0_inv_all,
		.disable     = l2x0_disable,
	},
};

static const struct l2x0_of_data aurora_no_outer_data = {
	.setup = aurora_of_setup,
	.save  = aurora_save,
	.outer_cache = {
		.resume      = aurora_resume,
	},
};

static const struct l2x0_of_data tauros3_data = {
	.setup = NULL,
	.save  = tauros3_save,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume      = tauros3_resume,
	},
};

static const struct l2x0_of_data bcm_l2x0_data = {
	.setup = pl310_of_setup,
	.save  = pl310_save,
	.outer_cache = {
		.resume      = pl310_resume,
		.inv_range   = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.sync        = l2x0_cache_sync,
		.flush_all   = l2x0_flush_all,
		.inv_all     = l2x0_inv_all,
		.disable     = l2x0_disable,
	},
};

static const struct of_device_id l2x0_ids[] __initconst = {
	{ .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
	{ .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
	{ .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
	{ .compatible = "bcm,bcm11351-a2-pl310-cache", /* deprecated name */
	  .data = (void *)&bcm_l2x0_data},
	{ .compatible = "brcm,bcm11351-a2-pl310-cache",
	  .data = (void *)&bcm_l2x0_data},
	{ .compatible = "marvell,aurora-outer-cache",
	  .data = (void *)&aurora_with_outer_data},
	{ .compatible = "marvell,aurora-system-cache",
	  .data = (void *)&aurora_no_outer_data},
	{ .compatible = "marvell,tauros3-cache",
	  .data = (void *)&tauros3_data },
	{}
};

int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	struct device_node *np;
	const struct l2x0_of_data *data;
	struct resource res;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		if (data->setup)
			data->setup(np, &aux_val, &aux_mask);

		/*
		 * For an Aurora cache in no-outer mode, select the
		 * correct mode using the coprocessor.
		 */
		if (data == &aurora_no_outer_data)
			aurora_broadcast_l2_commands();
	}

	if (data->save)
		data->save();

	of_init = true;
	memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
	l2x0_init(l2x0_base, aux_val, aux_mask);

	return 0;
}
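
/*
 * A platform would typically call this from its early machine init path;
 * passing aux_val = 0 and aux_mask = ~0UL (illustrative) leaves the
 * hardware/device-tree AUX_CTRL settings untouched:
 *
 *	l2x0_of_init(0, ~0UL);
 */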
#endif